text
stringlengths
0
1.25M
meta
stringlengths
47
1.89k
isSingleton : Bool -> Type isSingleton True = Nat isSingleton False = List Nat mkSingle : (x : Bool) -> isSingleton x mkSingle True = 0 mkSingle False = [] sum : (single : Bool) -> isSingleton single -> Nat sum True x = x sum False [] = 0 sum False (x::xs) = x + sum False xs
{"hexsha": "de1a12455033ba2b47eb40c74685ddea523944f6", "size": 291, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "src/types.idr", "max_stars_repo_name": "0918nobita/idris", "max_stars_repo_head_hexsha": "e4ae7fbb95d7cd580b366e93747a069ccbeae31d", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-23T06:49:52.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-23T06:49:52.000Z", "max_issues_repo_path": "src/types.idr", "max_issues_repo_name": "0918nobita/idris", "max_issues_repo_head_hexsha": "e4ae7fbb95d7cd580b366e93747a069ccbeae31d", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/types.idr", "max_forks_repo_name": "0918nobita/idris", "max_forks_repo_head_hexsha": "e4ae7fbb95d7cd580b366e93747a069ccbeae31d", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3846153846, "max_line_length": 50, "alphanum_fraction": 0.6323024055, "num_tokens": 90}
# Note that this script can accept some limited command-line arguments, run # `julia build_tarballs.jl --help` to see a usage message. using BinaryBuilder name = "IpoptBuilder" version = v"3.12.10" # Collection of sources required to build IpoptBuilder sources = [ "https://github.com/coin-or/Ipopt/archive/releases/3.12.10.tar.gz" => "dfd29dc95ec815e1ff0a3b7dc86ecc8944b24977e40724c35dac25aa192ac3cd", ] # Bash recipe for building across all platforms script = raw""" cd $WORKSPACE/srcdir cd Ipopt-releases-3.12.10/ update_configure_scripts mkdir build cd build/ for path in ${LD_LIBRARY_PATH//:/ }; do for file in $(ls $path/*.la); do echo "$file" baddir=$(sed -n "s|libdir=||p" $file) sed -i~ -e "s|$baddir|'$path'|g" $file done done if [ $target = "x86_64-apple-darwin14" ]; then # seems static linking requires apple's ar export AR=/opt/x86_64-apple-darwin14/bin/x86_64-apple-darwin14-ar # Ignore the "# Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds" # seems to work for the current version and otherwise a long list of non-Clp symbols are exported sed -i~ -e "s|~nmedit -s \$output_objdir/\${libname}-symbols.expsym \${lib}| -exported_symbols_list \$output_objdir/\${libname}-symbols.expsym|g" ../configure fi export CPPFLAGS="-DCOIN_USE_MUMPS_MPI_H" ## STATIC BUILD START # Staticly link all dependencies and export only Ipopt symbols # force only exporting symbols related to Ipopt # SetIntermediateCallback is to fix https://github.com/JuliaOpt/IpoptBuilder/issues/2 sed -i~ -e 's|LT_LDFLAGS="-no-undefined"|LT_LDFLAGS="-no-undefined -export-symbols-regex \\\\"Ipopt\|SetIntermediateCallback\\\\""|g' ../configure sed -i~ -e 's|LT_LDFLAGS="-no-undefined"|LT_LDFLAGS="-no-undefined -export-symbols-regex \\\\"Ipopt\|SetIntermediateCallback\\\\""|g' ../Ipopt/configure ../configure --prefix=$prefix --with-pic --disable-pkg-config --host=${target} --enable-shared --disable-static \ --enable-dependency-linking lt_cv_deplibs_check_method=pass_all \ --with-asl-lib="-L${prefix}/lib -lasl" --with-asl-incdir="$prefix/include/asl" \ --with-blas="-L${prefix}/lib -lcoinblas -lgfortran" \ --with-lapack="-L${prefix}/lib -lcoinlapack" \ --with-metis-lib="-L${prefix}/lib -lcoinmetis" --with-metis-incdir="$prefix/include/coin/ThirdParty" \ --with-mumps-lib="-L${prefix}/lib -lcoinmumps -lcoinmetis" --with-mumps-incdir="$prefix/include/coin/ThirdParty" ## STATIC BUILD END ## DYNAMIC BUILD START #../configure --prefix=$prefix --with-pic --disable-pkg-config --host=${target} --enable-shared --enable-static \ #--enable-dependency-linking lt_cv_deplibs_check_method=pass_all \ #--with-asl-lib="-L${prefix}/lib -lasl" --with-asl-incdir="$prefix/include/asl" \ #--with-blas="-L${prefix}/lib -lcoinblas -lgfortran" \ #--with-lapack="-L${prefix}/lib -lcoinlapack" \ #--with-metis-lib="-L${prefix}/lib -lcoinmetis" --with-metis-incdir="$prefix/include/coin/ThirdParty" \ #--with-mumps-lib="-L${prefix}/lib -lcoinmumps" --with-mumps-incdir="$prefix/include/coin/ThirdParty" ## DYNAMIC BUILD END make -j${nproc} make install """ # These are the platforms we will build for by default, unless further # platforms are passed in on the command line platforms = [ Linux(:i686, libc=:glibc), Linux(:x86_64, libc=:glibc), Linux(:aarch64, libc=:glibc), Linux(:armv7l, libc=:glibc, call_abi=:eabihf), MacOS(:x86_64), Windows(:i686), Windows(:x86_64) ] platforms = expand_gcc_versions(platforms) # To fix gcc4 bug in Windows #platforms = setdiff(platforms, [Windows(:x86_64, compiler_abi=CompilerABI(:gcc4)), Windows(:i686, compiler_abi=CompilerABI(:gcc4))]) push!(platforms, Windows(:i686,compiler_abi=CompilerABI(:gcc6))) push!(platforms, Windows(:x86_64,compiler_abi=CompilerABI(:gcc6))) # The products that we will ensure are always built products(prefix) = [ LibraryProduct(prefix, "libipopt", :libipopt), ExecutableProduct(prefix, "ipopt", :amplexe) ] # Dependencies that must be installed before this package can be built dependencies = [ "https://github.com/juan-pablo-vielma/ASLBuilder/releases/download/v3.1.0-1-static/build_ASLBuilder.v3.1.0.jl", "https://github.com/juan-pablo-vielma/COINBLASBuilder/releases/download/v1.4.6-1-static/build_COINBLASBuilder.v1.4.6.jl", "https://github.com/juan-pablo-vielma/COINLapackBuilder/releases/download/v1.5.6-1-static/build_COINLapackBuilder.v1.5.6.jl", "https://github.com/juan-pablo-vielma/COINMetisBuilder/releases/download/v1.3.5-1-static/build_COINMetisBuilder.v1.3.5.jl", "https://github.com/juan-pablo-vielma/COINMumpsBuilder/releases/download/v1.6.0-1-static/build_COINMumpsBuilder.v1.6.0.jl" ] # Build the tarballs, and possibly a `build.jl` as well. build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies)
{"hexsha": "9f9905aad611ed887579be4ab82335b1f6d06ffe", "size": 4863, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "build_tarballs.jl", "max_stars_repo_name": "juan-pablo-vielma/IpoptBuilder", "max_stars_repo_head_hexsha": "ae1aea31c8a3e0bc1b42c4f5e260de5da845b4af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-08T13:25:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-30T03:22:33.000Z", "max_issues_repo_path": "build_tarballs.jl", "max_issues_repo_name": "juan-pablo-vielma/IpoptBuilder", "max_issues_repo_head_hexsha": "ae1aea31c8a3e0bc1b42c4f5e260de5da845b4af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-10-16T22:39:35.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-15T21:13:55.000Z", "max_forks_repo_path": "build_tarballs.jl", "max_forks_repo_name": "juan-pablo-vielma/IpoptBuilder", "max_forks_repo_head_hexsha": "ae1aea31c8a3e0bc1b42c4f5e260de5da845b4af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:28:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:28:19.000Z", "avg_line_length": 44.6146788991, "max_line_length": 160, "alphanum_fraction": 0.7291795188, "num_tokens": 1516}
from unittest.mock import MagicMock, patch import numpy as np from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub from chitra.serve.tf_serving.client import GrpcClient, create_grpc_stub, grpc_request def test_create_grpc_stub(): assert isinstance(create_grpc_stub(), PredictionServiceStub) def test_request(): client = GrpcClient() assert isinstance(client.stub, PredictionServiceStub) @patch("chitra.serve.tf_serving.client.predict_pb2") def test_grpc_request(mock_predict_pb2): mock_predict_pb2.PredictRequest = MagicMock() stub = MagicMock() stub.Predict = MagicMock() stub.Predict.return_value = True data = np.random.randn(224, 224, 3) result = grpc_request( stub, data, input_name="input", model_name="test_model", signature_name="test", ) assert result stub.Predict.assert_called() mock_predict_pb2.PredictRequest.assert_called() stub.Predict.assert_called_with(mock_predict_pb2.PredictRequest(), 20)
{"hexsha": "7e5b6aaed5f1bdb2b16cb957fafd26de49637fab", "size": 1049, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/serve/test_tf_serving_client.py", "max_stars_repo_name": "aniketmaurya/Chitra", "max_stars_repo_head_hexsha": "e040311c25ccf2e101df5596662450ae532bee08", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 158, "max_stars_repo_stars_event_min_datetime": "2020-01-27T05:35:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T16:15:23.000Z", "max_issues_repo_path": "tests/serve/test_tf_serving_client.py", "max_issues_repo_name": "aniketmaurya/Chitra", "max_issues_repo_head_hexsha": "e040311c25ccf2e101df5596662450ae532bee08", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 112, "max_issues_repo_issues_event_min_datetime": "2020-02-15T15:12:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-22T13:18:14.000Z", "max_forks_repo_path": "tests/serve/test_tf_serving_client.py", "max_forks_repo_name": "aniketmaurya/Chitra", "max_forks_repo_head_hexsha": "e040311c25ccf2e101df5596662450ae532bee08", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2020-09-25T13:49:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T13:05:23.000Z", "avg_line_length": 26.8974358974, "max_line_length": 85, "alphanum_fraction": 0.745471878, "include": true, "reason": "import numpy", "num_tokens": 238}
[STATEMENT] lemma Bernstein_nonneg: "\<lbrakk>0 \<le> x; x \<le> 1\<rbrakk> \<Longrightarrow> 0 \<le> Bernstein n k x" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>0 \<le> x; x \<le> 1\<rbrakk> \<Longrightarrow> 0 \<le> Bernstein n k x [PROOF STEP] by (simp add: Bernstein_def)
{"llama_tokens": 127, "file": null, "length": 1}
// auto-generated header by CodeFromTemplate // CodeFromTemplate Version: 0.3 alpha // // NEVER TOUCH this file! #include <exception> #include <boost/assign/list_of.hpp> #include "Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode.h" // --> Do NOT EDIT <-- namespace ConnectedVision { namespace UnitTest { namespace DataHandling { // --> Do NOT EDIT <-- int Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode::creationCount = 0; // --> Do NOT EDIT <-- Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode::Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode(const int64_t storeCount, const int64_t ringbufferSize, const int64_t poolSize) : ConnectedVision::DataHandling::Store_Manager_Ringbuffer_Pool<Class_UnitTest_GeneratorTestCode> ( storeCount, ringbufferSize, poolSize ) { if ( creationCount > 0 ) { // we have one instance already throw ConnectedVision::runtime_error("just one instance of Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode allowed"); } creationCount = 1; } // --> Do NOT EDIT <-- Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode::~Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode() { creationCount = 0; } } // namespace DataHandling } // namespace UnitTest } // namespace ConnectedVision
{"hexsha": "70f11950312285c3c37c0054ce798236ceea0749", "size": 1273, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/UnitTest/generated/stubs/Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode.cpp", "max_stars_repo_name": "ConnectedVision/ConnectedVision", "max_stars_repo_head_hexsha": "210e49205ca50f73584178b6cedb298a74cea798", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2017-08-12T18:14:00.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-19T09:15:35.000Z", "max_issues_repo_path": "test/UnitTest/generated/stubs/Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode.cpp", "max_issues_repo_name": "ConnectedVision/ConnectedVision", "max_issues_repo_head_hexsha": "210e49205ca50f73584178b6cedb298a74cea798", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/UnitTest/generated/stubs/Store_Manager_Ringbuffer_Stub_UnitTest_GeneratorTestCode.cpp", "max_forks_repo_name": "ConnectedVision/ConnectedVision", "max_forks_repo_head_hexsha": "210e49205ca50f73584178b6cedb298a74cea798", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-11-09T15:57:13.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T15:57:13.000Z", "avg_line_length": 32.641025641, "max_line_length": 196, "alphanum_fraction": 0.8004713276, "num_tokens": 315}
(* Author: Tobias Nipkow *) subsection "Collecting Semantics of Commands" theory Collecting imports Complete_Lattice Big_Step ACom begin subsubsection "The generic Step function" notation sup (infixl "\<squnion>" 65) and inf (infixl "\<sqinter>" 70) and bot ("\<bottom>") and top ("\<top>") context fixes f :: "vname \<Rightarrow> aexp \<Rightarrow> 'a \<Rightarrow> 'a::sup" fixes g :: "bexp \<Rightarrow> 'a \<Rightarrow> 'a" begin fun Step :: "'a \<Rightarrow> 'a acom \<Rightarrow> 'a acom" where "Step S (SKIP {Q}) = (SKIP {S})" | "Step S (x ::= e {Q}) = x ::= e {f x e S}" | "Step S (C1;; C2) = Step S C1;; Step (post C1) C2" | "Step S (IF b THEN {P1} C1 ELSE {P2} C2 {Q}) = IF b THEN {g b S} Step P1 C1 ELSE {g (Not b) S} Step P2 C2 {post C1 \<squnion> post C2}" | "Step S ({I} WHILE b DO {P} C {Q}) = {S \<squnion> post C} WHILE b DO {g b I} Step P C {g (Not b) I}" end lemma strip_Step[simp]: "strip(Step f g S C) = strip C" by(induct C arbitrary: S) auto subsubsection "Annotated commands as a complete lattice" instantiation acom :: (order) order begin definition less_eq_acom :: "('a::order)acom \<Rightarrow> 'a acom \<Rightarrow> bool" where "C1 \<le> C2 \<longleftrightarrow> strip C1 = strip C2 \<and> (\<forall>p<size(annos C1). anno C1 p \<le> anno C2 p)" definition less_acom :: "'a acom \<Rightarrow> 'a acom \<Rightarrow> bool" where "less_acom x y = (x \<le> y \<and> \<not> y \<le> x)" instance proof (standard, goal_cases) case 1 show ?case by(simp add: less_acom_def) next case 2 thus ?case by(auto simp: less_eq_acom_def) next case 3 thus ?case by(fastforce simp: less_eq_acom_def size_annos) next case 4 thus ?case by(fastforce simp: le_antisym less_eq_acom_def size_annos eq_acom_iff_strip_anno) qed end lemma less_eq_acom_annos: "C1 \<le> C2 \<longleftrightarrow> strip C1 = strip C2 \<and> list_all2 (\<le>) (annos C1) (annos C2)" by(auto simp add: less_eq_acom_def anno_def list_all2_conv_all_nth size_annos_same2) lemma SKIP_le[simp]: "SKIP {S} \<le> c \<longleftrightarrow> (\<exists>S'. c = SKIP {S'} \<and> S \<le> S')" by (cases c) (auto simp:less_eq_acom_def anno_def) lemma Assign_le[simp]: "x ::= e {S} \<le> c \<longleftrightarrow> (\<exists>S'. c = x ::= e {S'} \<and> S \<le> S')" by (cases c) (auto simp:less_eq_acom_def anno_def) lemma Seq_le[simp]: "C1;;C2 \<le> C \<longleftrightarrow> (\<exists>C1' C2'. C = C1';;C2' \<and> C1 \<le> C1' \<and> C2 \<le> C2')" apply (cases C) apply(auto simp: less_eq_acom_annos list_all2_append size_annos_same2) done lemma If_le[simp]: "IF b THEN {p1} C1 ELSE {p2} C2 {S} \<le> C \<longleftrightarrow> (\<exists>p1' p2' C1' C2' S'. C = IF b THEN {p1'} C1' ELSE {p2'} C2' {S'} \<and> p1 \<le> p1' \<and> p2 \<le> p2' \<and> C1 \<le> C1' \<and> C2 \<le> C2' \<and> S \<le> S')" apply (cases C) apply(auto simp: less_eq_acom_annos list_all2_append size_annos_same2) done lemma While_le[simp]: "{I} WHILE b DO {p} C {P} \<le> W \<longleftrightarrow> (\<exists>I' p' C' P'. W = {I'} WHILE b DO {p'} C' {P'} \<and> C \<le> C' \<and> p \<le> p' \<and> I \<le> I' \<and> P \<le> P')" apply (cases W) apply(auto simp: less_eq_acom_annos list_all2_append size_annos_same2) done lemma mono_post: "C \<le> C' \<Longrightarrow> post C \<le> post C'" using annos_ne[of C'] by(auto simp: post_def less_eq_acom_def last_conv_nth[OF annos_ne] anno_def dest: size_annos_same) definition Inf_acom :: "com \<Rightarrow> 'a::complete_lattice acom set \<Rightarrow> 'a acom" where "Inf_acom c M = annotate (\<lambda>p. INF C\<in>M. anno C p) c" global_interpretation Complete_Lattice "{C. strip C = c}" "Inf_acom c" for c proof (standard, goal_cases) case 1 thus ?case by(auto simp: Inf_acom_def less_eq_acom_def size_annos intro:INF_lower) next case 2 thus ?case by(auto simp: Inf_acom_def less_eq_acom_def size_annos intro:INF_greatest) next case 3 thus ?case by(auto simp: Inf_acom_def) qed subsubsection "Collecting semantics" definition "step = Step (\<lambda>x e S. {s(x := aval e s) |s. s \<in> S}) (\<lambda>b S. {s:S. bval b s})" definition CS :: "com \<Rightarrow> state set acom" where "CS c = lfp c (step UNIV)" lemma mono2_Step: fixes C1 C2 :: "'a::semilattice_sup acom" assumes "!!x e S1 S2. S1 \<le> S2 \<Longrightarrow> f x e S1 \<le> f x e S2" "!!b S1 S2. S1 \<le> S2 \<Longrightarrow> g b S1 \<le> g b S2" shows "C1 \<le> C2 \<Longrightarrow> S1 \<le> S2 \<Longrightarrow> Step f g S1 C1 \<le> Step f g S2 C2" proof(induction S1 C1 arbitrary: C2 S2 rule: Step.induct) case 1 thus ?case by(auto) next case 2 thus ?case by (auto simp: assms(1)) next case 3 thus ?case by(auto simp: mono_post) next case 4 thus ?case by(auto simp: subset_iff assms(2)) (metis mono_post le_supI1 le_supI2)+ next case 5 thus ?case by(auto simp: subset_iff assms(2)) (metis mono_post le_supI1 le_supI2)+ qed lemma mono2_step: "C1 \<le> C2 \<Longrightarrow> S1 \<subseteq> S2 \<Longrightarrow> step S1 C1 \<le> step S2 C2" unfolding step_def by(rule mono2_Step) auto lemma mono_step: "mono (step S)" by(blast intro: monoI mono2_step) lemma strip_step: "strip(step S C) = strip C" by (induction C arbitrary: S) (auto simp: step_def) lemma lfp_cs_unfold: "lfp c (step S) = step S (lfp c (step S))" apply(rule lfp_unfold[OF _ mono_step]) apply(simp add: strip_step) done lemma CS_unfold: "CS c = step UNIV (CS c)" by (metis CS_def lfp_cs_unfold) lemma strip_CS[simp]: "strip(CS c) = c" by(simp add: CS_def index_lfp[simplified]) subsubsection "Relation to big-step semantics" lemma asize_nz: "asize(c::com) \<noteq> 0" by (metis length_0_conv length_annos_annotate annos_ne) lemma post_Inf_acom: "\<forall>C\<in>M. strip C = c \<Longrightarrow> post (Inf_acom c M) = \<Inter>(post ` M)" apply(subgoal_tac "\<forall>C\<in>M. size(annos C) = asize c") apply(simp add: post_anno_asize Inf_acom_def asize_nz neq0_conv[symmetric]) apply(simp add: size_annos) done lemma post_lfp: "post(lfp c f) = (\<Inter>{post C|C. strip C = c \<and> f C \<le> C})" by(auto simp add: lfp_def post_Inf_acom) lemma big_step_post_step: "\<lbrakk> (c, s) \<Rightarrow> t; strip C = c; s \<in> S; step S C \<le> C \<rbrakk> \<Longrightarrow> t \<in> post C" proof(induction arbitrary: C S rule: big_step_induct) case Skip thus ?case by(auto simp: strip_eq_SKIP step_def post_def) next case Assign thus ?case by(fastforce simp: strip_eq_Assign step_def post_def) next case Seq thus ?case by(fastforce simp: strip_eq_Seq step_def post_def last_append annos_ne) next case IfTrue thus ?case apply(auto simp: strip_eq_If step_def post_def) by (metis (lifting,full_types) mem_Collect_eq subsetD) next case IfFalse thus ?case apply(auto simp: strip_eq_If step_def post_def) by (metis (lifting,full_types) mem_Collect_eq subsetD) next case (WhileTrue b s1 c' s2 s3) from WhileTrue.prems(1) obtain I P C' Q where "C = {I} WHILE b DO {P} C' {Q}" "strip C' = c'" by(auto simp: strip_eq_While) from WhileTrue.prems(3) \<open>C = _\<close> have "step P C' \<le> C'" "{s \<in> I. bval b s} \<le> P" "S \<le> I" "step (post C') C \<le> C" by (auto simp: step_def post_def) have "step {s \<in> I. bval b s} C' \<le> C'" by (rule order_trans[OF mono2_step[OF order_refl \<open>{s \<in> I. bval b s} \<le> P\<close>] \<open>step P C' \<le> C'\<close>]) have "s1 \<in> {s\<in>I. bval b s}" using \<open>s1 \<in> S\<close> \<open>S \<subseteq> I\<close> \<open>bval b s1\<close> by auto note s2_in_post_C' = WhileTrue.IH(1)[OF \<open>strip C' = c'\<close> this \<open>step {s \<in> I. bval b s} C' \<le> C'\<close>] from WhileTrue.IH(2)[OF WhileTrue.prems(1) s2_in_post_C' \<open>step (post C') C \<le> C\<close>] show ?case . next case (WhileFalse b s1 c') thus ?case by (force simp: strip_eq_While step_def post_def) qed lemma big_step_lfp: "\<lbrakk> (c,s) \<Rightarrow> t; s \<in> S \<rbrakk> \<Longrightarrow> t \<in> post(lfp c (step S))" by(auto simp add: post_lfp intro: big_step_post_step) lemma big_step_CS: "(c,s) \<Rightarrow> t \<Longrightarrow> t \<in> post(CS c)" by(simp add: CS_def big_step_lfp) end
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/IMP/Collecting.thy"}
# FileManager FileManager() = FileManager(create_file_manager()) """ create_file_manager() -> CXFileManager Return a pointer to a `clang::FileManager` object. For now, `FileSystemOptions` is set to nothing and `llvm::vfs::FileSystem` defaults to the "real" file system, as seen by the operating system. TODO: support custom `FileSystemOptions` and `llvm::vfs::FileSystem` """ function create_file_manager() status = Ref{CXInit_Error}(CXInit_NoError) mgr = clang_FileManager_create(status) @assert status[] == CXInit_NoError return mgr end dispose(x::FileManager) = clang_FileManager_dispose(x) function PrintStats(mgr::FileManager) @check_ptrs mgr return clang_FileManager_PrintStats(mgr) end """ getFileEntry(filemgr::FileManager, filename::AbstractString; open_file::Bool=false, cache_failure::Bool=true) -> FileEntry Get a file entry from the file manager. If `open_file` is true, the file will be opened. If `cache_failure` is true, the failure that this file does not exist will be cached. """ function getFileEntry(filemgr::FileManager, filename::AbstractString; open_file::Bool=false, cache_failure::Bool=true) @check_ptrs filemgr GC.@preserve filename begin ref = clang_FileManager_getFileRef(filemgr, filename, open_file, cache_failure) @assert ref != C_NULL "failed to create a FileRef to $filename." entry = clang_FileEntryRef_getFileEntry(ref) clang_FileEntryRef_dispose(ref) end return FileEntry(entry) end
{"hexsha": "21ae372c58a10940be94ecfe209c46a405d32af3", "size": 1529, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/clang/api/Basic/FileManager.jl", "max_stars_repo_name": "vchuravy/ClangCompiler.jl", "max_stars_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-08-24T04:01:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T00:43:19.000Z", "max_issues_repo_path": "src/clang/api/Basic/FileManager.jl", "max_issues_repo_name": "vchuravy/ClangCompiler.jl", "max_issues_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-07-17T12:50:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-22T20:27:02.000Z", "max_forks_repo_path": "src/clang/api/Basic/FileManager.jl", "max_forks_repo_name": "vchuravy/ClangCompiler.jl", "max_forks_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-03T20:49:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T20:49:33.000Z", "avg_line_length": 33.9777777778, "max_line_length": 126, "alphanum_fraction": 0.740353172, "num_tokens": 354}
@testset "sample.jl" begin @testset "Basic sampling" begin @testset "REPL" begin empty!(LOGGERS) Random.seed!(1234) N = 1_000 chain = sample(MyModel(), MySampler(), N; sleepy = true, loggers = true) @test length(LOGGERS) == 1 logger = first(LOGGERS) @test logger isa TeeLogger @test logger.loggers[1].logger isa (Sys.iswindows() && VERSION < v"1.5.3" ? ProgressLogger : TerminalLogger) @test logger.loggers[2].logger === CURRENT_LOGGER @test Logging.current_logger() === CURRENT_LOGGER # test output type and size @test chain isa Vector{<:MySample} @test length(chain) == N # test some statistical properties tail_chain = @view chain[2:end] @test mean(x.a for x in tail_chain) ≈ 0.5 atol=6e-2 @test var(x.a for x in tail_chain) ≈ 1 / 12 atol=5e-3 @test mean(x.b for x in tail_chain) ≈ 0.0 atol=5e-2 @test var(x.b for x in tail_chain) ≈ 1 atol=6e-2 end @testset "Juno" begin empty!(LOGGERS) Random.seed!(1234) N = 10 logger = JunoProgressLogger() Logging.with_logger(logger) do sample(MyModel(), MySampler(), N; sleepy = true, loggers = true) end @test length(LOGGERS) == 1 @test first(LOGGERS) === logger @test Logging.current_logger() === CURRENT_LOGGER end @testset "IJulia" begin # emulate running IJulia kernel @eval IJulia begin inited = true end empty!(LOGGERS) Random.seed!(1234) N = 10 sample(MyModel(), MySampler(), N; sleepy = true, loggers = true) @test length(LOGGERS) == 1 logger = first(LOGGERS) @test logger isa TeeLogger @test logger.loggers[1].logger isa ProgressLogger @test logger.loggers[2].logger === CURRENT_LOGGER @test Logging.current_logger() === CURRENT_LOGGER @eval IJulia begin inited = false end end @testset "Custom logger" begin empty!(LOGGERS) Random.seed!(1234) N = 10 logger = Logging.ConsoleLogger(stderr, Logging.LogLevel(-1)) Logging.with_logger(logger) do sample(MyModel(), MySampler(), N; sleepy = true, loggers = true) end @test length(LOGGERS) == 1 @test first(LOGGERS) === logger @test Logging.current_logger() === CURRENT_LOGGER end @testset "Suppress output" begin logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample(MyModel(), MySampler(), 100; progress = false, sleepy = true) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # disable progress logging globally @test !(@test_logs (:info, "progress logging is disabled globally") AbstractMCMC.setprogress!(false)) @test !AbstractMCMC.PROGRESS[] logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample(MyModel(), MySampler(), 100; sleepy = true) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # enable progress logging globally @test (@test_logs (:info, "progress logging is enabled globally") AbstractMCMC.setprogress!(true)) @test AbstractMCMC.PROGRESS[] end end if VERSION ≥ v"1.3" @testset "Multithreaded sampling" begin if Threads.nthreads() == 1 warnregex = r"^Only a single thread available" @test_logs (:warn, warnregex) sample(MyModel(), MySampler(), MCMCThreads(), 10, 10) end # No dedicated chains type N = 10_000 chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000) @test chains isa Vector{<:Vector{<:MySample}} @test length(chains) == 1000 @test all(length(x) == N for x in chains) Random.seed!(1234) chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000; chain_type = MyChain) # test output type and size @test chains isa Vector{<:MyChain} @test length(chains) == 1000 @test all(x -> length(x.as) == length(x.bs) == N, chains) # test some statistical properties @test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains) @test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=5e-2), chains) # test reproducibility Random.seed!(1234) chains2 = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000; chain_type = MyChain) @test all(c1.as[i] === c2.as[i] for (c1, c2) in zip(chains, chains2), i in 1:N) @test all(c1.bs[i] === c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N) # Unexpected order of arguments. str = "Number of chains (10) is greater than number of samples per chain (5)" @test_logs (:warn, str) match_mode=:any sample(MyModel(), MySampler(), MCMCThreads(), 5, 10; chain_type = MyChain) # Suppress output. logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample(MyModel(), MySampler(), MCMCThreads(), 10_000, 1000; progress = false, chain_type = MyChain) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # Smoke test for nchains < nthreads if Threads.nthreads() == 2 sample(MyModel(), MySampler(), MCMCThreads(), N, 1) end end end @testset "Multicore sampling" begin if nworkers() == 1 warnregex = r"^Only a single process available" @test_logs (:warn, warnregex) sample(MyModel(), MySampler(), MCMCDistributed(), 10, 10; chain_type = MyChain) end # Add worker processes. addprocs() # Load all required packages (`interface.jl` needs Random). @everywhere begin using AbstractMCMC using AbstractMCMC: sample using Random include("utils.jl") end # No dedicated chains type N = 10_000 chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000) @test chains isa Vector{<:Vector{<:MySample}} @test length(chains) == 1000 @test all(length(x) == N for x in chains) Random.seed!(1234) chains = sample(MyModel(), MySampler(), MCMCDistributed(), N, 1000; chain_type = MyChain) # Test output type and size. @test chains isa Vector{<:MyChain} @test all(c.as[1] === missing for c in chains) @test length(chains) == 1000 @test all(x -> length(x.as) == length(x.bs) == N, chains) # Test some statistical properties. @test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains) @test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=5e-2), chains) # Test reproducibility. Random.seed!(1234) chains2 = sample(MyModel(), MySampler(), MCMCDistributed(), N, 1000; chain_type = MyChain) @test all(c1.as[i] === c2.as[i] for (c1, c2) in zip(chains, chains2), i in 1:N) @test all(c1.bs[i] === c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N) # Unexpected order of arguments. str = "Number of chains (10) is greater than number of samples per chain (5)" @test_logs (:warn, str) match_mode=:any sample(MyModel(), MySampler(), MCMCDistributed(), 5, 10; chain_type = MyChain) # Suppress output. logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample(MyModel(), MySampler(), MCMCDistributed(), 10_000, 100; progress = false, chain_type = MyChain) end @test all(l.level > Logging.LogLevel(-1) for l in logs) end @testset "Serial sampling" begin # No dedicated chains type N = 10_000 chains = sample(MyModel(), MySampler(), MCMCSerial(), N, 1000) @test chains isa Vector{<:Vector{<:MySample}} @test length(chains) == 1000 @test all(length(x) == N for x in chains) Random.seed!(1234) chains = sample(MyModel(), MySampler(), MCMCSerial(), N, 1000; chain_type = MyChain) # Test output type and size. @test chains isa Vector{<:MyChain} @test all(c.as[1] === missing for c in chains) @test length(chains) == 1000 @test all(x -> length(x.as) == length(x.bs) == N, chains) # Test some statistical properties. @test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains) @test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=5e-2), chains) # Test reproducibility. Random.seed!(1234) chains2 = sample(MyModel(), MySampler(), MCMCSerial(), N, 1000; chain_type = MyChain) @test all(c1.as[i] === c2.as[i] for (c1, c2) in zip(chains, chains2), i in 1:N) @test all(c1.bs[i] === c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N) # Unexpected order of arguments. str = "Number of chains (10) is greater than number of samples per chain (5)" @test_logs (:warn, str) match_mode=:any sample(MyModel(), MySampler(), MCMCSerial(), 5, 10; chain_type = MyChain) # Suppress output. logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample(MyModel(), MySampler(), MCMCSerial(), 10_000, 100; progress = false, chain_type = MyChain) end @test all(l.level > Logging.LogLevel(-1) for l in logs) end @testset "Chain constructors" begin chain1 = sample(MyModel(), MySampler(), 100; sleepy = true) chain2 = sample(MyModel(), MySampler(), 100; sleepy = true, chain_type = MyChain) @test chain1 isa Vector{<:MySample} @test chain2 isa MyChain end @testset "Sample stats" begin chain = sample(MyModel(), MySampler(), 1000; chain_type = MyChain) @test chain.stats.stop > chain.stats.start @test chain.stats.duration == chain.stats.stop - chain.stats.start end @testset "Discard initial samples" begin chain = sample(MyModel(), MySampler(), 100; sleepy = true, discard_initial = 50) @test length(chain) == 100 @test !ismissing(chain[1].a) end @testset "Thin chain by a factor of `thinning`" begin # Run a thinned chain with `N` samples thinned by factor of `thinning`. Random.seed!(1234) N = 100 thinning = 3 chain = sample(MyModel(), MySampler(), N; sleepy = true, thinning = thinning) @test length(chain) == N @test ismissing(chain[1].a) # Repeat sampling without thinning. Random.seed!(1234) ref_chain = sample(MyModel(), MySampler(), N * thinning; sleepy = true) @test all(chain[i].a === ref_chain[(i - 1) * thinning + 1].a for i in 1:N) end @testset "Sample without predetermined N" begin Random.seed!(1234) chain = sample(MyModel(), MySampler()) bmean = mean(x.b for x in chain) @test ismissing(chain[1].a) @test abs(bmean) <= 0.001 && length(chain) < 10_000 # Discard initial samples. chain = sample(MyModel(), MySampler(); discard_initial = 50) bmean = mean(x.b for x in chain) @test !ismissing(chain[1].a) @test abs(bmean) <= 0.001 && length(chain) < 10_000 # Thin chain by a factor of `thinning`. chain = sample(MyModel(), MySampler(); thinning = 3) bmean = mean(x.b for x in chain) @test ismissing(chain[1].a) @test abs(bmean) <= 0.001 && length(chain) < 10_000 end @testset "Sample vector of `NamedTuple`s" begin chain = sample(MyModel(), MySampler(), 1_000; chain_type = Vector{NamedTuple}) # Check output type @test chain isa Vector{<:NamedTuple} @test length(chain) == 1_000 @test all(keys(x) == (:a, :b) for x in chain) # Check some statistical properties @test ismissing(chain[1].a) @test mean(x.a for x in view(chain, 2:1_000)) ≈ 0.5 atol=6e-2 @test var(x.a for x in view(chain, 2:1_000)) ≈ 1 / 12 atol=1e-2 @test mean(x.b for x in chain) ≈ 0 atol=0.1 @test var(x.b for x in chain) ≈ 1 atol=0.15 end @testset "Testing callbacks" begin function count_iterations(rng, model, sampler, sample, state, i; iter_array, kwargs...) push!(iter_array, i) end N = 100 it_array = Float64[] sample(MyModel(), MySampler(), N; callback=count_iterations, iter_array=it_array) @test it_array == collect(1:N) # sampling without predetermined N it_array = Float64[] chain = sample(MyModel(), MySampler(); callback=count_iterations, iter_array=it_array) @test it_array == collect(1:size(chain, 1)) end end
{"hexsha": "6e876d48c98e3d1af1982b5fc596bbc62fa5c30e", "size": 14479, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/sample.jl", "max_stars_repo_name": "kaandocal/AbstractMCMC.jl", "max_stars_repo_head_hexsha": "349799146c2cfe85d24a1efbfbc6b162f8850ca4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2019-11-07T14:35:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T08:27:02.000Z", "max_issues_repo_path": "test/sample.jl", "max_issues_repo_name": "kaandocal/AbstractMCMC.jl", "max_issues_repo_head_hexsha": "349799146c2cfe85d24a1efbfbc6b162f8850ca4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 89, "max_issues_repo_issues_event_min_datetime": "2019-11-24T21:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T08:44:37.000Z", "max_forks_repo_path": "test/sample.jl", "max_forks_repo_name": "kaandocal/AbstractMCMC.jl", "max_forks_repo_head_hexsha": "349799146c2cfe85d24a1efbfbc6b162f8850ca4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-01-24T17:10:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T08:18:31.000Z", "avg_line_length": 40.108033241, "max_line_length": 120, "alphanum_fraction": 0.5430623662, "num_tokens": 3820}
import damselfly as df import numpy as np import pickle as pkl import os import matplotlib.pyplot as plt temp = 10.0 result_date = '210607' result_train_dset = '210607_df1_multiclass_ch3' result_test_dset = '210607_df2_multiclass_test_ch3' result_model = 'df_conv6_fc2_multiclass_3ch' result_domain = 'freq' result_epoch = 54 plot_date = '210607' plot_font_size = 18 #saved_networks = '/home/az396/project/deepfiltering/training/checkpoints' #datasets = '/home/az396/project/deepfiltering/data/datasets' #dataset = f'{checkpoint_domain}/{checkpoint_dset}_temp{temp}.pkl' #checkpoint = f'date{checkpoint_date}_dset_name{checkpoint_dset}_temp{temp}_model{checkpoint_model}_domain_{checkpoint_domain}/epoch{epoch}.pth' confusion_matrix_result_name = f'{result_date}_confusion_matrix_train_dset_{result_train_dset}_test_dset_{result_test_dset}_model_{result_model}_domain_{result_domain}_epoch{result_epoch}.pkl' results = '/home/az396/project/damselfly/analysis/results' plots = '/home/az396/project/damselfly/analysis/plotting/plots' # load the matrices, with open(os.path.join(results, confusion_matrix_result_name), 'rb') as infile: matrices = pkl.load(infile) for s in ['train', 'test']: plot = df.plot.ConfusionMatrix(matrices[s], font_size=plot_font_size) plot_name = f'{plot_date}_{s}_confusion_matrix_train_dset_{result_train_dset}_test_dset_{result_test_dset}_model_{result_model}_domain_{result_domain}_epoch_{result_epoch}.png' plot[1].set_title(f'Confusion Matrix, Set = {s}', size=24) plt.savefig(os.path.join(plots, plot_name)) #tpr_list = [] #fpr_list = [] #eff_mats_train = [] #eff_mats_test = [] #for temp in compare_temps: # model_path = date + '_temp' + str(temp) + model + epoch_str # train_path = os.path.join(top, model_path, 'train_mat.pkl') # test_path = os.path.join(top, model_path, 'test_mat.pkl') # tpr_path = os.path.join(top, model_path, 'tpr.pkl') # fpr_path = os.path.join(top, model_path, 'fpr.pkl') # with open(train_path, 'rb') as infile: # train_matrices.append(pkl.load(infile)) # with open(test_path, 'rb') as infile: # test_matrices.append(pkl.load(infile)) # with open(tpr_path, 'rb') as infile: # tpr_list.append(pkl.load(infile)) # with open(fpr_path, 'rb') as infile: # fpr_list.append(pkl.load(infile)) #### # do the same for the efficiency temps #for temp in efficiency_temps: # train_path = os.path.join(top, date + '_temp' + str(temp) + model + epoch_str, 'train_mat.pkl') # test_path = os.path.join(top, date + '_temp' + str(temp) + model + epoch_str, 'test_mat.pkl') # with open(train_path, 'rb') as infile: # eff_mats_train.append(pkl.load(infile)) # with open(test_path, 'rb') as infile: # eff_mats_test.append(pkl.load(infile)) #### ## plot selected classification matrices #train_name = date + '_compare_train' + model + '.png' #test_name = date + '_compare_test' + model + '.png' #df.plot.ClassificationMatrix(train_matrices, compare_temps, save_path, train_name) #df.plot.ClassificationMatrix(test_matrices, compare_temps, save_path, test_name) #### ## plot true positive rate, false alarms ## #det_eff_name = date + '_det_eff' + model + '.png' #fa_name = date + '_false_alarms' + model + '.png' #df.plot.ClassMatrixObservable(eff_mats_train, eff_mats_test, efficiency_temps, [0, 0], save_path, det_eff_name) #df.plot.ClassMatrixObservable(eff_mats_train, eff_mats_test, efficiency_temps, [1, 0], save_path, fa_name) #### ## ROC curve #roc_name = date + '_roc' + model + '.png' #df.plot.ROC(tpr_list, fpr_list, compare_temps, save_path, roc_name) #info_list = [] #name = date + '_compare_10epoch' + model + '.png' #for temp in compare_temps: # print(temp) # info_path = os.path.join(top, date + '_temp' + str(temp) + model, 'info.pkl') # with open(info_path, 'rb') as infile: # info_list.append(pkl.load(infile)) #save_path = '/home/az396/project/deepfiltering/analysis/plot/training_loss/compare_loss' #df.plot.CompareTrainLoss(info_list, compare_temps, save_path, name)
{"hexsha": "9cb136c60f59a551fd2956a0e41b20f978350ef2", "size": 3994, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/plotting/scripts/2021415_plot_classification.py", "max_stars_repo_name": "zieglerad/damselfly", "max_stars_repo_head_hexsha": "d0ee9b13aa5981a91f62765cba6a263b584e7f25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/plotting/scripts/2021415_plot_classification.py", "max_issues_repo_name": "zieglerad/damselfly", "max_issues_repo_head_hexsha": "d0ee9b13aa5981a91f62765cba6a263b584e7f25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/plotting/scripts/2021415_plot_classification.py", "max_forks_repo_name": "zieglerad/damselfly", "max_forks_repo_head_hexsha": "d0ee9b13aa5981a91f62765cba6a263b584e7f25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2833333333, "max_line_length": 192, "alphanum_fraction": 0.7443665498, "include": true, "reason": "import numpy", "num_tokens": 1139}
import numpy as np import tensorflow as tf import random from dataloader import Gen_Data_loader, Dis_dataloader from generator import Generator from discriminator import Discriminator from rollout import ROLLOUT import pickle import time ######################################################################################### # Generator Hyper-parameters ###################################################################################### EMB_DIM = 30 # embedding dimension (pretrained: 200, pk: 30) HIDDEN_DIM = 300 # hidden state dimension of lstm cell SEQ_LENGTH = 30 # sequence length START_TOKEN = 0 PRE_EPOCH_NUM = 120 # supervise (maximum likelihood estimation) epochs SEED = 88 BATCH_SIZE = 64 ######################################################################################### # Discriminator Hyper-parameters ######################################################################################### dis_embedding_dim = EMB_DIM dis_filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 30] dis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160] dis_dropout_keep_prob = 0.75 dis_l2_reg_lambda = 0.2 dis_batch_size = 64 ######################################################################################### # Basic Training Parameters ######################################################################################### TOTAL_BATCH = 200 generated_num = 100 sample_num = 10 # original seqgan parameter # HIDDEN_DIM = 32 # PRE_EPOCH_NUM = 120 # TOTAL_BATCH = 200 # generated_num = 10000 positive_file = './data/3_pk_data_index.txt' negative_file = 'save/negative_sample.txt' eval_file = 'save/eval_file.txt' # "pretrain" or "poke" embed_flag = "poke" a = open('./data/3_pk_data_index.pkl', 'rb') real_data = pickle.load(a) a = open('./data/pk_pos2idx.pkl', 'rb') vocab_to_int = pickle.load(a) a = open('./data/pk_idx2pos.pkl', 'rb') int_to_vocab = pickle.load(a) print(int_to_vocab) if embed_flag == "pretrain": a = open('./data/pretrain_embedding_vec.pkl', 'rb') elif embed_flag == "poke": a = open('./data/pk_embedding_vec.pkl', 'rb') word_embedding_matrix = pickle.load(a) word_embedding_matrix = word_embedding_matrix.astype(np.float32) # a = open('./data/word_dict.pickle', 'rb') # word_dict = pickle.load(a) real_data_vocab = [[int_to_vocab[i] for i in sample if int_to_vocab[i] != '<PAD>'] for sample in real_data] real_data_vocab = [' '.join(sample) for sample in real_data_vocab] print(len(real_data_vocab)) def generate_samples(sess, trainable_model, batch_size, generated_num, output_file, word_embedding_matrix): # Generate Samples generated_samples = [] for _ in range(int(generated_num / batch_size)): generated_samples.extend(trainable_model.generate(sess, word_embedding_matrix)) with open(output_file, 'w') as fout: for poem in generated_samples: buffer = ' '.join([str(x) for x in poem]) + '\n' fout.write(buffer) def pre_train_epoch(sess, trainable_model, data_loader, word_embedding_matrix): # Pre-train the generator using MLE for one epoch supervised_g_losses = [] data_loader.reset_pointer() for it in range(data_loader.num_batch): batch = data_loader.next_batch() _, g_loss = trainable_model.pretrain_step(sess, batch, word_embedding_matrix) supervised_g_losses.append(g_loss) return np.mean(supervised_g_losses) def make_sample(eval_file, int_to_vocab, sample_num): samples = [] with open(eval_file, 'r') as f: for line in f: line = line.strip() line = line.split() parse_line = [int(x) for x in line] samples.append(parse_line) sample_int = samples[:sample_num] sample_vocab = [[int_to_vocab[i] for i in sample] for sample in sample_int] sample_vocab = [' '.join(sample) for sample in sample_vocab] return sample_vocab ################################## main() ######################################### # 시간측정 start_time = time.time() tf.reset_default_graph() random.seed(SEED) np.random.seed(SEED) gen_data_loader = Gen_Data_loader(BATCH_SIZE, SEQ_LENGTH) vocab_size = len(vocab_to_int) # 6447 print(vocab_size) dis_data_loader = Dis_dataloader(BATCH_SIZE, SEQ_LENGTH) generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN) discriminator = Discriminator(sequence_length=SEQ_LENGTH, num_classes=2, word_embedding_matrix=word_embedding_matrix, embedding_size=dis_embedding_dim, filter_sizes=dis_filter_sizes, num_filters=dis_num_filters, l2_reg_lambda=dis_l2_reg_lambda) config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) saver = tf.train.Saver() sess.run(tf.global_variables_initializer()) # First, use the oracle model to provide the positive examples, which are sampled from the oracle data distribution # pre-train generator gen_data_loader.create_batches(positive_file) gen_sample = open('save/pretrain_sample.txt', 'w') print('Start pre-training...') gen_sample.write('pre-training...\n') for epoch in range(PRE_EPOCH_NUM): loss = pre_train_epoch(sess, generator, gen_data_loader, word_embedding_matrix) if epoch % 5 == 0: generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file, word_embedding_matrix) sample_vocab = make_sample(eval_file, int_to_vocab, sample_num) print('pre-train epoch ', epoch) buffer = 'epoch:\t' + str(epoch) + '\n' gen_sample.write(buffer) for sample in sample_vocab: print(sample) buffer = sample + '\n' gen_sample.write(buffer) # pre-train discriminator print('Start pre-training discriminator...') # Train 3 epoch on the generated data and do this for 50 times for _ in range(25): generate_samples(sess, generator, BATCH_SIZE, generated_num, negative_file, word_embedding_matrix) dis_data_loader.load_train_data(positive_file, negative_file) for _ in range(3): dis_data_loader.reset_pointer() for it in range(dis_data_loader.num_batch): x_batch, y_batch = dis_data_loader.next_batch() feed = { discriminator.input_x: x_batch, discriminator.input_y: y_batch, discriminator.dropout_keep_prob: dis_dropout_keep_prob } _ = sess.run(discriminator.train_op, feed) rollout = ROLLOUT(generator, 0.8, word_embedding_matrix) print('#########################################################################') print('Start Adversarial Training...') gen_sample.write('adversarial training...\n') for total_batch in range(TOTAL_BATCH): # Train the generator for one step for it in range(1): samples = generator.generate(sess, word_embedding_matrix) rewards = rollout.get_reward(sess, samples, 16, discriminator) feed = {generator.x: samples, generator.rewards: rewards, generator.word_embedding_matrix: word_embedding_matrix} _ = sess.run(generator.g_updates, feed_dict=feed) # Test if total_batch % 5 == 0 or total_batch == TOTAL_BATCH - 1: generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file, word_embedding_matrix) sample_vocab = make_sample(eval_file, int_to_vocab, sample_num) print('total_batch: ', total_batch) buffer = 'epoch:\t' + str(total_batch) + '\n' gen_sample.write(buffer) for sample in sample_vocab: print(sample) buffer = sample + '\n' gen_sample.write(buffer) # Update roll-out parameters rollout.update_params() # Train the discriminator for _ in range(5): generate_samples(sess, generator, BATCH_SIZE, generated_num, negative_file, word_embedding_matrix) dis_data_loader.load_train_data(positive_file, negative_file) for _ in range(3): dis_data_loader.reset_pointer() for it in range(dis_data_loader.num_batch): x_batch, y_batch = dis_data_loader.next_batch() feed = { discriminator.input_x: x_batch, discriminator.input_y: y_batch, discriminator.dropout_keep_prob: dis_dropout_keep_prob } _ = sess.run(discriminator.train_op, feed) if total_batch % 5 == 0 or total_batch == TOTAL_BATCH - 1: saver.save(sess, './checkpoint/seqGAN_ours') gen_sample.close() # 걸린 시간 출력 time_check = "--- total {} seconds ---".\ format(time.time() - start_time) print(time_check) generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file, word_embedding_matrix) samples = make_sample(eval_file, int_to_vocab, generated_num) samples = [[word for word in sample.split() if word != 'UNK'] for sample in samples] samples = [' '.join(sample) for sample in samples] f = open('./save/final_output_vocab.txt', 'w') for token in samples: token = token + '\n' f.write(token) f.close() # write the training time f = open('./save/_parameters.txt', 'w') f.write("Training time : {}\n".format(time_check)) f.write("add <start> signal as zero in word2vec lookup table\n") f.close()
{"hexsha": "4355ceee07077432a2201e0010fe0e1685d55583", "size": 9255, "ext": "py", "lang": "Python", "max_stars_repo_path": "2_seqgan/sequence_gan.py", "max_stars_repo_name": "557mp/pk_story", "max_stars_repo_head_hexsha": "90399709a1b75c40867316beec12e32168bf9c34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-07-22T11:46:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-07T14:34:22.000Z", "max_issues_repo_path": "2_seqgan/sequence_gan.py", "max_issues_repo_name": "MrSyee/pokemon_story_generator", "max_issues_repo_head_hexsha": "adcee7cfcc3b5d95601565066a6e8e7587974059", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-08-02T14:28:02.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-23T04:34:14.000Z", "max_forks_repo_path": "2_seqgan/sequence_gan.py", "max_forks_repo_name": "MrSyee/pokemon_story_generator", "max_forks_repo_head_hexsha": "adcee7cfcc3b5d95601565066a6e8e7587974059", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-07-22T11:44:46.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-09T18:15:48.000Z", "avg_line_length": 36.581027668, "max_line_length": 121, "alphanum_fraction": 0.6477579687, "include": true, "reason": "import numpy", "num_tokens": 2091}
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation, writers from mpl_toolkits.mplot3d import Axes3D import numpy as np import torch def render_animation(data, skeleton, fps, output='interactive', bitrate=1000): """ Render or show an animation. The supported output modes are: -- 'interactive': display an interactive figure (also works on notebooks if associated with %matplotlib inline) -- 'html': render the animation as HTML5 video. Can be displayed in a notebook using HTML(...). -- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg). -- 'filename.gif': render and export the animation a gif file (requires imagemagick). """ x = 0 y = 1 z = 2 radius = torch.max(skeleton.offsets()).item() * 5 # Heuristic that works well with many skeletons skeleton_parents = skeleton.parents() plt.ioff() fig = plt.figure(figsize=(4, 4)) ax = fig.add_subplot(1, 1, 1, projection='3d') ax.view_init(elev=20., azim=30) ax.set_xlim3d([-radius/2, radius/2]) ax.set_zlim3d([0, radius]) ax.set_ylim3d([-radius/2, radius/2]) #ax.set_aspect('equal') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_zticklabels([]) ax.dist = 9.5 # 7.5 lines = [] initialized = False trajectory = data[:, 0, [0, 1]] avg_segment_length = np.mean(np.linalg.norm(np.diff(trajectory, axis=0), axis=1)) + 1e-3 draw_offset = int(25/avg_segment_length) spline_line, = ax.plot(*trajectory.T) camera_pos = trajectory height_offset = np.min(data[:, :, 2]) # Min height data = data.copy() data[:, :, 2] -= height_offset def update(frame): nonlocal initialized ax.set_xlim3d([-radius/2 + camera_pos[frame, 0], radius/2 + camera_pos[frame, 0]]) ax.set_ylim3d([-radius/2 + camera_pos[frame, 1], radius/2 + camera_pos[frame, 1]]) positions_world = data[frame] for i in range(positions_world.shape[0]): if skeleton_parents[i] == -1: continue if not initialized: col = 'red' if i in skeleton.joints_right() else 'black' # As in audio cables :) lines.append(ax.plot([positions_world[i, x], positions_world[skeleton_parents[i], x]], [positions_world[i, y], positions_world[skeleton_parents[i], y]], [positions_world[i, z], positions_world[skeleton_parents[i], z]], c=col)) else: lines[i-1][0].set_xdata([positions_world[i, x], positions_world[skeleton_parents[i], x]]) lines[i-1][0].set_ydata([positions_world[i, y], positions_world[skeleton_parents[i], y]]) lines[i-1][0].set_3d_properties([positions_world[i, z], positions_world[skeleton_parents[i], z]] ) l = max(frame-draw_offset, 0) r = min(frame+draw_offset, trajectory.shape[0]) spline_line.set_xdata(trajectory[l:r, 0]) spline_line.set_ydata(trajectory[l:r, 1]) spline_line.set_3d_properties(np.zeros_like(trajectory[l:r, 0])) initialized = True if output == 'interactive' and frame == data.shape[0] - 1: plt.close('all') fig.tight_layout() anim = FuncAnimation(fig, update, frames=np.arange(0, data.shape[0]), interval=1000/fps, repeat=False) if output == 'interactive': plt.show() return anim elif output == 'html': return anim.to_html5_video() elif output.endswith('.mp4'): Writer = writers['ffmpeg'] writer = Writer(fps=fps, metadata={}, bitrate=bitrate) anim.save(output, writer=writer) elif output.endswith('.gif'): anim.save(output, dpi=80, writer='imagemagick') else: raise ValueError('Unsupported output format (only html, .mp4, and .gif are supported)') plt.close()
{"hexsha": "bfba40fba95fb2760b7f1e7858c2807d50348fff", "size": 4122, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/visualization.py", "max_stars_repo_name": "kamisoel/kinematic_pose_estimation", "max_stars_repo_head_hexsha": "fd0fa7ce87b8b690e86572b2689604763c283d73", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "common/visualization.py", "max_issues_repo_name": "kamisoel/kinematic_pose_estimation", "max_issues_repo_head_hexsha": "fd0fa7ce87b8b690e86572b2689604763c283d73", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-23T17:32:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T17:32:33.000Z", "max_forks_repo_path": "common/visualization.py", "max_forks_repo_name": "kamisoel/kinematic_pose_estimation", "max_forks_repo_head_hexsha": "fd0fa7ce87b8b690e86572b2689604763c283d73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6363636364, "max_line_length": 112, "alphanum_fraction": 0.6312469675, "include": true, "reason": "import numpy", "num_tokens": 1039}
from aocd import get_data, submit import numpy as np DAY = 7 YEAR = 2021 def part1(data: str) -> str: nums = [int(n) for n in data.split(',')] median = int(np.median(nums)) ans = 0 for i in nums: ans += abs(i - median) return str(ans) def part2(data: str) -> str: nums = [int(n) for n in data.split(',')] mean = round(np.mean(nums)) - 1 ans = 0 for i in nums: d = abs(i - mean) ans += d * (d+1) // 2 return str(ans) if __name__ == '__main__': input_data = get_data(day=DAY, year=YEAR) ans1 = part1(input_data) #submit(answer=ans1, day=DAY, year=YEAR, part=1) ans2 = part2(input_data) submit(answer=ans2, day=DAY, year=YEAR, part=2)
{"hexsha": "fe114127c28a057b9d98e93c4d13d7483ebaddbc", "size": 723, "ext": "py", "lang": "Python", "max_stars_repo_path": "days/day7.py", "max_stars_repo_name": "vanHavel/AdventOfCode2021", "max_stars_repo_head_hexsha": "a83ee21cffff56ba3f49de7af5113bf0b11fea7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "days/day7.py", "max_issues_repo_name": "vanHavel/AdventOfCode2021", "max_issues_repo_head_hexsha": "a83ee21cffff56ba3f49de7af5113bf0b11fea7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "days/day7.py", "max_forks_repo_name": "vanHavel/AdventOfCode2021", "max_forks_repo_head_hexsha": "a83ee21cffff56ba3f49de7af5113bf0b11fea7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.9090909091, "max_line_length": 52, "alphanum_fraction": 0.5767634855, "include": true, "reason": "import numpy", "num_tokens": 237}
# Copyright 2021 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import pandas as pd from legate import pandas as lp start = pd.to_datetime("2015-01-01") end = pd.to_datetime("2020-01-01") start_u = start.value // 10 ** 9 end_u = end.value // 10 ** 9 n = 30 s = pd.Series( 10 ** 9 * np.random.randint(start_u, end_u, n, dtype=np.int64), dtype=np.dtype("datetime64[ns]"), ) for i in range(n): if i % 3 == 0: s[i] = pd.NaT ls = lp.Series(s) fields = ["year", "month", "day", "hour", "minute", "second", "weekday"] for field in fields: print("Testing " + field) out_s = getattr(s.dt, field).fillna(0.0) out_ls = getattr(ls.dt, field).fillna(0).astype(np.double) assert out_ls.equals(out_s)
{"hexsha": "b40f5c2fc1d703b51ccf14605a2288d616148615", "size": 1268, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/pandas/sr_unary_datetime_with_null.py", "max_stars_repo_name": "marcinz/legate.pandas", "max_stars_repo_head_hexsha": "94c21c436f59c06cfba454c6569e9f5d7109d839", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 67, "max_stars_repo_stars_event_min_datetime": "2021-04-12T18:06:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T06:51:05.000Z", "max_issues_repo_path": "tests/pandas/sr_unary_datetime_with_null.py", "max_issues_repo_name": "marcinz/legate.pandas", "max_issues_repo_head_hexsha": "94c21c436f59c06cfba454c6569e9f5d7109d839", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-22T00:30:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-01T22:12:43.000Z", "max_forks_repo_path": "tests/pandas/sr_unary_datetime_with_null.py", "max_forks_repo_name": "marcinz/legate.pandas", "max_forks_repo_head_hexsha": "94c21c436f59c06cfba454c6569e9f5d7109d839", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-04-14T21:28:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T09:45:25.000Z", "avg_line_length": 28.8181818182, "max_line_length": 74, "alphanum_fraction": 0.6869085174, "include": true, "reason": "import numpy", "num_tokens": 352}
import cv2 import numpy as np import glob img_array = [] for filename in glob.glob('DATA/baseline/results/highway_MOG/*jpg'): img = cv2.imread(filename) height, width, layers = img.shape size = (width, height) img_array.append(img) out = cv2.VideoWriter('resultTest.avi', cv2.VideoWriter_fourcc(*'DIVX'), 24, size) for i in range(len(img_array)): out.write(img_array[i]) out.release() """ Pasos para implementar el exponential filter: 1.Construir el modelo de background a partir de los primeros n frames 1.1. """
{"hexsha": "e6931289acfc4c7f29cd48deedd10251e3a3db0f", "size": 546, "ext": "py", "lang": "Python", "max_stars_repo_path": "BackgroundSubtraction/test_files/im_to_vid.py", "max_stars_repo_name": "JanuszJSzturo/ImageAnalysis2020", "max_stars_repo_head_hexsha": "8ef432ecdc9e5c04834acd4752848302d75d7856", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BackgroundSubtraction/test_files/im_to_vid.py", "max_issues_repo_name": "JanuszJSzturo/ImageAnalysis2020", "max_issues_repo_head_hexsha": "8ef432ecdc9e5c04834acd4752848302d75d7856", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BackgroundSubtraction/test_files/im_to_vid.py", "max_forks_repo_name": "JanuszJSzturo/ImageAnalysis2020", "max_forks_repo_head_hexsha": "8ef432ecdc9e5c04834acd4752848302d75d7856", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7391304348, "max_line_length": 82, "alphanum_fraction": 0.7051282051, "include": true, "reason": "import numpy", "num_tokens": 145}
#!/usr/bin/python3 # -*- coding: utf-8 -*- """ Train the model. Usage: train.py [<output>] [--ckpt=<ckpt>] [--batch_size=<batch_size>] Options: -h --help Show this help. <batch_size> Batch size to train on <output> Ouput folder. By default: ./outputs/ <ckpt> Path to the checkpoints to restore """ from keras.preprocessing.image import ImageDataGenerator from PIL import Image from PIL import Image, ImageEnhance from docopt import docopt from sklearn.model_selection import train_test_split import tensorflow as tf import numpy as np import random import pickle import os import sys from model import FaceRec from data_handler import get_face_data BATCH_SIZE = 10 EARLY_STOPPING_COUNT = 10 def train(batch_size=None, ckpt=None, output=None): """ Train the model **input: ** *dataset: (String) Dataset folder to used *ckpt: (String) [Optional] Path to the ckpt file to restore *output: (String) [Optional] Path to the output folder to used. ./outputs/ by default """ print(sys.version) if not batch_size: batch_size = BATCH_SIZE batch_size = int(batch_size) print("Batch size: %s" %(batch_size)) X_train, y_train, X_test, y_test = get_face_data() X_train = X_train / 255 X_test = X_test / 255 ## do a quick check on the number of labels expected by model ## and number of models passed in nb_labels_dataset = max(len(set(y_train)), len(set(y_test))) if nb_labels_dataset != FaceRec.NB_LABELS: print("Number of labels is mismatched.. can't train this way") return train_datagen = ImageDataGenerator() train_datagen_augmented = ImageDataGenerator( width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True) inference_datagen = ImageDataGenerator() train_datagen.fit(X_train) train_datagen_augmented.fit(X_train) inference_datagen.fit(X_test) # Utils method to print the current progression def plot_progression(b, cost, acc, label): print("[%s] Batch ID = %s, loss = %s, acc = %s" % (label, b, cost, acc)) # Init model model = FaceRec("FaceRec", output_folder=output) if ckpt is None: model.init() else: model.load(ckpt) # Training pipeline b = 0 best_validation_loss = float('inf') augmented_factor = 0.99 decrease_factor = 0.80 train_batches = train_datagen.flow(X_train, y_train, batch_size=batch_size) augmented_train_batches = train_datagen_augmented.flow(X_train, y_train, batch_size=batch_size) valid_batch = inference_datagen.flow(X_test, y_test, batch_size=batch_size) while True: next_batch = next( augmented_train_batches if random.uniform(0, 1) < augmented_factor else train_batches) ### Training x_batch, y_batch = next_batch cost, acc = model.optimize(x_batch, y_batch) ### Validation --> with test data # Retrieve the cost and acc on this validation batch and save it in tensorboard x_batch, y_batch = next(valid_batch, None) cost_val, acc_val = model.evaluate(x_batch, y_batch, tb_test_save=True) count = EARLY_STOPPING_COUNT # Plot the last results if b % 10 == 0: plot_progression(b, cost, acc, "Train") plot_progression(b, cost_val, acc_val, "Validation") # Early stopping logic; if there is EARLY_STOPPING_COUNT # worth of consecutive accuracies of 100%, we stop if acc == 1.0: count -= 1 else: count = EARLY_STOPPING_COUNT if not count: print("model has hit 100% accuracy and met early stopping criteria") model.save() break # every 100 batch sizes, we check if the model should be saved based # on if the model's loss on 80% of the test dataset if b % 100 == 0: # We decide whether to checkpoint based on 30% of the test dataset # this is just to speed up computation _, save_x_test, _, save_y_test = train_test_split(X_test, y_test, test_size=0.8, random_state=b) loss, acc, _ = model.evaluate_dataset(save_x_test, save_y_test) print("Current loss: %s Best loss: %s" % (loss, best_validation_loss)) if loss < best_validation_loss: best_validation_loss = loss model.save() # as we get better result we do less augmentation augmented_factor = augmented_factor * decrease_factor print("Augmented Factor = %s" % augmented_factor) b += 1 if __name__ == '__main__': arguments = docopt(__doc__) train(arguments["--batch_size"], arguments["--ckpt"], arguments["<output>"])
{"hexsha": "40f6e3a74728126b754da66973f55aa51fc3fba7", "size": 5004, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_for_faces.py", "max_stars_repo_name": "krishnr/CapsNet4Faces", "max_stars_repo_head_hexsha": "1863b9d9524f6659e08625402053332ed6ea1415", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-03-21T16:29:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-27T09:20:37.000Z", "max_issues_repo_path": "train_for_faces.py", "max_issues_repo_name": "krishnr/CapsNet4Faces", "max_issues_repo_head_hexsha": "1863b9d9524f6659e08625402053332ed6ea1415", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_for_faces.py", "max_forks_repo_name": "krishnr/CapsNet4Faces", "max_forks_repo_head_hexsha": "1863b9d9524f6659e08625402053332ed6ea1415", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-31T16:33:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-03T11:36:07.000Z", "avg_line_length": 34.0408163265, "max_line_length": 99, "alphanum_fraction": 0.6270983213, "include": true, "reason": "import numpy", "num_tokens": 1178}
# Utility functions ord = [ 21, 17, 13, 9, 5, 1, 22, 18, 14, 10, 6, 2, 23, 19, 15, 11, 7, 3, 24, 20, 16, 12, 8, 4] function estimate_snr(x::AbstractVector; fs=10) # Estimate SNR for a 30-102.4s segment pow = power(periodogram(x, nfft=1024, window=hanning)) ps = argmax(pow) n = mean(pow[Not(ps, 2ps)]) * 0.73 w = (pow[ps] * fs / 1024 - n) / n return w end """ moving_stats(x::AbstractVector, L::Int) Return moving average and variance of x using a window length L """ function moving_stats(x::AbstractVector{T}, L::Int) where T # Use filters to calculate running mean and var avg_filt = ones(T, L) ./ L var_filt = ones(T, L) ./ (L - 1) # unbiased moving_avg = filt(avg_filt, 1, x) moving_var = filt(var_filt, 1, (x .- moving_avg) .^ 2) return moving_avg, moving_var end """ moving_stats(x::AbstractVector, w::AbstractVector{Int}) Return moving average and variance of x using a window lengths w """ function moving_stats(x::AbstractVector{T}, w::AbstractVector{Int}) where T moving_avg = zeros(T, length(x)) moving_var = zeros(T, length(x)) @views for i in eachindex(x) win = i < w[i] ? i : w[i] moving_avg[i] = mean(x[i-win+1:i]) moving_var[i] = sum((x[i-win+1:i] .- moving_avg[i-win+1:i]) .^ 2) / (w[i]-1) end return moving_avg, moving_var end """ apply2seg(f::Function, x::AbstractMatrix{T} , n::Integer) Convenience function to apply a function f to segments of length n to the matrix x. """ function apply2seg(f::Function, x::AbstractMatrix{T} , n::Integer) where T # Assumes a mat comes in and a vec goes out # Not much better than a mapreduce but it includes last segment ra = 1:n:size(x, 1)-n out = Vector{T}(undef, size(x, 1)) for i in ra out[i:i+n-1] = f(@view x[i:i+n-1, :]) end out[ra.stop+n:end] = f(@view x[ra.stop+n:end, :]) return out end """ mat_shape(x, n) Reorders the columns of a matrix to be in the proper sensor order. """ function mat_shape(x::AbstractVector, n=3) return view(x, mapreduce(i -> ord .+ (24 * i), vcat, 0:n-1)) end function mat_shape(x::AbstractMatrix, n=3) return view(x, :, mapreduce(i -> ord .+ (24 * i), vcat, 0:n-1)) end """ reshape_psm(psm, n_mats) Takes in a PSM and reshapes it to be in the correct order. ex. Nx72 -> 9x8xN ex. Nx24 -> 3x8xN """ function reshape_psm(x::AbstractMatrix, n=div(size(x, 2), 24)) new_shape = reshape(mat_shape(x, n)', 3, 8, n, :) return reduce(vcat, eachslice(new_shape, dims=3)) end """ active_sensors(x, thresh) Return the indices of sensors whose mean value is greater than thresh. """ active_sensors(x::AbstractMatrix, thresh=0.4*2046) = vec(mean(x, dims=1) .> thresh) """ choose_ref(x) Return the index of the sensor with the greatest power. """ function choose_ref(x::AbstractMatrix) # Choose out = sum(abs2, x, dims=1) |> argmax return out[2] end extract_ref(x::AbstractMatrix) = x[:, choose_ref(x)] """ polarity_flip(x) Flips the polarity of sensors based on their PCC with the reference sensor. """ polarity_flip(x) = sign.(cor(x, @view x[:, choose_ref(x)]))' .* x """ sfm(x) Returns the spectral flatness measure of a signal. """ function sfm(x) s = abs2.(fft(x)) return geomean(s) / mean(s) end """ active_sfm(x, n, thresh) Return the indices of sensors with a mean spectral flatness measure, taken in segments of length n, above the threshold in dB. """ function active_sfm(x::AbstractMatrix, n, thresh=-50) fo(sig) = mapreduce(i -> sfm(sig[i:i+n-1]), vcat, 1:n:size(x, 1)-n) s = mapreduce(fo, hcat, eachcol(x)) actives = pow2db.(s) .> thresh return vec(mean(actives, dims=1) .> 0.4) end
{"hexsha": "70b8f2825ac30351eb08a1c4f576d37672e583cb", "size": 3744, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils.jl", "max_stars_repo_name": "carterjgreen/PressureSensitiveMats.jl", "max_stars_repo_head_hexsha": "5efe25c27bddd92291d1f90238c4105c603a68e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils.jl", "max_issues_repo_name": "carterjgreen/PressureSensitiveMats.jl", "max_issues_repo_head_hexsha": "5efe25c27bddd92291d1f90238c4105c603a68e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-06T21:19:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-28T20:06:56.000Z", "max_forks_repo_path": "src/utils.jl", "max_forks_repo_name": "carterjgreen/PressureSensitiveMats.jl", "max_forks_repo_head_hexsha": "5efe25c27bddd92291d1f90238c4105c603a68e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8206896552, "max_line_length": 89, "alphanum_fraction": 0.6380876068, "num_tokens": 1210}
from __future__ import annotations import numpy as np import pyvista as pv from .._doc import doc from ._mesh import Mesh @doc(Mesh, prefix='Data class for tetrahedral meshes', dim_points='3', dim_cells='4') class TetraMesh(Mesh, cell_dim=4): cell_type = 'tetra' def to_open3d(self): """Return instance of :class:`open3d.geometry.TetraMesh`.""" import open3d return open3d.geometry.TetraMesh( vertices=open3d.utility.Vector3dVector(self.points), tetras=open3d.utility.Vector4iVector(self.cells)) @classmethod def from_open3d(cls, mesh) -> TetraMesh: """Return instance of :class:`TetraMesh` from :mod:`open3d`.""" points = np.asarray(mesh.vertices) cells = np.asarray(mesh.tetras) return cls(points=points, cells=cells) @classmethod def from_pyvista_unstructured_grid(cls, grid: 'pv.UnstructuredGrid'): """Return infance of :class:`TetraMesh` from :class:`pyvista.UnstructuredGrid`.""" assert grid.cells[0] == 4 cells = grid.cells.reshape(grid.n_cells, 5)[:, 1:] points = np.array(grid.points) return cls(points=points, cells=cells) def plot(self, **kwargs): """Shortcut for :meth:`TetraMesh.plot_pyvista`.""" return self.plot_pyvista(**kwargs) def plot_pyvista(self, **kwargs): """Show grid using :mod:`pyvista`. Parameters ---------- **kwargs These parameters are first passed to :func:`pyvista.plot` and then to :func:`pyvista.Plotter().add_mesh`. """ return self.to_pyvista_unstructured_grid().plot(**kwargs) def plot_submesh( self, index: int = None, along: str = 'x', invert: bool = False, show: bool = True, backend: str = None, **kwargs, ): """Show submesh using :mod:`pyvista`. Parameters ---------- index : int, optional Index of where to cut the mesh. Shows all tetrahedra with cell center < index. Picks the half-way point along the axis by default. along : str, optional Direction along which to cut. invert : bool, optional Invert the cutting operation, and show all tetrahedra with cell center > index. show : bool, optional If true, show the plot **kwargs: Keyword arguments passed to :meth:`pyvista.Plotter.add_mesh`. plotter : :class:`pyvista.Plotter` Return plotter instance. """ grid = self.to_pyvista_unstructured_grid() # get cell centroids cells = grid.cells.reshape(-1, 5)[:, 1:] cell_center = grid.points[cells].mean(1) # extract cells below index axis = 'zyx'.index(along) if index is None: # pick half-way point i, j = axis * 2, axis * 2 + 2 index = np.mean(grid.bounds[i:j]) mask = cell_center[:, axis] < index if invert: mask = ~mask cell_ind = mask.nonzero()[0] subgrid = grid.extract_cells(cell_ind) plotter = pv.Plotter() plotter.add_mesh(subgrid, **kwargs) if show: plotter.show(jupyter_backend=backend) return plotter
{"hexsha": "5458feb01547def64728ce66754f0856c2278ad3", "size": 3369, "ext": "py", "lang": "Python", "max_stars_repo_path": "nanomesh/mesh/_tetra.py", "max_stars_repo_name": "hpgem/nanomesher", "max_stars_repo_head_hexsha": "06e7648ff8b9ecf4cc1faa967469db6270c0ba5d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nanomesh/mesh/_tetra.py", "max_issues_repo_name": "hpgem/nanomesher", "max_issues_repo_head_hexsha": "06e7648ff8b9ecf4cc1faa967469db6270c0ba5d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nanomesh/mesh/_tetra.py", "max_forks_repo_name": "hpgem/nanomesher", "max_forks_repo_head_hexsha": "06e7648ff8b9ecf4cc1faa967469db6270c0ba5d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5526315789, "max_line_length": 73, "alphanum_fraction": 0.5829623034, "include": true, "reason": "import numpy", "num_tokens": 833}
import numpy as np import pandas as pd from PIL import Image import torch import torch.nn as nn from torch.utils.data import DataLoader import os from shutil import copyfile from tqdm import tqdm import argparse from QataCovDataset import QataCovDataset from model.unet import UNet import gc def create_predict_data(path,img_list,out,net,dataloader,device,img_size): masks_out = os.path.join(out,'predict_Ground-truths') croped_out = os.path.join(out,'predict_crop_images') """Iterate over data""" print("predict masks and croped images") predicted_masks=[] data_iter = tqdm(enumerate(dataloader), total=len(dataloader)) for batch_idx, sample in data_iter: imgs, true_masks = sample['image'], sample['mask'] imgs = imgs.to(device=device, dtype=torch.float32) # mask_type = torch.float32 if net.n_classes == 1 else torch.long with torch.set_grad_enabled(False): masks_pred = net(imgs) pred = torch.sigmoid(masks_pred) > 0.5 #print(pred.size()) pred = torch.squeeze(pred) #print(pred.size()) masks = pred.detach().cpu().numpy().astype(np.uint8) predicted_masks.append(masks) predicted_masks_array = np.concatenate(predicted_masks, axis=0) del predicted_masks gc.collect() for i,img_name in tqdm(enumerate(img_list)): img = Image.open(os.path.join(path,'image/'+img_name)).convert('L') mask = (predicted_masks_array[i,:,:]*255).astype(np.uint8) mask_img = Image.fromarray(mask).resize(img.size,Image.LANCZOS) mask_img.save(os.path.join(masks_out,'mask_'+img_name)) croped = np.where(np.array(mask_img) == 0, 0, np.array(img)).astype(np.uint8) Image.fromarray(croped).save(os.path.join(croped_out,img_name)) def get_args(): parser = argparse.ArgumentParser(description = "Qata_Covid19 Segmentation" , formatter_class=argparse.ArgumentDefaultsHelpFormatter) # set your environment parser.add_argument('--path',type=str,default='./data/Qata_COV') parser.add_argument('--gpu', type=str, default = '0') # arguments for training parser.add_argument('--img_size', type = int , default = 224) parser.add_argument('--load_model', type=str, default='best_checkpoint.pt', help='.pth file path to load model') parser.add_argument('--out', type=str, default='./dataset') return parser.parse_args() def main(): args = get_args() if ~ os.path.exists(args.out): print("path created") os.mkdir(args.out) #os.mkdir(os.path.join(args.out,'Images')) #os.mkdir(os.path.join(args.out,'Ground-truths')) os.mkdir(os.path.join(args.out,'predict_Ground-truths')) #os.mkdir(os.path.join(args.out,'original_crop_images')) os.mkdir(os.path.join(args.out,'predict_crop_images')) # set GPU device os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu # default: '0' device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # set model model = UNet(n_channels=1, n_classes=1).to(device) checkpoint = torch.load(args.load_model) model.load_state_dict(checkpoint['model_state_dict']) """set img size - UNet type architecture require input img size be divisible by 2^N, - Where N is the number of the Max Pooling layers (in the Vanila UNet N = 5) """ img_size = args.img_size #default: 224 # set transforms for dataset import torchvision.transforms as transforms from my_transforms import RandomHorizontalFlip,RandomVerticalFlip,ColorJitter,GrayScale,Resize,ToTensor eval_transforms = transforms.Compose([ GrayScale(), Resize(img_size), ToTensor() ]) img_path = os.path.join(args.path,'image') img_list = os.listdir(img_path) dataset = QataCovDataset(root_dir = args.path,split=img_list,transforms=eval_transforms) dataloader = DataLoader(dataset = dataset , batch_size=16) #create_original_data(args.path,args.out) create_predict_data(args.path,img_list,args.out,model,dataloader,device,args.img_size) img_crop_path = args.out+'/predict_crop_images' create_zipfile(img_crop_path) #df = create_annotation(args.path) #df.to_csv(os.path.join(args.out,'target.csv'),index=False) def get_all_file_paths(directory): # initializing empty file paths list file_paths = [] # crawling through directory and subdirectories print("get all file paths") for root, directories, files in tqdm(os.walk(directory)): for filename in files: # join the two strings in order to form the full filepath. filepath = os.path.join(root, filename) file_paths.append(filepath) # returning all file paths return file_paths def create_zipfile(directory): # path to folder which needs to be zipped #path = '/Users/Salem Rezzag/Desktop/New folder' #directory = './images rename' from zipfile import ZipFile import os # calling function to get all file paths in the directory file_paths = get_all_file_paths(directory) # printing the list of all files to be zipped #print('Following files will be zipped in this program:') #for file_name in file_paths: # print(file_name) # writing files to a zipfile print("writing files to a zipfile") with ZipFile('myzipfile.zip','w') as zip: # writing each file one by one for file in tqdm(file_paths): zip.write(file) print('All files zipped successfully!') if __name__ == '__main__': main()
{"hexsha": "b762d539e7e30da83f46c07de07add1e75ee7da2", "size": 5694, "ext": "py", "lang": "Python", "max_stars_repo_path": "croped_data.py", "max_stars_repo_name": "salem-devloper/COVID-Lung-Segment", "max_stars_repo_head_hexsha": "6896f6b0c56dac6d32e005afd4a94d59b1917b44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "croped_data.py", "max_issues_repo_name": "salem-devloper/COVID-Lung-Segment", "max_issues_repo_head_hexsha": "6896f6b0c56dac6d32e005afd4a94d59b1917b44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "croped_data.py", "max_forks_repo_name": "salem-devloper/COVID-Lung-Segment", "max_forks_repo_head_hexsha": "6896f6b0c56dac6d32e005afd4a94d59b1917b44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7783783784, "max_line_length": 116, "alphanum_fraction": 0.6745697225, "include": true, "reason": "import numpy", "num_tokens": 1322}
C$Procedure EKFIND ( EK, find data ) SUBROUTINE EKFIND ( QUERY, NMROWS, ERROR, ERRMSG ) C$ Abstract C C Find E-kernel data that satisfy a set of constraints. C C$ Disclaimer C C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S. C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE C ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS" C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE C SOFTWARE AND RELATED MATERIALS, HOWEVER USED. C C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS, C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY. C C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE. C C$ Required_Reading C C EK C C$ Keywords C C EK C PARSE C SEARCH C C$ Declarations INCLUDE 'ekquery.inc' INCLUDE 'ekqlimit.inc' INCLUDE 'ektype.inc' INCLUDE 'ekopcd.inc' INCLUDE 'ekcnamsz.inc' CHARACTER*(*) QUERY INTEGER NMROWS LOGICAL ERROR CHARACTER*(*) ERRMSG C$ Brief_I/O C C Variable I/O Description C -------- --- -------------------------------------------------- C QUERY I Query specifying data to be found. C NMROWS O Number of matching rows. C ERROR O Flag indicating whether query parsed correctly. C ERRMSG O Parse error description. C C$ Detailed_Input C C QUERY is a character string that specifies a set of EK C data to select from those present in currently C loaded EK files. The selected data will be C retrievable via the EK fetch routines EKGC, EKGD, C and EKGI. C C The query consists of four clauses, the third and C fourth of which are optional. The general form C of a query is C C SELECT <column list> C FROM <table list> C [WHERE <constraint list>] C [ORDER BY <ORDER BY column list>] C C where brackets indicate optional items. The C elements of the query shown above are called, C respectively, the `SELECT clause', the C `FROM clause', the `WHERE clause', and the C `ORDER BY clause'. The result of a query may be C thought of as a new table, whose columns are those C specified in the SELECT clause, whose rows are C those satisfying the constraints of the WHERE C clause, and whose rows are ordered according to C the ORDER BY clause. C C The SELECT clause specifies a list of columns C from which data are to be selected. In a simple C (non-join) query, these columns must belong to C the single table specified in the FROM clause. C C The form of a SELECT clause is C C SELECT <column name> [ ,<column name>...] C C In queries having multiple tables in the FROM C clause, column names are ambiguous if they occur C in more than one table in the FROM clause. Such C column names must be qualified with table C identifiers. These identifiers may be the names of C the tables to which the columns belong, or table C `aliases', names (usually short ones) associated C with tables in the FROM clause. Table aliases have C duration limited to the execution of the query to C which they belong. C C The form of a qualified column name is C C <table name>.<column name> C C or C C <table alias>.<column name> C C C The FROM clause specifies the tables from which C data are to be selected. In simple queries, only C one table is listed. In this case the form of C the FROM clause is C C FROM <table name> C C In queries involving multiple tables, the form of C the FROM clause becomes C C FROM <table name> [<table alias>] C [ , <table name> [<table alias>] ... ] C C The aliases associated with the table names must C be distinct and must not be the actual names of C loaded EK tables. C C Queries involving multiple tables are called C `joins'. C C The meaning of a FROM clause containing multiple C tables is that the output is to be a subset of C the rows of the Cartesian product of the listed C tables. Normally, WHERE clause constraints are C supplied to reduce the selected rows to a set of C interest. C C The most common example of a join is a query with C two tables listed in the FROM clause, and a WHERE C clause constraint enforcing equality of members C of a column in the first table with members of C column in the second table. Such a query is C called an `equi-join'. A join in which columns C of different tables are related by an inequality C is called a `non-equi-join'. Any type of join C other than an equi-join may be very slow to C evaluate, due to the large number of elements that C may be contained in the Cartesian C product of the listed tables. C C The WHERE clause lists constraints that must C be met by each row satisfying the query. The C constraints are specified as a logical combination C of relational expressions. The form of the C constraint list is C C WHERE <constraint expression> C C where each <constraint expression> consists of one C or more simple relational expressions of the form C C <column name> <operator> <RHS symbol> C C where C C <RHS symbol> C C is a column name, a literal value, or the special C symbol C C NULL C C and C C <operator> C C is any of C C EQ, GE, GT, LE, LIKE, LT, NE, NOT LIKE, <, <=, C =, >, >=, !=, <> C C For comparison with null values, the special C syntaxes C C <column name> IS NULL C <column name> IS NOT NULL C C are allowed, in addition to the standard C comparison syntaxes using the equality or C inequality operators. C C The LIKE operator allows comparison of a string C value against a template. The template syntax C is that allowed by the SPICELIB routine MATCHI. C Templates may include literal characters, the C wild string marker '*', and the wild character C marker '%'. Case is significant in templates. C C Templates are bracketed by quote characters, just C as are literal strings. C C The query language also supports the BETWEEN and C NOT BETWEEN constructs C C <column> BETWEEN <symbol 1> AND <symbol 2> C C <column> NOT BETWEEN <symbol 1> AND <symbol 2> C C The tokens C C <symbol 1> C <symbol 2> C C may be literal values or column names. C C The BETWEEN operator considers values that match C the bounds to satisfy the condition: the BETWEEN C operator tests for inclusion in the closed interval C defined by the bounds. C C In the WHERE clause, simple relational expressions C may be combined using the logical operators AND, C OR, and NOT, as in the Fortran programming C language. Parentheses may be used to enforce a C desired order of evaluation of logical expressions. C C The expression syntax is NOT symmetric: literal C values must not appear on the left hand side of the C operators that apply to them. C C The columns named in a constraint clause must C belong to the tables listed in the FROM clause. C If the query is a join, qualifying table names or C aliases are required wherever their omission would C result in ambiguity. C C Data types of the columns or constants used on the C right-hand-sides of operators must match the data C types of the corresponding columns on the C left-hand-sides, except that comparison of integer C and double precision quantities is permitted. C C Literal strings used in constraints are always C bracketed by quotes. Either single quotes (') C or double quotes (") may be used, but the same C quote character must be used to start and end any C literal string. Within character string values, C quote characters must be doubled in order to be C recognized. Case is significant in character C except in comparisions using the LIKE and NOT LIKE C operators, which ignore case: the expression C C ANIMAL LIKE "*A*" C C would be considered true when ANIMAL takes the C value C C "cat" C C Time values are considered to be strings and C require bracketing quotes. Currently, the C only time values allowed are UTC times in ISO C format, UTC times represented in forms accepted by C the SPICELIB routine TPARSE, and SCLK strings in C NAIF format. C C The ORDER BY clause indicates which columns to C use to order the output generated by the query. C The columns in the ORDER BY clause define a C dictionary ordering, with the first listed column C acting as a primary key, the second column acting C as a secondary key, and so on. C C For each ORDER BY column, the keywords ASC or DESC C may be supplied to indicate whether the items in C that column are to be listed in ascending or C descending order. Ascending order is the default. C The direction in which data items increase is C referred to as the `order sense'. C C The ORDER BY clause, if present, must appear C last in the query. C C The form of the ORDER BY clause is C C ORDER BY <column name> [<order sense>] C [ ,<column name> [<order sense>]...] C C Rows satisfying the query constraints will be C returned so that the entries of the first column C specified in the ORDER BY clause will be appear in C the order specified by the order sense keyword, C which is assumed to be ASC if absent. When entries C in the first through Nth ORDER BY column are equal, C the entries in the (N+1)st ORDER BY column C determine the order of the rows, and so on. C C As in the WHERE clause, column names must be C qualified by table names or table aliases where C they would otherwise be ambiguous. C C The query language is word-oriented, and some C indicate whether the words are reserved. Reserved C words must be separated from other words by white C space. It is not necessary to use white space C to separate words and punctuation characters. C The list of reserved words is C C AND C BETWEEN C BY C COLUMN C EQ C FROM C GE C GT C IS C LE C LT C LIKE C NE C NOT C NULL C OR C ORDER C SELECT C WHERE C C The left and right parenthesis characters are also C reserved; they may not be used in queries outside C of quoted strings. C C Case is not significant in queries, except within C literal strings. C C$ Detailed_Output C C NMROWS is the number of rows that match the query C criteria. NMROWS is defined if and only if C ERROR is returned .FALSE. C C ERROR is a logical flag indicating whether the query C failed to parse correctly. C C ERRMSG is a character string that describes EKFIND's C diagnosis of a parse error, should one occur. C Otherwise, ERRMSG will be returned blank. C C$ Parameters C C See the include files. C C$ Exceptions C C 1) Most of the exceptions that can occur on a call to C EKFIND are caused by errors in the input query. EKFIND C attempts to diagnose these via the output error flag and C error message, instead of signalling errors. The following C classes of errors are detected: C C Scanning errors---these result from badly formed query C in which EKFIND could not identify all of the tokens. C When these errors occur, EKFIND may be too confused to C give a helpful diagnostic message. C C Parsing errors---these result from a badly formed C query that EKFIND was able to separate into tokens C but that EKFIND determined to be syntactically invalid: C C Name resolution errors---these result from referencing C invalid or ambiguous column or table names in a query. C C Time resolution errors---these result from use of time C strings that cannot be parsed. C C Semantic errors---these result from a syntactically C valid query that violates a limit or a restriction on C values used in a query. C C C Some problems with queries are not trapped by EKFIND but C instead cause errors to be signalled. These are listed below. C C C 2) If no E-kernels are loaded at the time this routine is called, C an error will be signalled by routines called by this routine. C C 3) If a leapseconds kernel is is not loaded before this routine C is called, UTC time values may not be used in queries. If C they are, an error will be signalled by routines called by C this routine. C C 4) If an SCLK kernel for the appropriate spacecraft clock C has not been loaded before this routine is called, SCLK C values for that clock may not be used in queries. If C they are, an error will be signalled by routines called by C this routine. C C$ Files C C None. C C$ Particulars C C This routine operates almost entirely by side effects: it C prepares the EK fetch routines to return event data that C satisfy the input query. See the header of the routine C EKQMGR or the EK Required Reading for examples of use of this C routine in conjunction with the EK fetch routines. C C$ Examples C C 1) Examples of strings containing syntactically valid queries: C C SELECT COL1 FROM TAB1 C C select col1 from tab1 where col1 gt 5 C C SELECT COL2 FROM TAB1 WHERE COL2 > 5.7D0 ORDER BY COL2 C C SELECT COL2 FROM TAB1 WHERE COL1 != 5 C C SELECT COL2 FROM TAB1 WHERE COL1 GE COL2 C C SELECT COL1, COL2, COL3 FROM TAB1 ORDER BY COL1 C C SELECT COL3 FROM TAB1 WHERE COL5 EQ "ABC" C C SELECT COL3 FROM TAB1 WHERE COL5 = 'ABC' C C SELECT COL3 FROM TAB1 WHERE COL5 LIKE 'A*' C C SELECT COL3 FROM TAB1 WHERE COL5 LIKE 'A%%' C C SELECT COL4 FROM TAB1 WHERE COL4 = '1995 JAN 1 12:38:09.7' C C SELECT COL4 FROM TAB1 WHERE COL4 = "1995 JAN 1 12:38:09.7" C C SELECT COL4 FROM TAB1 WHERE C COL4 NE 'GLL SCLK 02724646:67:7:2' C C SELECT COL1 FROM TAB1 WHERE COL1 != NULL C C SELECT COL1 FROM TAB1 WHERE COL1 IS NULL C C SELECT COL1 FROM TAB1 WHERE COL1 IS NOT NULL C C SELECT COL1, COL2, COL3 FROM TAB1 C WHERE (COL1 BETWEEN 4 AND 6) AND (COL3 NOT LIKE "A%%") C ORDER BY COL1, COL3 C C SELECT COL4 FROM TAB1 C WHERE COL4 BETWEEN "1995 JAN 1 12:38" AND C "October 23, 1995" C C SELECT COL1, COL2 FROM TAB1 WHERE C NOT ( ( ( COL1 < COL2 ) AND ( COL1 > 5 ) ) OR C ( ( COL1 >= COL2 ) AND ( COL2 <= 10 ) ) ) C C C SELECT T1.COL1, T1.COL2, T2.COL2, T2.COL3 C FROM TABLE1 T1, TABLE2 T2 C WHERE T1.COL1 = T2.COL1 C AND T1.COL2 > 5 C ORDER BY T1.COL1, T2.COL2 C C C 2) Examples of syntactically invalid queries: C C SELECT TIME WHERE TIME C LT 1991 JAN 1 {FROM clause is absent} C C select time from table1 where C time lt 1991 jan 1 {time string is not C quoted} C C select time from table1 C where time .lt. '1991 jan 1' {operator should be lt} C C select cmd from table1 C where "cmd,6tmchg" != cmd {value is on left side C of operator} C C select event_type from table1 C where event_type eq "" {quoted string is empty C ---use " " to indicate C a blank string} C C select event_type from table1 C where event_type = "COMMENT" C order TIME {ORDER BY phrase is C lacking BY keyword} C C select COL1 from table where C where COL1 eq MOC_EVENT {literal string on C right-hand-side of C operator is not quoted} C C C C In the following examples, we'll assume that the program C calling EKFIND has loaded an EK containing two segments C having columns having the following names and attributes: C C C TABLE1: C ========== C C Column name Data type Size Indexed? C ----------- --------- ---- -------- C EVENT_TYPE CHARACTER*32 1 YES C EVENT_PARAMETERS CHARACTER*(*) 1 NO C COMMENT CHARACTER*80 VARIABLE NO C C C TABLE2: C ========== C C Column name Data type Size Indexed? C ----------- --------- ---- -------- C EVENT_TYPE CHARACTER*32 1 YES C EVENT_PARAMETERS CHARACTER*80 1 NO C COMMENT CHARACTER*80 VARIABLE NO C COMMAND CHARACTER*80 1 YES C C C Then the following queries are semantically invalid: C C SELECT EVENT_PARAMETERS C FROM TABLE1 C WHERE EVENT_DURATION = 7.0 {No column called C EVENT_DURATION C is present in a loaded C EK} C C SELECT COMMENT FROM TABLE2 C WHERE COMMENT EQ "N/A" {The COMMENT column does C not have size 1 and C therefore cannot be C referenced in a query} C C$ Restrictions C C 1) A leapseconds kernel must be loaded before this routine may C be called, if UTC time values are used in input queries. C C 2) An appropriate SCLK kernel must be loaded before this routine C may be called, if SCLK values are used in input queries. C C 3) Data found in response to a query become unavailable C when a fast load is initiated via EKIFLD. Any desired C fetches of the data must be performed before a fast C load or any other operation that modifies the EK scratch C area is initiated. C C$ Literature_References C C None. C C$ Author_and_Institution C C N.J. Bachman (JPL) C C$ Version C C- SPICELIB Version 1.0.4, 18-MAY-2010 (BVS) C C Removed "C$" marker from text in the header. C C- SPICELIB Version 1.0.3, 19-DEC-2001 (NJB) C C Restrictions section was updated. C C- SPICELIB Version 1.0.2, 14-JAN-1997 (NJB) C C Syntax descriptions for comparisons using null values have been C added. The $Examples section was augmented with sample queries C demonstrating use of the IS NULL and IS NOT NULL comparison C operators. C C- SPICELIB Version 1.0.1, 16-AUG-1996 (NJB) C C Exceptions section of header was updated to indicate that C calling this routine while no E-kernels are loaded will cause C an error to be signalled. Previous version line was changed C from "Beta" to "SPICELIB," and the previous version was C corrected to 1.0.0. C C- SPICELIB Version 1.0.0, 24-OCT-1995 (NJB) C C-& C$ Index_Entries C C find EK data C issue EK query C C-& C C SPICELIB functions C LOGICAL RETURN C C Local parameters C INTEGER LBCELL PARAMETER ( LBCELL = -5 ) C C Storage limits: C INTEGER MAXCOL PARAMETER ( MAXCOL = 100 ) C C Local variables C CHARACTER*(MAXCLN) CHRBUF CHARACTER*(MAXCLN) EQRYC DOUBLE PRECISION EQRYD ( MAXQNM ) DOUBLE PRECISION NUMVLS ( MAXQNM ) INTEGER CHBEGS ( MAXTOK ) INTEGER CHENDS ( MAXTOK ) INTEGER EQRYI ( LBCELL : EQIMIN ) INTEGER ERRPTR INTEGER LXBEGS ( MAXTOK ) INTEGER LXENDS ( MAXTOK ) INTEGER NTOKEN INTEGER TOKENS ( MAXTOK ) INTEGER VALUES ( MAXTOK ) C C Standard SPICE error handling. C IF ( RETURN () ) THEN RETURN ELSE CALL CHKIN ( 'EKFIND' ) END IF C C Initialize the encoded query each time, for safety. C CALL ZZEKQINI ( EQIMIN, MAXQNM, EQRYI, EQRYC, EQRYD ) C C Find the tokens in the input query. C CALL ZZEKSCAN ( QUERY, MAXTOK, MAXQNM, NTOKEN, TOKENS, . LXBEGS, LXENDS, VALUES, NUMVLS, CHRBUF, . CHBEGS, CHENDS, ERROR, ERRMSG ) IF ( ERROR ) THEN CALL CHKOUT ( 'EKFIND' ) RETURN END IF C C Now parse the query. C CALL ZZEKPARS ( QUERY, NTOKEN, LXBEGS, LXENDS, . TOKENS, VALUES, NUMVLS, CHRBUF, . CHBEGS, CHENDS, EQRYI, EQRYC, . EQRYD, ERROR, ERRMSG ) IF ( ERROR ) THEN CALL CHKOUT ( 'EKFIND' ) RETURN END IF C C Resolve names. C CALL ZZEKNRES ( QUERY, EQRYI, EQRYC, . ERROR, ERRMSG, ERRPTR ) IF ( ERROR ) THEN CALL CHKOUT ( 'EKFIND' ) RETURN END IF C C Resolve time values, if necessary. C CALL ZZEKTRES ( QUERY, EQRYI, EQRYC, EQRYD, . ERROR, ERRMSG, ERRPTR ) IF ( ERROR ) THEN CALL CHKOUT ( 'EKFIND' ) RETURN END IF C C Perform semantic checks. C CALL ZZEKSEMC ( QUERY, EQRYI, EQRYC, . ERROR, ERRMSG, ERRPTR ) IF ( ERROR ) THEN CALL CHKOUT ( 'EKFIND' ) RETURN END IF C C If we arrived here, the encoded query is ready for execution. C Find the data satisfying the constraints. C CALL EKSRCH ( EQRYI, EQRYC, EQRYD, NMROWS, ERROR, ERRMSG ) CALL CHKOUT ( 'EKFIND' ) RETURN END
{"hexsha": "b7413fbc9d38c276aeaf15eb7d6e398f06d84538", "size": 27081, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/nasa_f/ekfind.f", "max_stars_repo_name": "agforero/FTFramework", "max_stars_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-19T21:43:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T02:57:25.000Z", "max_issues_repo_path": "source/nasa_f/ekfind.f", "max_issues_repo_name": "agforero/fortran-testing-framework", "max_issues_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-07T21:17:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-09T02:18:07.000Z", "max_forks_repo_path": "source/nasa_f/ekfind.f", "max_forks_repo_name": "agforero/fortran-testing-framework", "max_forks_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:41:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:41:53.000Z", "avg_line_length": 36.6454668471, "max_line_length": 72, "alphanum_fraction": 0.5410804623, "num_tokens": 6146}
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests that relate to using quantities/units on parameters of models. """ import numpy as np import pytest from ..core import Model, Fittable1DModel, InputParameterError from ..parameters import Parameter, ParameterDefinitionError from ..models import (Gaussian1D, Pix2Sky_TAN, RotateNative2Celestial, Rotation2D) from ... import units as u from ...units import UnitsError from ...tests.helper import assert_quantity_allclose from ... import coordinates as coord class BaseTestModel(Fittable1DModel): @staticmethod def evaluate(x, a): return x def test_parameter_quantity(): """ Basic tests for initializing general models (that do not require units) with parameters that have units attached. """ g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m) assert g.amplitude.value == 1.0 assert g.amplitude.unit is u.J assert g.mean.value == 1.0 assert g.mean.unit is u.m assert g.stddev.value == 0.1 assert g.stddev.unit is u.m def test_parameter_set_quantity(): """ Make sure that parameters that start off as quantities can be set to any other quantity, regardless of whether the units of the new quantity are compatible with the original ones. We basically leave it up to the evaluate method to raise errors if there are issues with incompatible units, and we don't check for consistency at the parameter level. """ g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m) # Try equivalent units g.amplitude = 4 * u.kJ assert_quantity_allclose(g.amplitude, 4 * u.kJ) g.mean = 3 * u.km assert_quantity_allclose(g.mean, 3 * u.km) g.stddev = 2 * u.mm assert_quantity_allclose(g.stddev, 2 * u.mm) # Try different units g.amplitude = 2 * u.s assert_quantity_allclose(g.amplitude, 2 * u.s) g.mean = 2 * u.Jy assert_quantity_allclose(g.mean, 2 * u.Jy) def test_parameter_lose_units(): """ Check that parameters that have been set to a quantity that are then set to a value with no units raise an exception. We do this because setting a parameter to a value with no units is ambiguous if units were set before: if a paramter is 1 * u.Jy and the parameter is then set to 4, does this mean 2 without units, or 2 * u.Jy? """ g = Gaussian1D(1 * u.Jy, 3, 0.1) with pytest.raises(UnitsError) as exc: g.amplitude = 2 assert exc.value.args[0] == ("The 'amplitude' parameter should be given as " "a Quantity because it was originally " "initialized as a Quantity") def test_parameter_add_units(): """ On the other hand, if starting from a parameter with no units, we should be able to add units since this is unambiguous. """ g = Gaussian1D(1, 3, 0.1) g.amplitude = 2 * u.Jy assert_quantity_allclose(g.amplitude, 2 * u.Jy) def test_parameter_change_unit(): """ Test that changing the unit on a parameter does not work. This is an ambiguous operation because it's not clear if it means that the value should be converted or if the unit should be changed without conversion. """ g = Gaussian1D(1, 1 * u.m, 0.1 * u.m) # Setting a unit on a unitless parameter should not work with pytest.raises(ValueError) as exc: g.amplitude.unit = u.Jy assert exc.value.args[0] == ("Cannot attach units to parameters that were " "not initially specified with units") # But changing to another unit should not, even if it is an equivalent unit with pytest.raises(ValueError) as exc: g.mean.unit = u.cm assert exc.value.args[0] == ("Cannot change the unit attribute directly, " "instead change the parameter to a new quantity") def test_parameter_set_value(): """ Test that changing the value on a parameter works as expected. """ g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m) # To set a parameter to a quantity, we simply do g.amplitude = 2 * u.Jy # If we try setting the value, we need to pass a non-quantity value # TODO: determine whether this is the desired behavior? g.amplitude.value = 4 assert_quantity_allclose(g.amplitude, 4 * u.Jy) assert g.amplitude.value == 4 assert g.amplitude.unit is u.Jy # If we try setting it to a Quantity, we raise an error with pytest.raises(TypeError) as exc: g.amplitude.value = 3 * u.Jy assert exc.value.args[0] == ("The .value property on parameters should be set to " "unitless values, not Quantity objects. To set a " "parameter to a quantity simply set the parameter " "directly without using .value") def test_parameter_quantity_property(): """ Test that the quantity property of Parameters behaves as expected """ # Since parameters have a .value and .unit parameter that return just the # value and unit respectively, we also have a .quantity parameter that # returns a Quantity instance. g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m) assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy) # Setting a parameter to a quantity changes the value and the default unit g.amplitude.quantity = 5 * u.mJy assert g.amplitude.value == 5 assert g.amplitude.unit is u.mJy # And we can also set the parameter to a value with different units g.amplitude.quantity = 4 * u.s assert g.amplitude.value == 4 assert g.amplitude.unit is u.s # But not to a value without units with pytest.raises(TypeError) as exc: g.amplitude.quantity = 3 assert exc.value.args[0] == "The .quantity attribute should be set to a Quantity object" def test_parameter_default_units_match(): # If the unit and default quantity units are different, raise an error with pytest.raises(ParameterDefinitionError) as exc: class TestC(Fittable1DModel): a = Parameter(default=1.0 * u.m, unit=u.Jy) assert exc.value.args[0] == ("parameter default 1.0 m does not have units " "equivalent to the required unit Jy") @pytest.mark.parametrize(('unit', 'default'), ((u.m, 1.0), (None, 1 * u.m))) def test_parameter_defaults(unit, default): """ Test that default quantities are correctly taken into account """ class TestModel(BaseTestModel): a = Parameter(default=default, unit=unit) # TODO: decide whether the default property should return a value or # a quantity? # The default unit and value should be set on the class assert TestModel.a.unit == u.m assert TestModel.a.default == 1.0 # Check that the default unit and value are also set on a class instance m = TestModel() assert m.a.unit == u.m assert m.a.default == m.a.value == 1.0 # If the parameter is set to a different value, the default is still the # internal default m = TestModel(2.0 * u.m) assert m.a.unit == u.m assert m.a.value == 2.0 assert m.a.default == 1.0 # Instantiate with a different, but compatible unit m = TestModel(2.0 * u.pc) assert m.a.unit == u.pc assert m.a.value == 2.0 # The default is still in the original units # TODO: but how do we know what those units are if we don't return a # quantity? assert m.a.default == 1.0 # Initialize with a completely different unit m = TestModel(2.0 * u.Jy) assert m.a.unit == u.Jy assert m.a.value == 2.0 # TODO: this illustrates why the default doesn't make sense anymore assert m.a.default == 1.0 # Instantiating with different units works, and just replaces the original unit with pytest.raises(InputParameterError) as exc: TestModel(1.0) assert exc.value.args[0] == ("TestModel.__init__() requires a " "Quantity for parameter 'a'") def test_parameter_quantity_arithmetic(): """ Test that arithmetic operations with properties that have units return the appropriate Quantities. """ g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m) # Addition should work if units are compatible assert g.mean + (1 * u.m) == 2 * u.m assert (1 * u.m) + g.mean == 2 * u.m # Multiplication by a scalar should also preserve the quantity-ness assert g.mean * 2 == (2 * u.m) assert 2 * g.mean == (2 * u.m) # Multiplication by a quantity should result in units being multiplied assert g.mean * (2 * u.m) == (2 * (u.m ** 2)) assert (2 * u.m) * g.mean == (2 * (u.m ** 2)) # Negation should work properly too assert -g.mean == (-1 * u.m) assert abs(-g.mean) == g.mean # However, addition of a quantity + scalar should not work with pytest.raises(UnitsError) as exc: g.mean + 1 assert exc.value.args[0] == ("Can only apply 'add' function to " "dimensionless quantities when other argument " "is not a quantity (unless the latter is all " "zero/infinity/nan)") with pytest.raises(UnitsError) as exc: 1 + g.mean assert exc.value.args[0] == ("Can only apply 'add' function to " "dimensionless quantities when other argument " "is not a quantity (unless the latter is all " "zero/infinity/nan)") def test_parameter_quantity_comparison(): """ Basic test of comparison operations on properties with units. """ g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m) # Essentially here we are checking that parameters behave like Quantity assert g.mean == 1 * u.m assert 1 * u.m == g.mean assert g.mean != 1 assert 1 != g.mean assert g.mean < 2 * u.m assert 2 * u.m > g.mean with pytest.raises(UnitsError) as exc: g.mean < 2 assert exc.value.args[0] == ("Can only apply 'less' function to " "dimensionless quantities when other argument " "is not a quantity (unless the latter is all " "zero/infinity/nan)") with pytest.raises(UnitsError) as exc: 2 > g.mean assert exc.value.args[0] == ("Can only apply 'less' function to " "dimensionless quantities when other argument " "is not a quantity (unless the latter is all " "zero/infinity/nan)") g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m) assert np.all(g.mean == [1, 2] * u.m) assert np.all([1, 2] * u.m == g.mean) assert np.all(g.mean != [1, 2]) assert np.all([1, 2] != g.mean) with pytest.raises(UnitsError) as exc: g.mean < [3, 4] assert exc.value.args[0] == ("Can only apply 'less' function to " "dimensionless quantities when other argument " "is not a quantity (unless the latter is all " "zero/infinity/nan)") with pytest.raises(UnitsError) as exc: [3, 4] > g.mean assert exc.value.args[0] == ("Can only apply 'less' function to " "dimensionless quantities when other argument " "is not a quantity (unless the latter is all " "zero/infinity/nan)") def test_parameters_compound_models(): tan = Pix2Sky_TAN() sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg) lon_pole = 180 * u.deg n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole) rot = Rotation2D(23) m = rot | n2c
{"hexsha": "ab4ebe8fae9fbc1df4df1c19fec308552ec239b8", "size": 11909, "ext": "py", "lang": "Python", "max_stars_repo_path": "astropy/modeling/tests/test_quantities_parameters.py", "max_stars_repo_name": "jbkalmbach/astropy", "max_stars_repo_head_hexsha": "88ae8c615533efd1e60de4aded204943f66f881c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-02T17:07:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T17:07:20.000Z", "max_issues_repo_path": "astropy/modeling/tests/test_quantities_parameters.py", "max_issues_repo_name": "jbkalmbach/astropy", "max_issues_repo_head_hexsha": "88ae8c615533efd1e60de4aded204943f66f881c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2017-12-18T16:27:29.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-29T14:54:22.000Z", "max_forks_repo_path": "astropy/modeling/tests/test_quantities_parameters.py", "max_forks_repo_name": "jbkalmbach/astropy", "max_forks_repo_head_hexsha": "88ae8c615533efd1e60de4aded204943f66f881c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-02T09:33:21.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-02T09:33:21.000Z", "avg_line_length": 34.9237536657, "max_line_length": 92, "alphanum_fraction": 0.6154169116, "include": true, "reason": "import numpy", "num_tokens": 3021}
__author__ = "Tomasz Rybotycki" """ A script containing some utilities that could be used in general QC simulaitons. """ # TODO TR: Consider releasing this file as a separate package. from typing import List, Union from numpy import ( abs, linalg, log, ndarray, sqrt, pi, exp, asarray, tile, power, diag, dot, ) from numpy.random import randn def count_total_variation_distance( distribution1: Union[List[float], ndarray], distribution2: Union[List[float], ndarray], ) -> float: """ This method calculates total variation distance between two given distributions. :param distribution1: First distribution. :param distribution2: Second distribution. :return: Total variation distance between two given distributions. """ assert len(distribution1) == len(distribution2), ( " \ f" f"Distributions must have equal lengths! Got: {len(distribution1)}" f"and {len(distribution2)}!" ) total_variation_distance = 0 for i in range(len(distribution1)): total_variation_distance += abs(distribution1[i] - distribution2[i]) return total_variation_distance / 2 def count_distance_between_matrices(matrix1: ndarray, matrix2: ndarray) -> float: """ Calculates distance between two given matrices. This method assumes, that the matrices have proper sizes. :param matrix1: First matrix. :param matrix2: Second matrix. :return: Distance between two given matrices. """ return linalg.norm(matrix1 - matrix2) def count_tv_distance_error_bound_of_experiment_results( outcomes_number: int, samples_number: int, error_probability: float ) -> float: """ Calculates the distance bound between the experimental results and the n-sample estimation of these results. In case of large outcomes numbers one should consider solutions given here: https://math.stackexchange.com/questions/2696344/is-there-a-way-to-find-the-log-of-very-large-numbers In the method formally I should compute for prime_factor in prime_factors_of_the_large_number: error_bound += log(prime_factor) where the large_number = 2 ** outcomes_number - 2. However, by simply approximating large_number = 2 ** outcomes_number I can do the same with just error_bound += log(2) * outcomes_number without wasting a lot of time for calculating the prime factors or the number. :param outcomes_number: :param samples_number: Number of samples used for estimation. :param error_probability: Desired probability of error. :return: Bound on the tv distance between the estimate and the experimental results. """ error_bound = -log(error_probability) error_bound += outcomes_number * log(2) # APPROXIMATION! error_bound /= 2 * samples_number return sqrt(error_bound) def get_prime_factors(number: int) -> List[int]: prime_factors = [] while number % 2 == 0: prime_factors.append(2) number = number / 2 for i in range(3, int(sqrt(number)) + 1, 2): while number % i == 0: prime_factors.append(i) number = number / i prime_factors.append(number) return prime_factors def compute_minimal_number_of_samples_for_desired_accuracy( outcomes_number: int, error_probability: float, expected_distance: float ) -> int: samples_number = -log(error_probability) samples_number += log(2) * outcomes_number samples_number /= 2 * pow(expected_distance, 2) return int(samples_number) + 1 def compute_qft_matrix(n: int) -> ndarray: """ Computes n x n matrix of quantum fourier transform. The formula can be found e.g. on wiki https://en.wikipedia.org/wiki/Quantum_Fourier_transform :param n: Dimension of the array. :return: n x n ndarray of qft. """ if n == 0: return asarray([]) omega = exp(2j * pi / n) horizontal_range = tile(range(n), n).reshape(n, n) vertical_range = horizontal_range.transpose() full_range = horizontal_range * vertical_range qft_matrix = power(omega, full_range) / sqrt(n) return qft_matrix
{"hexsha": "93d4c9b4480c00dfb1f4477f97573c5986bcc40f", "size": 4293, "ext": "py", "lang": "Python", "max_stars_repo_path": "theboss/quantum_computations_utilities.py", "max_stars_repo_name": "Tomev/BoSS", "max_stars_repo_head_hexsha": "45db090345650741c85b39b47cbc7b391d6daa33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "theboss/quantum_computations_utilities.py", "max_issues_repo_name": "Tomev/BoSS", "max_issues_repo_head_hexsha": "45db090345650741c85b39b47cbc7b391d6daa33", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-07-10T00:28:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-12T11:51:38.000Z", "max_forks_repo_path": "theboss/quantum_computations_utilities.py", "max_forks_repo_name": "Tomev/BoSS", "max_forks_repo_head_hexsha": "45db090345650741c85b39b47cbc7b391d6daa33", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-12T15:35:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-12T15:35:09.000Z", "avg_line_length": 28.8120805369, "max_line_length": 109, "alphanum_fraction": 0.6780805963, "include": true, "reason": "from numpy", "num_tokens": 964}
import numpy as np # convert block format to linkedlist # adjacent format graph def block2adja(box, size): ops = [(1, 0), (-1, 0), (0, 1), (0, -1)] height, width = size vertices = {i: [] for i in range(width * height)} for i in range(width): for j in range(height): vertex = (j * width) + i if box[i, j] != 10: for op in ops: x = i + op[0] y = j + op[1] vertexb = (y * width) + x if 0 <= x < width and 0 <= y < height and box[x, y] != 10: vertices[vertex].append((vertexb, 1)) return vertices class Graph: BLOCK = 10 NON_BLOCK = 1 def __init__(self, box) -> None: self.size = (len(box), len(box[0])) self.vertices = block2adja(box, self.size)
{"hexsha": "21b67539bb98eb3ad413de47d22599b83946dd64", "size": 851, "ext": "py", "lang": "Python", "max_stars_repo_path": "graph/graph.py", "max_stars_repo_name": "HackerTon/astarviz", "max_stars_repo_head_hexsha": "ff63159206fc9f27374862ab29e010e82ce69369", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graph/graph.py", "max_issues_repo_name": "HackerTon/astarviz", "max_issues_repo_head_hexsha": "ff63159206fc9f27374862ab29e010e82ce69369", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph/graph.py", "max_forks_repo_name": "HackerTon/astarviz", "max_forks_repo_head_hexsha": "ff63159206fc9f27374862ab29e010e82ce69369", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7878787879, "max_line_length": 78, "alphanum_fraction": 0.4759106933, "include": true, "reason": "import numpy", "num_tokens": 243}
import networkx as nx import numpy as np import matplotlib.pyplot as plt import pandas as pd import pickle import argparse import pathlib as path layouts = { "circular": nx.circular_layout, "kamada_kawai": nx.kamada_kawai_layout, "random": nx.random_layout, "shell": nx.shell_layout, "spring": nx.spring_layout, "spectral": nx.spectral_layout, "fruchterman_reingold": nx.fruchterman_reingold_layout, "spiral": nx.spiral_layout} def graph_plotter(A: np.ndarray, **draw_args): g = nx.convert_matrix.from_numpy_matrix(A) nx.draw(g, node_size=50, **draw_args) plt.show() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pickled_files', type=str, default=None, help='pems_bay or metr_la') parser.add_argument('--seattle_data', type=bool, action='store_true', help='to use Seattle data') parser.add_argument('--layout', type=str, default=None, help='graph layout: circular, amada_kawai, random_layout,' 'shell, spring, spectral, fruchterman_reingold') args = parser.parse_args() if args.pickled_files: place = args.pickled_files place_path = path.Path("../data")/place with open(place_path/f"adj_mx_{place.split('_')[1]}.pkl", "rb") as f: sensor_ids, sensor_id_to_ind, A = pickle.load(f, encoding='latin-1') pos = np.load(place_path/f'pos_{place.split("_")[1]}.npy') graph_plotter(A, pos=pos) elif args.seattle_data: A = np.load("Seattle_Loop_Dataset/Loop_Seattle_2015_A.npy") mp = pd.read_csv("Seattle_Loop_Dataset/nodes_loop_mp_list.csv")["milepost"] mp = list(map(lambda entry: entry[1:4], mp)) col = list(map(lambda entry: 'purple' if entry=='090' else 'y' if entry=='405' else 'b' if entry=='520' else 'r', mp)) graph_plotter(A, node_color=col, layout=layouts[args.layout])
{"hexsha": "baf0a4e046160270ca10fb49de5fb305739a8050", "size": 1978, "ext": "py", "lang": "Python", "max_stars_repo_path": "graph_vis.py", "max_stars_repo_name": "hpi-sam/GNN-TiborMaxTiago", "max_stars_repo_head_hexsha": "986b3cf1e15328f6a03aa1e7f979b3435fc98910", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-12-17T07:43:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T01:30:46.000Z", "max_issues_repo_path": "graph_vis.py", "max_issues_repo_name": "hpi-sam/GNN-TiborMaxTiago", "max_issues_repo_head_hexsha": "986b3cf1e15328f6a03aa1e7f979b3435fc98910", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-27T15:18:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-27T15:18:37.000Z", "max_forks_repo_path": "graph_vis.py", "max_forks_repo_name": "hpi-sam/GNN-TiborMaxTiago", "max_forks_repo_head_hexsha": "986b3cf1e15328f6a03aa1e7f979b3435fc98910", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-11-27T09:38:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-13T11:18:42.000Z", "avg_line_length": 37.320754717, "max_line_length": 126, "alphanum_fraction": 0.6531850354, "include": true, "reason": "import numpy,import networkx", "num_tokens": 515}
[STATEMENT] lemma left_total_rel_resumption [transfer_rule]: "\<lbrakk> left_total R1; left_total R2 \<rbrakk> \<Longrightarrow> left_total (rel_resumption R1 R2)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>left_total R1; left_total R2\<rbrakk> \<Longrightarrow> left_total (Resumption.resumption.rel_resumption R1 R2) [PROOF STEP] by(simp only: left_total_alt_def resumption.rel_eq[symmetric] resumption.rel_conversep[symmetric] rel_resumption_OO resumption.rel_mono)
{"llama_tokens": 184, "file": "CryptHOL_Resumption", "length": 1}
import csv import numpy as np ############## Get Species Dictionary ################# filename = './data/alltrain.csv' species = {} LONG_KEY = 'long' LAT_KEY = 'lat' with open(filename) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: # print(f'Column names are {", ".join(row)}') line_count += 1 # elif line_count == 100: # print(species) # exit() else: if species.get(row[0]): # 二次以上遇到,增加物种 species[row[0]][LONG_KEY] = np.append(species[row[0]][LONG_KEY], row[1]) species[row[0]][LAT_KEY] = np.append(species[row[0]][LAT_KEY], row[2]) else: # 第一次遇到,创建新物种 species[row[0]] = { LONG_KEY: np.array([row[1]]), LAT_KEY: np.array([row[2]]) } # print(f'\tSepcies {row[0]} lives at ({row[1]}, {row[2]}).') line_count += 1 print(f'Processed {line_count} lines.') # print(species) ############## Get Species Dictionary ################# ############## Show species in a map ################## import matplotlib.pyplot as plt for animal in species.keys(): print(species[animal][LONG_KEY]) plt.scatter(species[animal][LONG_KEY], species[animal][LAT_KEY]) plt.show() ############## Show species in a map ##################
{"hexsha": "d93d559a8fed83414afd267aefde714b73e8a012", "size": 1470, "ext": "py", "lang": "Python", "max_stars_repo_path": "distribution.py", "max_stars_repo_name": "upupming/dragon", "max_stars_repo_head_hexsha": "245f71996004b386ae764eb8f76603233d8a6763", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-10T03:37:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-10T03:37:19.000Z", "max_issues_repo_path": "distribution.py", "max_issues_repo_name": "upupming/dragon", "max_issues_repo_head_hexsha": "245f71996004b386ae764eb8f76603233d8a6763", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "distribution.py", "max_forks_repo_name": "upupming/dragon", "max_forks_repo_head_hexsha": "245f71996004b386ae764eb8f76603233d8a6763", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0, "max_line_length": 88, "alphanum_fraction": 0.4993197279, "include": true, "reason": "import numpy", "num_tokens": 368}
module activations implicit none contains pure function sigmoid(z) double precision, intent(in) :: z(:) double precision, dimension(size(z)) :: sigmoid sigmoid = 1.0 / (1.0 + exp(-z)) end function sigmoid pure function sigmoid_prime(z) double precision, intent(in) :: z(:) double precision, dimension(size(z)) :: sigmoid_prime sigmoid_prime = sigmoid(z) * (1 - sigmoid(z)) end function sigmoid_prime ! maybe we will move this to another module later function cost(output, target) double precision, intent(in) :: output(:) double precision, intent(in) :: target(:) double precision :: cost cost = 0.5 * sum((target - output)**2) end function cost end module activations
{"hexsha": "325f7b1c0f9719926d4f24522dd314406e503c3d", "size": 687, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/activations.f90", "max_stars_repo_name": "alexmconn/dl4TRAN", "max_stars_repo_head_hexsha": "4fc0ccfa3e738615a9037061ac775ef7bf87ad5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/activations.f90", "max_issues_repo_name": "alexmconn/dl4TRAN", "max_issues_repo_head_hexsha": "4fc0ccfa3e738615a9037061ac775ef7bf87ad5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/activations.f90", "max_forks_repo_name": "alexmconn/dl4TRAN", "max_forks_repo_head_hexsha": "4fc0ccfa3e738615a9037061ac775ef7bf87ad5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9, "max_line_length": 54, "alphanum_fraction": 0.7176128093, "num_tokens": 165}
#pragma once #include <boost/dynamic_bitset.hpp> #include "../Variable.hpp" #include "Counter.hpp" namespace mist { namespace it { using Bitset = boost::dynamic_bitset<unsigned long long>; using BitsetVariable = std::vector<Bitset>; using BitsetTable = std::vector<BitsetVariable>; /** Generates a ProbabilityDistribution from a Variable tuple. * * Recasts each Variable as an array of bitsets, one for each bin value. * Computes the ProbabilityDistribution using bitwise AND operation and bit * counting algorithm. */ class BitsetCounter : public Counter { public: BitsetCounter(Variable::tuple const& all_vars); ~BitsetCounter(){}; void count(Variable const&, Distribution&); void count(Variable::tuple const&, Distribution&); void count(Variable::tuple const&, Variable::indexes const&, Distribution&); private: BitsetTable bits; }; class BitsetCounterOutOfRange : public std::out_of_range { public: BitsetCounterOutOfRange(std::string const& method, int index, int max) : out_of_range("BitsetCounter::" + method + " : Variable index " + std::to_string(index) + " out of bitset table range, valid range [0," + std::to_string(max) + "]"){}; }; } // it } // mist
{"hexsha": "958e1a0d31b8bfcfea7f8586e8e15a4c3571ebe0", "size": 1253, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/mist/it/BitsetCounter.hpp", "max_stars_repo_name": "andbanman/mist", "max_stars_repo_head_hexsha": "2546fb41bccea1f89a43dbdbed7ce3a257926b54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/mist/it/BitsetCounter.hpp", "max_issues_repo_name": "andbanman/mist", "max_issues_repo_head_hexsha": "2546fb41bccea1f89a43dbdbed7ce3a257926b54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2021-03-30T21:40:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-08T18:54:34.000Z", "max_forks_repo_path": "include/mist/it/BitsetCounter.hpp", "max_forks_repo_name": "andbanman/mist", "max_forks_repo_head_hexsha": "2546fb41bccea1f89a43dbdbed7ce3a257926b54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6595744681, "max_line_length": 78, "alphanum_fraction": 0.6967278532, "num_tokens": 285}
#include "gtest/gtest.h" #include <boost/bimap.hpp> #include "opencv2/opencv.hpp" #include "utils/utils.hpp" #include <unordered_map> using namespace cv; using namespace std; Mat formatTransformationMat2(const Mat transformation_matrix) { cv::Mat m = cv::Mat::ones(2, 3, CV_64F); m.at<double>(0, 0) = transformation_matrix.at<double>(0, 0); m.at<double>(0, 1) = transformation_matrix.at<double>(0, 1); m.at<double>(0, 2) = transformation_matrix.at<double>(0, 2); m.at<double>(1, 0) = transformation_matrix.at<double>(1, 0); m.at<double>(1, 1) = transformation_matrix.at<double>(1, 1); m.at<double>(1, 2) = transformation_matrix.at<double>(1, 2); return m; } void runTheTest() { // //load the image // //apply a big transformation to it, resize the image if you have to // //create a getKeyPointsFunction you can call from the c++ side // // double rotation = 0; // double scale = 1; // Mat inputImage = cv::imread("./input/lennaWithGreenDots.jpg"); // for (int j = 0; j < 4; j++){ // for (int i = 0; i<360; i+= 1){ // Mat transformationMartix; // Size newImageSize; // tie(transformationMartix, newImageSize) = calcTransformationMatrix(inputImage.size(), rotation+i, scale*(j+1)); // cout << "Size: " << newImageSize << endl; // cout << "The output mat: " << endl; // cout << Mat(transformationMartix) << endl; // // Mat outputImage(newImageSize.height, newImageSize.width, CV_8UC3, Scalar(0,0,0)); // warpAffine(inputImage, outputImage, formatTransformationMat2(transformationMartix), outputImage.size()); // imshow("output", outputImage); // waitKey(1); // } // } // waitKey(); //// //Then apply it //// //Then get the keypoints for both images //// //Then calc the transformation matrix and project both keypoints to both... //// //Then calc all the stats... } TEST(accuracyTest, basic) { // runTheTest(); } TEST(utilsTest, getTheKeypointsTest) { // Mat inputImage = cv::imread("./input/rick1.jpg"); // auto keypoints = getKeypoints(inputImage); // drawKeypoints(keypoints, inputImage); // cv::imshow("here...", inputImage); // cv::waitKey(); // cout << "Number of keypoints: " << keypoints.size() << endl; } TEST(utilsTest, fullKeypointTest) { // Mat inputImage = cv::imread("./input/rick1.jpg"); // auto keypointsForImageOne = getKeypoints(inputImage); // convertKeypointsVectorToMat } TEST(utilsTest, testingTheConvertingOfKeypoints) { // Mat inputImage = cv::imread("./input/rick1.jpg"); // auto keypointsForImageOne = getKeypoints(inputImage); // Mat res = convertKeypointsVectorToMat(keypointsForImageOne); // //Mat res = cv::Mat::zeros(1,1, 1); // cout << "the resultant matrix" << endl; // cout << res << endl; // applyTransformationMatrixToKeypointVector(keypointsForImageOne, ); } typedef std::unordered_map<Keypoint, Keypoint> MatchingKeypointMap; MatchingKeypointMap getMatchingKeypointsTwoWayMap(vector<Keypoint> image1Keypoints, vector<Keypoint> image2Keypoints, cv::Mat transformationMatFromImage1To2) { MatchingKeypointMap result; vector<Keypoint> oneToTwo = applyTransformationMatrixToKeypointVector(image1Keypoints, transformationMatFromImage1To2); double threshold = 2.0; for (auto keypointFromImage2 : image2Keypoints) { //check the dist for (unsigned int i = 0; i < oneToTwo.size(); i++) { if (getKeypointDistance(keypointFromImage2, oneToTwo[i]) < threshold) { auto keyPointFromImage1 = image1Keypoints[i]; result.insert({keyPointFromImage1, keypointFromImage2}); } } } return result; } typedef std::unordered_map<Triangle, Triangle> MatchingTriangleMap; tuple<vector<Keypoint>, vector<Keypoint>> splitKeypointMap(MatchingKeypointMap map){ vector<Keypoint> kp1; vector<Keypoint> kp2; for (auto entry: map) { kp1.push_back(entry.first); kp2.push_back(entry.second); } return tuple<vector<Keypoint>, vector<Keypoint>>(kp1, kp2); } Triangle convertTriangleUsingMap(Triangle triangle, MatchingKeypointMap oneToTwo) { vector<Keypoint> result; for (auto kp: triangle.toKeypoints()) { result.push_back(oneToTwo[kp]); } return result; } MatchingTriangleMap buildMatchingTriMap(vector<Triangle> tris1, vector<Triangle> tris2, MatchingKeypointMap unorderedMap) { MatchingTriangleMap result; for (auto tri1: tris1) { for (auto tri2: tris2) { Triangle tri1Convert = convertTriangleUsingMap(tri1, unorderedMap); if (tri1Convert == tri2) { result.insert({tri1, tri2}); break; } } } return result; } MatchingTriangleMap getMatchingTriangleTwoWayMap(MatchingKeypointMap oneToTwo) { vector<Keypoint> image1Kp; vector<Keypoint> image2Kp; tie(image1Kp, image2Kp) = splitKeypointMap(oneToTwo); auto tris1 = buildTrianglesFromKeypoints(image1Kp); auto tris2 = buildTrianglesFromKeypoints(image2Kp); return buildMatchingTriMap(tris1, tris2, oneToTwo); } unsigned long xorshf962(void) { //period 2^96-1 unsigned long t; x ^= x << 16; x ^= x >> 5; x ^= x << 1; t = x; x = y; y = z; z = t ^ x ^ y; return z; } void dumpMathcingKeypointTestingInfoToJson(unsigned int count, unsigned int totalCount, unsigned int cout, MatchingKeypointMap map, unsigned int imagesCount, unsigned int image1Count, unsigned int image2Count, unsigned int trianglesTotalCount, unsigned int image2TrianglesTotalCount) { //dump it!!! } TEST(utilsTest, testingTheConvertingOfKeypoints2) { double rotation = 45; double scale = 2; Mat inputImage = cv::imread("./input/lennaWithGreenDots.jpg"); Mat transformationMartix; Size newImageSize; tie(transformationMartix, newImageSize) = calcTransformationMatrix(inputImage.size(), rotation, scale); Mat outputImage(newImageSize.height, newImageSize.width, CV_8UC3, Scalar(0,0,0)); warpAffine(inputImage, outputImage, formatTransformationMat2(transformationMartix), outputImage.size()); auto keypointsImage1 = getKeypoints(inputImage); auto keypointsImage2 = getKeypoints(outputImage); vector<Keypoint> oneToTwo = applyTransformationMatrixToKeypointVector(keypointsImage1, transformationMartix); vector<Keypoint> twoToOne = applyTransformationMatrixToKeypointVector(keypointsImage2, transformationMartix.inv()); drawKeypoints(keypointsImage1, inputImage); drawKeypoints(twoToOne, inputImage, cv::Scalar(0,255,0)); drawKeypoints(keypointsImage2, outputImage); drawKeypoints(oneToTwo, outputImage, cv::Scalar(0,255,0)); auto tempMap = getMatchingKeypointsTwoWayMap(keypointsImage1, keypointsImage2, transformationMartix); cout << "Number of matching keypints: " << tempMap.size() << endl; cout << "Number of keypoints in image1: " << keypointsImage1.size() << " image2: " << keypointsImage2.size() << endl; cout << "average: " << ((keypointsImage1.size() + keypointsImage2.size())/2) << endl; cout << "%average: " << 100.0*(float)(tempMap.size())/(float)((keypointsImage1.size() + keypointsImage2.size())/2) << "%" << endl; for (auto entry : tempMap) { drawSingleKeypoint(entry.first, inputImage, cv::Scalar(255,0,0)); drawSingleKeypoint(entry.second, outputImage, cv::Scalar(255,0,0)); } cv::imwrite("image1_keypoints.jpg", inputImage); cv::imwrite("image2_keypoints.jpg", outputImage); MatchingTriangleMap tempTriangleMap = getMatchingTriangleTwoWayMap(tempMap); auto allTrisImage1 = buildTrianglesFromKeypoints(keypointsImage1); auto allTrisImage2 = buildTrianglesFromKeypoints(keypointsImage2); vector<Keypoint> image1Kp; vector<Keypoint> image2Kp; tie(image1Kp, image2Kp) = splitKeypointMap(tempMap); auto tris1 = buildTrianglesFromKeypoints(image1Kp); auto tris2 = buildTrianglesFromKeypoints(image2Kp); cout << "Number of matching Triangles: " << tempTriangleMap.size() << endl; cout << "Number of Triangles made of matching points in image1: " << tris1.size() << " image2: " << tris2.size() << endl; cout << "average: " << ((tris1.size() + tris2.size())/2) << endl; cout << "%average: " << 100.0*(float)(tempTriangleMap.size())/(float)((tris1.size() + tris2.size())/2) << "%" << endl; cout << "Number of Triangles in image1: " << allTrisImage1.size() << " image2: " << allTrisImage2.size() << endl; cout << "average: " << ((allTrisImage1.size() + allTrisImage2.size())/2) << endl; cout << "%average: " << 100.0*(float)(tempTriangleMap.size())/(float)((allTrisImage1.size() + allTrisImage2.size())/2) << "%" << endl; for (auto entry: tempTriangleMap) { bool setColour = true; int r = (int) xorshf962(); int g = (int) xorshf962(); int b = (int) xorshf962(); cv::Scalar colour(b,g,r); drawSingleTriangleOntoImage(entry.first, inputImage, setColour, colour); drawSingleTriangleOntoImage(entry.second, outputImage, setColour, colour); } //keypoints unsigned int matchingKeypointsCount = tempMap.size(); unsigned int image1KeypointsTotalCount = keypointsImage1.size(); unsigned int image2KeypointsTotalCout = keypointsImage2.size(); auto matchingKeypoints = tempMap; //triangles unsigned int matchingTrianglesInBothImagesCount = tempTriangleMap.size(); unsigned int trianglesMadeOfMatchingKeypointsInImage1Count = tris1.size(); unsigned int trianglesMadeOfMatchingKeypointsInImage2Count = tris2.size(); unsigned int image1TrianglesTotalCount = allTrisImage1.size(); unsigned int image2TrianglesTotalCount = allTrisImage2.size(); auto matchingTriangles = tempTriangleMap; dumpMathcingKeypointTestingInfoToJson(matchingKeypointsCount, image1KeypointsTotalCount, image2KeypointsTotalCout, matchingKeypoints, matchingTrianglesInBothImagesCount, trianglesMadeOfMatchingKeypointsInImage1Count, trianglesMadeOfMatchingKeypointsInImage2Count, image1TrianglesTotalCount, image2TrianglesTotalCount ); cv::imwrite("image1_triangles.jpg", inputImage); cv::imwrite("image2_triangles.jpg", outputImage); // cv::waitKey(); }
{"hexsha": "55a3a8161a62fc4405123851f213b3a66905c20a", "size": 10686, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "c_src/test/fullKeypointAccuracyTest.cpp", "max_stars_repo_name": "pippy360/imageHashTesting", "max_stars_repo_head_hexsha": "230aa5adfda4e984475261e0d33453f4d9644f96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-10-11T19:37:01.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-11T19:37:01.000Z", "max_issues_repo_path": "c_src/test/fullKeypointAccuracyTest.cpp", "max_issues_repo_name": "pippy360/imageHashTesting", "max_issues_repo_head_hexsha": "230aa5adfda4e984475261e0d33453f4d9644f96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "c_src/test/fullKeypointAccuracyTest.cpp", "max_forks_repo_name": "pippy360/imageHashTesting", "max_forks_repo_head_hexsha": "230aa5adfda4e984475261e0d33453f4d9644f96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-11-28T01:38:49.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-28T01:38:49.000Z", "avg_line_length": 33.6037735849, "max_line_length": 157, "alphanum_fraction": 0.6673217294, "num_tokens": 2878}
[STATEMENT] lemma lemma_LIMSEQ_powrat_diff_inverse: assumes "1 \<le> a" and "(\<lambda>n. a pow\<^sub>\<rat> (s n))\<longlonglongrightarrow> y" shows "(\<lambda>n. a pow\<^sub>\<rat> (s n - 1/of_nat(Suc n))) \<longlonglongrightarrow> y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> (s n - 1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> (s n - 1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] have "(\<lambda>x. a pow\<^sub>\<rat> (1 / rat_of_nat (Suc x))) \<longlonglongrightarrow> 1" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<lambda>x. a pow\<^sub>\<rat> (1 / rat_of_nat (Suc x))) \<longlonglongrightarrow> 1 [PROOF STEP] using assms(1) LIMSEQ_powrat_inverse_of_nat [PROOF STATE] proof (prove) using this: 1 \<le> a 0 < ?a \<Longrightarrow> (\<lambda>n. ?a pow\<^sub>\<rat> inverse (rat_of_nat n)) \<longlonglongrightarrow> 1 goal (1 subgoal): 1. (\<lambda>x. a pow\<^sub>\<rat> (1 / rat_of_nat (Suc x))) \<longlonglongrightarrow> 1 [PROOF STEP] by (auto intro!: LIMSEQ_Suc simp only: divide_inverse mult_1_left) [PROOF STATE] proof (state) this: (\<lambda>x. a pow\<^sub>\<rat> (1 / rat_of_nat (Suc x))) \<longlonglongrightarrow> 1 goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> (s n - 1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] then [PROOF STATE] proof (chain) picking this: (\<lambda>x. a pow\<^sub>\<rat> (1 / rat_of_nat (Suc x))) \<longlonglongrightarrow> 1 [PROOF STEP] have " (\<lambda>n. a pow\<^sub>\<rat> s n / a pow\<^sub>\<rat> (1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y" [PROOF STATE] proof (prove) using this: (\<lambda>x. a pow\<^sub>\<rat> (1 / rat_of_nat (Suc x))) \<longlonglongrightarrow> 1 goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> s n / a pow\<^sub>\<rat> (1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] using assms(2) tendsto_divide [PROOF STATE] proof (prove) using this: (\<lambda>x. a pow\<^sub>\<rat> (1 / rat_of_nat (Suc x))) \<longlonglongrightarrow> 1 (\<lambda>n. a pow\<^sub>\<rat> s n) \<longlonglongrightarrow> y \<lbrakk>(?f \<longlongrightarrow> ?a) ?F; (?g \<longlongrightarrow> ?b) ?F; ?b \<noteq> (0::?'a)\<rbrakk> \<Longrightarrow> ((\<lambda>x. ?f x / ?g x) \<longlongrightarrow> ?a / ?b) ?F goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> s n / a pow\<^sub>\<rat> (1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: (\<lambda>n. a pow\<^sub>\<rat> s n / a pow\<^sub>\<rat> (1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> (s n - 1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] then [PROOF STATE] proof (chain) picking this: (\<lambda>n. a pow\<^sub>\<rat> s n / a pow\<^sub>\<rat> (1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: (\<lambda>n. a pow\<^sub>\<rat> s n / a pow\<^sub>\<rat> (1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> (s n - 1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] using powrat_diff assms(1) [PROOF STATE] proof (prove) using this: (\<lambda>n. a pow\<^sub>\<rat> s n / a pow\<^sub>\<rat> (1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y 0 < ?x \<Longrightarrow> ?x pow\<^sub>\<rat> (?a - ?b) = ?x pow\<^sub>\<rat> ?a / ?x pow\<^sub>\<rat> ?b 1 \<le> a goal (1 subgoal): 1. (\<lambda>n. a pow\<^sub>\<rat> (s n - 1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y [PROOF STEP] by auto [PROOF STATE] proof (state) this: (\<lambda>n. a pow\<^sub>\<rat> (s n - 1 / rat_of_nat (Suc n))) \<longlonglongrightarrow> y goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1599, "file": "Real_Power_RealPower", "length": 13}
cat("Hello world!")
{"hexsha": "b08f00cde113899c1e40bf9fbdcf9ee34f097ca7", "size": 19, "ext": "r", "lang": "R", "max_stars_repo_path": "hello-world.r", "max_stars_repo_name": "In-All-Programming-Languages/helloWorld-in-all-programming-languages", "max_stars_repo_head_hexsha": "469939963e436635b9556267b9888743835a1914", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-31T21:35:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:35:45.000Z", "max_issues_repo_path": "hello-world.r", "max_issues_repo_name": "In-All-Programming-Languages/helloWorld-in-all-programming-languages", "max_issues_repo_head_hexsha": "469939963e436635b9556267b9888743835a1914", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hello-world.r", "max_forks_repo_name": "In-All-Programming-Languages/helloWorld-in-all-programming-languages", "max_forks_repo_head_hexsha": "469939963e436635b9556267b9888743835a1914", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0, "max_line_length": 19, "alphanum_fraction": 0.6842105263, "num_tokens": 5}
##################################################################### ##### IMPORT STANDARD MODULES ##################################################################### from __future__ import print_function from ..data import DataBlock from ..preprocess import PreProcess import pandas as pd import numpy as np from sklearn.datasets import load_iris from random import sample ##################################################################### ##### TESTS FOR DATABLOCK ##################################################################### def test_datablock(datablock): assert datablock.train.shape == (150, 5) assert datablock.test.shape == (150, 5) assert datablock.predict.shape == (150, 5) ##################################################################### ##### TESTS FOR PREPROCESS ##################################################################### def test_check_missing_no_missing(datablock): pp = PreProcess(datablock) result = pp.check_missing(printResult=False,returnResult=True) for df,miss in result.items(): print(df,miss) assert miss.sum()==0 def test_check_missing_missing_induced(datablock): df = pd.DataFrame(datablock.train,copy=True) pp = PreProcess(DataBlock(df,df,df,'target')) num_miss=25 for data in pp.datablock.data_present().values(): data.iloc[sample(range(150),num_miss),0] = np.nan result = pp.check_missing(printResult=False,returnResult=True) for key,miss in result.items(): assert miss.sum()==num_miss
{"hexsha": "201c13266070803088b5eee33e10562fdb79a614", "size": 1475, "ext": "py", "lang": "Python", "max_stars_repo_path": "easyML/tests/test_preprocess.py", "max_stars_repo_name": "aarshayj/easyML", "max_stars_repo_head_hexsha": "d65d4776704c4e417374ff8fb0266b066da51757", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-01-16T22:02:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-18T10:42:08.000Z", "max_issues_repo_path": "easyML/tests/test_preprocess.py", "max_issues_repo_name": "YoungGod/easyML", "max_issues_repo_head_hexsha": "d65d4776704c4e417374ff8fb0266b066da51757", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "easyML/tests/test_preprocess.py", "max_forks_repo_name": "YoungGod/easyML", "max_forks_repo_head_hexsha": "d65d4776704c4e417374ff8fb0266b066da51757", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2017-01-22T13:15:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-20T20:01:41.000Z", "avg_line_length": 32.0652173913, "max_line_length": 69, "alphanum_fraction": 0.5430508475, "include": true, "reason": "import numpy", "num_tokens": 278}
#!/usr/bin/env python # -*- coding: utf-8 -*- import numpy as np import math ################################# # Linear algebra helpers # ################################# def LU_solve(b, LU): """Finds a solution to the linear system Ax=b with A factorized into LU.""" rhs = np.asarray(b) mat = np.asarray(LU) if (mat.shape[0] != mat.shape[1]): raise ValueError('Matrix must be square!') if (mat.shape[1] != rhs.shape[0]): raise ValueError('Matrix and Right-Side must be of same dimension!') size = rhs.shape # Solving Ly=rhs y = np.zeros(size) for i in range(size[0]): sum = 0 for k in range(i): sum += mat[i, k]*y[k] y[i] = rhs[i] - sum # Solving Ux=y x = np.zeros(size) for i in range(size[0]-1, -1, -1): sum = 0 for k in range(i+1, size[0]): sum += mat[i, k] * x[k] x[i] = (y[i] - sum) / mat[i, i] return x def LU_fac(A): """Factorization of A according to A=LU without pivoting. L is an upper-triangular matrix. U is an lower-triangular matrix. LU=L+U-I""" mat = np.asarray(A) if (mat.shape[0] != mat.shape[1]): raise ValueError('Matrix must be square!') n = len(mat[0]) LU = np.zeros((n, n)) # Factorization of mat for i in range(n): # Calculate ith row for j in range(i, n): sum = 0.0 for k in range(i): sum += LU[i, k]*LU[k, j] LU[i, j] = mat[i, j]-sum # Calculate ith column for j in range(i+1, n): sum = 0.0 for k in range(i): sum += LU[j, k]*LU[k, i] LU[j, i] = (mat[j, i]-sum)/LU[i, i] return LU
{"hexsha": "d6eb95a2082d2388999f65e42d1e5c2eedd90e6c", "size": 1755, "ext": "py", "lang": "Python", "max_stars_repo_path": "_algebra.py", "max_stars_repo_name": "NklasF/splcurve", "max_stars_repo_head_hexsha": "09505f8cd1ae97eaa9583cfd19f02b319547341f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "_algebra.py", "max_issues_repo_name": "NklasF/splcurve", "max_issues_repo_head_hexsha": "09505f8cd1ae97eaa9583cfd19f02b319547341f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "_algebra.py", "max_forks_repo_name": "NklasF/splcurve", "max_forks_repo_head_hexsha": "09505f8cd1ae97eaa9583cfd19f02b319547341f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5909090909, "max_line_length": 76, "alphanum_fraction": 0.4849002849, "include": true, "reason": "import numpy", "num_tokens": 514}
#include <boost/mpl/aux_/preprocessed/bcc/list.hpp>
{"hexsha": "2459ea5c1a50ab8d9c6c5f6640aa8e874575f980", "size": 52, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_mpl_aux__preprocessed_bcc_list.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_mpl_aux__preprocessed_bcc_list.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_mpl_aux__preprocessed_bcc_list.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 26.0, "max_line_length": 51, "alphanum_fraction": 0.7884615385, "num_tokens": 15}
import datetime from .data_factory import create_message_lines import pytest from ..parser import Message, Chat import numpy as np # parsing a single line into 3 objects: date, author and message def test_parse_line(): l = '27/04/2021, 08:18 - Kyle: Number gas poor nothing will statement.' # action message_date, message_author, message_content = Message.parse_line(l, day_first=True) # assert assert isinstance(message_date,datetime.datetime) assert len(message_author)>0 assert len(message_content)>0 def test_multiple_parse_line(): # arrange lines = create_message_lines(100) # 100 lines # act for l in lines: time, author, content = Message.parse_line(l, day_first=True) # assert assert isinstance(time,datetime.datetime) assert len(author)>0 assert len(content)>0 def test_is_new_message_happy(): # arr text = r'28/08/2020, 18:33 - John: Maybe going through them might be ok' res = Chat._is_new_message(text) assert res def test_is_new_message_happy_stringified_message(): # arr text = r'2020-08-28 18:33 - John: Maybe going through them might be ok' res = Chat._is_new_message(text) assert res def test_is_new_message_unhappy(): # arr text = r'date missing here - John: Maybe going through them might be ok' res = Chat._is_new_message(text) assert not res def test_build_messages_no_line_breaks(): lines = [ '28/08/2020, 18:33 - John: line 1', '28/08/2020, 18:33 - John: line 2', '28/08/2020, 18:33 - John: line 3', ] c = Chat(lines) assert len(c) == 3 for i, m in enumerate(c.messages): assert m.content == f'line {i+1}' def test_build_messages_with_line_breaks(): lines = [ '28/08/2020, 18:33 - John: line 1', 'continuation of line 1', '28/08/2020, 18:33 - John: line 2', ] c = Chat(lines) assert len(c) == 2 assert c.messages[0].content == 'line 1 continuation of line 1' assert c.messages[1].content == 'line 2' def test_tokenise(): # arrange m = Message( time=datetime.datetime.now(), author='test', content='words to tokenise' ) # act tokens = m.tokenise() # assert assert tokens == ['words','to','tokenise'] def test_tokenise_using_stemmer(): # arrange m = Message( time=datetime.datetime.now(), author='test', content='words to tokenise' ) # act tokens = m.tokenise(stem=True) # assert assert tokens == ['word','to','tokenis'] def test_to_df(): # arrange lines = [ '28/08/2020, 18:33 - John: line 1', '28/08/2020, 18:33 - John: line 2', '28/08/2020, 18:33 - John: line 3', ] c = Chat(lines) # act df = c.df # assert for t in df.time: assert t == datetime.datetime(2020,8,28,18,33) for a in df.author: assert a == 'John' for i, c in enumerate(df.content): assert c == f'line {i+1}' @pytest.fixture def word_list(): return ["beach", "sea", "ice cream", "bat"] def test_list_by_words_expected(word_list): # arrange lines = [ '28/08/2020, 18:33 - John: beach sea', '28/08/2020, 18:34 - Jack: bat', '28/08/2020, 18:35 - John: we beaching today?', ] c = Chat(lines) # act word_counter = c.get_authors_by_words(word_list) # assert assert word_counter['Jack'] == 1 assert word_counter['John'] == 3 def test_list_by_words_one_not_used(word_list): # arrange lines = [ '28/08/2020, 18:33 - John: sea bat', '28/08/2020, 18:34 - Jack: Im well spoken', '28/08/2020, 18:35 - John: we beaching today?', ] c = Chat(lines) # act word_counter = c.get_authors_by_words(word_list) # assert assert word_counter['Jack'] == 0 assert word_counter['John'] == 3 def test_list_by_words_none_present(word_list): # arrange lines = [ '28/08/2020, 18:33 - John: hello jack', '28/08/2020, 18:34 - Jack: Im well spoken', '28/08/2020, 18:35 - John: right you are!', ] c = Chat(lines) # act word_counter = c.get_authors_by_words(word_list) # assert assert word_counter['Jack'] == 0 assert word_counter['John'] == 0 def test_list_by_words_stem_variation(word_list): # arrange lines = [ '28/08/2020, 18:33 - John: beacher beaching beached', '28/08/2020, 18:34 - Jack: sea seas', ] c = Chat(lines) # act word_counter = c.get_authors_by_words(word_list) # assert assert word_counter['John'] == 3 assert word_counter['Jack'] == 2 def test_get_contributors(): # arrange lines = [ '28/08/2020, 18:33 - John: hello jack old pal', '28/08/2020, 18:34 - Jack: sup mate', '28/08/2020, 18:35 - Gerald: what about me', '28/08/2020, 18:35 - John: what about you?', '28/08/2020, 18:35 - John: lols', '28/08/2020, 18:36 - Jack: looool', ] c = Chat(lines) # act counter = c.get_contributions_by_author() # assert assert counter['John'] == 3 assert counter['Jack'] == 2 assert counter['Gerald'] == 1 def test_get_author_total_content_length(): # arrange lines = [ '28/08/2020, 18:33 - John: hello jack old pal', '28/08/2020, 18:34 - Jack: sup mate', '28/08/2020, 18:35 - Gerald: what about me', '28/08/2020, 18:35 - John: what about you?', '28/08/2020, 18:35 - John: lols', '28/08/2020, 18:36 - Jack: looool', ] # act c = Chat(lines) counter = c.get_authors_by_verbosity() #assert assert counter['John'] == 37 assert counter['Jack'] == 14 assert counter['Gerald'] == 13 def test_get_author_average_content_length(): # arrange lines = [ '28/08/2020, 18:33 - John: hello jack old pal', '28/08/2020, 18:34 - Jack: sup mate', '28/08/2020, 18:35 - Gerald: what about me', '28/08/2020, 18:35 - John: what about you?', '28/08/2020, 18:35 - John: lols', '28/08/2020, 18:36 - Jack: looool', ] # act c = Chat(lines) counter = c.get_authors_by_verbosity(aggfunc = np.mean) #assert assert int(counter['John']) == 12 assert int(counter['Jack']) == 7 assert int(counter['Gerald']) == 13 def test_subscript_multiple_lines(): # arrange lines = [ '28/08/2020, 18:33 - John: hello jack old pal', '28/08/2020, 18:34 - Jack: sup mate', '28/08/2020, 18:35 - Gerald: what about me', '28/08/2020, 18:35 - John: what about you?', '28/08/2020, 18:35 - John: lols', '28/08/2020, 18:36 - Jack: looool', ] # act c = Chat(lines) subbed = c[:2] #assert assert isinstance(subbed, Chat) assert len(subbed) == 2 assert subbed.participants == {'John', 'Jack'} def test_subscript_single_value(): # arrange lines = [ '28/08/2020, 18:33 - John: hello jack old pal', '28/08/2020, 18:34 - Jack: sup mate', '28/08/2020, 18:35 - Gerald: what about me', '28/08/2020, 18:35 - John: what about you?', '28/08/2020, 18:35 - John: lols', '28/08/2020, 18:36 - Jack: looool', ] # act c = Chat(lines) subbed = c[1] #assert assert isinstance(subbed, Message) assert subbed.author == 'Jack' assert subbed.content == 'sup mate'
{"hexsha": "f5e0f9a397a00518b55933319b530452e4d49c0e", "size": 7522, "ext": "py", "lang": "Python", "max_stars_repo_path": "wassap/tests/test_parser.py", "max_stars_repo_name": "nicelgueta/whatsapp-utility", "max_stars_repo_head_hexsha": "2568ebe7fcbc071f642631836622f3ba7bdb43ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wassap/tests/test_parser.py", "max_issues_repo_name": "nicelgueta/whatsapp-utility", "max_issues_repo_head_hexsha": "2568ebe7fcbc071f642631836622f3ba7bdb43ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wassap/tests/test_parser.py", "max_forks_repo_name": "nicelgueta/whatsapp-utility", "max_forks_repo_head_hexsha": "2568ebe7fcbc071f642631836622f3ba7bdb43ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.662295082, "max_line_length": 89, "alphanum_fraction": 0.5874767349, "include": true, "reason": "import numpy", "num_tokens": 2353}
# ---------------------------------------------------------------------------- # File name: HolidayFea.py # # Created on: Aug. 11 2020 # # by Julia Hu # # Description: # # 1) This module to ## Add holidays for different sites # # # # ----------------------------------------------------------------------------- #first load in all necessary librares import pandas as pd import numpy as np import datetime from sklearn.base import BaseEstimator, TransformerMixin import holidays en_holidays = holidays.England() ir_holidays = holidays.Ireland() ca_holidays = holidays.Canada() us_holidays = holidays.UnitedStates() class HolidayTranformer(BaseEstimator, TransformerMixin): def __init__(self,holiday=True): self._holiday = holiday def fit(self, X, y=None): return self def transform(self, X): if self._holiday: en_idx = X.query('site_id == 1 or site_id == 5').index ir_idx = X.query('site_id == 12').index ca_idx = X.query('site_id == 7 or site_id == 11').index us_idx = X.query('site_id == 0 or site_id == 2 or site_id == 3 or site_id == 4 or site_id == 6 or site_id == 8 or site_id == 9 or site_id == 10 or site_id == 13 or site_id == 14 or site_id == 15').index X['IsHoliday'] = 0 X.loc[en_idx, 'IsHoliday'] = X.loc[en_idx, 'timestamp'].apply(lambda x: en_holidays.get(x, default=0)) X.loc[ir_idx, 'IsHoliday'] = X.loc[ir_idx, 'timestamp'].apply(lambda x: ir_holidays.get(x, default=0)) X.loc[ca_idx, 'IsHoliday'] = X.loc[ca_idx, 'timestamp'].apply(lambda x: ca_holidays.get(x, default=0)) X.loc[us_idx, 'IsHoliday'] = X.loc[us_idx, 'timestamp'].apply(lambda x: us_holidays.get(x, default=0)) holiday_idx = X['IsHoliday'] != 0 X.loc[holiday_idx, 'IsHoliday'] = 1 X['IsHoliday'] = X['IsHoliday'].astype(np.uint8) X = X.drop(['timestamp'], axis = 1 ) return X
{"hexsha": "c923864c6a88999afc02155f096536c856f37f72", "size": 2024, "ext": "py", "lang": "Python", "max_stars_repo_path": "sagemaker-notebook/HolidayFea.py", "max_stars_repo_name": "Julia-Bobo-Hu/IoTAnalytics-Realtime-Ingestion-Inference", "max_stars_repo_head_hexsha": "401dc9ac7f885a0b9924667a13291500f1159c79", "max_stars_repo_licenses": ["MIT-0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-11T22:39:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T22:39:46.000Z", "max_issues_repo_path": "sagemaker-notebook/HolidayFea.py", "max_issues_repo_name": "Julia-Bobo-Hu/IoTAnalytics-Realtime-Ingestion-Inference", "max_issues_repo_head_hexsha": "401dc9ac7f885a0b9924667a13291500f1159c79", "max_issues_repo_licenses": ["MIT-0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sagemaker-notebook/HolidayFea.py", "max_forks_repo_name": "Julia-Bobo-Hu/IoTAnalytics-Realtime-Ingestion-Inference", "max_forks_repo_head_hexsha": "401dc9ac7f885a0b9924667a13291500f1159c79", "max_forks_repo_licenses": ["MIT-0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4814814815, "max_line_length": 214, "alphanum_fraction": 0.5612648221, "include": true, "reason": "import numpy", "num_tokens": 529}
using Printf function output_result(stepnum, Qbase, cellxmax, cellymax, specific_heat_ratio, out_file_front, out_ext, out_dir, Rd, nval) stepnum = string(stepnum) while length(stepnum) < 6 stepnum = "0"*stepnum end fff = out_dir*"/"*out_file_front*stepnum*out_ext open(fff,"w") do f write(f,"result:rho[kg/m^3], u[m/s], v[m/s], p[Pa], massfrac_N2[kg/m^3], massfrac_N[kg/m^3], T[K]\n") for i in 2:cellxmax-1 for j in 2:cellymax-1 for l in 1:nval a = @sprintf("%8.8e", Qbase[i,j,l]) write(f, a*" ") end T = Qbase[i,j,4]/(Qbase[i,j,1]*Rd) a = @sprintf("%8.8e", T) write(f, a*"\n") end end end println("\nwrite "*fff) end function reset_write(fwrite) open( fwrite, "w" ) do f end end
{"hexsha": "4e0592f48cc4a37b3139470b259607c21642164a", "size": 904, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src_c/output.jl", "max_stars_repo_name": "hide-dog/general_2d_NS_Chemical-N2-N-", "max_stars_repo_head_hexsha": "e5e32ce510b91c82ffd216b4f2e5d1f97a624b12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src_c/output.jl", "max_issues_repo_name": "hide-dog/general_2d_NS_Chemical-N2-N-", "max_issues_repo_head_hexsha": "e5e32ce510b91c82ffd216b4f2e5d1f97a624b12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src_c/output.jl", "max_forks_repo_name": "hide-dog/general_2d_NS_Chemical-N2-N-", "max_forks_repo_head_hexsha": "e5e32ce510b91c82ffd216b4f2e5d1f97a624b12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1612903226, "max_line_length": 123, "alphanum_fraction": 0.5121681416, "num_tokens": 292}
\documentclass[5p,authoryear]{elsarticle} \makeatletter \def\ps@pprintTitle{% \let\@oddhead\@empty \let\@evenhead\@empty \let\@evenfoot\@oddfoot} % Supprimer le bas de page ELSEVIER \makeatother \usepackage[utf8]{inputenc} % En unicode \usepackage[T1]{fontenc} \usepackage[english]{babel} \usepackage[babel=true]{csquotes} % permet de faire \enquote{a} (« a ») \usepackage[fleqn]{amsmath} % pour certains signes mathématiques \usepackage{amsthm} % Pour \begin{gather} \usepackage{booktabs} % pour \toprule (un style de tableau) \usepackage{multirow} % Pour colonnes multiples des tableaux \usepackage{amssymb} % Pour \leqslant (<=, >=) \usepackage{float} \usepackage{hyperref} % \usepackage[english]{cleveref} % adding Code Blocking \usepackage{listings} \usepackage{color} \definecolor{dkgreen}{rgb}{0,0.6,0} \definecolor{gray}{rgb}{0.5,0.5,0.5} \definecolor{mauve}{rgb}{0.58,0,0.82} \lstset{frame=tb, language=Java, aboveskip=3mm, belowskip=3mm, showstringspaces=false, columns=flexible, basicstyle={\small\ttfamily}, numbers=none, numberstyle=\tiny\color{gray}, keywordstyle=\color{blue}, commentstyle=\color{dkgreen}, stringstyle=\color{mauve}, breaklines=true, breakatwhitespace=true, tabsize=3 } %\bibliographystyle{elsarticle-num} \bibliographystyle{elsarticle-harv} \usepackage{fancyhdr} \pagestyle{fancy} \lhead{MSDS 458 - SEC 56} \rhead{Lee, J.} \begin{document} \begin{frontmatter} \title{Focused Web Crawler: \\NBA Team Specific News Articles} \author{Jason Lee} \address{Northwestern University, SPS \\Natural Language Processing \\2020SP MSDS 453-56} \begin{abstract} Information is power in the sports betting industry. When teams' news hits the web, betting syndicates need to be able to react with speed before market prices adjust. A web scraper, or spider, can crawl the internet collecting relevant information. The goal of this project is to develop a spider utilizing the Scrapy and Selenium packages in Python to collect National Basketball Association (NBA) team specific news. The spider will crawl from page to page scraping, parsing, and saving the relevant articles. \end{abstract} \begin{keyword} Natural Language Processing (NLP) \sep NBA \sep Sports Betting \sep Web Scraping \sep Python \end{keyword} \end{frontmatter} %\linenumbers \section{Introduction}\label{introduction} The sports betting markets are presumed to be efficient markets by the time the closing price on each game is locked in \citep{market}. Fortunately for professional sports bettors, there is a difference between the opening line and the closing line for each game where there is opportunity to exploit inefficiencies for financial gain. Many of these lines move when new information about a team is released to the public. There is a delay between when critical news comes out and when sportsbooks adjust their lines. Professional sports bettors are able to use the lag in the shifting lines advantageously by reacting the fastest with new information. Professional sports bettors are able to do this by utilizing automated bots or web crawlers, also known as Spiders. These Spiders scour the internet searching for any relevant sporting news that can add value to the sports bettors. This project focuses on building one of these Spiders to crawl the internet searching for news articles relating to any National Basketball Association (NBA) team. The spider will then parse and save important information that can then be used to inform sports betting decisions. This project will be sponsored by A.I. Sports and the final product will be utilized by their company to consult with various betting syndicates \citep{aisports}. This project will also be the starting point for future natural language processing (NLP) projects. The focused web crawlers will collect hundreds of news articles combined together creating a complete corpus. The corpus will be broken down using document vectorization methods to understand relationships between each article in the corpus. There will be document classification models built on the corpus, as well as unsupervised machine learning techniques and multivariate analysis to better understand themes and sentiments of each article. Another future project will consist of developing a document summarization algorithm that can quickly generate accurate summarized outputs for each article. There are several desired outcomes from this first project: 1 – Create a fully functional and focused Python based web crawler. 2 – Understand how to inspect websites to extract the right information. 3 – Create a complete corpus of NBA team news articles for future Natural Language Processing (NLP) projects. \section{Literature Review} \label{lit_rev} The process of extracting and storing information from the internet in an automated fashion is a highly valuable skill that requires a keen attention to detail. Writing flexible code that is able to handle potential errors is crucial for a successfully programmed focused web crawler. Three researches with ties to IBM dedicated time formulating a list of eight pivotal steps when creating a focused web crawler \citep{focused}: \begin{enumerate} \item Canonical Taxonomy Creation \item Example Collection \item Taxonomy Selection and Refinement \item Interactive Exploration \item Classifier Training \item Resource Discovery \item Distiller Development \item Evaluation and Feedback \end{enumerate} \\ The beginning step is obvious, create a topic for the focused web crawler to search. The next step is to collect example websites containing information about the chosen topic. During the example collection process, the following step naturally takes shape as the topic becomes refined. Certain websites and links may be marked as "good" or "bad" during both the example collection and refinement steps \citep{focused}. Once the topic has been narrowed down, an interactive exploration of the websites begins. A key feature to any focused web crawler is a specific starting (or multiple starting points) and a designated stopping point \citep{crawl}. A focused web crawler must have a purpose, which requires the user to perform preparatory work by traversing the internet to find key domains with relevant information. Once these starting domains are selected, the Spiders will be able crawl through each link, moving from webpage to webpage. However, there are many links on relevant websites that might lead to irrelevant material or advertisements. If the web crawler is left on its own, it may end up falling down the endless black hole that is the internet. There are many ways to control the web crawler by setting program hyperparameters. The Spiders can be given strict instruction to remain only on specific domains. The Spiders can also be stopped after a predetermined number of pages deep it moves. Another program hyperparameter is the total number of pages to download, forcing the web crawler to immediately stop when it reaches the maximum limit \citep{crawl}. Step 5 in the IBM researchers list, training the classifier, helps the web crawler remain on a focused path. A classifier is a model that determines how relevant a particular website is to the chosen topic \citep{focused}. The classifier will allow for advanced filtering through websites that would not be feasible manually on larger projects. The classifier training takes into account the websites and domains manually marked as "good" and "bad" from the previous steps. \begin{figure}[!h] \centering \includegraphics[width=3.4in]{figures/focused_webcrawler_diagram.png} \caption[]{Block diagram of the focused crawler showing how the web crawler, classifier, and distiller are integrated.} \label{web_flow} \end{figure} The final steps bring the entire project together. The user discovers the resources that were collected by the focused web crawler as it scrapes the internet. The user will be able to provide feedback that is cycled back into the classifier retraining and improving the outputs. A distillation algorithm can also be run after the focus web crawler has been working for some time. The distiller will be able to find key domains that are known as the authorities on the given topic. Figure \ref{web_flow} visually depicts the eight steps seamlessly flowing together in a diagram that was used by the IBM researchers, with the classifier, distiller, and crawler as the three focal components \citep{focused}. \subsection{Scrapy}\label{Scrapy} Scrapy is a Pyhonic program designed specifically for web crawling and scraping \citep{scrapy}. Figure \ref{Scrapy_Framework} shows how Scrapy behaves, which has some similarities to the IBM researchers' Framework in Figure \ref{web_flow}. The Spiders send the initial request to the Scrapy Engine, where the Engine checks the scheduler before dispatching the Spiders to the internet. The Spiders send their requests to the internet and receive a response. The Spiders have predetermined items they are sent to collect. The Spiders will internally scan the response sending each item it finds through the Item Pipeline where it is stored. The Item Pipeline is where the items can be parsed, transformed, and formatted before they are saved to the desired database or file. \begin{figure}[!htb] \centering \includegraphics[width=3.7in]{figures/Scrapy_Framework.png} \caption[]{Illustration of the Scrapy Framework} \label{Scrapy_Framework} \end{figure} \subsection{Selenium}\label{Selenium} Many newer websites are written with JavaScript and Angular applications embedded into the HTML code to allow the page to operate in a smooth, reactive fashion \citep{angular}. However, while the user experience is enhanced, these websites cause serious issues for most web crawlers as they try to extract the page contents. From the perspective of the Spiders, there are only empty containers where the contents should be because the webpage does not actually queried the server and load. Selenium is a tool that launches a WebDriver with a designated browser to allow the Spider to view a website the same way a user would view it. Figure \ref{Selenium_Framework} shows the Framework that Selenium operates within. There are many different types of browsers that can be used and the HTTP requests to the server and JavaScript code will run within Selenium. This allows the spiders to "click" buttons, scroll, and extract the information on these pages. \begin{figure}[!htb] \centering \includegraphics[width=3.4in]{figures/Selenium_Framework.png} \caption[]{Illustration of the Selenium Framework} \label{Selenium_Framework} \end{figure} \section{Methodology}\label{meth} The methodology implemented during this project is as follows. \subsection{Topic Selection and Refinement}\label{chat} The canonical topic chosen for this project is the National Basketball Association (NBA). More specifically, news articles discussing team match-ups, injuries, and general news that might give insight into how the team will perform in their upcoming games. Gathering relevant examples was an easy task, almost too easy. There are local beat writers, national news coverage, bloggers, and the occasional deep dive investigative reporting from media companies. Unfortunately, the main sports news companies, like ESPN, FOX Sports, Turner, Bleacher Report, etc., do not cover teams equally or fairly. There are 30 NBA teams and each team has dedicated team website on NBA.com. An assumption by using the official team specific websites is that each team's news will be biased towards their own team. This is an acceptable bias for this project because of the consistency across all of the teams. The web crawlers will begin on each team's official NBA website and move from post to post scraping each article. \subsection{Interactive exploration of the web}\label{exploration} \begin{figure}[!htb] \centering \includegraphics[width=3.4in]{figures/Hawks_News.png} \caption[]{Atlanta Hawks News homepage} \label{hawks news} \end{figure} This portion of the project was extremely tedious. There were many issues trying to program the web crawlers. The Atlanta Hawks are the first alphabetical team in the NBA making them the experimental guinea pig. Their web page is shown in Figure \ref{hawks news}. \begin{figure}[!htb] \centering \includegraphics[width=3.0in]{figures/post_title.png} \caption[]{HTML Class = 'post\_\_title' showing where the web crawler can extract the information on the news article's title} \label{Post Title} \end{figure} The first step is to open the developer tools and inspect the raw HTML code searching for the XPATH or CSS class name, or id tag, for the section on the website that will be scraped. Figure \ref{Post Title} shows an example of the article's title class for the web crawler to find. Looking over the website's code and finding the class names, this appears to be a fairly simple and straightforward task for the focused web crawler to complete. \begin{figure}[!htb] \centering \includegraphics[width=3.0in]{figures/post_link.png} \caption[]{The HTML Hyperlink is contained within the 'post\_\_title' div allowing the spider to crawl to the actual new article's page to scrape the entire document} \label{Post Link} \end{figure} However, this did not work at all. In Figure \ref{Post Link}, there is a tag "\_ngcontent" meaning that this information is contained inside an Angular JavaScript application. This issue and coming up with a workable solution dominated the majority of the allotted time to complete this project. Eventually, the Spiders were able to collect the necessary information by using both Selenium and Scrapy together. \section{Computational Experiment} The entire Python code for this project's focused web crawler program will be attached to this paper, or can be reproduced by cloning the project's \href{https://github.com/papagorgio23/NBA_News_Spiders}{Github Repository} at this url:\\ \href{https://github.com/papagorgio23/NBA_News_Spiders}{github.com/papagorgio23/NBA\_News\_Spiders}\\ To run the program, simply type the following line of code into the command line from the "NBA\_News\_Spiders" project home directory: \begin{lstlisting} cd NBA_News python release_spiders.py \end{lstlisting} This script will unleash the spiders onto hundreds of NBA news articles where they can scrape relevant information. This program will output the articles in an itemized JSON line file, where each line in the file contains a single news article and key associated details. Those details include the team, url, date, tags, and the article. \subsection{Release Spiders Python Script}\label{release} The "release\_spiders.py" file is an adapted version of the "run-articles-spider.py" file from the "WebFocusedCrawlWorkV001" directory \citep{sample-code}. The file begins by creating a folder called "nba\_articles" where the downloaded html web pages will be saved by the Spiders as they crawl from site to site. The structure of the directory is then outputted to the terminal ensuring that everything is in order before the spiders are released into the wild. The final lines of code call on Scrapy to launch the focused web crawlers and save the items in a JSON line formatted file. \subsection{Scrapy with Selenium} The workhorse script for this focused web crawler program is found in the spiders folder with the title "articles\_spider.py". The Spider named "ArticlesSpider" is created with instructions to perform two sequential tasks. Typically, when a Scrapy Spider is released, it is given a list of URLs to start their crawl. What is unique about this particular Spider is that the starting URLs is a function call instead of a list of URLs. This function initializes a Selenium webdriver with a headless Chrome browser. Through Selenium, the Spider loops through a list of each team's news website clicking the "load more" button eight times to expand the number of news articles displayed on the web page to a maximum 96 articles. Once the articles are all loaded into the browser, the Spider collects the URLs for every single news article. This complete list with hundreds of URLs from every team is then passed back to the Scrapy Spider and the Selenium browser closes. The Scrapy Spiders will then use this returned list of URLs as starting points to crawl each article, parsing and storing the required information. \subsubsection{Items} The Scrapy items Python script is used for two purposes. First, the items that the Spiders are searching for are defined. The Spiders will collect six important pieces of information from every article: \begin{enumerate} \item team = The name of the NBA team \item url = The URL where the article is found \item tags = The topic tags for the article \item title = The title of the article \item date = The date the article was posted \item article = The complete article \end{enumerate} \\ The second purpose of the items script is to preprocess, or clean, some of the fields before they are stored. The raw date field would return a character string similar to this: "Posted on: Mar 14, 2020" This is not a workable format for a computer to understand. A custom function is written to trim "Posted on: " from the string and then "Mar 14, 2020" is converted into a Python Datetime field. The result of this function is then stored as the date item for each article. The other field that requires preprocessing is the article field. The articles are comprised of many elements, such as links, special characters, and lingering HTML tags. The Spiders also capture leading and trailing text that is not actually apart of the story. The first step cleaning this field comes by utilizing the remove\_tags function from the w3lib.html library. The trailing text always beings with "Copyright", making it a convenient word to split at to remove the irrelevant text. The leading text is consistent across the articles making it simple to remove as well. Finally, paragraph breaks and excess spaces are trimmed and the cleaned article string can be stored. All of these steps can found in the "items.py" script in the NBA\_Scrapy folder. \section{Results} The Scrapy program logs are shown in Figure \ref{Scrapy Results}. The Scrapy spiders crawled 771 links, extracting 768 itemized articles. Unfortunately, there were consistency issues between many of the NBA team's homepages. There were seven NBA teams that returned zero news articles, resulting in a total of 23 NBA teams represented in our corpus. While this is not optimal, it is an acceptable corpus size to meet the requirements. \begin{figure}[!htb] \centering \includegraphics[width=3.0in]{figures/Scrapy_Results.png} \caption[]{Scrapy output logs from the focused web crawler program} \label{Scrapy Results} \end{figure} Roughly 23 minutes elapsed to completely run the focused web crawler program, with the Selenium portion absorbing a disproportionate amount of the time. Selenium takes up so much time because each time a spider lands on a team's homepage, there is a programmed delay to allow the Angular application and JavaScript portions of the page to load. The spider then tries to click eight buttons, waiting a few seconds in between each click for the news articles to populate before the contents can be extracted. Once the Selenium portion finished collecting the 771 usable links to news articles, the Scrapy spiders were able to scrape each article in under two minutes. The Scrapy Spiders saved the itemized JSON file, as well as the full HTML webpage for all 768 articles. \section{Discussion and Conclusions} Potential future work to improve the performance of this focused web crawler and scraper would include adding additional error handling steps to hopefully collect news articles from all 30 teams. There were several NBA teams who have not updated their websites, meaning that they do not follow the same standard design template. Because of the time constraints for this project, there was not enough time to manually investigate the websites of each of the teams that through an error. Fixing this code will allow the spiders to scrape hundreds of additional articles. Another slight programming issue came trying to incorporate the Selenium driver within the Scrapy framework. The initial plan was to use Selenium on each team's website by placing a Selenium driver within the "start\_requests" function in the Spider's Class. However, this generated issues managing the number of drivers opened and transitioning the request from using Selenium on the starting JavaScript team page to using the standard Scrapy request and response on the static article page. The ideal plan would utilize the middlewares.py Scrapy functionality by adding Selenium into the "process\_request" function (Step 4 in the diagram in Figure \ref{Scrapy_Framework}). Ultimately, the attempts to implement this failed under the given time frame but future work should make this adjustment. In conclusion, this project was able to accomplish each of the goals originally laid out. A fully automated and narrowly focused web crawler was built in Python utilizing both Scrapy and Selenium that A.I. Sports will be able to use in the future. There has been a deeper understanding of website structure and tools need to extract the right information over the life cycle of the project. And finally, an organized corpus of NBA team news articles has been created for future natural language processing (NLP) projects. \clearpage \bibliography{bibliographie.bib} \bibliographystyle{newapa} \end{document}
{"hexsha": "bc8789bb70ec48dcd8baff842fb9d7656d47869f", "size": 21752, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Academic Paper/main.tex", "max_stars_repo_name": "papagorgio23/NBA_News_Spiders", "max_stars_repo_head_hexsha": "ca5c12bf50e1a8b422b0afc315a6b61ba3b67588", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-20T22:10:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T22:04:37.000Z", "max_issues_repo_path": "Academic Paper/main.tex", "max_issues_repo_name": "papagorgio23/NBA_News_Spiders", "max_issues_repo_head_hexsha": "ca5c12bf50e1a8b422b0afc315a6b61ba3b67588", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Academic Paper/main.tex", "max_forks_repo_name": "papagorgio23/NBA_News_Spiders", "max_forks_repo_head_hexsha": "ca5c12bf50e1a8b422b0afc315a6b61ba3b67588", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 71.7887788779, "max_line_length": 884, "alphanum_fraction": 0.7990989334, "num_tokens": 4751}
# -*- coding: utf-8 -*- """inter_annotator_agreement and data composition.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/19rbuLH97L8OesYayqaCXrrE0Y0I_ndTJ This pipeline extracts annotated entities and labels from training data for every annotator and preprocesses data for calculating inter-annotator agreement and calculates entity composition for each file. """ from google.colab import drive drive.mount('/content/drive') #Download packages !python3 -m spacy download en !python3 -m spacy download en_core_web_sm #Import libraries import json import os import glob import spacy import random import en_core_web_sm from spacy import displacy from pathlib import Path """Compute length and number of words for every file in training, testing and gold datasets""" #Define the source folder import collections training_txt_path='/content/drive/MyDrive/Bushire-GT-compound/training_data_txt/' testing_txt_path='/content/drive/MyDrive/Bushire-GT-compound/inputs/' gold_txt_path='/content/drive/MyDrive/Bushire-GT-compound/gold_data/gold_inputs/' def compute_lengths(path,input_lengths): new_path=path+"*.txt" #Load all json files from the source folder and append them to the dictionary for f in glob.glob(new_path): try: infile=open(f,encoding='utf8') #extract name of the file name=infile.name.split("/")[-1] print(name) index=name.find(".txt") name=name[:index] #read file text=infile.read() except UnicodeDecodeError: infile=open(f,encoding='windows-1252') #extract name of the file name=infile.name.split("/")[-1] index=name.find(".txt") name=name[:index] #read file text=infile.read() text=text.replace("\n", " ") text=text.replace("\'", " ") words = text.split() input_lengths[name]=(len(text),len(words)) #the dictionary consists of a tuple where first value = text length and second value=number of words gold_txt_lengths=collections.defaultdict(int) compute_lengths(gold_txt_path,gold_txt_lengths) training_txt_lengths=collections.defaultdict(int) compute_lengths(training_txt_path,training_txt_lengths) testing_txt_lengths=collections.defaultdict(int) compute_lengths(testing_txt_path,testing_txt_lengths) training_txt_lengths testing_txt_lengths gold_txt_lengths def print_lengths(dict_path,string): total_length=0 total_symbols=0 for filename, tuple in dict_path.items(): total_length+=tuple[1] total_symbols+=tuple[0] print(string+" data: number of words: "+str(total_length)+" and number of symbols: "+str(total_symbols)) print_lengths(gold_txt_lengths,"gold") print_lengths(training_txt_lengths,"training") """Load training data from every annotator and store them in a dictionary""" #Define the source folder source_path='/content/drive/MyDrive/Bushire-GT-compound/gold_data/gold_inputs/' #source_path='/content/drive/MyDrive/Bushire-GT-compound/training_data_json/' #Load all json files from the source folder and append them to the dictionary train_files = {} #-- name stores file name and value stores full json content train_annotations={} #-- name stores file name and value stores json content - only annotation section annotated_r={} annotated_d={} annotated_s={} for f in glob.glob(source_path+"*.json"): with open(f) as infile: name=infile.name.split("/")[-1] index=name.find(".json") name=name[:index] new_index=name.find("-") file_content=json.load(infile) if new_index!=-1: char=name[-1] if char=="r": annotated_r[name]=file_content['annotations'] elif char=="s": annotated_s[name]=file_content['annotations'] elif char=="d": annotated_d[name]=file_content['annotations'] train_files[name]=file_content['annotations'] #Store annotation section of each json file in a separate array #if 'annotations' in train_files[name]: train_annotations[name]=file_content['annotations'] print(annotated_d) annotated_r """Extract entity and label (as a tuple) for every text file from every annotator and calculated Named Entity composition for every file""" from collections import defaultdict total_labels=defaultdict(int) total_text_labels={} def populate(annotated_diction,total_labels,total_text_labels): all_files_entities=defaultdict(list) for filename in annotated_diction.keys(): orig_filename=filename content=annotated_diction[filename] index=filename.find("-") filename=filename[:index] all_entities=[] for content_file in content: annotation=content_file[0] #annotation=annotation.replace("\'", "") #annotation=annotation.replace("'I", "I") entities=content_file[1]['entities'] if entities: for ent in entities: start=ent[0] end=ent[1] label=ent[2] #increment number of labels total_labels[label]+=1 #increment number of labels per textfile if orig_filename not in total_text_labels: total_text_labels[orig_filename]={} if label in total_text_labels[orig_filename]: total_text_labels[orig_filename][label]+=1 else: total_text_labels[orig_filename][label]=1 entity_content=annotation[start:end] tuple=(entity_content,label) all_entities.append(tuple) if all_entities: all_files_entities[filename].append(all_entities) return all_files_entities r_entities=populate(annotated_r,total_labels,total_text_labels) s_entities=populate(annotated_s,total_labels,total_text_labels) d_entities=populate(annotated_d,total_labels,total_text_labels) s_entities['xaa'] total_label_count=0 for label,count in total_labels.items(): total_label_count+=count print(label+" "+str(count)) print(total_label_count) print(len(total_labels)) total_label_count=collections.defaultdict(int) for filename in total_text_labels: labels=total_text_labels[filename] total=0 for key in labels: total+=labels[key] total_label_count[filename]+=total print(total_label_count) """Write every annotated file to the drive """ #Format: every line contains a Named Entity and label path="/content/drive/MyDrive/Bushire-GT-compound/gold_data/gold_annotator_output/" def write_to_file(entity_dict,path,ext): for filename in entity_dict: out=entity_dict[filename][0] filepath=path+filename+ext+".txt" f = open((filepath), "w") for tuple in out: f.write(str(tuple[0])+" "+str(tuple[1])) f.write("\n") f.close() write_to_file(r_entities,path,"-r") write_to_file(d_entities,path,"-d") write_to_file(s_entities,path,"-s") """Evaluate annotator's output and model output""" model_output_path="/content/drive/MyDrive/Bushire-GT-compound/gold_data/gold_outputs/" annotator_output_path="/content/drive/MyDrive/Bushire-GT-compound/gold_data/gold_annotator_output/" for f in glob.glob(annotator_output_path+"*.txt"): try: infile=open(f,encoding='utf8') #read file text=infile.read() except UnicodeDecodeError: infile=open(f,encoding='windows-1252') #read file text=infile.read() #extract name of the file name=infile.name.split("/")[-1] index=name.find(".txt") name=name[:index] new_index=name.find("-") orig_name=name[:new_index] text_arr=text.split("\n") for pair in text_arr: ent_arr=pair.split(" ") label=ent_arr[-1] entity=" ".join(ent_arr[0:len(ent_arr)-1]) print(entity) #Credits: https://towardsdatascience.com/inter-annotator-agreement-2f46c6d37bf3 def cohen_kappa(ann1, ann2): """Computes Cohen kappa for pair-wise annotators. :param ann1: annotations provided by first annotator :type ann1: list :param ann2: annotations provided by second annotator :type ann2: list :rtype: float :return: Cohen kappa statistic """ count = 0 for an1, an2 in zip(ann1, ann2): if an1 == an2: count += 1 A = count / len(ann1) # observed agreement A (Po) uniq = set(ann1 + ann2) E = 0 # expected agreement E (Pe) for item in uniq: cnt1 = ann1.count(item) cnt2 = ann2.count(item) count = ((cnt1 / len(ann1)) * (cnt2 / len(ann2))) E += count return round((A - E) / (1 - E), 4) import numpy as np #Credits: https://www.datacamp.com/community/tutorials/fuzzy-string-python def levenshtein_ratio_and_distance(s, t, ratio_calc = False): """ levenshtein_ratio_and_distance: Calculates levenshtein distance between two strings. If ratio_calc = True, the function computes the levenshtein distance ratio of similarity between two strings For all i and j, distance[i,j] will contain the Levenshtein distance between the first i characters of s and the first j characters of t """ # Initialize matrix of zeros rows = len(s)+1 cols = len(t)+1 distance = np.zeros((rows,cols),dtype = int) # Populate matrix of zeros with the indeces of each character of both strings for i in range(1, rows): for k in range(1,cols): distance[i][0] = i distance[0][k] = k # Iterate over the matrix to compute the cost of deletions,insertions and/or substitutions for col in range(1, cols): for row in range(1, rows): if s[row-1] == t[col-1]: cost = 0 # If the characters are the same in the two strings in a given position [i,j] then the cost is 0 else: # In order to align the results with those of the Python Levenshtein package, if we choose to calculate the ratio # the cost of a substitution is 2. If we calculate just distance, then the cost of a substitution is 1. if ratio_calc == True: cost = 2 else: cost = 1 distance[row][col] = min(distance[row-1][col] + 1, # Cost of deletions distance[row][col-1] + 1, # Cost of insertions distance[row-1][col-1] + cost) # Cost of substitutions if ratio_calc == True: # Computation of the Levenshtein Distance Ratio Ratio = ((len(s)+len(t)) - distance[row][col]) / (len(s)+len(t)) return Ratio else: # print(distance) # Uncomment if you want to see the matrix showing how the algorithm computes the cost of deletions, # insertions and/or substitutions # This is the minimum number of edits needed to convert string a to string b return ("The strings are {} edits away".format(distance[row][col]),distance[row][col]) """Find approximately matching strings to solve the incorrect tokenization problem""" #we find at least 40% matching strings using the levenstein ratio formula def matching_named_entities(annot1,annot2,matching,non_matching,annotations1,annotations2): for filename in annot1: if filename in annot2: #print(filename) content1=annot1[filename][0] content2=annot2[filename][0] #print(len(content1)) for tuple1 in content1: text1=tuple1[0] label1=tuple1[1] for tuple2 in content2: text2=tuple2[0] label2=tuple2[1] dist=levenshtein_ratio_and_distance(text1,text2)[1] len1=len(text1) len2=len(text2) if dist<(0.7*min(len1,len2)): annotations1.add(text1) annotations2.add(text2) if label1==label2: matching[label1].add((text1,text2)) else: if (label1,label2) in non_matching: non_matching[(label1,label2)].add((text1,text2)) elif (label2,label1) in non_matching: non_matching[(label2,label1)].add((text1,text2)) else: non_matching[(label1,label2)].add((text1,text2)) annotations1=set() annotations2=set() matching=defaultdict(set) non_matching=defaultdict(set) matching_named_entities(r_entities,s_entities,matching,non_matching,annotations1,annotations2) matching """Evaluation""" train_annotations['xaa'] r_entities['xaa']
{"hexsha": "aa3d17f37069bb4ecc8bdee08e4e997f4f1cd199", "size": 12484, "ext": "py", "lang": "Python", "max_stars_repo_path": "pipelines/ner_spacy_final/inter_annotator_agreement_and_data_composition.py", "max_stars_repo_name": "almazhankapan/opengulf.github.io", "max_stars_repo_head_hexsha": "70771e84025a11a6afc39a4d50695db83b548638", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pipelines/ner_spacy_final/inter_annotator_agreement_and_data_composition.py", "max_issues_repo_name": "almazhankapan/opengulf.github.io", "max_issues_repo_head_hexsha": "70771e84025a11a6afc39a4d50695db83b548638", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pipelines/ner_spacy_final/inter_annotator_agreement_and_data_composition.py", "max_forks_repo_name": "almazhankapan/opengulf.github.io", "max_forks_repo_head_hexsha": "70771e84025a11a6afc39a4d50695db83b548638", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8735632184, "max_line_length": 204, "alphanum_fraction": 0.6763857738, "include": true, "reason": "import numpy", "num_tokens": 2972}
#include <iostream> #include <unistd.h> // Les lignes suivantes ne servent qu'à vérifier que la compilation avec SFML fonctionne #include <SFML/Graphics.hpp> void testSFML() { sf::Texture texture; } // Fin test SFML #include <state.h> #include <render.h> #include <engine.h> #include <ai.h> #include <client.h> #include <iostream> #include <sstream> #include <map> #include <cstdio> #include <functional> #include <boost/function.hpp> using namespace std; using namespace state; using namespace render; using namespace engine; using namespace client; int main(int argc,char* argv[]) { std::map<std::string,ICommand_Client*> MethodMap; MethodMap["hello"] = (ICommand_Client*)new Command_Client_Hello(); MethodMap["render"] = (ICommand_Client*)new Command_Client_Render(); MethodMap["random_ai"] = (ICommand_Client*)new Command_Client_RAI(); MethodMap["player_vs_rai"] = (ICommand_Client*)new Command_Client_PAI(); MethodMap["engine"] = (ICommand_Client*)new Command_Client_Engine(); MethodMap["heuristic_ai"] = (ICommand_Client*)new Command_Client_HAI(); MethodMap["player_vs_hai"] = (ICommand_Client*)new Command_Client_PHAI(); MethodMap["hai_vs_rai"] = (ICommand_Client*)new Command_Client_RHAI(); MethodMap["help"] = (ICommand_Client*)new Command_Client_Autre(); MethodMap["rollback"] = (ICommand_Client*)new Command_Client_Rollback(); MethodMap["play"] = (ICommand_Client*)new Command_Client_Play(); MethodMap["deep_ai"] = (ICommand_Client*)new Command_Client_DAI(); MethodMap["thread"] = (ICommand_Client*)new Command_Client_Thread(); MethodMap["network"] = (ICommand_Client*)new Command_Client_Network(); MethodMap["player_vs_player"] = (ICommand_Client*)new Command_Client_PvP(); if(argc==2) { if(MethodMap.find(argv[1])==MethodMap.end()) { cout << "Cette commande est inexistante" <<endl; MethodMap["help"]->execute(); }else { MethodMap[string(argv[1])]->execute(); } }else{ cout << "Give me something processable a.k.a hello/state/render/engine/random_ai/player_vs_rai/play/player_vs_player" << endl; } return 0; }
{"hexsha": "e8e4c9cbd8e8620faf4c9709fe6e4ef10c3b2ca6", "size": 2189, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/client/main.cpp", "max_stars_repo_name": "Kuga23/Projet-M2", "max_stars_repo_head_hexsha": "85c879b8fd1ed4fdf89eedd9f89841cbd7a1e433", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/client/main.cpp", "max_issues_repo_name": "Kuga23/Projet-M2", "max_issues_repo_head_hexsha": "85c879b8fd1ed4fdf89eedd9f89841cbd7a1e433", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/client/main.cpp", "max_forks_repo_name": "Kuga23/Projet-M2", "max_forks_repo_head_hexsha": "85c879b8fd1ed4fdf89eedd9f89841cbd7a1e433", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7246376812, "max_line_length": 134, "alphanum_fraction": 0.7030607583, "num_tokens": 537}
"""Provides analysis of a probe set for targeted genomes. This computes the number of bp across each target genome that the probes cover, as well as the percentage of each target genome that the probes cover. It computes a percentage against the full length of the target genome, as well as a percentage against the length of the target genome when only counting unambiguous bases (non-'N' bases). It also computes the average coverage that the probes yield across each target genome: again, one value against the full target genome and one against just the unambiguous bases. A few notes about the percentage of bases covered (in each note, assume that we specified a desired coverage of 100% when designing the probes, though this could be generalized to any desired coverage): - The percentage of bases covered in the full target genome (including ambiguous bases) may be less than 100%. This is expected if the target genome contains ambiguous bases ('N') because these are not covered. However, if this coverage analysis is run on probes to which adapters have been added, it may also be less than 100% even if the target genome contains no ambiguous bases. The reason is that, when this finds the ranges covered by each probe, it includes the adapters; some number of mismatches may be "used up" by the adapter, leaving fewer mismatches at the end of the true probe for aligning to the sequence, and meaning that the bases at the end of the true probe fail to cover bases of the target genome that they were originally intended to cover. For example, suppose we have a probe "ATCG" which was intended to cover the sequence "ATCC" in a target genome. Now suppose that (2-base) adapters are added to the probe, making it "GGATCGCC" (left adapter is 'GG' and right adapter is 'CC'). Suppose that the region in the target genome around "ATCC" is "GTATCCCC". And suppose we design the probes allowing for one mismatch. The one mismatch is "used up" by the second base on the left adapter ('G' in the adapter mismatches with 'T' in the target genome), so the base 'C' at the end of "ATCC" is not covered. Running the coverage analysis before adding adapters to the probes should alleviate this, making the percentage 100% as desired. - The percentage of bases covered in the target genome, when only counting unambiguous bases, may be less than 100% when adapters have been added to probes. The reason is the same as above. - The percentage of bases covered in the target genome, when only counting unambiguous bases, may be less than 100% even when adapters have not been added to probes. This can occur if there are bases in between two stretches of ambiguous bases ('N'). It may not be possible to cover the (unambiguous) bases in between these two stretches using a probe, so these bases then go uncovered. For example, consider the region 'NNNNNNATCGNNNNNN'; the four unambiguous bases ('ATCG') may not be covered by any probe. - The percentage of bases covered in the target genome, when only counting unambiguous bases, may be more than 100%. This could happen when allowing mismatches. A probe could cover an ambiguous base (i.e., 'N') of a target genome by simply using a mismatch when aligning to that base. Then, the probes collectively cover more bases than there are unambiguous bases in the target genome, making the percentage more than 100%. """ from collections import Counter from collections import defaultdict import logging import numpy as np from catch import probe from catch.utils import dynamic_load from catch.utils import interval from catch.utils import pretty_print __author__ = 'Hayden Metsky <hayden@mit.edu>' logger = logging.getLogger(__name__) class Analyzer: """Methods for testing quality control of a probe set. """ def __init__(self, probes, mismatches, lcf_thres, target_genomes, target_genomes_names=None, island_of_exact_match=0, custom_cover_range_fn=None, cover_extension=0, kmer_probe_map_k=10, rc_too=True): """ Args: probes: collection of instances of probe.Probe that form a complete probe set mismatches/lcf_thres: consider a probe to hybridize to a sequence if a stretch of 'lcf_thres' or more bp aligns with 'mismatches' or fewer mismatched bp; used to compute whether a probe "covers" a portion of a sequence target_genomes: list [g_1, g_2, ..., g_m] of m groupings of genomes, where each g_i is a list of genome.Genomes belonging to group i. For example, a group may be a species and each g_i would be a list of the target genomes of species i. target_genomes_names: list [s_1, s_2, ..., s_m] of strings where the name of the i'th genome grouping (from target_genomes) is s_i. When None, the name of the i'th grouping is "Group i". island_of_exact_match: for a probe to hybridize to a sequence, require that there be an exact match of length at least 'island_of_exact_match' custom_cover_range_fn: if set, tuple (path, fn) where path gives a path to a Python module and fn gives the name of a function in that module. This function is dynamically loaded and used to determine whether a probe will hybridize to a region of target sequence (and what portion will hybridize). The function must accept the same arguments as the function returned by probe.probe_covers_sequence_by_longest_common_substring() and return the same value. When set, the parameters 'mismatches', 'lcf_thres', and 'island_of_exact_match' are ignored (even if their values are default values) because they are only used in the default cover_range_fn. cover_extension: number of bp by which to extend the coverage on each side of a probe; a probe "covers" the portion of the sequence that it hybridizes to, as well as 'cover_extension' bp on each side of that portion kmer_probe_map_k: in calls to probe.construct_kmer_probe_map..., uses this value as min_k and k rc_too: when True, analyze all the target genomes in target_genomes, as well as their reverse complements (when False, do not analyze reverse complements) """ self.probes = probes self.target_genomes = target_genomes if target_genomes_names: if len(target_genomes_names) != len(target_genomes): raise ValueError(("Number of target genome names must be same " "as the number of target genomes")) self.target_genomes_names = target_genomes_names else: self.target_genomes_names = ["Group %d" % i for i in range(len(target_genomes))] if custom_cover_range_fn is not None: # Use a custom function to determine whether a probe hybridizes # to a region of target sequence (and what part hybridizes), # rather than the default model. Ignore the given values for # mismatches and lcf_thres (which may be default values) because # these are only relevant for the default model self.mismatches, self.lcf_thres = None, None # Dynamically load the function fn_path, fn_name = custom_cover_range_fn self.cover_range_fn = dynamic_load.load_function_from_path( fn_path, fn_name) else: self.mismatches = mismatches self.lcf_thres = lcf_thres # Construct a function using the default model of hybridization self.cover_range_fn = \ probe.probe_covers_sequence_by_longest_common_substring( mismatches, lcf_thres, island_of_exact_match) self.cover_extension = cover_extension self.kmer_probe_map_k = kmer_probe_map_k self.rc_too = rc_too def _iter_target_genomes(self): """Yield target genomes across groupings to iterate over. Yields: i, j, gnm, rc - i is the index of a target genome grouping - j is the index of a genome in grouping i - gnm is an instance of genome.Genome corresponding to to genome j in grouping i - if self.rc_too is True, rc cycles through False and True so that an iterator can take the reverse complement of gnm's sequences; otherwise, rc is only False """ for i, genomes_from_group in enumerate(self.target_genomes): for j, gnm in enumerate(genomes_from_group): yield i, j, gnm, False if self.rc_too: yield i, j, gnm, True def _find_covers_in_target_genomes(self): """Find intervals across the target genomes covered by the probe set. This considers the given probe set (self.probes) and determines the intervals, in each genome of the target genomes (as well as their reverse complements), that are covered by the probes. This saves a dict, self.target_covers, as follows: self.target_covers[i][j][b] is a list of all the intervals covered by the probes in the target genome j of grouping i (in the reverse complement of the genome if b is True, and provided sequence if b is False). The endpoints of the intervals are offset so as to give unique integer positions in the genome (e.g., endpoints in the second chromosome are offset based on the length of the first chromosome). There may be duplicate intervals if two probes cover the same region of a sequence. This also counts, for each probe, the number of sequences that it maps to (not counting reverse complements). """ logger.info("Finding probe covers across target genomes") logger.info("Building map from k-mers to probes") # Note that if adapters are added to the probes before this filter # is run (which would be typical), then self.lcf_thres will likely # be less than the probe length. So the k-mer to probe map will # be constructed using the random approach (yielding many k-mers # and thus a slower runtime in finding probe covers) rather than # the pigeonhole approach. kmer_probe_map = probe.SharedKmerProbeMap.construct( probe.construct_kmer_probe_map_to_find_probe_covers( self.probes, self.mismatches, self.lcf_thres, min_k=self.kmer_probe_map_k, k=self.kmer_probe_map_k) ) probe.open_probe_finding_pool(kmer_probe_map, self.cover_range_fn) self.target_covers = {} self.probe_map_counts = Counter() for i, j, gnm, rc in self._iter_target_genomes(): if not rc: logger.info(("Computing coverage in grouping %d (of %d), " "with target genome %d (of %d)"), i + 1, len(self.target_genomes), j + 1, len(self.target_genomes[i])) if i not in self.target_covers: self.target_covers[i] = {} if j not in self.target_covers[i]: self.target_covers[i][j] = {False: None, True: None} gnm_covers = [] length_so_far = 0 for sequence in gnm.seqs: if rc: # Take the reverse complement of sequence rc_map = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'} sequence = ''.join([rc_map.get(b, b) for b in sequence[::-1]]) # Find cover ranges of the probes, while allowing the ranges # to overlap (e.g., if one probe covers two regions that # overlap) probe_cover_ranges = probe.find_probe_covers_in_sequence( sequence, merge_overlapping=False) for p, cover_ranges in probe_cover_ranges.items(): if not rc: self.probe_map_counts[p] += 1 for cover_range in cover_ranges: # Extend the range covered by probe p on both sides # by self.cover_extension cover_start = max(0, cover_range[0] - self.cover_extension) cover_end = min(len(sequence), cover_range[1] + self.cover_extension) # The endpoints of the cover give positions in just # this sequence (chromosome), so adjust them (according # to length_so_far) to give a unique integer position # in the genome gnm adjusted_cover = (cover_start + length_so_far, cover_end + length_so_far) gnm_covers += [adjusted_cover] length_so_far += len(sequence) self.target_covers[i][j][rc] = gnm_covers probe.close_probe_finding_pool() def _compute_bp_covered_in_target_genomes(self): """Count number of bp covered by probes in each target genome. self._find_covers_in_target_genomes() must be called prior to this, so that self.target_covers can be accessed. This saves a dict, self.bp_covered, as follows: self.bp_covered[i][j][b] gives the number of bp covered by the probes in genome j of target genome grouping i (in the reverse complement of j if b is True, and in the provided sequence if b if False). """ logger.info("Computing bases covered across target genomes") self.bp_covered = {} for i, j, gnm, rc in self._iter_target_genomes(): if i not in self.bp_covered: self.bp_covered[i] = {} if j not in self.bp_covered[i]: self.bp_covered[i][j] = {False: None, True: None} covers = self.target_covers[i][j][rc] # Make an IntervalSet out of all covers to merge overlapping # ones and make it easy to count the number of bp covered covers_set = interval.IntervalSet(covers) self.bp_covered[i][j][rc] = len(covers_set) def _compute_average_coverage_in_target_genomes(self): """Calculate the average coverage/depth in each target genome. self._find_covers_in_target_genomes() must be called prior to this, so that self.target_covers can be accessed. This saves a dict, self.average_coverage, as follows: self.average_coverage[i][j][b] gives the average coverage/depth provided by the probes in genome j of target genome grouping i (in the reverse complement of j if b is True, and in the provided sequence if b is False). Specifically, the value is the average, taken across all bases, of the number of probes that hybridize to a region that includes a given base. """ logger.info("Computing average coverage across target genomes") self.average_coverage = {} for i, j, gnm, rc in self._iter_target_genomes(): if i not in self.average_coverage: self.average_coverage[i] = {} if j not in self.average_coverage[i]: self.average_coverage[i][j] = {False: None, True: None} covers = self.target_covers[i][j][rc] # Count the total number of bases covered by all the probe # hybridizations # (covers may include duplicates if two probes hybridize to # the same region, so it is important not to convert probes # to an IntervalSet or merge its intervals) total_covered = sum(c[1] - c[0] for c in covers) # Divide by the genome length to average across bases # (do this including ambiguous bases ('N') and not including them) avg_covg_over_all = float(total_covered) / gnm.size(False) avg_covg_over_unambig = float(total_covered) / gnm.size(True) self.average_coverage[i][j][rc] = (avg_covg_over_all, avg_covg_over_unambig) def _compute_sliding_coverage_in_target_genomes(self, window_length, window_stride): """Calculate the coverage over a sliding window in each target genome. self._find_covers_in_target_genomes() must be called prior to this, so that self.target_covers can be accessed. This saves a dict, self.sliding_coverage, as follows: self.sliding_coverage[i][j][b] gives a dict D provided by the probes in genome j of target genome grouping i (in the reverse complement of j if b is True, and in the provided sequence if b is False). D is a dict mapping a position p of a target genome to the average coverage/depth provided by the probes in a window centered at p. The value is the average, taken across the bases in the window, of the number of probes that hybridize to a region that includes each base in the window. This includes ambiguous bases in the sliding windows. """ logger.info("Computing sliding coverage across target genomes") self.sliding_coverage = {} for i, j, gnm, rc in self._iter_target_genomes(): if i not in self.sliding_coverage: self.sliding_coverage[i] = {} if j not in self.sliding_coverage[i]: self.sliding_coverage[i][j] = {False: None, True: None} covers = self.target_covers[i][j][rc] # Build a dict mapping endpoints to a list of False/True # values, where False indicates the endpoint is an end of # a probe and True indicates the endpoint is a start of a # probe endpoints = defaultdict(list) for c in covers: endpoints[c[0]].append(True) endpoints[c[1]].append(False) # Store, at each base in gnm, the number of probes that cover # a region including the base probe_counts = np.zeros(gnm.size(False), dtype='uint16') curr_count = 0 prev_endpoint = None for endpoint in sorted(endpoints.keys()): if prev_endpoint != None: # Update number of probes between prev_endpoint and # endpoint for pos in range(prev_endpoint, endpoint): probe_counts[pos] = curr_count # Compute the net change in number of probe covers at # this endpoint, which will last until the next endpoint endpoint_types = endpoints[endpoint] net_change = sum([1 if t == True else -1 for t in endpoint_types]) curr_count += net_change prev_endpoint = endpoint # Don't bother updating counts between prev_endpoint and the end of # the genome because all probes should have been ended, so # curr_count should equal 0 # (also, if probes cover the very end of the genome, prev_endpoint # might equal the end of the genome) # Slide over windows and fill in the dict gnm_sliding_coverage gnm_sliding_coverage = {} for window_start in np.arange(0, gnm.size(False), window_stride): window_end = window_start + window_length if window_end > gnm.size(False): # This window stretches past the end of the genome, so # make a new one that goes up to the very end window_end = gnm.size(False) window_start = window_end - window_length middle = window_start + (window_length / 2) window_average_count = np.average( probe_counts[window_start:window_end]) gnm_sliding_coverage[middle] = window_average_count self.sliding_coverage[i][j][rc] = gnm_sliding_coverage def run(self, window_length=50, window_stride=25): """Run all analysis methods. The methods called save their output to self. Args: window_length: number of bp in a window (for use in computing coverage over sliding windows) window_stride: number of bp by which to step (for use in computing coverage over sliding windows) """ self._find_covers_in_target_genomes() self._compute_bp_covered_in_target_genomes() self._compute_average_coverage_in_target_genomes() self._compute_sliding_coverage_in_target_genomes( window_length, window_stride) def write_data_matrix_as_tsv(self, fn): """Write 2D array representing results as a TSV file. Args: fn: path to file to write to """ # Make row headers data = [["Genome", "Num bases covered", "Frac bases covered", "Frac bases covered over unambig", "Average coverage/depth", "Average coverage/depth over unambig"]] # Create a row for every genome, including reverse complements for i, j, gnm, rc in self._iter_target_genomes(): col_header = "%s, genome %d" % (self.target_genomes_names[i], j) if rc: col_header += " (rc)" bp_covered = self.bp_covered[i][j][rc] frac_covered_all = float(bp_covered) / gnm.size(False) frac_covered_unambig = float(bp_covered) / gnm.size(True) avg_covg_all, avg_covg_unambig = self.average_coverage[i][j][rc] row = [col_header, bp_covered, frac_covered_all, frac_covered_unambig, avg_covg_all, avg_covg_unambig] data += [row] # Write to fn as a TSV with open(fn, 'w') as f: for row in data: line = '\t'.join([str(entry) for entry in row]) f.write(line + '\n') def _make_data_matrix_string(self): """Return 2D array representing results (as strings) to output. Returns: 2D array, with row and column headers, containing data to output as a table """ # Make row headers data = [["Genome", "Num bases covered\n[over unambig]", "Average coverage/depth\n[over unambig]"]] # Create a row for every genome, including reverse complements for i, j, gnm, rc in self._iter_target_genomes(): col_header = "%s, genome %d" % (self.target_genomes_names[i], j) if rc: col_header += " (rc)" # Format bp covered bp_covered = self.bp_covered[i][j][rc] frac_covered_all = float(bp_covered) / gnm.size(False) frac_covered_unambig = float(bp_covered) / gnm.size(True) if frac_covered_all < 0.0001: prct_covered_all_str = "<0.01%" else: prct_covered_all_str = "{0:.2%}".format(frac_covered_all) if frac_covered_unambig < 0.0001: prct_covered_unambig_str = "<0.01%" else: prct_covered_unambig_str = "{0:.2%}".format(frac_covered_unambig) bp_covered_str = "%d (%s) [%s]" % (bp_covered, prct_covered_all_str, prct_covered_unambig_str) # Format average covered avg_covg_all, avg_covg_unambig = self.average_coverage[i][j][rc] if avg_covg_all < 0.01: avg_covg_all_str = "<0.01" else: avg_covg_all_str = "{0:.2f}".format(avg_covg_all) if avg_covg_unambig < 0.01: avg_covg_unambig_str = "<0.01" else: avg_covg_unambig_str = "{0:.2f}".format(avg_covg_unambig) avg_covg_str = "%s [%s]" % (avg_covg_all_str, avg_covg_unambig_str) row = [col_header, bp_covered_str, avg_covg_str] data += [row] return data def print_analysis(self): """Print the number of probes and a table of results of the analysis. """ print("NUMBER OF PROBES: %d" % len(self.probes)) print() print(pretty_print.table(self._make_data_matrix_string(), ["left", "right", "right"], header_underline=True)) def write_sliding_window_coverage(self, fn): """Write coverage in sliding windows to a file. Args: fn: path to file to write to """ with open(fn, 'w') as f: # Create an entry for every genome, including reverse complements for i, j, gnm, rc in self._iter_target_genomes(): header = "%s, genome %d" % (self.target_genomes_names[i], j) if rc: header += " (rc)" gnm_sliding_coverage = self.sliding_coverage[i][j][rc] for pos in sorted(gnm_sliding_coverage.keys()): covg = gnm_sliding_coverage[pos] line = '\t'.join([str(x) for x in [header, pos, covg]]) f.write(line + '\n') def write_probe_map_counts(self, fn): """Write number of sequences mapped by each probe to a file. Args: fn: path to file to write to """ with open(fn, 'w') as f: header = ["Probe identifier", "Probe sequence", "Number sequences mapped to"] f.write('\t'.join(header) + '\n') # Create an row for every probe for p, count in self.probe_map_counts.items(): row = [p.identifier(), p.seq_str, count] line = '\t'.join([str(x) for x in row]) f.write(line + '\n')
{"hexsha": "3b4e6b4a2709ee6c4006fc76dbd41fc6bda4b9f6", "size": 27168, "ext": "py", "lang": "Python", "max_stars_repo_path": "catch/coverage_analysis.py", "max_stars_repo_name": "broadinstitute/catch", "max_stars_repo_head_hexsha": "2fedca15f921116f580de8b2ae7ac9972932e59e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 58, "max_stars_repo_stars_event_min_datetime": "2018-01-24T16:31:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T07:46:35.000Z", "max_issues_repo_path": "catch/coverage_analysis.py", "max_issues_repo_name": "broadinstitute/catch", "max_issues_repo_head_hexsha": "2fedca15f921116f580de8b2ae7ac9972932e59e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2018-04-17T17:36:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T11:48:58.000Z", "max_forks_repo_path": "catch/coverage_analysis.py", "max_forks_repo_name": "broadinstitute/catch", "max_forks_repo_head_hexsha": "2fedca15f921116f580de8b2ae7ac9972932e59e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2018-05-23T12:19:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T04:16:00.000Z", "avg_line_length": 47.7469244288, "max_line_length": 81, "alphanum_fraction": 0.6009643698, "include": true, "reason": "import numpy", "num_tokens": 5868}
from abc import ABCMeta, abstractmethod from types import FunctionType from tqdm import tqdm from torch.utils.data import random_split import traceback import shutil from typing import Union from jdit.dataset import DataLoadersFactory from jdit.model import Model from jdit.optimizer import Optimizer import torch import torchvision.transforms as transforms from torchvision.utils import make_grid from torch.utils.tensorboard import SummaryWriter import os import random import csv import numpy as np from functools import wraps class SupTrainer(object): """this is a super class of all trainers It defines: * The basic tools, ``Performance()``, ``Watcher()``, ``Loger()``. * The basic loop of epochs. * Learning rate decay and model check point. """ __metaclass__ = ABCMeta def __new__(cls, *args, **kwargs): instance = super(SupTrainer, cls).__new__(cls) instance._opts = dict() instance._datasets = dict() instance._models = dict() return instance def __init__(self, nepochs: int, logdir: str, gpu_ids_abs: Union[list, tuple] = ()): # os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in gpu_ids_abs]) # self.gpu_ids = [i for i in range(len(gpu_ids_abs))] self.gpu_ids = gpu_ids_abs self.logdir = logdir self.performance = Performance(gpu_ids_abs) self.watcher = Watcher(logdir) self.loger = Loger(logdir) self.use_gpu = True if (len(self.gpu_ids) > 0) and torch.cuda.is_available() else False self.device = torch.device("cuda:%d" % self.gpu_ids[0]) if self.use_gpu else torch.device("cpu") self.input = torch.Tensor() self.ground_truth = torch.Tensor() self.nepochs = nepochs self.current_epoch = 0 self.step = 0 self.start_epoch = 1 def train(self, process_bar_header: str = None, process_bar_position: int = None, subbar_disable=False, record_configs=True, show_network=False, **kwargs): """The main training loop of epochs. :param process_bar_header: The tag name of process bar header, which is used in ``tqdm(desc=process_bar_header)`` :param process_bar_position: The process bar's position. It is useful in multitask, which is used in ``tqdm(position=process_bar_position)`` :param subbar_disable: If show the info of every training set, :param record_configs: If record the training processing data. :param show_network: If show the structure of network. It will cost extra memory, :param kwargs: Any other parameters that passing to ``tqdm()`` to control the behavior of process bar. """ if record_configs: self._record_configs() if show_network: self.plot_graphs_lazy() for epoch in tqdm(range(self.start_epoch, self.nepochs + 1), total=self.nepochs, unit="epoch", desc=process_bar_header, position=process_bar_position, **kwargs): self.current_epoch = epoch self.train_epoch(subbar_disable) self.valid_epoch() self.test() self.watcher.close() def dist_train(self, process_bar_header: str = None, process_bar_position: int = None, subbar_disable=False, record_configs=True, show_network=False, **kwargs): """The main training loop of epochs. :param process_bar_header: The tag name of process bar header, which is used in ``tqdm(desc=process_bar_header)`` :param process_bar_position: The process bar's position. It is useful in multitask, which is used in ``tqdm(position=process_bar_position)`` :param subbar_disable: If show the info of every training set, :param record_configs: If record the training processing data. :param show_network: If show the structure of network. It will cost extra memory, :param kwargs: Any other parameters that passing to ``tqdm()`` to control the behavior of process bar. """ if record_configs: self._record_configs() if show_network: self.plot_graphs_lazy() for epoch in tqdm(range(self.start_epoch, self.nepochs + 1), total=self.nepochs, unit="epoch", desc=process_bar_header, position=process_bar_position, **kwargs): self._datasets["datasets"].loader_train.sampler.set_epoch(epoch) self.current_epoch = epoch self.train_epoch(subbar_disable) self.valid_epoch() self.test() self.watcher.close() def __setattr__(self, key, value): super(SupTrainer, self).__setattr__(key, value) if key == "step" and value != 0: is_change = super(SupTrainer, self).__getattribute__("_change_lr")("step", value) if is_change: super(SupTrainer, self).__getattribute__("_record_configs")("optimizer") elif key == "current_epoch" and value != 0: is_change_lr = super(SupTrainer, self).__getattribute__("_change_lr")("epoch", value) if is_change_lr: super(SupTrainer, self).__getattribute__("_record_configs")("optimizer") super(SupTrainer, self).__getattribute__("_check_point")() super(SupTrainer, self).__getattribute__("_record_configs")("performance") elif isinstance(value, Model): super(SupTrainer, self).__getattribute__("_models").update({key: value}) elif isinstance(value, Optimizer): super(SupTrainer, self).__getattribute__("_opts").update({key: value}) elif isinstance(value, DataLoadersFactory): super(SupTrainer, self).__getattribute__("_datasets").update({key: value}) else: pass def __delattr__(self, item): if isinstance(item, Model): super(SupTrainer, self).__getattribute__("_models").pop(item) elif isinstance(item, Optimizer): super(SupTrainer, self).__getattribute__("_opts").pop(item) elif isinstance(item, DataLoadersFactory): super(SupTrainer, self).__getattribute__("_datasets").pop(item) def __getattribute__(self, name): v = super(SupTrainer, self).__getattribute__(name) if name == "get_data_from_batch": new_fc = super(SupTrainer, self).__getattribute__("_mv_device")(v) return new_fc return v def debug(self): """Debug the trainer. It will check the function * ``self._record_configs()`` save all module's configures. * ``self.train_epoch()`` train one epoch with several samples. So, it is vary fast. * ``self.valid_epoch()`` valid one epoch using dataset_valid. * ``self._change_lr()`` do learning rate change. * ``self._check_point()`` do model check point. * ``self.test()`` do test by using dataset_test. Before debug, it will reset the ``datasets`` and only pick up several samples to do fast test. For test, it build a ``log_debug`` directory to save the log. :return: bool. It will return ``True``, if passes all the tests. """ self.watcher.close() self.logdir = "log_debug" # reset `log_debug` if os.path.exists(self.logdir): try: shutil.rmtree("log_debug") # 递归删除文件夹 except Exception as e: print('Can not remove logdir `log_debug`\n', e) traceback.print_exc() self.watcher = Watcher(self.logdir) self.loger = Loger(self.logdir) self.performance = Performance() # reset datasets and dataloaders for item in vars(self).values(): if isinstance(item, DataLoadersFactory): item.batch_size = 2 item.shuffle = False item.num_workers = 2 item.dataset_train, _ = random_split(item.dataset_train, [2, len(item.dataset_train) - 2]) item.dataset_valid, _ = random_split(item.dataset_valid, [2, len(item.dataset_valid) - 2]) item.dataset_test, _ = random_split(item.dataset_test, [2, len(item.dataset_test) - 2]) item.build_loaders() item.sample_dataset_size = 1 print("datas range: (%s, %s)" % (item.samples_train[0].min().cpu().numpy(), item.samples_train[0].max().cpu().numpy())) if isinstance(item, Model): item.check_point_pos = 2 if isinstance(item, Optimizer): item.decay_position = 2 item.position_type = "step" # the tested functions debug_fcs = [self._record_configs, self.train_epoch, self.valid_epoch, self._change_lr, self._check_point, self.test] print("{:=^30}".format(">Debug<")) success = True for fc in debug_fcs: print("{:_^30}".format(fc.__name__ + "()")) try: if fc.__name__ == "_change_lr": self.step = 2 is_lr_change = fc() if not is_lr_change: raise AssertionError("doesn't change learning rate!") elif fc.__name__ == "_check_point": self.current_epoch = 2 fc() else: fc() except Exception as e: print('Error:', e) traceback.print_exc() success = False else: print("pass!") self.watcher.close() if success: print("\033[1;32;40m" + "{:=^30}".format(">Debug Successful!<")) else: print("\033[1;31;40m" + "{:=^30}".format(">Debug Failed!<")) if os.path.exists(self.logdir): try: shutil.rmtree("log_debug") # 递归删除文件夹 except Exception as e: print('Can not remove logdir `log_debug`\n', e) traceback.print_exc() return success @abstractmethod def train_epoch(self, subbar_disable=False): """ You get train loader and do a loop to deal with data. .. Caution:: You must record your training step on ``self.step`` in your loop by doing things like this ``self.step += 1``. Example:: for iteration, batch in tqdm(enumerate(self.datasets.loader_train, 1)): self.step += 1 self.input_cpu, self.ground_truth_cpu = self.get_data_from_batch(batch, self.device) self._train_iteration(self.opt, self.compute_loss, tag="Train") :return: """ pass def _mv_device(self, f): @wraps(f) def wrapper(*args, **kwargs): variables = f(*args, **kwargs) device = super(SupTrainer, self).__getattribute__("device") variables = tuple(v.to(device) if hasattr(v, "to") else v for v in variables) return variables return wrapper def get_data_from_batch(self, batch_data: list, device: torch.device): """ Split your data from one batch data to specify . If your dataset return something like ``return input_data, label``. It means that two values need unpack. So, you need to split the batch data into two parts, like this ``input, ground_truth = batch_data[0], batch_data[1]`` .. Caution:: Don't forget to move these data to device, by using ``input.to(device)`` . :param batch_data: One batch data from dataloader. :param device: the device that data will be located. :return: The certain variable with correct device location. Example:: # load and unzip the data from one batch tuple (input, ground_truth) input, ground_truth = batch_data[0], batch_data[1] # move these data to device return input.to(device), ground_truth.to(device) """ input_img, ground_truth = batch_data[0], batch_data[1] return input_img, ground_truth def _train_iteration(self, opt: Optimizer, compute_loss_fc: FunctionType, csv_filename: str = "Train"): opt.zero_grad() loss, var_dic = compute_loss_fc() loss.backward() opt.step() self.watcher.scalars(var_dict=var_dic, global_step=self.step, tag="Train") opt_name = list(self._opts.keys())[list(self._opts.values()).index(opt)] self.watcher.scalars(var_dict={"Learning rate": opt.lr}, global_step=self.step, tag=opt_name) self.loger.write(self.step, self.current_epoch, var_dic, csv_filename, header=self.step <= 1) def _record_configs(self, configs_names=None): """to register the ``Model`` , ``Optimizer`` , ``Trainer`` and ``Performance`` config info. The default is record the info of ``trainer`` and ``performance`` config. If you want to record more configures info, you can add more module to ``self.loger.regist_config`` . The following is an example. Example:: # for opt.configure self.loger.regist_config(opt, self.current_epoch) # for model.configure self.loger.regist_config(model, self.current_epoch ) # for self.performance.configure self.loger.regist_config(self.performance, self.current_epoch) # for trainer.configure self.loger.regist_config(self, self.current_epoch) :return: """ if (configs_names is None) or "model" in configs_names: _models = super(SupTrainer, self).__getattribute__("_models") for name, model in _models.items(): self.loger.regist_config(model, self.current_epoch, self.step, config_filename=name) if (configs_names is None) or "dataset" in configs_names: _datasets = super(SupTrainer, self).__getattribute__("_datasets") for name, dataset in _datasets.items(): self.loger.regist_config(dataset, config_filename=name) if (configs_names is None) or "optimizer" in configs_names: _opts = super(SupTrainer, self).__getattribute__("_opts") for name, opt in _opts.items(): self.loger.regist_config(opt, self.current_epoch, self.step, config_filename=name) if (configs_names is None) or "trainer" in configs_names or (configs_names is None): self.loger.regist_config(self, config_filename=self.__class__.__name__) if (configs_names is None) or "performance" in configs_names: self.loger.regist_config(self.performance, self.current_epoch, self.step, config_filename="performance") def plot_graphs_lazy(self): """Plot model graph on tensorboard. To plot all models graphs in trainer, by using variable name as model name. :return: """ _models = super(SupTrainer, self).__getattribute__("_models") for name, model in _models.items(): self.watcher.graph_lazy(model, name) def _check_point(self): _models = super(SupTrainer, self).__getattribute__("_models") current_epoch = super(SupTrainer, self).__getattribute__("current_epoch") logdir = super(SupTrainer, self).__getattribute__("logdir") for name, model in _models.items(): model.is_checkpoint(name, current_epoch, logdir) def _change_lr(self, position_type="step", position=2): is_change = True _opts = super(SupTrainer, self).__getattribute__("_opts") for opt in _opts.values(): if opt.position_type == position_type: reset_lr = opt.is_reset_lr(position) if reset_lr: opt.do_lr_decay(reset_lr=reset_lr) elif opt.is_decay_lr(position): opt.do_lr_decay() else: is_change = False return is_change def valid_epoch(self): pass def test(self): pass @property def configure(self): config_dict = dict() config_dict["nepochs"] = int(self.nepochs) return config_dict class Performance(object): """this is a performance watcher. """ def __init__(self, gpu_ids_abs: Union[list, tuple] = ()): self.config_dic = dict() self.gpu_ids = gpu_ids_abs def mem_info(self): from psutil import virtual_memory mem = virtual_memory() self.config_dic['mem_total_GB'] = round(mem.total / 1024 ** 3, 2) self.config_dic['mem_used_GB'] = round(mem.used / 1024 ** 3, 2) self.config_dic['mem_percent'] = mem.percent # self.config_dic['mem_free_GB'] = round(mem.free // 1024 ** 3, 2) # self._set_dict_smooth("mem_total_M", mem.total // 1024 ** 2, smooth=0.3) # self._set_dict_smooth("mem_used_M", mem.used // 1024 ** 2, smooth=0.3) # self._set_dict_smooth("mem_free_M", mem.free // 1024 ** 2, smooth=0.3) # self._set_dict_smooth("mem_percent", mem.percent, smooth=0.3) def gpu_info(self): # pip install nvidia-ml-py3 if len(self.gpu_ids) >= 0 and torch.cuda.is_available(): try: import pynvml pynvml.nvmlInit() self.config_dic['gpu_driver_version'] = pynvml.nvmlSystemGetDriverVersion() for gpu_id in self.gpu_ids: handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id) gpu_id_name = "gpu%s" % gpu_id mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle) gpu_utilize = pynvml.nvmlDeviceGetUtilizationRates(handle) self.config_dic['%s_device_name' % gpu_id_name] = pynvml.nvmlDeviceGetName(handle) self.config_dic['%s_mem_total' % gpu_id_name] = gpu_mem_total = round(mem_info.total / 1024 ** 3, 2) self.config_dic['%s_mem_used' % gpu_id_name] = gpu_mem_used = round(mem_info.used / 1024 ** 3, 2) # self.config_dic['%s_mem_free' % gpu_id_name] = gpu_mem_free = mem_info.free // 1024 ** 2 self.config_dic['%s_mem_percent' % gpu_id_name] = round((gpu_mem_used / gpu_mem_total) * 100, 1) self._set_dict_smooth('%s_utilize_gpu' % gpu_id_name, gpu_utilize.gpu, 0.8) # self.config_dic['%s_utilize_gpu' % gpu_id_name] = gpu_utilize.gpu # self.config_dic['%s_utilize_memory' % gpu_id_name] = gpu_utilize.memory pynvml.nvmlShutdown() except Exception as e: print(e) def _set_dict_smooth(self, key: str, value, smooth: float = 0.3): now = value if key in self.config_dic: last = self.config_dic[key] self.config_dic[key] = now * (1 - smooth) + last * smooth else: self.config_dic[key] = now @property def configure(self): self.mem_info() self.gpu_info() self.gpu_info() return self.config_dic class Loger(object): """this is a log recorder. """ def __init__(self, logdir: str = "log"): self.logdir = logdir self.regist_dict = dict({}) self._build_dir() def _build_dir(self): if not os.path.exists(self.logdir): print("%s directory is not found. Build now!" % dir) os.makedirs(self.logdir) def regist_config(self, opt_model_data: Union[SupTrainer, Optimizer, Model, DataLoadersFactory, Performance], epoch=None, step=None, config_filename: str = None): """ get obj's configure. flag is time point, usually use `epoch`. obj_name default is 'opt_model_data' class name. If you pass two same class boj, you should give each of them a unique `obj_name` :param opt_model_data: Optm, Model or dataset :param epoch: time point such as `epoch` :param flag_name: name of flag `epoch` :param config_filename: default is 'opt_model_data' class name :return: """ if config_filename is None: config_filename = opt_model_data.__class__.__name__ obj_config_dic = opt_model_data.configure.copy() path = os.path.join(self.logdir, config_filename + ".csv") is_registed = config_filename in self.regist_dict.keys() if not is_registed: # 若没有注册过,注册该config self.regist_dict[config_filename] = obj_config_dic.copy() config_dic = dict() if step is not None: config_dic.update({"step": step}) if epoch is not None: config_dic.update({"epoch": epoch}) config_dic.update(obj_config_dic) # pdg = pd.DataFrame.from_dict(config_dic, orient="index").transpose() # pdg.to_csv(path, mode="w", encoding="utf-8", index=False, header=True) with open(path, "w", newline = "", encoding="utf-8") as csvfile: writer = csv.writer(csvfile) # 先写入columns_name writer.writerow(config_dic.keys()) # 写入多行用writerows writer.writerow(config_dic.values()) else: # 已经注册过config last_config = self.regist_dict[config_filename] if last_config != obj_config_dic: # 若已经注册过config,比对最后一次结果,如果不同,则写入,相同无操作。 self.regist_dict[config_filename] = obj_config_dic.copy() config_dic = dict() if step is not None: config_dic.update({"step": step}) if epoch is not None: config_dic.update({"epoch": epoch}) config_dic.update(obj_config_dic) # pdg = pd.DataFrame.from_dict(config_dic, orient="index").transpose() # pdg.to_csv(path, mode="a", encoding="utf-8", index=False, header=False) with open(path, "a",newline = "", encoding="utf-8") as csvfile: writer = csv.writer(csvfile) # 先写入columns_name # writer.writerow(config_dic.keys()) # 写入多行用writerows writer.writerow(config_dic.values()) def write(self, step: int, current_epoch: int, msg_dic: dict, filename: str, header=True): if msg_dic is None: return else: for key, value in msg_dic.items(): if hasattr(value, "item"): msg_dic[key] = value.detach().cpu().item() path = os.path.join(self.logdir, filename + ".csv") dic = dict({"step": step, "current_epoch": current_epoch}) dic.update(msg_dic) # pdg = pd.DataFrame.from_dict(dic, orient="index").transpose() # pdg.to_csv(path, mode="a", encoding="utf-8", index=False, header=header) with open(path, "a", newline = "", encoding="utf-8") as csvfile: writer = csv.writer(csvfile) if header: writer.writerow(dic.keys()) writer.writerow(dic.values()) def clear_regist(self): self.regist_dict = dict({}) class Watcher(object): """this is a params and images watcher """ def __init__(self, logdir: str): self.logdir = logdir self.writer = SummaryWriter(logdir) self._build_dir(logdir) self.training_progress_images = [] self.gif_duration = 0.5 self.handel = None def model_params(self, model: torch.nn.Module, global_step: int): for name, param in model.named_parameters(): if "bias" in name: continue self.writer.add_histogram(name, param.clone().cpu().data.numpy(), global_step) def scalars(self, var_dict: dict, global_step: int, tag="Train"): for key, scalar in var_dict.items(): self.writer.add_scalars(key, {tag: scalar}, global_step) @staticmethod def _sample(tensor: torch.Tensor, num_samples: int, shuffle=True): total = len(tensor) if num_samples > total: raise ValueError("sample(%d) greater than the total amount(%d)!" % (num_samples, len(tensor))) if shuffle: rand_index = random.sample(list(range(total)), num_samples) sampled_tensor: torch.Tensor = tensor[rand_index] else: sampled_tensor: torch.Tensor = tensor[:num_samples] return sampled_tensor def image(self, img_tensors: torch.Tensor, global_step: int, tag: str = "Train/input", grid_size: Union[list, tuple] = (3, 1), shuffle=True, save_file=False): if len(img_tensors.size()) != 4: raise TypeError("img_tensors rank should be 4, got %d instead" % len(img_tensors.size())) self._build_dir(os.path.join(self.logdir, "plots", tag)) rows, columns = grid_size[0], grid_size[1] batch_size = len(img_tensors) # img_tensors =>(batchsize, 3, 256, 256) num_samples: int = min(batch_size, rows * columns) sampled_tensor = self._sample(img_tensors, num_samples, shuffle).detach().cpu() # (sample_num, 3, 32,32) tensors # sampled_images = map(transforms.Normalize(mean, std), sampled_tensor) # (sample_num, 3, 32,32) images sampled_images: torch.Tensor = make_grid(sampled_tensor, nrow=rows, normalize=True, scale_each=True) self.writer.add_image(tag, sampled_images, global_step) if save_file: img = transforms.ToPILImage()(sampled_images) filename = "%s/plots/%s/E%03d.png" % (self.logdir, tag, global_step) img.save(filename) def embedding(self, data: torch.Tensor, label_img: torch.Tensor = None, label=None, global_step: int = None, tag: str = "embedding"): """ Show PCA, t-SNE of `mat` on tensorboard :param data: An img tensor with shape of (N, C, H, W) :param label_img: Label img on each data point. :param label: Label of each img. It will convert to str. :param global_step: Img step label. :param tag: Tag of this plot. """ features = data.view(len(data), -1) self.writer.add_embedding(features, metadata=label, label_img=label_img, global_step=global_step, tag=tag) def set_training_progress_images(self, img_tensors: torch.Tensor, grid_size: Union[list, tuple] = (3, 1)): if len(img_tensors.size()) != 4: raise ValueError("img_tensors rank should be 4, got %d instead" % len(img_tensors.size())) rows, columns = grid_size[0], grid_size[1] batch_size = len(img_tensors) # img_tensors =>(batchsize, 3, 256, 256) num_samples = min(batch_size, rows * columns) sampled_tensor = self._sample(img_tensors, num_samples, False).detach().cpu() # (sample_num, 3, 32,32) tensors sampled_images = make_grid(sampled_tensor, nrow=rows, normalize=True, scale_each=True) img_grid = np.transpose(sampled_images.numpy(), (1, 2, 0)) self.training_progress_images.append(img_grid) def save_in_gif(self): import imageio import warnings filename = "%s/plots/training.gif" % self.logdir with warnings.catch_warnings(): warnings.simplefilter("ignore") imageio.mimsave(filename, self.training_progress_images, duration=self.gif_duration) self.training_progress_images = None def graph(self, model: Union[torch.nn.Module, torch.nn.DataParallel, Model], name: str, use_gpu: bool, *input_shape): if isinstance(model, torch.nn.Module): proto_model: torch.nn.Module = model num_params: int = self._count_params(proto_model) elif isinstance(model, torch.nn.DataParallel): proto_model: torch.nn.Module = model.module num_params: int = self._count_params(proto_model) elif isinstance(model, Model): proto_model: torch.nn.Module = model.model num_params: int = model.num_params else: raise TypeError("Only `nn.Module`, `nn.DataParallel` and `Model` can be passed!") model_logdir = os.path.join(self.logdir, name) self._build_dir(model_logdir) writer_for_model = SummaryWriter(log_dir=model_logdir) input_list = tuple(torch.ones(shape).cuda() if use_gpu else torch.ones(shape) for shape in input_shape) self.scalars({'ParamsNum': num_params}, 0, tag="ParamsNum") self.scalars({'ParamsNum': num_params}, 1, tag="ParamsNum") proto_model(*input_list) writer_for_model.add_graph(proto_model, input_list) writer_for_model.close() def graph_lazy(self, model: Union[torch.nn.Module, torch.nn.DataParallel, Model], name: str): if isinstance(model, torch.nn.Module): proto_model: torch.nn.Module = model num_params: int = self._count_params(proto_model) elif isinstance(model, torch.nn.DataParallel): proto_model: torch.nn.Module = model.module num_params: int = self._count_params(proto_model) elif isinstance(model, Model): proto_model: torch.nn.Module = model.model num_params: int = model.num_params else: raise TypeError("Only `nn.Module`, `nn.DataParallel` and `Model` can be passed!, got %s instead" % model) model_logdir = os.path.join(self.logdir, name) self._build_dir(model_logdir) self.scalars({'ParamsNum': num_params}, 0, tag=name) self.scalars({'ParamsNum': num_params}, 1, tag=name) def hook(model, layer_input, layer_output): writer_for_model = SummaryWriter(log_dir=model_logdir) input_for_test = tuple(i[0].detach().clone().unsqueeze(0) for i in layer_input) handel.remove() if isinstance(proto_model, torch.nn.DataParallel): writer_for_model.add_graph(proto_model.module, input_for_test) else: writer_for_model.add_graph(proto_model, input_for_test) writer_for_model.close() del writer_for_model handel = model.register_forward_hook(hook=hook) def close(self): # self.writer.export_scalars_to_json("%s/scalers.json" % self.logdir) if self.training_progress_images: self.save_in_gif() self.writer.close() @staticmethod def _count_params(proto_model: torch.nn.Module): """count the total parameters of model. :param proto_model: pytorch module :return: number of parameters """ num_params = 0 for param in proto_model.parameters(): num_params += param.numel() return num_params @staticmethod def _build_dir(dirs: str): if not os.path.exists(dirs): os.makedirs(dirs) if __name__ == '__main__': import torch.nn as nn test_log = Loger('log') test_model = nn.Linear(10, 1) test_opt = Optimizer(test_model.parameters(), "Adam", lr_decay=2, decay_position=[1, 3]) test_log.regist_config(test_opt, epoch=1) test_opt.do_lr_decay() test_log.regist_config(test_opt, epoch=2) test_log.regist_config(test_opt, epoch=3) test_log.regist_config(test_opt)
{"hexsha": "128b50f9e2eb95fe62d8e168e07de1640d571f19", "size": 31540, "ext": "py", "lang": "Python", "max_stars_repo_path": "jdit/trainer/super.py", "max_stars_repo_name": "dingguanglei/jdit", "max_stars_repo_head_hexsha": "ef878e696c9e2fad5069f106496289d4e4cc6154", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2019-06-18T15:56:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T13:11:13.000Z", "max_issues_repo_path": "jdit/trainer/super.py", "max_issues_repo_name": "dingguanglei/jdit", "max_issues_repo_head_hexsha": "ef878e696c9e2fad5069f106496289d4e4cc6154", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-10-24T01:09:56.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-08T07:13:48.000Z", "max_forks_repo_path": "jdit/trainer/super.py", "max_forks_repo_name": "dingguanglei/jdit", "max_forks_repo_head_hexsha": "ef878e696c9e2fad5069f106496289d4e4cc6154", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-01-11T01:12:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-12T10:15:43.000Z", "avg_line_length": 43.146374829, "max_line_length": 120, "alphanum_fraction": 0.6135066582, "include": true, "reason": "import numpy", "num_tokens": 7229}
using RetroSignalModel using RetroSignalModel: RtgMTK using Documenter makedocs(; modules=[RetroSignalModel], authors="stevengogogo <stevengogogo4321@gmail.com> and contributors", repo="https://github.com/ntumitolab/RetroSignalModel.jl/blob/{commit}{path}#L{line}", sitename="RetroSignalModel.jl", format=Documenter.HTML(; prettyurls=get(ENV, "CI", "false") == "true", canonical="https://ntumitolab.github.io/RetroSignalModel.jl", assets=String[] ), pages=[ "Home" => "index.md", ] ) deploydocs(; branch="gh-pages", devbranch="main", repo="github.com/ntumitolab/RetroSignalModel.jl.git" )
{"hexsha": "bdbe9d93a46561e21608ee84b0d5c74f82b54d58", "size": 667, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "NTUMitoLab/RetroSignalModel.jl", "max_stars_repo_head_hexsha": "c39b22a312dccfc174d3a4db4c7dc6af8aca76b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "NTUMitoLab/RetroSignalModel.jl", "max_issues_repo_head_hexsha": "c39b22a312dccfc174d3a4db4c7dc6af8aca76b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "NTUMitoLab/RetroSignalModel.jl", "max_forks_repo_head_hexsha": "c39b22a312dccfc174d3a4db4c7dc6af8aca76b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.68, "max_line_length": 89, "alphanum_fraction": 0.6716641679, "num_tokens": 194}
"""HiddenFootprints Core Functions Read from Waymo records. Project labels in a sequence to reference frames. """ import numpy as np import tensorflow as tf from .utils import get_global_box, box_label_to_corners, global_box_to_camera_image_matmul, convert_camera_gc def read_single_frame(frame_record, open_dataset, selected_camera='FRONT'): """Return a dictionary for the given frame. frame['im']: image. frame['extrinsic']: selected camera extrinsic matrix. frame['intrinsic']: selected camera intrinsic matrix. frame['boxes_coords']: global coordinates of 8 corners of 3d labeled boxes, size Nx8x3 frame['boxes_id']: semantic id of 3d labeled boxes. See Waymo documentation for classes. """ frame = {} ################# # images for index, image in enumerate(frame_record.images): if open_dataset.CameraName.Name.Name(image.name) == selected_camera: im = tf.image.decode_jpeg(image.image).numpy() frame['im'] = im ################# # camera extrinsic (global frame to camera frame) and intrinsic for camera in frame_record.context.camera_calibrations: if open_dataset.CameraName.Name.Name(camera.name) == selected_camera: extrinsic_mat = np.array(camera.extrinsic.transform).reshape(4,4)# this is camera to vehicle extrinsic = convert_camera_gc(extrinsic_mat, np.array(frame_record.pose.transform).reshape(4,4)) # 4x4 intrinsic = camera.intrinsic # 9 frame['extrinsic'] = extrinsic frame['intrinsic'] = intrinsic ################# # 3D boxes in global frame frame['boxes_coords'] = [] frame['boxes_id'] = [] for chosen_obj in frame_record.laser_labels: # convert box to 3d cube corners in global frame obj_corners_3d_global_standard = get_global_box(frame_record, chosen_obj) # 3x8 frame['boxes_coords'].append(obj_corners_3d_global_standard.transpose()) frame['boxes_id'].append(chosen_obj.type) frame['boxes_coords'] = np.array(frame['boxes_coords']) return frame def propagate(reference_frame_idx, frames): """Return all boxes in the segment propagated into the reference frame, shape Nx3 reference_frame_propagated_labels: [id,x,y] """ reference_frame_camera_extrinsic = frames[reference_frame_idx]['extrinsic'] reference_frame_camera_intrinsic = frames[reference_frame_idx]['intrinsic'] reference_frame_propagated_labels = [] for source_frame_idx in range(len(frames)): # loop through all frames for box_idx in range(len(frames[source_frame_idx]['boxes_id'])): # for each object in current frame semantic_id = frames[source_frame_idx]['boxes_id'][box_idx] chosen_box_coords = frames[source_frame_idx]['boxes_coords'][box_idx,:].reshape((8,3)) # project to camera frame of 8 corners box_coords_projected_8_corners = global_box_to_camera_image_matmul(chosen_box_coords, reference_frame_camera_extrinsic, reference_frame_camera_intrinsic) if box_coords_projected_8_corners.shape[0]>4: # valid projected bottom face of 3D boxes footprint_x, footprint_y = box_coords_projected_8_corners[4:,:].mean(axis=0) reference_frame_propagated_labels.append([semantic_id, footprint_x, footprint_y]) return np.array(reference_frame_propagated_labels)
{"hexsha": "4122e054794241c0f7d5c63ee0cbee61c23c77fa", "size": 3609, "ext": "py", "lang": "Python", "max_stars_repo_path": "hiddenfootprints/core.py", "max_stars_repo_name": "jinsungit/hiddenfootprints", "max_stars_repo_head_hexsha": "13ae322cc77435809a408152fd2406dbe16ce9a6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-09-09T18:10:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T08:16:18.000Z", "max_issues_repo_path": "hiddenfootprints/core.py", "max_issues_repo_name": "jinsungit/hiddenfootprints", "max_issues_repo_head_hexsha": "13ae322cc77435809a408152fd2406dbe16ce9a6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hiddenfootprints/core.py", "max_forks_repo_name": "jinsungit/hiddenfootprints", "max_forks_repo_head_hexsha": "13ae322cc77435809a408152fd2406dbe16ce9a6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8701298701, "max_line_length": 114, "alphanum_fraction": 0.6694375173, "include": true, "reason": "import numpy", "num_tokens": 764}
#pragma once #include <mtlog/badbotlogger.hpp> #include <sharedstorage/sharedstrings.hpp> #include <atomic> #include <thread> #include <boost/signals2.hpp> using namespace std; using namespace boost; class ControllerBase : protected BadBotLogger { public: typedef boost::signals2::signal<void (std::string source, unsigned char vLeft, unsigned char vRight)> signalDrivelineCmd; typedef signalDrivelineCmd::slot_type slot_signalDrivelineCmd; protected: //how many frames to keep the motor on once drive command is issued atomic<int> _driveline_on_counter; //decremented with every read frame //read from config //how much person can deviate before we take corrective action int _DEADZONE_MIN; int _DEADZONE_MAX; //how many ms to repeat the last action int _MS_TO_REPEAT_LAST_CMD; int _CMD_CALCULATION_FREQUENCY; int _MAX_NO_UPDATE_DURATION; std::thread _thread_cmd_generator; bool _keep_running = true; //flagged by the orchestrator //if true signals will be raised an calcualtions will be done bool _active_controller = false; signalDrivelineCmd _signalDrivelineCmd; //safety feature, how long since we last got a sensor update? //if we do not get updated in _MAX_NO_UPDATE_DURATION we stop issuing commands std::chrono::time_point<std::chrono::system_clock> _last_updated_inputs_at; protected: bool init(std::string ymlNodeName); /** * Utility function that takes returns true if * current time - _last_updated_inputs_at < _MAX_NO_UPDATE_DURATION * or false otherwise */ bool isInputStillValid(); /** * updates the member field _last_updated_inputs_at with current system clock timepoint */ void updateLastInputRxTime(); public: ControllerBase(std::string name); bool cleanup(); virtual void calculateNextFrame() = 0; void connectToSignalDrivelineCmd(const slot_signalDrivelineCmd &slot); virtual void setActive(bool state); void raiseDrivelineSignal(std::string source, unsigned char vLeft, unsigned char vRight); };
{"hexsha": "7beefe375f273a31d9cca4ee5b9a18073f1cf476", "size": 2284, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "fuse/controllers/controllerbase.hpp", "max_stars_repo_name": "badbot-v1/motion-controller", "max_stars_repo_head_hexsha": "560a8dfc19d628a7f474d26d0cbdf0426f683020", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fuse/controllers/controllerbase.hpp", "max_issues_repo_name": "badbot-v1/motion-controller", "max_issues_repo_head_hexsha": "560a8dfc19d628a7f474d26d0cbdf0426f683020", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fuse/controllers/controllerbase.hpp", "max_forks_repo_name": "badbot-v1/motion-controller", "max_forks_repo_head_hexsha": "560a8dfc19d628a7f474d26d0cbdf0426f683020", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5882352941, "max_line_length": 129, "alphanum_fraction": 0.6821366025, "num_tokens": 499}
function evaluate_to_array(backend::ArrayBackend, x::Number, target_dimensions; seed=nothing) evaluate(backend, convert(FieldExpr, x), target_dimensions) end function evaluate_to_array(backend::ArrayBackend, fexpr::FieldExpr, target_dimensions; seed=nothing) af = wrapped_array_function([fexpr => target_dimensions], []) cf = compile_wrapped_array_function(backend, af) cf(seed=seed) end function evaluate(backend::ArrayBackend, fexpr::FieldExpr, target_dimensions=nothing; seed=nothing) if target_dimensions === nothing target_dimensions = collect(known_dimensions_expr(fexpr)) end if !is_stochastic(fexpr) dims = target_dimensions else dims = [target_dimensions..., STOCHASTIC_DIMENSION] end ffunc = field_function([fexpr => dims], []) cffunc = compile_field_function(backend, ffunc) cffunc() end evaluate(args...; kwds...) = evaluate(default_array_backend(), args...; kwds...) function known_field{T <: KnownDimension}(x::BackendArray, dimensions::Array{T, 1}) @assert ndims(x) == length(dimensions) sdims = find(dimensions .== STOCHASTIC_DIMENSION) if length(sdims) == 0 regular_dims = dimensions stochastic_info = StochasticInfo() else regular_dims = [dimensions[1:sdims[1]-1]..., dimensions[sdims[1]+1:end]...] stochastic_info = StochasticInfo(size(x, sdims[1])) end KnownField(regular_dims, x, stochastic_info) end
{"hexsha": "09e8a09337d51d5fb48a975830149e9b19adda10", "size": 1460, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/evaluation.jl", "max_stars_repo_name": "fluxion/FluxionFields.jl", "max_stars_repo_head_hexsha": "663b5e0f73ae2396fa9e33abc0f3742a58d57469", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/evaluation.jl", "max_issues_repo_name": "fluxion/FluxionFields.jl", "max_issues_repo_head_hexsha": "663b5e0f73ae2396fa9e33abc0f3742a58d57469", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/evaluation.jl", "max_forks_repo_name": "fluxion/FluxionFields.jl", "max_forks_repo_head_hexsha": "663b5e0f73ae2396fa9e33abc0f3742a58d57469", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4444444444, "max_line_length": 100, "alphanum_fraction": 0.7075342466, "num_tokens": 367}
""" These functions have no direct analog in the standard python data analytics stack, or require information about the internal state of the system beyond what is present in the function call. We provide them in a structure that makes it easy for the model elements to call. """ import inspect import os import re import pickle import random import warnings from importlib.machinery import SourceFileLoader import numpy as np import pandas as pd import xarray as xr import scipy.stats as stats from . import utils from .external import External, Excels from pysd._version import __version__ small_vensim = 1e-6 # What is considered zero according to Vensim Help class Stateful(object): # the integrator needs to be able to 'get' the current state of the object, # and get the derivative. It calculates the new state, and updates it. # The state can be any object which is subject to basic (element-wise) # algebraic operations def __init__(self): self._state = None self.shape_info = None def __call__(self, *args, **kwargs): return self.state @property def state(self): if self._state is None: raise AttributeError('Attempt to call stateful element' + ' before it is initialized.') return self._state @state.setter def state(self, new_value): if self.shape_info: self._state = xr.DataArray(data=new_value, **self.shape_info) else: self._state = new_value class DynamicStateful(Stateful): def __init__(self): super().__init__() def update(self, state): try: self.state = state except Exception as err: raise ValueError(err.args[0] + "\n\n" + "Could not update the value of " + self.py_name) class Integ(DynamicStateful): """ Implements INTEG function """ def __init__(self, ddt, initial_value, py_name="Integ object"): """ Parameters ---------- ddt: function This will become an attribute of the object initial_value: function Initial value py_name: str Python name to identify the object """ super().__init__() self.init_func = initial_value self.ddt = ddt self.shape_info = None self.py_name = py_name def initialize(self, init_val=None): if init_val is None: self.state = self.init_func() else: self.state = init_val if isinstance(self.state, xr.DataArray): self.shape_info = {'dims': self.state.dims, 'coords': self.state.coords} def export(self): return {self.py_name: { 'state': self.state, 'shape_info': self.shape_info}} class Delay(DynamicStateful): """ Implements DELAY function """ # note that we could have put the `delay_input` argument as a parameter to # the `__call__` function, and more closely mirrored the vensim syntax. # However, people may get confused this way in thinking that they need # only one delay object and can call it with various arguments to delay # whatever is convenient. This method forces them to acknowledge that # additional structure is being created in the delay object. def __init__(self, delay_input, delay_time, initial_value, order, tstep=lambda: 0, py_name="Delay object"): """ Parameters ---------- delay_input: function delay_time: function initial_value: function order: function py_name: str Python name to identify the object """ super().__init__() self.init_func = initial_value self.delay_time_func = delay_time self.input_func = delay_input self.order_func = order self.order = None self.tstep = tstep self.shape_info = None self.py_name = py_name def initialize(self, init_val=None): order = self.order_func() if order != int(order): warnings.warn(self.py_name + '\n' + 'Casting delay order ' + f'from {order} to {int(order)}') self.order = int(order) # The order can only be set once if self.order*self.tstep() > np.min(self.delay_time_func()): while self.order*self.tstep() > np.min(self.delay_time_func()): self.order -= 1 warnings.warn(self.py_name + '\n' + 'Delay time very small, casting delay order ' + f'from {int(order)} to {self.order}') if init_val is None: init_state_value = self.init_func() * self.delay_time_func() else: init_state_value = init_val * self.delay_time_func() if isinstance(init_state_value, xr.DataArray): # broadcast self.state self.state = init_state_value.expand_dims({ '_delay': np.arange(self.order)}, axis=0) self.shape_info = {'dims': self.state.dims, 'coords': self.state.coords} else: self.state = np.array([init_state_value] * self.order) def __call__(self): if self.shape_info: return self.state[-1].reset_coords('_delay', drop=True)\ / self.delay_time_func() else: return self.state[-1] / self.delay_time_func() def ddt(self): outflows = self.state / self.delay_time_func() inflows = np.roll(outflows, 1, axis=0) if self.shape_info: inflows[0] = self.input_func().values else: inflows[0] = self.input_func() return (inflows - outflows) * self.order def export(self): return {self.py_name: { 'state': self.state, 'shape_info': self.shape_info}} class DelayN(DynamicStateful): """ Implements DELAY N function """ # note that we could have put the `delay_input` argument as a parameter to # the `__call__` function, and more closely mirrored the vensim syntax. # However, people may get confused this way in thinking that they need # only one delay object and can call it with various arguments to delay # whatever is convenient. This method forces them to acknowledge that # additional structure is being created in the delay object. def __init__(self, delay_input, delay_time, initial_value, order, tstep, py_name): """ Parameters ---------- delay_input: function delay_time: function initial_value: function order: function py_name: str Python name to identify the object """ super().__init__() self.init_func = initial_value self.delay_time_func = delay_time self.input_func = delay_input self.order_func = order self.order = None self.times = None self.tstep = tstep self.shape_info = None self.py_name = py_name def initialize(self, init_val=None): order = self.order_func() if order != int(order): warnings.warn(self.py_name + '\n' + 'Casting delay order ' + f'from {order} to {int(order)}') self.order = int(order) # The order can only be set once if self.order*self.tstep() > np.min(self.delay_time_func()): while self.order*self.tstep() > np.min(self.delay_time_func()): self.order -= 1 warnings.warn(self.py_name + '\n' + 'Delay time very small, casting delay order ' + f'from {int(order)} to {self.order}') if init_val is None: init_state_value = self.init_func() * self.delay_time_func() else: init_state_value = init_val * self.delay_time_func() if isinstance(init_state_value, xr.DataArray): # broadcast self.state self.state = init_state_value.expand_dims({ '_delay': np.arange(self.order)}, axis=0) self.times = self.delay_time_func().expand_dims({ '_delay': np.arange(self.order)}, axis=0) self.shape_info = {'dims': self.state.dims, 'coords': self.state.coords} else: self.state = np.array([init_state_value] * self.order) self.times = np.array([self.delay_time_func()] * self.order) def __call__(self): if self.shape_info: return self.state[-1].reset_coords('_delay', drop=True)\ / self.times[0].reset_coords('_delay', drop=True) else: return self.state[-1] / self.times[0] def ddt(self): if self.shape_info: # if is xarray need to preserve coords self.times = self.times.roll({'_delay': 1}, False) self.times[0] = self.delay_time_func() outflows = self.state / self.times inflows = outflows.roll({'_delay': 1}, False) else: # if is float use numpy.roll self.times = np.roll(self.times, 1, axis=0) self.times[0] = self.delay_time_func() outflows = self.state / self.times inflows = np.roll(outflows, 1, axis=0) inflows[0] = self.input_func() return (inflows - outflows)*self.order def export(self): return {self.py_name: { 'state': self.state, 'times': self.times, 'shape_info': self.shape_info}} class DelayFixed(DynamicStateful): """ Implements DELAY FIXED function """ def __init__(self, delay_input, delay_time, initial_value, tstep, py_name): """ Parameters ---------- delay_input: function delay_time: function initial_value: function order: function py_name: str Python name to identify the object """ super().__init__() self.init_func = initial_value self.delay_time_func = delay_time self.input_func = delay_input self.tstep = tstep self.order = None self.pointer = 0 self.py_name = py_name def initialize(self, init_val=None): order = max(self.delay_time_func()/self.tstep(), 1) if order != int(order): warnings.warn( self.py_name + '\n' + 'Casting delay order from %f to %i' % ( order, round(order + small_vensim))) # need to add a small decimal to ensure that 0.5 is rounded to 1 self.order = round(order + small_vensim) # The order can only be set once if init_val is None: init_state_value = self.init_func() else: init_state_value = init_val self.state = init_state_value self.pipe = [init_state_value] * self.order def __call__(self): return self.state def ddt(self): return np.nan def update(self, state): self.pipe[self.pointer] = self.input_func() self.pointer = (self.pointer + 1) % self.order self.state = self.pipe[self.pointer] def export(self): return {self.py_name: { 'state': self.state, 'pointer': self.pointer, 'pipe': self.pipe}} class Forecast(DynamicStateful): """ Implements FORECAST function """ def __init__(self, forecast_input, average_time, horizon, py_name): """ Parameters ---------- forecast_input: function average_time: function horizon: function py_name: str Python name to identify the object """ super().__init__() self.horizon = horizon self.average_time = average_time self.input = forecast_input self.py_name = py_name def initialize(self, init_val=None): # self.state = AV in the vensim docs if init_val is None: self.state = self.input() else: self.state = init_val if isinstance(self.state, xr.DataArray): self.shape_info = {'dims': self.state.dims, 'coords': self.state.coords} def __call__(self): return self.input() * ( 1 + zidz(self.input() - self.state, self.average_time() * self.state )*self.horizon() ) def ddt(self): return (self.input() - self.state) / self.average_time() def export(self): return {self.py_name: { 'state': self.state, 'shape_info': self.shape_info}} class Smooth(DynamicStateful): """ Implements SMOOTH function """ def __init__(self, smooth_input, smooth_time, initial_value, order, py_name="Smooth object"): """ Parameters ---------- smooth_input: function smooth_time: function initial_value: function order: function py_name: str Python name to identify the object """ super().__init__() self.init_func = initial_value self.smooth_time_func = smooth_time self.input_func = smooth_input self.order_func = order self.order = None self.shape_info = None self.py_name = py_name def initialize(self, init_val=None): self.order = self.order_func() # The order can only be set once if init_val is None: init_state_value = self.init_func() else: init_state_value = init_val if isinstance(init_state_value, xr.DataArray): # broadcast self.state self.state = init_state_value.expand_dims({ '_smooth': np.arange(self.order)}, axis=0) self.shape_info = {'dims': self.state.dims, 'coords': self.state.coords} else: self.state = np.array([init_state_value] * self.order) def __call__(self): if self.shape_info: return self.state[-1].reset_coords('_smooth', drop=True) else: return self.state[-1] def ddt(self): targets = np.roll(self.state, 1, axis=0) if self.shape_info: targets[0] = self.input_func().values else: targets[0] = self.input_func() return (targets - self.state) * self.order / self.smooth_time_func() def export(self): return {self.py_name: { 'state': self.state, 'shape_info': self.shape_info}} class Trend(DynamicStateful): """ Implements TREND function """ def __init__(self, trend_input, average_time, initial_trend, py_name="Trend object"): """ Parameters ---------- trend_input: function average_time: function initial_trend: function py_name: str Python name to identify the object """ super().__init__() self.init_func = initial_trend self.average_time_function = average_time self.input_func = trend_input self.py_name = py_name def initialize(self, init_val=None): if init_val is None: self.state = self.input_func()\ / (1 + self.init_func()*self.average_time_function()) else: self.state = self.input_func()\ / (1 + init_val*self.average_time_function()) if isinstance(self.state, xr.DataArray): self.shape_info = {'dims': self.state.dims, 'coords': self.state.coords} def __call__(self): return zidz(self.input_func() - self.state, self.average_time_function() * np.abs(self.state)) def ddt(self): return (self.input_func() - self.state) / self.average_time_function() def export(self): return {self.py_name: { 'state': self.state, 'shape_info': self.shape_info}} class SampleIfTrue(DynamicStateful): def __init__(self, condition, actual_value, initial_value, py_name="SampleIfTrue object"): """ Parameters ---------- condition: function actual_value: function initial_value: function py_name: str Python name to identify the object """ super().__init__() self.condition = condition self.actual_value = actual_value self.init_func = initial_value self.py_name = py_name def initialize(self, init_val=None): if init_val is None: self.state = self.init_func() else: self.state = init_val if isinstance(self.state, xr.DataArray): self.shape_info = {'dims': self.state.dims, 'coords': self.state.coords} def __call__(self): return if_then_else(self.condition(), self.actual_value, lambda: self.state) def ddt(self): return np.nan def update(self, state): self.state = self.state*0 + if_then_else(self.condition(), self.actual_value, lambda: self.state) def export(self): return {self.py_name: { 'state': self.state, 'shape_info': self.shape_info}} class Initial(Stateful): """ Implements INITIAL function """ def __init__(self, initial_value, py_name="Initial object"): """ Parameters ---------- initial_value: function py_name: str Python name to identify the object """ super().__init__() self.init_func = initial_value self.py_name = py_name def initialize(self, init_val=None): if init_val is None: self.state = self.init_func() else: self.state = init_val def export(self): return {self.py_name: { 'state': self.state}} class Macro(DynamicStateful): """ The Model class implements a stateful representation of the system, and contains the majority of methods for accessing and modifying model components. When the instance in question also serves as the root model object (as opposed to a macro or submodel within another model) it will have added methods to facilitate execution. """ def __init__(self, py_model_file, params=None, return_func=None, time=None, time_initialization=None, py_name=None): """ The model object will be created with components drawn from a translated python model file. Parameters ---------- py_model_file : <string> Filename of a model which has already been converted into a python format. get_time: needs to be a function that returns a time object params return_func """ super().__init__() self.time = time self.time_initialization = time_initialization self.py_name = py_name self.initialize_order = None # need a unique identifier for the imported module. module_name = os.path.splitext(py_model_file)[0]\ + str(random.randint(0, 1000000)) try: self.components = SourceFileLoader(module_name, py_model_file).load_module() except TypeError: raise ImportError( "\n\nNot able to import the model. " + "This may be because the model was compiled with an " + "earlier version of PySD, you can check on the top of " + " the model file you are trying to load." + "\nThe current version of PySd is :" + "\n\tPySD " + __version__ + "\n\n" + "Please translate again the model with the function" + " read_vensim or read_xmile.") if __version__.split(".")[0]\ != self.get_pysd_compiler_version().split(".")[0]: raise ImportError( "\n\nNot able to import the model. " + "The model was compiled with a " + "not compatible version of PySD:" + "\n\tPySD " + self.get_pysd_compiler_version() + "\n\nThe current version of PySd is:" + "\n\tPySD " + __version__ + "\n\n" + "Please translate again the model with the function" + " read_vensim or read_xmile.") if params is not None: self.set_components(params) # Get the collections of stateful elements and external elements self._stateful_elements = [ getattr(self.components, name) for name in dir(self.components) if isinstance(getattr(self.components, name), Stateful) ] self._dynamicstateful_elements = [ getattr(self.components, name) for name in dir(self.components) if isinstance(getattr(self.components, name), DynamicStateful) ] self._external_elements = [ getattr(self.components, name) for name in dir(self.components) if isinstance(getattr(self.components, name), External) ] if return_func is not None: self.return_func = getattr(self.components, return_func) else: self.return_func = lambda: 0 self.py_model_file = py_model_file def __call__(self): return self.return_func() def get_pysd_compiler_version(self): """ Returns the version of pysd complier that used for generating this model """ return self.components.__pysd_version__ def initialize(self, initialization_order=None): """ This function tries to initialize the stateful objects. In the case where an initialization function for `Stock A` depends on the value of `Stock B`, if we try to initialize `Stock A` before `Stock B` then we will get an error, as the value will not yet exist. In this case, just skip initializing `Stock A` for now, and go on to the other state initializations. Then come back to it and try again. """ # Initialize time if self.time is None: self.time = self.time_initialization() self.components.cache.clean() self.components.cache.time = self.time() self.components._init_outer_references({ 'scope': self, 'time': self.time }) # Initialize external elements for element in self._external_elements: element.initialize() Excels.clean() remaining = set(self._stateful_elements) if len(set([element.py_name for element in self._stateful_elements]))\ == len(set(self._stateful_elements)) and self.initialize_order: # use elements names to initialize them, this is available # after the model is initialized one time # solves issue #247 until we have a dependency dictionary try: for element_name in self.initialize_order: for element in remaining: if element.py_name == element_name: element.initialize() break remaining.remove(element) assert len(remaining) == 0 return except Exception as err: # if user includes new stateful objects or some other # dependencies the previous initialization order may # not be keept warnings.warn( err.args[0] + "\n\nNot able to initialize statefull elements " "with the same order as before..." "Trying to find a new order.") # initialize as always self.initialize_order = [] # Initialize stateful elements remaining = set(self._stateful_elements) while remaining: progress = set() for element in remaining: try: element.initialize() progress.add(element) self.initialize_order.append(element.py_name) except (KeyError, TypeError, AttributeError): pass if progress: remaining.difference_update(progress) else: raise ValueError('Unresolvable Reference: ' + 'Probable circular initialization...\n' + 'Not able to initialize the ' + 'following objects:\n\t' + '\n\t'.join([e.py_name for e in remaining])) def ddt(self): return np.array([component.ddt() for component in self._dynamicstateful_elements], dtype=object) @property def state(self): return np.array([component.state for component in self._dynamicstateful_elements], dtype=object) @state.setter def state(self, new_value): [component.update(val) for component, val in zip(self._dynamicstateful_elements, new_value)] def export(self, file_name): """ Export stateful values to pickle file. Parameters ---------- file_name: str Name of the file to export the values. """ warnings.warn( "\nCompatibility of exported states could be broken between" " different versions of PySD or xarray, current versions:\n" f"\tPySD {__version__}\n\txarray {xr.__version__}\n" ) stateful_elements = {} [stateful_elements.update(component.export()) for component in self._stateful_elements] with open(file_name, 'wb') as file: pickle.dump( (self.time(), stateful_elements, {'pysd': __version__, 'xarray': xr.__version__} ), file) def import_pickle(self, file_name): """ Import stateful values from pickle file. Parameters ---------- file_name: str Name of the file to import the values from. """ with open(file_name, 'rb') as file: time, stateful_dict, metadata = pickle.load(file) if __version__ != metadata['pysd']\ or xr.__version__ != metadata['xarray']: warnings.warn( "\nCompatibility of exported states could be broken between" " different versions of PySD or xarray. Current versions:\n" f"\tPySD {__version__}\n\txarray {xr.__version__}\n" "Loaded versions:\n" f"\tPySD {metadata['pysd']}\n\txarray {metadata['xarray']}\n" ) self.set_stateful(stateful_dict) self.time.update(time) self.components.cache.reset(time) def get_args(self, param): """ Returns the arguments of a model element. Parameters ---------- param: str or func The model element name or function. Returns ------- args: list List of arguments of the function. Examples -------- >>> model.get_args('birth_rate') >>> model.get_args('Birth Rate') """ if isinstance(param, str): func_name = utils.get_value_by_insensitive_key_or_value( param, self.components._namespace) or param if hasattr(self.components, func_name): func = getattr(self.components, func_name) else: NameError( "\n'%s' is not recognized as a model component." % param) else: func = param if hasattr(func, 'args'): # cached functions return func.args else: # regular functions args = inspect.getfullargspec(func)[0] if 'self' in args: args.remove('self') return args def get_coords(self, param): """ Returns the coordinates and dims of a model element. Parameters ---------- param: str or func The model element name or function. Returns ------- (coords, dims) or None: (dict, list) or None The coords and the dimensions of the element if it has. Otherwise, returns None. Examples -------- >>> model.get_coords('birth_rate') >>> model.get_coords('Birth Rate') """ if isinstance(param, str): func_name = utils.get_value_by_insensitive_key_or_value( param, self.components._namespace) or param if hasattr(self.components, func_name): func = getattr(self.components, func_name) else: NameError( "\n'%s' is not recognized as a model component." % param) else: func = param if not self.get_args(func): value = func() else: value = func(0) if isinstance(value, xr.DataArray): dims = list(value.dims) coords = {coord: list(value.coords[coord].values) for coord in value.coords} return coords, dims else: return None def set_components(self, params): """ Set the value of exogenous model elements. Element values can be passed as keyword=value pairs in the function call. Values can be numeric type or pandas Series. Series will be interpolated by integrator. Examples -------- >>> model.set_components({'birth_rate': 10}) >>> model.set_components({'Birth Rate': 10}) >>> br = pandas.Series(index=range(30), values=np.sin(range(30)) >>> model.set_components({'birth_rate': br}) """ # TODO: allow the params argument to take a pandas dataframe, where # column names are variable names. However some variables may be # constant or have no values for some index. This should be processed. for key, value in params.items(): func_name = utils.get_value_by_insensitive_key_or_value( key, self.components._namespace) if isinstance(value, np.ndarray) or isinstance(value, list): raise TypeError( 'When setting ' + key + '\n' 'Setting subscripted must be done using a xarray.DataArray' ' with the correct dimensions or a constant value ' '(https://pysd.readthedocs.io/en/master/basic_usage.html)') if func_name is None: raise NameError( "\n'%s' is not recognized as a model component." % key) try: func = getattr(self.components, func_name) _, dims = self.get_coords(func) or (None, None) args = self.get_args(func) except (AttributeError, TypeError): dims, args = None, None if isinstance(value, pd.Series): new_function, cache = self._timeseries_component( value, dims, args) elif callable(value): new_function = value cache = None else: new_function = self._constant_component(value, dims, args) cache = 'run' # this won't handle other statefuls... if '_integ_' + func_name in dir(self.components): warnings.warn("Replacing the equation of stock" + "{} with params".format(key), stacklevel=2) # add cache new_function.__name__ = func_name if cache == 'run': new_function = self.components.cache.run(new_function) elif cache == 'step': new_function = self.components.cache.step(new_function) setattr(self.components, func_name, new_function) self.components.cache.clean() def _timeseries_component(self, series, dims, args=[]): """ Internal function for creating a timeseries model element """ # this is only called if the set_component function recognizes a # pandas series # TODO: raise a warning if extrapolating from the end of the series. if isinstance(series.values[0], xr.DataArray) and args: # the argument is already given in the model when the model # is called return lambda x: utils.rearrange(xr.concat( series.values, series.index).interp(concat_dim=x).reset_coords( 'concat_dim', drop=True), dims, self.components._subscript_dict), 'lookup' elif isinstance(series.values[0], xr.DataArray): # the interpolation will be time dependent return lambda: utils.rearrange(xr.concat( series.values, series.index).interp(concat_dim=self.time()).reset_coords( 'concat_dim', drop=True), dims, self.components._subscript_dict), 'step' elif args and dims: # the argument is already given in the model when the model # is called return lambda x: utils.rearrange( np.interp(x, series.index, series.values), dims, self.components._subscript_dict), 'lookup' elif args: # the argument is already given in the model when the model # is called return lambda x:\ np.interp(x, series.index, series.values), 'lookup' elif dims: # the interpolation will be time dependent return lambda: utils.rearrange( np.interp(self.time(), series.index, series.values), dims, self.components._subscript_dict), 'step' else: # the interpolation will be time dependent return lambda:\ np.interp(self.time(), series.index, series.values), 'step' def _constant_component(self, value, dims, args=[]): """ Internal function for creating a constant model element """ if args and dims: # need to pass an argument to keep consistency with the calls # to the function return lambda x: utils.rearrange( value, dims, self.components._subscript_dict) elif args: # need to pass an argument to keep consistency with the calls # to the function return lambda x: value elif dims: return lambda: utils.rearrange( value, dims, self.components._subscript_dict) else: return lambda: value def set_state(self, t, initial_value): """ Old set_state method use set_initial_value""" warnings.warn( "\nset_state will be deprecated, use set_initial_value instead.", FutureWarning) self.set_initial_value(t, initial_value) def set_initial_value(self, t, initial_value): """ Set the system initial value. Parameters ---------- t : numeric The system time initial_value : dict A (possibly partial) dictionary of the system initial values. The keys to this dictionary may be either pysafe names or original model file names """ self.time.update(t) self.components.cache.reset(t) stateful_name = "_NONE" # TODO make this more solid, link with builder or next TODO? stateful_init = [ "_integ_", "_delay_", "_delayfixed_", "_delayn_", "_sample_if_true_", "_smooth_", "_trend_", "_initial_"] for key, value in initial_value.items(): component_name = utils.get_value_by_insensitive_key_or_value( key, self.components._namespace) if component_name is not None: for element in self._stateful_elements: # TODO make this more solid, add link between stateful # objects and model vars for init in stateful_init: if init + component_name == element.py_name: stateful_name = element.py_name else: component_name = key stateful_name = key try: _, dims = self.get_coords(component_name) except TypeError: dims = None if isinstance(value, xr.DataArray)\ and not set(value.dims).issubset(set(dims)): raise ValueError( f"\nInvalid dimensions for {component_name}." f"It should be a subset of {dims}, " f"but passed value has {list(value.dims)}") if isinstance(value, np.ndarray) or isinstance(value, list): raise TypeError( 'When setting ' + key + '\n' 'Setting subscripted must be done using a xarray.DataArray' ' with the correct dimensions or a constant value ' '(https://pysd.readthedocs.io/en/master/basic_usage.html)') # Try to update stateful component if hasattr(self.components, stateful_name): element = getattr(self.components, stateful_name) if dims: value = utils.rearrange( value, dims, self.components._subscript_dict) element.initialize(value) self.components.cache.clean() else: # Try to override component warnings.warn( f"\nSetting {component_name} to a constant value with " "initial_conditions will be deprecated. Use params={" f"'{component_name}': {value}"+"} instead.", FutureWarning) setattr(self.components, component_name, self._constant_component( value, dims, self.get_args(component_name))) self.components.cache.clean() def set_stateful(self, stateful_dict): """ Set stateful values. Parameters ---------- stateful_dict: dict Dictionary of the stateful elements and the attributes to change. """ for element, attrs in stateful_dict.items(): for attr, value in attrs.items(): setattr(getattr(self.components, element), attr, value) def doc(self): """ Formats a table of documentation strings to help users remember variable names, and understand how they are translated into python safe names. Returns ------- docs_df: pandas dataframe Dataframe with columns for the model components: - Real names - Python safe identifiers (as used in model.components) - Units string - Documentation strings from the original model file """ collector = [] for name, varname in self.components._namespace.items(): try: # TODO correct this when Original Eqn is in several lines docstring = getattr(self.components, varname).__doc__ lines = docstring.split('\n') for unit_line in range(3, 9): # this loop detects where Units: starts as # sometimes eqn could be split in several lines if re.findall('Units:', lines[unit_line]): break if unit_line == 3: eqn = lines[2].replace("Original Eqn:", "").strip() else: eqn = '; '.join([l.strip() for l in lines[3:unit_line]]) collector.append( {'Real Name': name, 'Py Name': varname, 'Eqn': eqn, 'Unit': lines[unit_line].replace("Units:", "").strip(), 'Lims': lines[unit_line+1].replace("Limits:", "").strip(), 'Type': lines[unit_line+2].replace("Type:", "").strip(), 'Subs': lines[unit_line+3].replace("Subs:", "").strip(), 'Comment': '\n'.join(lines[(unit_line+4):]).strip()}) except Exception: pass docs_df = pd.DataFrame(collector) docs_df.fillna('None', inplace=True) order = ['Real Name', 'Py Name', 'Unit', 'Lims', 'Type', 'Subs', 'Eqn', 'Comment'] return docs_df[order].sort_values(by='Real Name').reset_index(drop=True) def __str__(self): """ Return model source files """ # JT: Might be helpful to return not only the source file, but # also how the instance differs from that source file. This # would give a more accurate view of the current model. string = 'Translated Model File: ' + self.py_model_file if hasattr(self, 'mdl_file'): string += '\n Original Model File: ' + self.mdl_file return string class Time(object): def __init__(self, t=None, dt=None): self._t = t self._step = dt self.stage = None def __call__(self): return self._t def step(self): return self._step def update(self, value): if self._t is not None: self._step = value - self._t self._t = value class Model(Macro): def __init__(self, py_model_file, initialize, missing_values): """ Sets up the python objects """ super().__init__(py_model_file, None, None, Time()) self.time.stage = 'Load' self.missing_values = missing_values if initialize: self.initialize() def initialize(self): """ Initializes the simulation model """ self.time.update(self.components.initial_time()) self.time.stage = 'Initialization' External.missing = self.missing_values super().initialize() def _build_euler_timeseries(self, return_timestamps=None, final_time=None): """ - The integration steps need to include the return values. - There is no point running the model past the last return value. - The last timestep will be the last in that requested for return - Spacing should be at maximum what is specified by the integration time step. - The initial time should be the one specified by the model file, OR it should be the initial condition. - This function needs to be called AFTER the model is set in its initial state Parameters ---------- return_timestamps: numpy array Must be specified by user or built from model file before this function is called. final_time: float or None Final time of the simulation. If float, the given final time will be used. If None, the last return_timestamps will be used. Default is None. Returns ------- ts: numpy array The times that the integrator will use to compute time history """ t_0 = self.time() try: t_f = return_timestamps[-1] except IndexError: # return_timestamps is an empty list # model default final time or passed argument value t_f = self.components.final_time() if final_time is not None: t_f = max(final_time, t_f) ts = np.arange( t_0, t_f+self.components.time_step()/2, self.components.time_step(), dtype=np.float64 ) # Add the returned time series into the integration array. # Best we can do for now. This does change the integration ever # so slightly, but for well-specified models there shouldn't be # sensitivity to a finer integration time step. return np.sort(np.unique(np.append(ts, return_timestamps))) def _format_return_timestamps(self, return_timestamps=None): """ Format the passed in return timestamps value as a numpy array. If no value is passed, build up array of timestamps based upon model start and end times, and the 'saveper' value. Parameters ---------- return_timestamps: float, iterable of floats or None (optional) Iterable of timestamps to return or None. Default is None. Returns ------- ndarray (float) """ if return_timestamps is None: # Build based upon model file Start, Stop times and Saveper # Vensim's standard is to expect that the data set includes # the `final time`, so we have to add an extra period to # make sure we get that value in what numpy's `arange` gives us. return np.arange( self.time(), self.components.final_time() + self.components.saveper()/2, self.components.saveper(), dtype=float ) try: return np.array(return_timestamps, ndmin=1, dtype=float) except Exception: raise TypeError( '`return_timestamps` expects an iterable of numeric values' ' or a single numeric value') def run(self, params=None, return_columns=None, return_timestamps=None, initial_condition='original', final_time=None, time_step=None, saveper=None, reload=False, progress=False, flatten_output=False): """ Simulate the model's behavior over time. Return a pandas dataframe with timestamps as rows, model elements as columns. Parameters ---------- params: dict (optional) Keys are strings of model component names. Values are numeric or pandas Series. Numeric values represent constants over the model integration. Timeseries will be interpolated to give time-varying input. return_timestamps: list, numeric, ndarray (1D) (optional) Timestamps in model execution at which to return state information. Defaults to model-file specified timesteps. return_columns: list, 'step' or None (optional) List of string model component names, returned dataframe will have corresponding columns. If 'step' only variables with cache step will be returned. If None, variables with cache step and run will be returned. Default is None. initial_condition: str or (float, dict) (optional) The starting time, and the state of the system (the values of all the stocks) at that starting time. 'original' or 'o'uses model-file specified initial condition. 'current' or 'c' uses the state of the model after the previous execution. Other str objects, loads initial conditions from the pickle file with the given name.(float, dict) tuple lets the user specify a starting time (float) and (possibly partial) dictionary of initial values for stock (stateful) objects. Default is 'original'. final_time: float or None Final time of the simulation. If float, the given value will be used to compute the return_timestamps (if not given) and as a final time. If None the last value of return_timestamps will be used as a final time. Default is None. time_step: float or None Time step of the simulation. If float, the given value will be used to compute the return_timestamps (if not given) and euler time series. If None the default value from components will be used. Default is None. saveper: float or None Saving step of the simulation. If float, the given value will be used to compute the return_timestamps (if not given). If None the default value from components will be used. Default is None. reload : bool (optional) If True, reloads the model from the translated model file before making changes. Default is False. progress : bool (optional) If True, a progressbar will be shown during integration. Default is False. flatten_output: bool (optional) If True, once the output dataframe has been formatted will split the xarrays in new columns following vensim's naming to make a totally flat output. Default is False. Examples -------- >>> model.run(params={'exogenous_constant': 42}) >>> model.run(params={'exogenous_variable': timeseries_input}) >>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10]) >>> model.run(return_timestamps=10) >>> model.run(return_timestamps=np.linspace(1, 10, 20)) See Also -------- pysd.set_components : handles setting model parameters pysd.set_initial_condition : handles setting initial conditions """ if reload: self.reload() self.progress = progress # TODO move control variables to a class if params is None: params = {} if final_time: params['final_time'] = final_time elif return_timestamps is not None: params['final_time'] =\ self._format_return_timestamps(return_timestamps)[-1] if time_step: params['time_step'] = time_step if saveper: params['saveper'] = saveper # END TODO if params: self.set_components(params) self.set_initial_condition(initial_condition) # TODO move control variables to a class # save control variables replace = { 'initial_time': self.time() } # END TODO return_timestamps = self._format_return_timestamps(return_timestamps) t_series = self._build_euler_timeseries(return_timestamps, final_time) if return_columns is None or isinstance(return_columns, str): return_columns = self._default_return_columns(return_columns) self.time.stage = 'Run' self.components.cache.clean() capture_elements, return_addresses = utils.get_return_elements( return_columns, self.components._namespace) # create a dictionary splitting run cached and others capture_elements = self._split_capture_elements(capture_elements) res = self._integrate(t_series, capture_elements['step'], return_timestamps) self._add_run_elements(res, capture_elements['run'], replace=replace) return_df = utils.make_flat_df(res, return_addresses, flatten_output) return return_df def reload(self): """ Reloads the model from the translated model file, so that all the parameters are back to their original value. """ self.__init__(self.py_model_file, initialize=True, missing_values=self.missing_values) def _default_return_columns(self, which): """ Return a list of the model elements tha change on time that does not include lookup other functions that take parameters or run-cached functions. Parameters ---------- which: str or None If it is 'step' only cache step elements will be returned. Else cache 'step' and 'run' elements will be returned. Default is None. Returns ------- return_columns: list List of columns to return """ if which == 'step': types = ['step'] else: types = ['step', 'run'] return_columns = [] parsed_expr = ['time'] # time is alredy returned as index for key, value in self.components._namespace.items(): if hasattr(self.components, value): func = getattr(self.components, value) if value not in parsed_expr and\ hasattr(func, 'type') and getattr(func, 'type') in types: return_columns.append(key) parsed_expr.append(value) return return_columns def _split_capture_elements(self, capture_elements): """ Splits the capture elements list between those with run cache and others. Parameters ---------- capture_elements: list Captured elements list Returns ------- capture_dict: dict Dictionary of sets with keywords step and run. """ capture_dict = {'step': set(), 'run': set()} for element in capture_elements: func = getattr(self.components, element) if hasattr(func, 'type') and getattr(func, 'type') == 'run': capture_dict['run'].add(element) else: # those with a cache different to run or non-identified # will be saved each step capture_dict['step'].add(element) return capture_dict def set_initial_condition(self, initial_condition): """ Set the initial conditions of the integration. Parameters ---------- initial_condition : str or (float, dict) The starting time, and the state of the system (the values of all the stocks) at that starting time. 'original' or 'o'uses model-file specified initial condition. 'current' or 'c' uses the state of the model after the previous execution. Other str objects, loads initial conditions from the pickle file with the given name.(float, dict) tuple lets the user specify a starting time (float) and (possibly partial) dictionary of initial values for stock (stateful) objects. Examples -------- >>> model.set_initial_condition('original') >>> model.set_initial_condition('current') >>> model.set_initial_condition('exported_pickle.pic') >>> model.set_initial_condition((10, {'teacup_temperature': 50})) See Also -------- PySD.set_initial_value() """ if isinstance(initial_condition, tuple): self.initialize() self.set_initial_value(*initial_condition) elif isinstance(initial_condition, str): if initial_condition.lower() in ['original', 'o']: self.initialize() elif initial_condition.lower() in ['current', 'c']: pass else: self.import_pickle(initial_condition) else: raise TypeError('Check documentation for valid entries') def _euler_step(self, dt): """ Performs a single step in the euler integration, updating stateful components Parameters ---------- dt : float This is the amount to increase time by this step """ self.state = self.state + self.ddt() * dt def _integrate(self, time_steps, capture_elements, return_timestamps): """ Performs euler integration Parameters ---------- time_steps: iterable the time steps that the integrator progresses over capture_elements: list which model elements to capture - uses pysafe names return_timestamps: which subset of 'timesteps' should be values be returned? Returns ------- outputs: list of dictionaries """ outputs = pd.DataFrame(columns=capture_elements) if self.progress: # initialize progress bar progressbar = utils.ProgressBar(len(time_steps)-1) else: # when None is used the update will do nothing progressbar = utils.ProgressBar(None) for t2 in time_steps[1:]: if self.time() in return_timestamps: outputs.at[self.time()] = [getattr(self.components, key)() for key in capture_elements] self._euler_step(t2 - self.time()) self.time.update(t2) # this will clear the stepwise caches self.components.cache.reset(t2) progressbar.update() # TODO move control variables to a class and automatically stop # when updating time if self.time() >= self.components.final_time(): break # need to add one more time step, because we run only the state # updates in the previous loop and thus may be one short. if self.time() in return_timestamps: outputs.at[self.time()] = [getattr(self.components, key)() for key in capture_elements] progressbar.finish() return outputs def _add_run_elements(self, df, capture_elements, replace={}): """ Adds constant elements to a dataframe. Parameters ---------- df: pandas.DataFrame Dataframe to add elements. capture_elements: list List of constant elements replace: dict Ouputs values to replace. TODO: move control variables to a class and avoid this. Returns ------- None """ nt = len(df.index.values) for element in capture_elements: df[element] = [getattr(self.components, element)()] * nt # TODO: move control variables to a class and avoid this. # update initial time values in df (necessary if initial_conditions) for it, value in replace.items(): if it in df: df[it] = value elif it.upper() in df: df[it.upper()] = value elif it.replace('_', ' ') in df: df[it.replace('_', ' ')] = value elif it.replace('_', ' ').upper() in df: df[it.replace('_', ' ').upper()] = value def ramp(time, slope, start, finish=0): """ Implements vensim's and xmile's RAMP function Parameters ---------- time: function The current time of modelling slope: float The slope of the ramp starting at zero at time start start: float Time at which the ramp begins finish: float Optional. Time at which the ramp ends Returns ------- response: float If prior to ramp start, returns zero If after ramp ends, returns top of ramp Examples -------- """ t = time() if t < start: return 0 else: if finish <= 0: return slope * (t - start) elif t > finish: return slope * (finish - start) else: return slope * (t - start) def step(time, value, tstep): """" Implements vensim's STEP function Parameters ---------- value: float The height of the step tstep: float The time at and after which `result` equals `value` Returns ------- - In range [-inf, tstep) returns 0 - In range [tstep, +inf] returns `value` """ return value if time() >= tstep else 0 def pulse(time, start, duration): """ Implements vensim's PULSE function In range [-inf, start) returns 0 In range [start, start + duration) returns 1 In range [start + duration, +inf] returns 0 """ t = time() return 1 if start <= t < start + duration else 0 def pulse_train(time, start, duration, repeat_time, end): """ Implements vensim's PULSE TRAIN function In range [-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + duration) return 1 In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0 """ t = time() if start <= t < end: return 1 if (t - start) % repeat_time < duration else 0 else: return 0 def pulse_magnitude(time, magnitude, start, repeat_time=0): """ Implements xmile's PULSE function PULSE: Generate a one-DT wide pulse at the given time Parameters: 2 or 3: (magnitude, first time[, interval]) Without interval or when interval = 0, the PULSE is generated only once Example: PULSE(20, 12, 5) generates a pulse value of 20/DT at time 12, 17, 22, etc. In rage [-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + dt) return magnitude/dt In rage [start + n * repeat_time + dt, start + (n + 1) * repeat_time) return 0 """ t = time() if repeat_time <= small_vensim: if abs(t - start) < time.step(): return magnitude * time.step() else: return 0 else: if abs((t - start) % repeat_time) < time.step(): return magnitude * time.step() else: return 0 def lookup(x, xs, ys): """ Intermediate values are calculated with linear interpolation between the intermediate points. Out-of-range values are the same as the closest endpoint (i.e, no extrapolation is performed). """ return np.interp(x, xs, ys) def lookup_extrapolation(x, xs, ys): """ Intermediate values are calculated with linear interpolation between the intermediate points. Out-of-range values are calculated with linear extrapolation from the last two values at either end. """ if x < xs[0]: dx = xs[1] - xs[0] dy = ys[1] - ys[0] k = dy / dx return ys[0] + (x - xs[0]) * k if x > xs[-1]: dx = xs[-1] - xs[-2] dy = ys[-1] - ys[-2] k = dy / dx return ys[-1] + (x - xs[-1]) * k return np.interp(x, xs, ys) def lookup_discrete(x, xs, ys): """ Intermediate values take on the value associated with the next lower x-coordinate (also called a step-wise function). The last two points of a discrete graphical function must have the same y value. Out-of-range values are the same as the closest endpoint (i.e, no extrapolation is performed). """ for index in range(0, len(xs)): if x < xs[index]: return ys[index - 1] if index > 0 else ys[index] return ys[-1] def if_then_else(condition, val_if_true, val_if_false): """ Implements Vensim's IF THEN ELSE function. https://www.vensim.com/documentation/20475.htm Parameters ---------- condition: bool or xarray.DataArray of bools val_if_true: function Value to evaluate and return when condition is true. val_if_false: function Value to evaluate and return when condition is false. Returns ------- The value depending on the condition. """ if isinstance(condition, xr.DataArray): if condition.all(): return val_if_true() elif not condition.any(): return val_if_false() return xr.where(condition, val_if_true(), val_if_false()) return val_if_true() if condition else val_if_false() def logical_and(*args): """ Implements Vensim's :AND: method for two or several arguments. Parameters ---------- *args: arguments The values to compare with and operator Returns ------- result: bool or xarray.DataArray The result of the comparison. """ current = args[0] for arg in args[1:]: current = np.logical_and(arg, current) return current def logical_or(*args): """ Implements Vensim's :OR: method for two or several arguments. Parameters ---------- *args: arguments The values to compare with and operator Returns ------- result: bool or xarray.DataArray The result of the comparison. """ current = args[0] for arg in args[1:]: current = np.logical_or(arg, current) return current def xidz(numerator, denominator, value_if_denom_is_zero): """ Implements Vensim's XIDZ function. https://www.vensim.com/documentation/fn_xidz.htm This function executes a division, robust to denominator being zero. In the case of zero denominator, the final argument is returned. Parameters ---------- numerator: float or xarray.DataArray denominator: float or xarray.DataArray Components of the division operation value_if_denom_is_zero: float or xarray.DataArray The value to return if the denominator is zero Returns ------- numerator / denominator if denominator > 1e-6 otherwise, returns value_if_denom_is_zero """ if isinstance(denominator, xr.DataArray): return xr.where(np.abs(denominator) < small_vensim, value_if_denom_is_zero, numerator * 1.0 / denominator) if abs(denominator) < small_vensim: return value_if_denom_is_zero else: return numerator * 1.0 / denominator def zidz(numerator, denominator): """ This function bypasses divide-by-zero errors, implementing Vensim's ZIDZ function https://www.vensim.com/documentation/fn_zidz.htm Parameters ---------- numerator: float or xarray.DataArray value to be divided denominator: float or xarray.DataArray value to devide by Returns ------- result of division numerator/denominator if denominator is not zero, otherwise zero. """ if isinstance(denominator, xr.DataArray): return xr.where(np.abs(denominator) < small_vensim, 0, numerator * 1.0 / denominator) if abs(denominator) < small_vensim: return 0 else: return numerator * 1.0 / denominator def active_initial(time, expr, init_val): """ Implements vensim's ACTIVE INITIAL function Parameters ---------- time: function The current time function expr init_val Returns ------- """ if time.stage == 'Initialization': return init_val else: return expr() def bounded_normal(minimum, maximum, mean, std, seed): """ Implements vensim's BOUNDED NORMAL function """ # np.random.seed(seed) # we could bring this back later, but for now, ignore return stats.truncnorm.rvs(minimum, maximum, loc=mean, scale=std) def random_0_1(): """ Implements Vensim's RANDOM 0 1 function. Returns ------- A random number from the uniform distribution between 0 and 1. """ return np.random.uniform(0, 1) def random_uniform(m, x, s): """ Implements Vensim's RANDOM UNIFORM function. Parameters ---------- m: int Minimum value that the function will return. x: int Maximun value that the function will return. s: int A stream ID for the distribution to use. In most cases should be 0. Returns ------- A random number from the uniform distribution between m and x (exclusive of the endpoints). """ if s != 0: warnings.warn( "Random uniform with a nonzero seed value, may not give the " "same result as vensim", RuntimeWarning) return np.random.uniform(m, x) def incomplete(*args): warnings.warn( 'Call to undefined function, calling dependencies and returning NaN', RuntimeWarning, stacklevel=2) return np.nan def not_implemented_function(*args): raise NotImplementedError( 'Not implemented function {}'.format(args[0])) def log(x, base): """ Implements Vensim's LOG function with change of base. Parameters ---------- x: input value base: base of the logarithm Returns ------- float the log of 'x' in base 'base' """ return np.log(x) / np.log(base) def sum(x, dim=None): """ Implements Vensim's SUM function. Parameters ---------- x: xarray.DataArray Input value dim: list of strs (optional) Dimensions to apply the function over. If not given the function will be applied over all dimensions Returns ------- xarray.DataArray or float The result of the sum operation in the given dimensions """ # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.sum()) return x.sum(dim=dim) def prod(x, dim=None): """ Implements Vensim's PROD function. Parameters ---------- x: xarray.DataArray Input value dim: list of strs (optional) Dimensions to apply the function over. If not given the function will be applied over all dimensions Returns ------- xarray.DataArray or float The result of the product operation in the given dimensions """ # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.prod()) return x.prod(dim=dim) def vmin(x, dim=None): """ Implements Vensim's Vmin function. Parameters ---------- x: xarray.DataArray Input value dim: list of strs (optional) Dimensions to apply the function over. If not given the function will be applied over all dimensions Returns ------- xarray.DataArray or float The result of the minimum value over the given dimensions """ # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.min()) return x.min(dim=dim) def vmax(x, dim=None): """ Implements Vensim's VMAX function. Parameters ---------- x: xarray.DataArray Input value dim: list of strs (optional) Dimensions to apply the function over. If not given the function will be applied over all dimensions Returns ------- xarray.DataArray or float The result of the maximum value over the dimensions """ # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.max()) return x.max(dim=dim) def invert_matrix(mat): """ Implements Vensim's INVERT MATRIX function. Invert the matrix defined by the last two dimensions of xarray.DataArray. Paramteters ----------- mat: xarray.DataArray The matrix to invert. Returns ------- mat1: xarray.DataArray Inverted matrix. """ return xr.DataArray(np.linalg.inv(mat.values), mat.coords, mat.dims)
{"hexsha": "1c3db062c4dbf77b79b85474ac206aec6b78f65b", "size": 72095, "ext": "py", "lang": "Python", "max_stars_repo_path": "pysd/py_backend/functions.py", "max_stars_repo_name": "JamesPHoughton/pysd", "max_stars_repo_head_hexsha": "5885d622144dd81af96e3c875bac74c51ddba62f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 240, "max_stars_repo_stars_event_min_datetime": "2015-01-10T21:32:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T07:55:55.000Z", "max_issues_repo_path": "pysd/py_backend/functions.py", "max_issues_repo_name": "JamesPHoughton/pysd", "max_issues_repo_head_hexsha": "5885d622144dd81af96e3c875bac74c51ddba62f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 304, "max_issues_repo_issues_event_min_datetime": "2015-01-20T18:51:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T10:54:45.000Z", "max_forks_repo_path": "pysd/py_backend/functions.py", "max_forks_repo_name": "JamesPHoughton/pysd", "max_forks_repo_head_hexsha": "5885d622144dd81af96e3c875bac74c51ddba62f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 72, "max_forks_repo_forks_event_min_datetime": "2015-05-14T21:15:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T16:33:31.000Z", "avg_line_length": 32.8152025489, "max_line_length": 97, "alphanum_fraction": 0.5754490603, "include": true, "reason": "import numpy,import scipy", "num_tokens": 15024}
[STATEMENT] lemma Vars_indeps_foldr: assumes "set xs \<subseteq> set Vars" shows "foldr (\<squnion>\<^sub>S) xs \<bottom>\<^sub>S \<bowtie>\<^sub>S foldr (\<squnion>\<^sub>S) (filter (\<lambda>x. x \<notin> set xs) Vars) \<bottom>\<^sub>S" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<Squnion>\<^sub>S xs \<bowtie>\<^sub>S \<Squnion>\<^sub>S (filter (\<lambda>x. x \<notin> set xs) Vars) [PROOF STEP] apply (rule foldr_scene_indep) [PROOF STATE] proof (prove) goal (2 subgoals): 1. pairwise (##\<^sub>S) (set (filter (\<lambda>x. x \<notin> set xs) Vars)) 2. \<forall>b\<in>set (filter (\<lambda>x. x \<notin> set xs) Vars). \<Squnion>\<^sub>S xs \<bowtie>\<^sub>S b [PROOF STEP] apply (meson filter_is_subset pairwise_subset scene_space_compats) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>b\<in>set (filter (\<lambda>x. x \<notin> set xs) Vars). \<Squnion>\<^sub>S xs \<bowtie>\<^sub>S b [PROOF STEP] apply (simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>x. x \<in> set Vars \<and> x \<notin> set xs \<longrightarrow> \<Squnion>\<^sub>S xs \<bowtie>\<^sub>S x [PROOF STEP] apply auto [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>x. \<lbrakk>x \<in> set Vars; x \<notin> set xs\<rbrakk> \<Longrightarrow> \<Squnion>\<^sub>S xs \<bowtie>\<^sub>S x [PROOF STEP] apply (rule scene_indep_sym) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>x. \<lbrakk>x \<in> set Vars; x \<notin> set xs\<rbrakk> \<Longrightarrow> x \<bowtie>\<^sub>S \<Squnion>\<^sub>S xs [PROOF STEP] apply (metis (no_types, lifting) assms foldr_scene_indep local.indep_Vars pairwiseD pairwise_mono scene_indeps_def scene_space_compats subset_iff) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 732, "file": "Optics_Scene_Spaces", "length": 7}
""" vtki plotting module """ import collections import ctypes import logging import os import time from threading import Thread from subprocess import PIPE, Popen import imageio import numpy as np import vtk from vtk.util import numpy_support as VN import vtki from vtki.export import export_plotter_vtkjs from vtki.utilities import get_scalar, is_vtki_obj, numpy_to_texture, wrap _ALL_PLOTTERS = {} def close_all(): """Close all open/active plotters""" for key, p in _ALL_PLOTTERS.items(): p.close() _ALL_PLOTTERS.clear() return True MAX_N_COLOR_BARS = 10 PV_BACKGROUND = [82/255., 87/255., 110/255.] FONT_KEYS = {'arial': vtk.VTK_ARIAL, 'courier': vtk.VTK_COURIER, 'times': vtk.VTK_TIMES} log = logging.getLogger(__name__) log.setLevel('CRITICAL') rcParams = { 'background' : [0.3, 0.3, 0.3], 'camera' : { 'position' : [1, 1, 1], 'viewup' : [0, 0, 1], }, 'window_size' : [1024, 768], 'font' : { 'family' : 'courier', 'size' : 12, 'title_size': None, 'label_size' : None, 'color' : [1, 1, 1], 'fmt' : None, }, 'cmap' : 'jet', 'color' : 'white', 'nan_color' : 'darkgray', 'outline_color' : 'white', 'colorbar_orientation' : 'horizontal', 'colorbar_horizontal' : { 'width' : 0.60, 'height' : 0.08, 'position_x' : 0.35, 'position_y' : 0.02, }, 'colorbar_vertical' : { 'width' : 0.1, 'height' : 0.8, 'position_x' : 0.85, 'position_y' : 0.1, }, 'show_scalar_bar' : True, 'show_edges' : False, 'lighting' : True, 'interactive' : False, 'render_points_as_spheres' : False } DEFAULT_THEME = dict(rcParams) def set_plot_theme(theme): """Set the plotting parameters to a predefined theme""" if theme.lower() in ['paraview', 'pv']: rcParams['background'] = PV_BACKGROUND rcParams['cmap'] = 'coolwarm' rcParams['font']['family'] = 'arial' rcParams['font']['label_size'] = 16 rcParams['show_edges'] = False elif theme.lower() in ['document', 'doc', 'paper', 'report']: rcParams['background'] = 'white' rcParams['cmap'] = 'viridis' rcParams['font']['color'] = 'black' rcParams['show_edges'] = False rcParams['color'] = 'orange' rcParams['outline_color'] = 'black' elif theme.lower() in ['night', 'dark']: rcParams['background'] = 'black' rcParams['cmap'] = 'viridis' rcParams['font']['color'] = 'white' rcParams['show_edges'] = False rcParams['color'] = 'orange' rcParams['outline_color'] = 'white' elif theme.lower() in ['default']: for k,v in DEFAULT_THEME.items(): rcParams[k] = v def run_from_ipython(): """ returns True when run from IPython """ try: py = __IPYTHON__ return True except NameError: return False def _raise_not_matching(scalars, mesh): raise Exception('Number of scalars (%d) ' % scalars.size + 'must match either the number of points ' + '(%d) ' % mesh.GetNumberOfPoints() + 'or the number of cells ' + '(%d) ' % mesh.GetNumberOfCells()) def opacity_transfer_function(key, n_colors): """Get the opacity transfer function results: range from 0 to 255 """ transfer_func = { 'linear': np.linspace(0, 255, n_colors, dtype=np.uint8), 'linear_r': np.linspace(0, 255, n_colors, dtype=np.uint8)[::-1], 'geom': np.geomspace(1e-6, 255, n_colors, dtype=np.uint8), 'geom_r': np.geomspace(255, 1e-6, n_colors, dtype=np.uint8), } try: return transfer_func[key] except KeyError: raise KeyError('opactiy transfer function ({}) unknown.'.format(key)) def plot(var_item, off_screen=False, full_screen=False, screenshot=None, interactive=True, cpos=None, window_size=None, show_bounds=False, show_axes=True, notebook=None, background=None, text='', return_img=False, eye_dome_lighting=False, **kwargs): """ Convenience plotting function for a vtk or numpy object. Parameters ---------- item : vtk or numpy object VTK object or numpy array to be plotted. off_screen : bool Plots off screen when True. Helpful for saving screenshots without a window popping up. full_screen : bool, optional Opens window in full screen. When enabled, ignores window_size. Default False. screenshot : str or bool, optional Saves screenshot to file when enabled. See: help(vtkinterface.Plotter.screenshot). Default disabled. When True, takes screenshot and returns numpy array of image. window_size : list, optional Window size in pixels. Defaults to [1024, 768] show_bounds : bool, optional Shows mesh bounds when True. Default False. Alias ``show_grid`` also accepted. notebook : bool, optional When True, the resulting plot is placed inline a jupyter notebook. Assumes a jupyter console is active. show_axes : bool, optional Shows a vtk axes widget. Enabled by default. text : str, optional Adds text at the bottom of the plot. **kwargs : optional keyword arguments See help(Plotter.add_mesh) for additional options. Returns ------- cpos : list List of camera position, focal point, and view up. img : numpy.ndarray Array containing pixel RGB and alpha. Sized: [Window height x Window width x 3] for transparent_background=False [Window height x Window width x 4] for transparent_background=True Returned only when screenshot enabled """ if notebook is None: if run_from_ipython(): try: notebook = type(get_ipython()).__module__.startswith('ipykernel.') except NameError: pass if notebook: off_screen = notebook plotter = Plotter(off_screen=off_screen, notebook=notebook) if show_axes: plotter.add_axes() plotter.set_background(background) if isinstance(var_item, list): if len(var_item) == 2: # might be arrows isarr_0 = isinstance(var_item[0], np.ndarray) isarr_1 = isinstance(var_item[1], np.ndarray) if isarr_0 and isarr_1: plotter.add_arrows(var_item[0], var_item[1]) else: for item in var_item: plotter.add_mesh(item, **kwargs) else: for item in var_item: plotter.add_mesh(item, **kwargs) else: plotter.add_mesh(var_item, **kwargs) if text: plotter.add_text(text) if show_bounds or kwargs.get('show_grid', False): if kwargs.get('show_grid', False): plotter.show_grid() else: plotter.show_bounds() if cpos is None: cpos = plotter.get_default_cam_pos() plotter.camera_position = cpos plotter.camera_set = False else: plotter.camera_position = cpos if eye_dome_lighting: plotter.enable_eye_dome_lighting() result = plotter.show(window_size=window_size, auto_close=False, interactive=interactive, full_screen=full_screen, screenshot=screenshot, return_img=return_img) # close and return camera position and maybe image plotter.close() # Result will be handled by plotter.show(): cpos or [cpos, img] return result def plot_arrows(cent, direction, **kwargs): """ Plots arrows as vectors Parameters ---------- cent : np.ndarray Accepts a single 3d point or array of 3d points. directions : np.ndarray Accepts a single 3d point or array of 3d vectors. Must contain the same number of items as cent. **kwargs : additional arguments, optional See help(vtki.Plot) Returns ------- Same as Plot. See help(vtki.Plot) """ return plot([cent, direction], **kwargs) def running_xserver(): """ Check if x server is running Returns ------- running_xserver : bool True when on Linux and running an xserver. Returns None when on a non-linux platform. """ try: p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE) p.communicate() return p.returncode == 0 except: return False class BasePlotter(object): """ To be used by the Plotter and QtInteractor classes. Parameters ---------- shape : list or tuple, optional Number of sub-render windows inside of the main window. Specify two across with ``shape=(2, 1)`` and a two by two grid with ``shape=(2, 2)``. By default there is only one renderer. border : bool, optional Draw a border around each render window. Default False. border_color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' border_width : float, optional Width of the border in pixels when enabled. """ def __new__(cls, *args, **kwargs): if cls is BasePlotter: raise TypeError("vtki.BasePlotter is an abstract class and may not be instantiated.") return object.__new__(cls) def __init__(self, shape=(1, 1), border=None, border_color='k', border_width=1.0): """ Initialize base plotter """ self.image_transparent_background = False # by default add border for multiple plots if border is None: if shape != (1, 1): border = True else: border = False # add render windows self.renderers = [] self._active_renderer_index = 0 assert_str = '"shape" should be a list or tuple' assert isinstance(shape, collections.Iterable), assert_str assert shape[0] > 0, '"shape" must be positive' assert shape[1] > 0, '"shape" must be positive' self.shape = shape for i in reversed(range(shape[0])): for j in range(shape[1]): renderer = vtki.Renderer(self, border, border_color, border_width) x0 = i/shape[0] y0 = j/shape[1] x1 = (i+1)/shape[0] y1 = (j+1)/shape[1] renderer.SetViewport(y0, x0, y1, x1) self.renderers.append(renderer) # This is a private variable to keep track of how many colorbars exist # This allows us to keep adding colorbars without overlapping self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS)) self._scalar_bar_slot_lookup = {} # This keeps track of scalar names already plotted and their ranges self._scalar_bar_ranges = {} self._scalar_bar_mappers = {} self._scalar_bar_actors = {} self._scalar_bar_widgets = {} self._actors = {} # track if the camera has been setup # self.camera_set = False self.first_time = True # Keep track of the scale self._labels = [] # Add self to open plotters _ALL_PLOTTERS[str(hex(id(self)))] = self # lighting style self.lighting = vtk.vtkLightKit() # self.lighting.SetHeadLightWarmth(1.0) # self.lighting.SetHeadLightWarmth(1.0) for renderer in self.renderers: self.lighting.AddLightsToRenderer(renderer) renderer.LightFollowCameraOn() def update_style(self): if not hasattr(self, '_style'): self._style = vtk.vtkInteractorStyleTrackballCamera() if hasattr(self, 'iren'): return self.iren.SetInteractorStyle(self._style) def enable_trackball_style(self): """ sets the interactive style to trackball - the default syle """ self._style = vtk.vtkInteractorStyleTrackballCamera() return self.update_style() def enable_image_style(self): """ sets the interactive style to image Controls: - Left Mouse button triggers window level events - CTRL Left Mouse spins the camera around its view plane normal - SHIFT Left Mouse pans the camera - CTRL SHIFT Left Mouse dollys (a positional zoom) the camera - Middle mouse button pans the camera - Right mouse button dollys the camera. - SHIFT Right Mouse triggers pick events """ self._style = vtk.vtkInteractorStyleImage() return self.update_style() def enable_joystick_style(self): """ sets the interactive style to joystick allows the user to move (rotate, pan, etc.) the camera, the point of view for the scene. The position of the mouse relative to the center of the scene determines the speed at which the camera moves, and the speed of the mouse movement determines the acceleration of the camera, so the camera continues to move even if the mouse if not moving. For a 3-button mouse, the left button is for rotation, the right button for zooming, the middle button for panning, and ctrl + left button for spinning. (With fewer mouse buttons, ctrl + shift + left button is for zooming, and shift + left button is for panning.) """ self._style = vtk.vtkInteractorStyleJoystickCamera() return self.update_style() def enable_zoom_style(self): """ sets the interactive style to rubber band zoom This interactor style allows the user to draw a rectangle in the render window using the left mouse button. When the mouse button is released, the current camera zooms by an amount determined from the shorter side of the drawn rectangle. """ self._style = vtk.vtkInteractorStyleRubberBandZoom() return self.update_style() def enable_terrain_style(self): """ sets the interactive style to terrain Used to manipulate a camera which is viewing a scene with a natural view up, e.g., terrain. The camera in such a scene is manipulated by specifying azimuth (angle around the view up vector) and elevation (the angle from the horizon). """ self._style = vtk.vtkInteractorStyleTerrain() return self.update_style() def enable_rubber_band_style(self): """ sets the interactive style to rubber band picking This interactor style allows the user to draw a rectangle in the render window by hitting 'r' and then using the left mouse button. When the mouse button is released, the attached picker operates on the pixel in the center of the selection rectangle. If the picker happens to be a vtkAreaPicker it will operate on the entire selection rectangle. When the 'p' key is hit the above pick operation occurs on a 1x1 rectangle. In other respects it behaves the same as its parent class. """ self._style = vtk.vtkInteractorStyleRubberBandPick() return self.update_style() def set_focus(self, point): """ sets focus to a point """ if isinstance(point, np.ndarray): if point.ndim != 1: point = point.ravel() self.camera.SetFocalPoint(point) self._render() def set_position(self, point): """ sets camera position to a point """ if isinstance(point, np.ndarray): if point.ndim != 1: point = point.ravel() self.camera.SetPosition(point) self._render() def set_viewup(self, vector): """ sets camera viewup vector """ if isinstance(vector, np.ndarray): if vector.ndim != 1: vector = vector.ravel() self.camera.SetViewUp(vector) self._render() def _render(self): """ redraws render window if the render window exists """ if hasattr(self, 'ren_win'): if hasattr(self, 'render_trigger'): self.render_trigger.emit() elif not self.first_time: self.render() def add_axes(self, interactive=None, color=None): """ Add an interactive axes widget """ if interactive is None: interactive = rcParams['interactive'] if hasattr(self, 'axes_widget'): self.axes_widget.SetInteractive(interactive) self._update_axes_color(color) return self.axes_actor = vtk.vtkAxesActor() self.axes_widget = vtk.vtkOrientationMarkerWidget() self.axes_widget.SetOrientationMarker(self.axes_actor) if hasattr(self, 'iren'): self.axes_widget.SetInteractor(self.iren) self.axes_widget.SetEnabled(1) self.axes_widget.SetInteractive(interactive) # Set the color self._update_axes_color(color) def hide_axes(self): """Hide the axes orientation widget""" if hasattr(self, 'axes_widget'): self.axes_widget.EnabledOff() def show_axes(self): """Show the axes orientation widget""" if hasattr(self, 'axes_widget'): self.axes_widget.EnabledOn() else: self.add_axes() def key_press_event(self, obj, event): """ Listens for key press event """ key = self.iren.GetKeySym() log.debug('Key %s pressed' % key) if key == 'q': self.q_pressed = True elif key == 'b': self.observer = self.iren.AddObserver('LeftButtonPressEvent', self.left_button_down) elif key == 'v': self.isometric_view_interactive() def left_button_down(self, obj, event_type): """Register the event for a left button down click""" # Get 2D click location on window click_pos = self.iren.GetEventPosition() # Get corresponding click location in the 3D plot picker = vtk.vtkWorldPointPicker() picker.Pick(click_pos[0], click_pos[1], 0, self.renderer) self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3)) if np.any(np.isnan(self.pickpoint)): self.pickpoint[:] = 0 def isometric_view_interactive(self): """ sets the current interactive render window to isometric view """ interactor = self.iren.GetInteractorStyle() renderer = interactor.GetCurrentRenderer() renderer.view_isometric() def update(self, stime=1, force_redraw=True): """ Update window, redraw, process messages query Parameters ---------- stime : int, optional Duration of timer that interrupt vtkRenderWindowInteractor in milliseconds. force_redraw : bool, optional Call vtkRenderWindowInteractor.Render() immediately. """ if stime <= 0: stime = 1 curr_time = time.time() if Plotter.last_update_time > curr_time: Plotter.last_update_time = curr_time if not hasattr(self, 'iren'): return update_rate = self.iren.GetDesiredUpdateRate() if (curr_time - Plotter.last_update_time) > (1.0/update_rate): self.right_timer_id = self.iren.CreateRepeatingTimer(stime) self.iren.Start() self.iren.DestroyTimer(self.right_timer_id) self._render() Plotter.last_update_time = curr_time else: if force_redraw: self.iren.Render() def add_mesh(self, mesh, color=None, style=None, scalars=None, rng=None, stitle=None, show_edges=None, point_size=5.0, opacity=1.0, line_width=None, flip_scalars=False, lighting=None, n_colors=256, interpolate_before_map=False, cmap=None, label=None, reset_camera=None, scalar_bar_args=None, multi_colors=False, name=None, texture=None, render_points_as_spheres=None, render_lines_as_tubes=False, edge_color='black', ambient=0.0, show_scalar_bar=None, nan_color=None, nan_opacity=1.0, loc=None, backface_culling=False, rgb=False, **kwargs): """ Adds a unstructured, structured, or surface mesh to the plotting object. Also accepts a 3D numpy.ndarray Parameters ---------- mesh : vtk unstructured, structured, polymesh, or 3D numpy.ndarray A vtk unstructured, structured, or polymesh to plot. color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' Color will be overridden when scalars are input. style : string, optional Visualization style of the vtk mesh. One for the following: style='surface' style='wireframe' style='points' Defaults to 'surface' scalars : numpy array, optional Scalars used to "color" the mesh. Accepts an array equal to the number of cells or the number of points in the mesh. Array should be sized as a single vector. If both color and scalars are None, then the active scalars are used rng : 2 item list, optional Range of mapper for scalars. Defaults to minimum and maximum of scalars array. Example: ``[-1, 2]``. ``clim`` is also an accepted alias for this. stitle : string, optional Scalar title. By default there is no scalar legend bar. Setting this creates the legend bar and adds a title to it. To create a bar with no title, use an empty string (i.e. ''). show_edges : bool, optional Shows the edges of a mesh. Does not apply to a wireframe representation. point_size : float, optional Point size. Applicable when style='points'. Default 5.0 opacity : float, optional Opacity of mesh. Should be between 0 and 1. Default 1.0. A string option can also be specified to map the scalar range to the opacity. Options are: linear, linear_r, geom, geom_r line_width : float, optional Thickness of lines. Only valid for wireframe and surface representations. Default None. flip_scalars : bool, optional Flip direction of cmap. lighting : bool, optional Enable or disable view direction lighting. Default False. n_colors : int, optional Number of colors to use when displaying scalars. Default 256. interpolate_before_map : bool, optional Enabling makes for a smoother scalar display. Default False cmap : str, optional cmap string. See available matplotlib cmaps. Only applicable for when displaying scalars. Defaults None (rainbow). Requires matplotlib. multi_colors : bool, optional If a ``MultiBlock`` dataset is given this will color each block by a solid color using matplotlib's color cycler. name : str, optional The name for the added mesh/actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. texture : vtk.vtkTexture or np.ndarray or boolean, optional A texture to apply if the input mesh has texture coordinates. This will not work with MultiBlock datasets. If set to ``True``, the first avaialble texture on the object will be used. If a string name is given, it will pull a texture with that name associated to the input mesh. ambient : float, optional When lighting is enabled, this is the amount of light from 0 to 1 that reaches the actor when not directed at the light source emitted from the viewer. Default 0.2. nan_color : string or 3 item list, optional, defaults to gray The color to use for all NaN values in the plotted scalar array. nan_opacity : float, optional Opacity of NaN values. Should be between 0 and 1. Default 1.0 backface_culling : bool optional Does not render faces that should not be visible to the plotter. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Default False. rgb : bool, optional If an 2 dimensional array is passed as the scalars, plot those values as RGB+A colors! ``rgba`` is also accepted alias for this. Returns ------- actor: vtk.vtkActor VTK actor of the mesh. """ # fixes lighting issue when using precalculated normals if isinstance(mesh, vtk.vtkPolyData): if mesh.GetPointData().HasArray('Normals'): mesh.point_arrays['Normals'] = mesh.point_arrays.pop('Normals') if scalar_bar_args is None: scalar_bar_args = {} if isinstance(mesh, np.ndarray): mesh = vtki.PolyData(mesh) style = 'points' # Convert the VTK data object to a vtki wrapped object if neccessary if not is_vtki_obj(mesh): mesh = wrap(mesh) if show_edges is None: show_edges = rcParams['show_edges'] if show_scalar_bar is None: show_scalar_bar = rcParams['show_scalar_bar'] if lighting is None: lighting = rcParams['lighting'] if rng is None: rng = kwargs.get('clim', None) if render_points_as_spheres is None: render_points_as_spheres = rcParams['render_points_as_spheres'] if name is None: name = '{}({})'.format(type(mesh).__name__, str(hex(id(mesh)))) if isinstance(mesh, vtki.MultiBlock): self.remove_actor(name, reset_camera=reset_camera) # frist check the scalars if rng is None and scalars is not None: # Get the data range across the array for all blocks # if scalar specified if isinstance(scalars, str): rng = mesh.get_data_range(scalars) else: # TODO: an array was given... how do we deal with # that? Possibly a 2D arrays or list of # arrays where first index corresponds to # the block? This could get complicated real # quick. raise RuntimeError('Scalar array must be given as a string name for multiblock datasets.') if multi_colors: # Compute unique colors for each index of the block import matplotlib as mpl from itertools import cycle cycler = mpl.rcParams['axes.prop_cycle'] colors = cycle(cycler) # Now iteratively plot each element of the multiblock dataset actors = [] for idx in range(mesh.GetNumberOfBlocks()): if mesh[idx] is None: continue # Get a good name to use next_name = '{}-{}'.format(name, idx) # Get the data object if not is_vtki_obj(mesh[idx]): data = wrap(mesh.GetBlock(idx)) if not is_vtki_obj(mesh[idx]): continue # move on if we can't plot it else: data = mesh.GetBlock(idx) if data is None: # Note that a block can exist but be None type continue # Now check that scalars is available for this dataset if isinstance(data, vtk.vtkMultiBlockDataSet) or get_scalar(data, scalars) is None: ts = None else: ts = scalars if multi_colors: color = next(colors)['color'] a = self.add_mesh(data, color=color, style=style, scalars=ts, rng=rng, stitle=stitle, show_edges=show_edges, point_size=point_size, opacity=opacity, line_width=line_width, flip_scalars=flip_scalars, lighting=lighting, n_colors=n_colors, interpolate_before_map=interpolate_before_map, cmap=cmap, label=label, scalar_bar_args=scalar_bar_args, reset_camera=reset_camera, name=next_name, texture=None, render_points_as_spheres=render_points_as_spheres, render_lines_as_tubes=render_lines_as_tubes, edge_color=edge_color, show_scalar_bar=show_scalar_bar, nan_color=nan_color, nan_opacity=nan_opacity, loc=loc, rgb=rgb, **kwargs) actors.append(a) if (reset_camera is None and not self.camera_set) or reset_camera: cpos = self.get_default_cam_pos() self.camera_position = cpos self.camera_set = False self.reset_camera() return actors if nan_color is None: nan_color = rcParams['nan_color'] nanr, nanb, nang = parse_color(nan_color) nan_color = nanr, nanb, nang, nan_opacity if mesh.n_points < 1: raise RuntimeError('Empty meshes cannot be plotted. Input mesh has zero points.') # set main values self.mesh = mesh self.mapper = vtk.vtkDataSetMapper() self.mapper.SetInputData(self.mesh) if isinstance(scalars, str): self.mapper.SetArrayName(scalars) actor, prop = self.add_actor(self.mapper, reset_camera=reset_camera, name=name, loc=loc, culling=backface_culling) # Try to plot something if no preference given if scalars is None and color is None and texture is None: # Prefer texture first if len(list(mesh.textures.keys())) > 0: texture = True # If no texture, plot any active scalar else: # Make sure scalar components are not vectors/tuples scalars = mesh.active_scalar if scalars is None:# or scalars.ndim != 1: scalars = None else: if stitle is None: stitle = mesh.active_scalar_info[1] if texture == True or isinstance(texture, (str, int)): texture = mesh._activate_texture(texture) if texture: if isinstance(texture, np.ndarray): texture = numpy_to_texture(texture) if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)): raise TypeError('Invalid texture type ({})'.format(type(texture))) if mesh.GetPointData().GetTCoords() is None: raise AssertionError('Input mesh does not have texture coordinates to support the texture.') actor.SetTexture(texture) # Set color to white by default when using a texture if color is None: color = 'white' if scalars is None: show_scalar_bar = False self.mapper.SetScalarModeToUsePointFieldData() # Scalar formatting =================================================== if cmap is None: cmap = kwargs.get('colormap', None) if cmap is None: cmap = rcParams['cmap'] title = 'Data' if stitle is None else stitle if scalars is not None: # if scalars is a string, then get the first array found with that name append_scalars = True if isinstance(scalars, str): title = scalars scalars = get_scalar(mesh, scalars, preference=kwargs.get('preference', 'cell'), err=True) if stitle is None: stitle = title #append_scalars = False if not isinstance(scalars, np.ndarray): scalars = np.asarray(scalars) if rgb is False or rgb is None: rgb = kwargs.get('rgba', False) if rgb: if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4: raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.') if scalars.ndim != 1: if rgb: pass elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells): scalars = np.linalg.norm(scalars.copy(), axis=1) title = '{}-normed'.format(title) else: scalars = scalars.ravel() if scalars.dtype == np.bool: scalars = scalars.astype(np.float) # Scalar interpolation approach if scalars.shape[0] == mesh.n_points: self.mesh._add_point_scalar(scalars, title, append_scalars) self.mapper.SetScalarModeToUsePointData() self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors) if interpolate_before_map: self.mapper.InterpolateScalarsBeforeMappingOn() elif scalars.shape[0] == mesh.n_cells: self.mesh._add_cell_scalar(scalars, title, append_scalars) self.mapper.SetScalarModeToUseCellData() self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors) if interpolate_before_map: self.mapper.InterpolateScalarsBeforeMappingOn() else: _raise_not_matching(scalars, mesh) # Set scalar range if rng is None: rng = [np.nanmin(scalars), np.nanmax(scalars)] elif isinstance(rng, float) or isinstance(rng, int): rng = [-rng, rng] if np.any(rng) and not rgb: self.mapper.SetScalarRange(rng[0], rng[1]) # Flip if requested table = self.mapper.GetLookupTable() table.SetNanColor(nan_color) if cmap is not None: try: from matplotlib.cm import get_cmap except ImportError: raise Exception('cmap requires matplotlib') if isinstance(cmap, str): cmap = get_cmap(cmap) # ELSE: assume cmap is callable ctable = cmap(np.linspace(0, 1, n_colors))*255 ctable = ctable.astype(np.uint8) # Set opactities if isinstance(opacity, str): ctable[:,-1] = opacity_transfer_function(opacity, n_colors) if flip_scalars: ctable = np.ascontiguousarray(ctable[::-1]) table.SetTable(VN.numpy_to_vtk(ctable)) else: # no cmap specified if flip_scalars: table.SetHueRange(0.0, 0.66667) else: table.SetHueRange(0.66667, 0.0) else: self.mapper.SetScalarModeToUseFieldData() # select view style if not style: style = 'surface' style = style.lower() if style == 'wireframe': prop.SetRepresentationToWireframe() if color is None: color = rcParams['outline_color'] elif style == 'points': prop.SetRepresentationToPoints() elif style == 'surface': prop.SetRepresentationToSurface() else: raise Exception('Invalid style. Must be one of the following:\n' + '\t"surface"\n' + '\t"wireframe"\n' + '\t"points"\n') prop.SetPointSize(point_size) prop.SetAmbient(ambient) # edge display style if show_edges: prop.EdgeVisibilityOn() rgb_color = parse_color(color) prop.SetColor(rgb_color) if isinstance(opacity, (float, int)): prop.SetOpacity(opacity) prop.SetEdgeColor(parse_color(edge_color)) if render_points_as_spheres: prop.SetRenderPointsAsSpheres(render_points_as_spheres) if render_lines_as_tubes: prop.SetRenderLinesAsTubes(render_lines_as_tubes) # legend label if label: if not isinstance(label, str): raise AssertionError('Label must be a string') self._labels.append([single_triangle(), label, rgb_color]) # lighting display style if not lighting: prop.LightingOff() # set line thickness if line_width: prop.SetLineWidth(line_width) # Add scalar bar if available if stitle is not None and show_scalar_bar and not rgb: self.add_scalar_bar(stitle, **scalar_bar_args) return actor def update_scalar_bar_range(self, clim, name=None): """Update the value range of the active or named scalar bar. Parameters ---------- 2 item list The new range of scalar bar. Example: ``[-1, 2]``. name : str, optional The title of the scalar bar to update """ if isinstance(clim, float) or isinstance(clim, int): clim = [-clim, clim] if len(clim) != 2: raise TypeError('clim argument must be a length 2 iterable of values: (min, max).') if name is None: if not hasattr(self, 'mapper'): raise RuntimeError('This plotter does not have an active mapper.') return self.mapper.SetScalarRange(*clim) # Use the name to find the desired actor def update_mapper(mapper): return mapper.SetScalarRange(*clim) try: for m in self._scalar_bar_mappers[name]: update_mapper(m) except KeyError: raise KeyError('Name ({}) not valid/not found in this plotter.') return @property def camera_set(self): """ Returns if the camera of the active renderer has been set """ return self.renderer.camera_set def get_default_cam_pos(self): """ Return the default camera position of the active renderer """ return self.renderer.get_default_cam_pos() @camera_set.setter def camera_set(self, is_set): """ Sets if the camera has been set on the active renderer""" self.renderer.camera_set = is_set @property def renderer(self): """ simply returns the active renderer """ return self.renderers[self._active_renderer_index] @property def bounds(self): """ Returns the bounds of the active renderer """ return self.renderer.bounds @property def center(self): """ Returns the center of the active renderer """ return self.renderer.center def update_bounds_axes(self): """ Update the bounds of the active renderer """ return self.renderer.update_bounds_axes() def clear(self): """ Clears plot by removing all actors and properties """ for renderer in self.renderers: renderer.RemoveAllViewProps() self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS)) self._scalar_bar_slot_lookup = {} self._scalar_bar_ranges = {} self._scalar_bar_mappers = {} self._scalar_bar_actors = {} self._scalar_bar_widgets = {} def remove_actor(self, actor, reset_camera=False): """ Removes an actor from the Plotter. Parameters ---------- actor : vtk.vtkActor Actor that has previously added to the Renderer. reset_camera : bool, optional Resets camera so all actors can be seen. Returns ------- success : bool True when actor removed. False when actor has not been removed. """ for renderer in self.renderers: renderer.remove_actor(actor, reset_camera) return True def add_actor(self, uinput, reset_camera=False, name=None, loc=None, culling=False): """ Adds an actor to render window. Creates an actor if input is a mapper. Parameters ---------- uinput : vtk.vtkMapper or vtk.vtkActor vtk mapper or vtk actor to be added. reset_camera : bool, optional Resets the camera when true. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. culling : bool optional Does not render faces that should not be visible to the plotter. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Default False. Returns ------- actor : vtk.vtkActor The actor. actor_properties : vtk.Properties Actor properties. """ # add actor to the correct render window self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] return renderer.add_actor(uinput, reset_camera, name, culling) def loc_to_index(self, loc): """ Return index of the render window given a location index. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. Returns ------- idx : int Index of the render window. """ if loc is None: return self._active_renderer_index elif isinstance(loc, int): return loc elif isinstance(loc, collections.Iterable): assert len(loc) == 2, '"loc" must contain two items' return loc[0]*self.shape[0] + loc[1] def index_to_loc(self, index): """Convert a 1D index location to the 2D location on the plotting grid """ sz = int(self.shape[0] * self.shape[1]) idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape) args = np.argwhere(idxs == index) if len(args) < 1: raise RuntimeError('Index ({}) is out of range.') return args[0] @property def camera(self): """ The active camera of the active renderer """ return self.renderer.camera def add_axes_at_origin(self, loc=None): """ Add axes actor at the origin of a render window. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. When None, defaults to the active render window. Returns -------- marker_actor : vtk.vtkAxesActor vtkAxesActor actor """ self._active_renderer_index = self.loc_to_index(loc) return self.renderers[self._active_renderer_index].add_axes_at_origin() def show_bounds(self, mesh=None, bounds=None, show_xaxis=True, show_yaxis=True, show_zaxis=True, show_xlabels=True, show_ylabels=True, show_zlabels=True, italic=False, bold=True, shadow=False, font_size=None, font_family=None, color=None, xlabel='X Axis', ylabel='Y Axis', zlabel='Z Axis', use_2d=False, grid=None, location='closest', ticks=None, all_edges=False, corner_factor=0.5, fmt=None, minor_ticks=False, loc=None): """ Adds bounds axes. Shows the bounds of the most recent input mesh unless mesh is specified. Parameters ---------- mesh : vtkPolydata or unstructured grid, optional Input mesh to draw bounds axes around bounds : list or tuple, optional Bounds to override mesh bounds. [xmin, xmax, ymin, ymax, zmin, zmax] show_xaxis : bool, optional Makes x axis visible. Default True. show_yaxis : bool, optional Makes y axis visible. Default True. show_zaxis : bool, optional Makes z axis visible. Default True. show_xlabels : bool, optional Shows x labels. Default True. show_ylabels : bool, optional Shows y labels. Default True. show_zlabels : bool, optional Shows z labels. Default True. italic : bool, optional Italicises axis labels and numbers. Default False. bold : bool, optional Bolds axis labels and numbers. Default True. shadow : bool, optional Adds a black shadow to the text. Default False. font_size : float, optional Sets the size of the label font. Defaults to 16. font_family : string, optional Font family. Must be either courier, times, or arial. color : string or 3 item list, optional Color of all labels and axis titles. Default white. Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' xlabel : string, optional Title of the x axis. Default "X Axis" ylabel : string, optional Title of the y axis. Default "Y Axis" zlabel : string, optional Title of the z axis. Default "Z Axis" use_2d : bool, optional A bug with vtk 6.3 in Windows seems to cause this function to crash this can be enabled for smoother plotting for other enviornments. grid : bool or str, optional Add grid lines to the backface (``True``, ``'back'``, or ``'backface'``) or to the frontface (``'front'``, ``'frontface'``) of the axes actor. location : str, optional Set how the axes are drawn: either static (``'all'``), closest triad (``front``), furthest triad (``'back'``), static closest to the origin (``'origin'``), or outer edges (``'outer'``) in relation to the camera position. Options include: ``'all', 'front', 'back', 'origin', 'outer'`` ticks : str, optional Set how the ticks are drawn on the axes grid. Options include: ``'inside', 'outside', 'both'`` all_edges : bool, optional Adds an unlabeled and unticked box at the boundaries of plot. Useful for when wanting to plot outer grids while still retaining all edges of the boundary. corner_factor : float, optional If ``all_edges````, this is the factor along each axis to draw the default box. Dafuault is 0.5 to show the full box. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. Returns ------- cube_axes_actor : vtk.vtkCubeAxesActor Bounds actor Examples -------- >>> import vtki >>> from vtki import examples >>> mesh = vtki.Sphere() >>> plotter = vtki.Plotter() >>> _ = plotter.add_mesh(mesh) >>> _ = plotter.show_bounds(grid='front', location='outer', all_edges=True) >>> plotter.show() # doctest:+SKIP """ kwargs = locals() _ = kwargs.pop('self') _ = kwargs.pop('loc') self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] renderer.show_bounds(**kwargs) def add_bounds_axes(self, *args, **kwargs): """Deprecated""" logging.warning('`add_bounds_axes` is deprecated. Use `show_bounds` or `show_grid`.') return self.show_bounds(*args, **kwargs) def add_bounding_box(self, color=None, corner_factor=0.5, line_width=None, opacity=1.0, render_lines_as_tubes=False, lighting=None, reset_camera=None, loc=None): """ Adds an unlabeled and unticked box at the boundaries of plot. Useful for when wanting to plot outer grids while still retaining all edges of the boundary. Parameters ---------- corner_factor : float, optional If ``all_edges``, this is the factor along each axis to draw the default box. Dafuault is 0.5 to show the full box. corner_factor : float, optional This is the factor along each axis to draw the default box. Dafuault is 0.5 to show the full box. line_width : float, optional Thickness of lines. opacity : float, optional Opacity of mesh. Should be between 0 and 1. Default 1.0 loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. """ kwargs = locals() _ = kwargs.pop('self') _ = kwargs.pop('loc') self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] return renderer.add_bounding_box(**kwargs) def remove_bounding_box(self, loc=None): """ Removes bounding box from the active renderer. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. """ self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] renderer.remove_bounding_box() def remove_bounds_axes(self, loc=None): """ Removes bounds axes from the active renderer. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. """ self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] renderer.remove_bounds_axes() def subplot(self, index_x, index_y): """ Sets the active subplot. Parameters ---------- index_x : int Index of the subplot to activate in the x direction. index_y : int Index of the subplot to activate in the y direction. """ self._active_renderer_index = self.loc_to_index((index_x, index_y)) def show_grid(self, **kwargs): """ A wrapped implementation of ``show_bounds`` to change default behaviour to use gridlines and showing the axes labels on the outer edges. This is intended to be silimar to ``matplotlib``'s ``grid`` function. """ kwargs.setdefault('grid', 'back') kwargs.setdefault('location', 'outer') kwargs.setdefault('ticks', 'both') return self.show_bounds(**kwargs) def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True): """ Scale all the datasets in the scene of the active renderer. Scaling in performed independently on the X, Y and Z axis. A scale of zero is illegal and will be replaced with one. Parameters ---------- xscale : float, optional Scaling of the x axis. Must be greater than zero. yscale : float, optional Scaling of the y axis. Must be greater than zero. zscale : float, optional Scaling of the z axis. Must be greater than zero. reset_camera : bool, optional Resets camera so all actors can be seen. """ self.renderer.set_scale(xscale, yscale, zscale, reset_camera) @property def scale(self): """ The scaling of the active renderer. """ return self.renderer.scale def _update_axes_color(self, color): """Internal helper to set the axes label color""" prop_x = self.axes_actor.GetXAxisCaptionActor2D().GetCaptionTextProperty() prop_y = self.axes_actor.GetYAxisCaptionActor2D().GetCaptionTextProperty() prop_z = self.axes_actor.GetZAxisCaptionActor2D().GetCaptionTextProperty() if color is None: color = rcParams['font']['color'] color = parse_color(color) for prop in [prop_x, prop_y, prop_z]: prop.SetColor(color[0], color[1], color[2]) prop.SetShadow(False) return def add_scalar_bar(self, title=None, n_labels=5, italic=False, bold=True, title_font_size=None, label_font_size=None, color=None, font_family=None, shadow=False, mapper=None, width=None, height=None, position_x=None, position_y=None, vertical=None, interactive=False, fmt=None): """ Creates scalar bar using the ranges as set by the last input mesh. Parameters ---------- title : string, optional Title of the scalar bar. Default None n_labels : int, optional Number of labels to use for the scalar bar. italic : bool, optional Italicises title and bar labels. Default False. bold : bool, optional Bolds title and bar labels. Default True title_font_size : float, optional Sets the size of the title font. Defaults to None and is sized automatically. label_font_size : float, optional Sets the size of the title font. Defaults to None and is sized automatically. color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' font_family : string, optional Font family. Must be either courier, times, or arial. shadow : bool, optional Adds a black shadow to the text. Defaults to False width : float, optional The percentage (0 to 1) width of the window for the colorbar height : float, optional The percentage (0 to 1) height of the window for the colorbar position_x : float, optional The percentage (0 to 1) along the windows's horizontal direction to place the bottom left corner of the colorbar position_y : float, optional The percentage (0 to 1) along the windows's vertical direction to place the bottom left corner of the colorbar interactive : bool, optional Use a widget to control the size and location of the scalar bar. Notes ----- Setting title_font_size, or label_font_size disables automatic font sizing for both the title and label. """ if font_family is None: font_family = rcParams['font']['family'] if label_font_size is None: label_font_size = rcParams['font']['label_size'] if title_font_size is None: title_font_size = rcParams['font']['title_size'] if color is None: color = rcParams['font']['color'] if fmt is None: fmt = rcParams['font']['fmt'] if vertical is None: if rcParams['colorbar_orientation'].lower() == 'vertical': vertical = True # Automatically choose size if not specified if width is None: if vertical: width = rcParams['colorbar_vertical']['width'] else: width = rcParams['colorbar_horizontal']['width'] if height is None: if vertical: height = rcParams['colorbar_vertical']['height'] else: height = rcParams['colorbar_horizontal']['height'] # check if maper exists if mapper is None: if not hasattr(self, 'mapper'): raise Exception('Mapper does not exist. ' + 'Add a mesh with scalars first.') mapper = self.mapper if title: # Check that this data hasn't already been plotted if title in list(self._scalar_bar_ranges.keys()): rng = list(self._scalar_bar_ranges[title]) newrng = mapper.GetScalarRange() oldmappers = self._scalar_bar_mappers[title] # get max for range and reset everything if newrng[0] < rng[0]: rng[0] = newrng[0] if newrng[1] > rng[1]: rng[1] = newrng[1] for m in oldmappers: m.SetScalarRange(rng[0], rng[1]) mapper.SetScalarRange(rng[0], rng[1]) self._scalar_bar_mappers[title].append(mapper) self._scalar_bar_ranges[title] = rng # Color bar already present and ready to be used so returning return # Automatically choose location if not specified if position_x is None or position_y is None: try: slot = min(self._scalar_bar_slots) self._scalar_bar_slots.remove(slot) self._scalar_bar_slot_lookup[title] = slot except: raise RuntimeError('Maximum number of color bars reached.') if position_x is None: if vertical: position_x = rcParams['colorbar_vertical']['position_x'] position_x -= slot * width else: position_x = rcParams['colorbar_horizontal']['position_x'] if position_y is None: if vertical: position_y = rcParams['colorbar_vertical']['position_y'] else: position_y = rcParams['colorbar_horizontal']['position_y'] position_y += slot * height # Adjust to make sure on the screen if position_x + width > 1: position_x -= width if position_y + height > 1: position_y -= height # parse color color = parse_color(color) # Create scalar bar self.scalar_bar = vtk.vtkScalarBarActor() self.scalar_bar.SetLookupTable(mapper.GetLookupTable()) self.scalar_bar.SetNumberOfLabels(n_labels) # edit the size of the colorbar self.scalar_bar.SetHeight(height) self.scalar_bar.SetWidth(width) self.scalar_bar.SetPosition(position_x, position_y) if fmt is not None: self.scalar_bar.SetLabelFormat(fmt) if vertical: self.scalar_bar.SetOrientationToVertical() else: self.scalar_bar.SetOrientationToHorizontal() if label_font_size is None or title_font_size is None: self.scalar_bar.UnconstrainedFontSizeOn() if n_labels: label_text = self.scalar_bar.GetLabelTextProperty() label_text.SetColor(color) label_text.SetShadow(shadow) # Set font label_text.SetFontFamily(parse_font_family(font_family)) label_text.SetItalic(italic) label_text.SetBold(bold) if label_font_size: label_text.SetFontSize(label_font_size) # Set properties if title: rng = mapper.GetScalarRange() self._scalar_bar_ranges[title] = rng self._scalar_bar_mappers[title] = [mapper] self.scalar_bar.SetTitle(title) title_text = self.scalar_bar.GetTitleTextProperty() title_text.SetJustificationToCentered() title_text.SetItalic(italic) title_text.SetBold(bold) title_text.SetShadow(shadow) if title_font_size: title_text.SetFontSize(title_font_size) # Set font title_text.SetFontFamily(parse_font_family(font_family)) # set color title_text.SetColor(color) self._scalar_bar_actors[title] = self.scalar_bar if interactive is None: interactive = rcParams['interactive'] if shape != (1, 1): interactive = False elif interactive and self.shape != (1, 1): err_str = 'Interactive scalar bars disabled for multi-renderer plots' raise Exception(err_str) if interactive and hasattr(self, 'iren'): self.scalar_widget = vtk.vtkScalarBarWidget() self.scalar_widget.SetScalarBarActor(self.scalar_bar) self.scalar_widget.SetInteractor(self.iren) self.scalar_widget.SetEnabled(1) rep = self.scalar_widget.GetRepresentation() # self.scalar_widget.On() if vertical is True or vertical is None: rep.SetOrientation(1) # 0 = Horizontal, 1 = Vertical else: rep.SetOrientation(0) # 0 = Horizontal, 1 = Vertical self._scalar_bar_widgets[title] = self.scalar_widget self.add_actor(self.scalar_bar, reset_camera=False) def update_scalars(self, scalars, mesh=None, render=True): """ Updates scalars of the an object in the plotter. Parameters ---------- scalars : np.ndarray Scalars to replace existing scalars. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Forces an update to the render window. Default True. """ if mesh is None: mesh = self.mesh if isinstance(mesh, (collections.Iterable, vtki.MultiBlock)): # Recursive if need to update scalars on many meshes for m in mesh: self.update_scalars(scalars, mesh=m, render=False) if render: self.ren_win.Render() return if isinstance(scalars, str): # Grab scalar array if name given scalars = get_scalar(mesh, scalars) if scalars is None: if render: self.ren_win.Render() return if scalars.shape[0] == mesh.GetNumberOfPoints(): data = mesh.GetPointData() elif scalars.shape[0] == mesh.GetNumberOfCells(): data = mesh.GetCellData() else: _raise_not_matching(scalars, mesh) vtk_scalars = data.GetScalars() if vtk_scalars is None: raise Exception('No active scalars') s = VN.vtk_to_numpy(vtk_scalars) s[:] = scalars data.Modified() try: # Why are the points updated here? Not all datasets have points # and only the scalar array is modified by this function... mesh.GetPoints().Modified() except: pass if render: self.ren_win.Render() def update_coordinates(self, points, mesh=None, render=True): """ Updates the points of the an object in the plotter. Parameters ---------- points : np.ndarray Points to replace existing points. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Forces an update to the render window. Default True. """ if mesh is None: mesh = self.mesh mesh.points = points if render: self._render() def close(self): """ closes render window """ # must close out axes marker if hasattr(self, 'axes_widget'): del self.axes_widget # reset scalar bar stuff self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS)) self._scalar_bar_slot_lookup = {} self._scalar_bar_ranges = {} self._scalar_bar_mappers = {} if hasattr(self, 'ren_win'): self.ren_win.Finalize() del self.ren_win if hasattr(self, '_style'): del self._style if hasattr(self, 'iren'): self.iren.RemoveAllObservers() del self.iren if hasattr(self, 'textActor'): del self.textActor # end movie if hasattr(self, 'mwriter'): try: self.mwriter.close() except BaseException: pass def add_text(self, text, position=None, font_size=50, color=None, font=None, shadow=False, name=None, loc=None): """ Adds text to plot object in the top left corner by default Parameters ---------- text : str The text to add the the rendering position : tuple(float) Length 2 tuple of the pixelwise position to place the bottom left corner of the text box. Default is to find the top left corner of the renderering window and place text box up there. font : string, optional Font name may be courier, times, or arial shadow : bool, optional Adds a black shadow to the text. Defaults to False name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. Returns ------- textActor : vtk.vtkTextActor Text actor added to plot """ if font is None: font = rcParams['font']['family'] if font_size is None: font_size = rcParams['font']['size'] if color is None: color = rcParams['font']['color'] if position is None: # Set the position of the text to the top left corner window_size = self.window_size x = (window_size[0] * 0.02) / self.shape[0] y = (window_size[1] * 0.90) / self.shape[0] position = [x, y] self.textActor = vtk.vtkTextActor() self.textActor.SetPosition(position) self.textActor.GetTextProperty().SetFontSize(font_size) self.textActor.GetTextProperty().SetColor(parse_color(color)) self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font]) self.textActor.GetTextProperty().SetShadow(shadow) self.textActor.SetInput(text) self.add_actor(self.textActor, reset_camera=False, name=name, loc=loc) return self.textActor def open_movie(self, filename, framerate=24): """ Establishes a connection to the ffmpeg writer Parameters ---------- filename : str Filename of the movie to open. Filename should end in mp4, but other filetypes may be supported. See "imagio.get_writer" framerate : int, optional Frames per second. """ if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(vtki.FIGURE_PATH, filename) self.mwriter = imageio.get_writer(filename, fps=framerate) def open_gif(self, filename): """ Open a gif file. Parameters ---------- filename : str Filename of the gif to open. Filename must end in gif. """ if filename[-3:] != 'gif': raise Exception('Unsupported filetype. Must end in .gif') if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(vtki.FIGURE_PATH, filename) self._gif_filename = os.path.abspath(filename) self.mwriter = imageio.get_writer(filename, mode='I') def write_frame(self): """ Writes a single frame to the movie file """ if not hasattr(self, 'mwriter'): raise AssertionError('This plotter has not opened a movie or GIF file.') self.mwriter.append_data(self.image) @property def window_size(self): """ returns render window size """ return list(self.ren_win.GetSize()) @window_size.setter def window_size(self, window_size): """ set the render window size """ self.ren_win.SetSize(window_size[0], window_size[1]) def _run_image_filter(self, ifilter): # Update filter and grab pixels ifilter.Modified() ifilter.Update() image = vtki.wrap(ifilter.GetOutput()) img_size = image.dimensions img_array = vtki.utilities.point_scalar(image, 'ImageScalars') # Reshape and write tgt_size = (img_size[1], img_size[0], -1) return img_array.reshape(tgt_size)[::-1] @property def image_depth(self): """ Returns an image array of current render window """ ifilter = vtk.vtkWindowToImageFilter() ifilter.SetInput(self.ren_win) ifilter.ReadFrontBufferOff() ifilter.SetInputBufferTypeToZBuffer() return self._run_image_filter(ifilter) @property def image(self): """ Returns an image array of current render window """ if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'): return self.last_image ifilter = vtk.vtkWindowToImageFilter() ifilter.SetInput(self.ren_win) ifilter.ReadFrontBufferOff() if self.image_transparent_background: ifilter.SetInputBufferTypeToRGBA() else: ifilter.SetInputBufferTypeToRGB() return self._run_image_filter(ifilter) def enable_eye_dome_lighting(self): """Enable eye dome lighting (EDL) for active renderer""" return self.renderer.enable_eye_dome_lighting() def disable_eye_dome_lighting(self): """Disable eye dome lighting (EDL) for active renderer""" return self.renderer.disable_eye_dome_lighting() def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None): """ Adds lines to the plotting object. Parameters ---------- lines : np.ndarray or vtki.PolyData Points representing line segments. For example, two line segments would be represented as: np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' width : float, optional Thickness of lines name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. Returns ------- actor : vtk.vtkActor Lines actor. """ if not isinstance(lines, np.ndarray): raise Exception('Input should be an array of point segments') lines = vtki.lines_from_points(lines) # Create mapper and add lines mapper = vtk.vtkDataSetMapper() mapper.SetInputData(lines) rgb_color = parse_color(color) # legend label if label: if not isinstance(label, str): raise AssertionError('Label must be a string') self._labels.append([lines, label, rgb_color]) # Create actor self.scalar_bar = vtk.vtkActor() self.scalar_bar.SetMapper(mapper) self.scalar_bar.GetProperty().SetLineWidth(width) self.scalar_bar.GetProperty().EdgeVisibilityOn() self.scalar_bar.GetProperty().SetEdgeColor(rgb_color) self.scalar_bar.GetProperty().SetColor(rgb_color) self.scalar_bar.GetProperty().LightingOff() # Add to renderer self.add_actor(self.scalar_bar, reset_camera=False, name=name) return self.scalar_bar def remove_scalar_bar(self): """ Removes scalar bar """ if hasattr(self, 'scalar_bar'): self.remove_actor(self.scalar_bar, reset_camera=False) def add_point_labels(self, points, labels, italic=False, bold=True, font_size=None, text_color='k', font_family=None, shadow=False, show_points=True, point_color='k', point_size=5, name=None): """ Creates a point actor with one label from list labels assigned to each point. Parameters ---------- points : np.ndarray n x 3 numpy array of points. labels : list List of labels. Must be the same length as points. italic : bool, optional Italicises title and bar labels. Default False. bold : bool, optional Bolds title and bar labels. Default True font_size : float, optional Sets the size of the title font. Defaults to 16. text_color : string or 3 item list, optional, defaults to black Color of text. Either a string, rgb list, or hex color string. For example: text_color='white' text_color='w' text_color=[1, 1, 1] text_color='#FFFFFF' font_family : string, optional Font family. Must be either courier, times, or arial. shadow : bool, optional Adds a black shadow to the text. Defaults to False show_points : bool, optional Controls if points are visible. Default True point_color : string or 3 item list, optional, defaults to black Color of points (if visible). Either a string, rgb list, or hex color string. For example: text_color='white' text_color='w' text_color=[1, 1, 1] text_color='#FFFFFF' point_size : float, optional Size of points (if visible) name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. Returns ------- labelMapper : vtk.vtkvtkLabeledDataMapper VTK label mapper. Can be used to change properties of the labels. """ if font_family is None: font_family = rcParams['font']['family'] if font_size is None: font_size = rcParams['font']['size'] if len(points) != len(labels): raise Exception('There must be one label for each point') vtkpoints = vtki.PolyData(points) vtklabels = vtk.vtkStringArray() vtklabels.SetName('labels') for item in labels: vtklabels.InsertNextValue(str(item)) vtkpoints.GetPointData().AddArray(vtklabels) # create label mapper labelMapper = vtk.vtkLabeledDataMapper() labelMapper.SetInputData(vtkpoints) textprop = labelMapper.GetLabelTextProperty() textprop.SetItalic(italic) textprop.SetBold(bold) textprop.SetFontSize(font_size) textprop.SetFontFamily(parse_font_family(font_family)) textprop.SetColor(parse_color(text_color)) textprop.SetShadow(shadow) labelMapper.SetLabelModeToLabelFieldData() labelMapper.SetFieldDataName('labels') labelActor = vtk.vtkActor2D() labelActor.SetMapper(labelMapper) # add points if show_points: style = 'points' else: style = 'surface' self.add_mesh(vtkpoints, style=style, color=point_color, point_size=point_size) self.add_actor(labelActor, reset_camera=False, name=name) return labelMapper def add_points(self, points, **kwargs): """ Add points to a mesh """ kwargs['style'] = 'points' self.add_mesh(points, **kwargs) def add_arrows(self, cent, direction, mag=1, **kwargs): """ Adds arrows to plotting object """ direction = direction.copy() if cent.ndim != 2: cent = cent.reshape((-1, 3)) if direction.ndim != 2: direction = direction.reshape((-1, 3)) direction[:,0] *= mag direction[:,1] *= mag direction[:,2] *= mag pdata = vtki.vector_poly_data(cent, direction) # Create arrow object arrow = vtk.vtkArrowSource() arrow.Update() glyph3D = vtk.vtkGlyph3D() glyph3D.SetSourceData(arrow.GetOutput()) glyph3D.SetInputData(pdata) glyph3D.SetVectorModeToUseVector() glyph3D.Update() arrows = wrap(glyph3D.GetOutput()) return self.add_mesh(arrows, **kwargs) @staticmethod def _save_image(image, filename, return_img=None): """Internal helper for saving a NumPy image array""" if not image.size: raise Exception('Empty image. Have you run plot() first?') # write screenshot to file if isinstance(filename, str): if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(vtki.FIGURE_PATH, filename) if not return_img: return imageio.imwrite(filename, image) imageio.imwrite(filename, image) return image def screenshot(self, filename=None, transparent_background=False, return_img=None, window_size=None): """ Takes screenshot at current camera position Parameters ---------- filename : str, optional Location to write image to. If None, no image is written. transparent_background : bool, optional Makes the background transparent. Default False. return_img : bool, optional If a string filename is given and this is true, a NumPy array of the image will be returned. Returns ------- img : numpy.ndarray Array containing pixel RGB and alpha. Sized: [Window height x Window width x 3] for transparent_background=False [Window height x Window width x 4] for transparent_background=True Examples -------- >>> import vtki >>> sphere = vtki.Sphere() >>> plotter = vtki.Plotter() >>> actor = plotter.add_mesh(sphere) >>> plotter.screenshot('screenshot.png') # doctest:+SKIP """ if window_size is not None: self.window_size = window_size # configure image filter self.image_transparent_background = transparent_background # This if statement allows you to save screenshots of closed plotters # This is needed for the sphinx-gallery work if not hasattr(self, 'ren_win'): # If plotter has been closed... # check if last_image exists if hasattr(self, 'last_image'): # Save last image return self._save_image(self.last_image, filename, return_img) # Plotter hasn't been rendered or was improperly closed raise AttributeError('This plotter is unable to save a screenshot.') if isinstance(self, Plotter): # TODO: we need a consistent rendering function self.render() else: self._render() # debug: this needs to be called twice for some reason, img = self.image img = self.image return self._save_image(img, filename, return_img) def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False, size=None, name=None): """ Adds a legend to render window. Entries must be a list containing one string and color entry for each item. Parameters ---------- labels : list, optional When set to None, uses existing labels as specified by - add_mesh - add_lines - add_points List contianing one entry for each item to be added to the legend. Each entry must contain two strings, [label, color], where label is the name of the item to add, and color is the color of the label to add. bcolor : list or string, optional Background color, either a three item 0 to 1 RGB color list, or a matplotlib color string (e.g. 'w' or 'white' for a white color). If None, legend background is disabled. border : bool, optional Controls if there will be a border around the legend. Default False. size : list, optional Two float list, each float between 0 and 1. For example [0.1, 0.1] would make the legend 10% the size of the entire figure window. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. Returns ------- legend : vtk.vtkLegendBoxActor Actor for the legend. Examples -------- >>> import vtki >>> from vtki import examples >>> mesh = examples.load_hexbeam() >>> othermesh = examples.load_uniform() >>> plotter = vtki.Plotter() >>> _ = plotter.add_mesh(mesh, label='My Mesh') >>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh') >>> _ = plotter.add_legend() >>> plotter.show() # doctest:+SKIP Alternative manual example >>> import vtki >>> from vtki import examples >>> mesh = examples.load_hexbeam() >>> othermesh = examples.load_uniform() >>> legend_entries = [] >>> legend_entries.append(['My Mesh', 'w']) >>> legend_entries.append(['My Other Mesh', 'k']) >>> plotter = vtki.Plotter() >>> _ = plotter.add_mesh(mesh) >>> _ = plotter.add_mesh(othermesh, 'k') >>> _ = plotter.add_legend(legend_entries) >>> plotter.show() # doctest:+SKIP """ self.legend = vtk.vtkLegendBoxActor() if labels is None: # use existing labels if not self._labels: raise Exception('No labels input.\n\n' + 'Add labels to individual items when adding them to' + 'the plotting object with the "label=" parameter. ' + 'or enter them as the "labels" parameter.') self.legend.SetNumberOfEntries(len(self._labels)) for i, (vtk_object, text, color) in enumerate(self._labels): self.legend.SetEntry(i, vtk_object, text, parse_color(color)) else: self.legend.SetNumberOfEntries(len(labels)) legendface = single_triangle() for i, (text, color) in enumerate(labels): self.legend.SetEntry(i, legendface, text, parse_color(color)) if size: self.legend.SetPosition2(size[0], size[1]) if bcolor is None: self.legend.UseBackgroundOff() else: self.legend.UseBackgroundOn() self.legend.SetBackgroundColor(bcolor) if border: self.legend.BorderOn() else: self.legend.BorderOff() # Add to renderer self.add_actor(self.legend, reset_camera=False, name=name) return self.legend @property def camera_position(self): """ Returns camera position of the active render window """ return self.renderer.camera_position @camera_position.setter def camera_position(self, camera_location): """ Set camera position of the active render window """ self.renderer.camera_position = camera_location def reset_camera(self): """ Reset camera so it slides along the vector defined from camera position to focal point until all of the actors can be seen. """ self.renderer.reset_camera() self._render() def isometric_view(self): """DEPRECATED: Please use ``view_isometric``""" return self.view_isometric() def view_isometric(self): """ Resets the camera to a default isometric view showing all the actors in the scene. """ return self.renderer.view_isometric() def view_vector(self, vector, viewup=None): return self.renderer.view_vector(vector, viewup=viewup) def view_xy(self, negative=False): """View the XY plane""" return self.renderer.view_xy(negative=negative) def view_xz(self, negative=False): """View the XZ plane""" return self.renderer.view_xz(negative=negative) def view_yz(self, negative=False): """View the YZ plane""" return self.renderer.view_yz(negative=negative) def disable(self): """Disable this renderer's camera from being interactive""" return self.renderer.disable() def enable(self): """Enable this renderer's camera to be interactive""" return self.renderer.enable() def set_background(self, color, loc='all'): """ Sets background color Parameters ---------- color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' loc : int, tuple, list, or str, optional Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If ``loc='all'`` then all render windows will have their background set. """ if color is None: color = rcParams['background'] if isinstance(color, str): if color.lower() in 'paraview' or color.lower() in 'pv': # Use the default ParaView background color color = PV_BACKGROUND else: color = vtki.string_to_rgb(color) if loc =='all': for renderer in self.renderers: renderer.SetBackground(color) else: renderer = self.renderers[self.loc_to_index(loc)] renderer.SetBackground(color) @property def background_color(self): """ Returns background color of the first render window """ return self.renderers[0].GetBackground() @background_color.setter def background_color(self, color): """ Sets the background color of all the render windows """ self.set_background(color) def remove_legend(self): """ Removes legend actor """ if hasattr(self, 'legend'): self.remove_actor(self.legend, reset_camera=False) self._render() def enable_cell_picking(self, mesh=None, callback=None): """ Enables picking of cells. Press r to enable retangle based selection. Press "r" again to turn it off. Selection will be saved to self.picked_cells. Uses last input mesh for input Parameters ---------- mesh : vtk.UnstructuredGrid, optional UnstructuredGrid grid to select cells from. Uses last input grid by default. callback : function, optional When input, calls this function after a selection is made. The picked_cells are input as the first parameter to this function. """ if mesh is None: if not hasattr(self, 'mesh'): raise Exception('Input a mesh into the Plotter class first or ' + 'or set it in this function') mesh = self.mesh def pick_call_back(picker, event_id): extract = vtk.vtkExtractGeometry() mesh.cell_arrays['orig_extract_id'] = np.arange(mesh.n_cells) extract.SetInputData(mesh) extract.SetImplicitFunction(picker.GetFrustum()) extract.Update() self.picked_cells = vtki.wrap(extract.GetOutput()) if callback is not None: callback(self.picked_cells) area_picker = vtk.vtkAreaPicker() area_picker.AddObserver(vtk.vtkCommand.EndPickEvent, pick_call_back) self.enable_rubber_band_style() self.iren.SetPicker(area_picker) def generate_orbital_path(self, factor=3., n_points=20, viewup=None, z_shift=None): """Genrates an orbital path around the data scene Parameters ---------- facotr : float A scaling factor when biulding the orbital extent n_points : int number of points on the orbital path viewup : list(float) the normal to the orbital plane z_shift : float, optional shift the plane up/down from the center of the scene by this amount """ if viewup is None: viewup = rcParams['camera']['viewup'] center = list(self.center) bnds = list(self.bounds) if z_shift is None: z_shift = (bnds[5] - bnds[4]) * factor center[2] = center[2] + z_shift radius = (bnds[1] - bnds[0]) * factor y = (bnds[3] - bnds[2]) * factor if y > radius: radius = y return vtki.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points) def fly_to(point): """Given a position point, move the current camera's focal point to that point. The movement is animated over the number of frames specified in NumberOfFlyFrames. The LOD desired frame rate is used. """ return self.iren.FlyTo(self.renderer, *point) def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None, bkg=True): """Orbit on the given path focusing on the focus point Parameters ---------- path : vtki.PolyData Path of orbital points. The order in the points is the order of travel focus : list(float) of length 3, optional The point ot focus the camera. step : float, optional The timestep between flying to each camera position viewup : list(float) the normal to the orbital plane """ if focus is None: focus = self.center if viewup is None: viewup = rcParams['camera']['viewup'] if path is None: path = self.generate_orbital_path(viewup=viewup) if not is_vtki_obj(path): path = vtki.PolyData(path) points = path.points def orbit(): """Internal thread for running the orbit""" for point in points: self.set_position(point) self.set_focus(focus) self.set_viewup(viewup) time.sleep(step) if bkg: thread = Thread(target=orbit) thread.start() else: orbit() return def export_vtkjs(self, filename, compress_arrays=False): """ Export the current rendering scene as a VTKjs scene for rendering in a web browser """ if not hasattr(self, 'ren_win'): raise RuntimeError('Export must be called before showing/closing the scene.') if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(vtki.FIGURE_PATH, filename) return export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays) class Plotter(BasePlotter): """ Plotting object to display vtk meshes or numpy arrays. Example ------- >>> import vtki >>> from vtki import examples >>> mesh = examples.load_hexbeam() >>> another_mesh = examples.load_uniform() >>> plotter = vtki.Plotter() >>> _ = plotter.add_mesh(mesh, color='red') >>> _ = plotter.add_mesh(another_mesh, color='blue') >>> plotter.show() # doctest:+SKIP Parameters ---------- off_screen : bool, optional Renders off screen when False. Useful for automated screenshots. notebook : bool, optional When True, the resulting plot is placed inline a jupyter notebook. Assumes a jupyter console is active. Automatically enables off_screen. shape : list or tuple, optional Number of sub-render windows inside of the main window. Specify two across with ``shape=(2, 1)`` and a two by two grid with ``shape=(2, 2)``. By default there is only one render window. border : bool, optional Draw a border around each render window. Default False. border_color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' window_size : list, optional Window size in pixels. Defaults to [1024, 768] """ last_update_time = 0.0 q_pressed = False right_timer_id = -1 def __init__(self, off_screen=False, notebook=None, shape=(1, 1), border=None, border_color='k', border_width=1.0, window_size=None): """ Initialize a vtk plotting object """ super(Plotter, self).__init__(shape=shape, border=border, border_color=border_color, border_width=border_width) log.debug('Initializing') def on_timer(iren, event_id): """ Exit application if interactive renderer stops """ if event_id == 'TimerEvent': self.iren.TerminateApp() if vtki.TESTING_OFFSCREEN: off_screen = True if notebook is None: if run_from_ipython(): try: notebook = type(get_ipython()).__module__.startswith('ipykernel.') except NameError: pass self.notebook = notebook if self.notebook: off_screen = True self.off_screen = off_screen if window_size is None: window_size = vtki.rcParams['window_size'] # initialize render window self.ren_win = vtk.vtkRenderWindow() self.ren_win.SetBorders(True) for renderer in self.renderers: self.ren_win.AddRenderer(renderer) if self.off_screen: self.ren_win.SetOffScreenRendering(1) else: # Allow user to interact self.iren = vtk.vtkRenderWindowInteractor() self.iren.LightFollowCameraOff() self.iren.SetDesiredUpdateRate(30.0) self.iren.SetRenderWindow(self.ren_win) self.enable_trackball_style() self.iren.AddObserver("KeyPressEvent", self.key_press_event) self.update_style() # for renderer in self.renderers: # self.iren.SetRenderWindow(renderer) # Set background self.set_background(rcParams['background']) # Set window size self.window_size = window_size # add timer event if interactive render exists if hasattr(self, 'iren'): self.iren.AddObserver(vtk.vtkCommand.TimerEvent, on_timer) def show(self, title=None, window_size=None, interactive=True, auto_close=True, interactive_update=False, full_screen=False, screenshot=False, return_img=False): """ Creates plotting window Parameters ---------- title : string, optional Title of plotting window. window_size : list, optional Window size in pixels. Defaults to [1024, 768] interactive : bool, optional Enabled by default. Allows user to pan and move figure. auto_close : bool, optional Enabled by default. Exits plotting session when user closes the window when interactive is True. interactive_update: bool, optional Disabled by default. Allows user to non-blocking draw, user should call Update() in each iteration. full_screen : bool, optional Opens window in full screen. When enabled, ignores window_size. Default False. Returns ------- cpos : list List of camera position, focal point, and view up """ # reset unless camera for the first render unless camera is set if self.first_time: # and not self.camera_set: for renderer in self.renderers: if not renderer.camera_set: renderer.camera_position = renderer.get_default_cam_pos() renderer.ResetCamera() self.first_time = False if title: self.ren_win.SetWindowName(title) # if full_screen: if full_screen: self.ren_win.SetFullScreen(True) self.ren_win.BordersOn() # super buggy when disabled else: if window_size is None: window_size = self.window_size self.ren_win.SetSize(window_size[0], window_size[1]) # Render log.debug('Rendering') self.ren_win.Render() if interactive and (not self.off_screen): try: # interrupts will be caught here log.debug('Starting iren') self.update_style() self.iren.Initialize() if not interactive_update: self.iren.Start() except KeyboardInterrupt: log.debug('KeyboardInterrupt') self.close() raise KeyboardInterrupt # Keep track of image for sphinx-gallery self.last_image = self.screenshot(screenshot, return_img=True) # Get camera position before closing cpos = self.camera_position if self.notebook: # sanity check try: import IPython except ImportError: raise Exception('Install IPython to display image in a notebook') import PIL.Image disp = IPython.display.display(PIL.Image.fromarray(self.last_image)) if auto_close: self.close() if self.notebook: return disp if return_img or screenshot == True: return cpos, self.last_image return cpos def plot(self, *args, **kwargs): """ Present for backwards compatibility. Use `show()` instead """ return self.show(*args, **kwargs) def render(self): """ renders main window """ self.ren_win.Render() def single_triangle(): """ A single PolyData triangle """ points = np.zeros((3, 3)) points[1] = [1, 0, 0] points[2] = [0.5, 0.707, 0] cells = np.array([[3, 0, 1, 2]], ctypes.c_long) return vtki.PolyData(points, cells) def parse_color(color): """ Parses color into a vtk friendly rgb list """ if color is None: color = rcParams['color'] if isinstance(color, str): return vtki.string_to_rgb(color) elif len(color) == 3: return color else: raise Exception(""" Invalid color input Must ba string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF'""") def parse_font_family(font_family): """ checks font name """ # check font name font_family = font_family.lower() if font_family not in ['courier', 'times', 'arial']: raise Exception('Font must be either "courier", "times" ' + 'or "arial"') return FONT_KEYS[font_family]
{"hexsha": "36f0658bd4d3b351a93af03bc322ffc9a5176b8d", "size": 103054, "ext": "py", "lang": "Python", "max_stars_repo_path": "vtki/plotting.py", "max_stars_repo_name": "GuillaumeFavelier/pyvista", "max_stars_repo_head_hexsha": "dd2cb22464d0e96d8c92d91106283ee59b8b5041", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vtki/plotting.py", "max_issues_repo_name": "GuillaumeFavelier/pyvista", "max_issues_repo_head_hexsha": "dd2cb22464d0e96d8c92d91106283ee59b8b5041", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vtki/plotting.py", "max_forks_repo_name": "GuillaumeFavelier/pyvista", "max_forks_repo_head_hexsha": "dd2cb22464d0e96d8c92d91106283ee59b8b5041", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4137457045, "max_line_length": 115, "alphanum_fraction": 0.5802880043, "include": true, "reason": "import numpy", "num_tokens": 22115}
import numpy as np import pandas as pd # house list of lists house = [["hallway", 11.25], ["kitchen", 18.0], ["living room", 20.0], ["bedroom", 10.75], ["bathroom", 9.50]] # Build a for loop from scratch for a, [x, y] in enumerate(house): print("the " + str(x) + " is " + str(y) + " sqm") world = {"afghanistan": 30.55, "albania": 2.77, "algeria": 39.21} for k, v in world.items(): print(k + "--" + str(v)) np_height = np.array([1.73, 1.68, 1.71, 1.89, 1.79]) np_weight = np.array([65.4, 59.2, 63.6, 88.4, 68.7]) meas = np.array([np_height, np_weight]) for val in np.nditer(meas): # np.nditer() => x.item() print(val) print('=================================================') belnet_excel = pd.read_excel('belnet-subs-and-marketplace.xlsx', index_col=0) for index, row in belnet_excel.iterrows(): print(index) print(row) for year, row in belnet_excel.iterrows(): print(f'{index} : {row.iloc[1]}') for lab, row in belnet_excel.iterrows() : # - Creating Series on every iteration belnet_excel.loc[lab, "Number Count"] = len(str(row.iloc[1])) print(belnet_excel) # Code for loop that adds COUNTRY column for lab, row in belnet_excel.iterrows() : belnet_excel.loc[lab, "TEXTUPPER"] = row["Text"].upper() print(belnet_excel) belnet_excel["text_length"] = belnet_excel["TEXTUPPER"].apply(len) # same code but one line print(belnet_excel) # .apply(str.upper)
{"hexsha": "f1c1998e04db1d70d45b375e2a594d6cf54761f5", "size": 1461, "ext": "py", "lang": "Python", "max_stars_repo_path": "Day 15 - Practice, Study Case/1 - Practice.py", "max_stars_repo_name": "ServerCetin/hello_python3", "max_stars_repo_head_hexsha": "7cf0807e09c819c690f28ee30758f22355c79115", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Day 15 - Practice, Study Case/1 - Practice.py", "max_issues_repo_name": "ServerCetin/hello_python3", "max_issues_repo_head_hexsha": "7cf0807e09c819c690f28ee30758f22355c79115", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Day 15 - Practice, Study Case/1 - Practice.py", "max_forks_repo_name": "ServerCetin/hello_python3", "max_forks_repo_head_hexsha": "7cf0807e09c819c690f28ee30758f22355c79115", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6315789474, "max_line_length": 91, "alphanum_fraction": 0.605065024, "include": true, "reason": "import numpy", "num_tokens": 462}
module Occupation public export record Occupation where constructor CreateOccupation type : String id : String name : String
{"hexsha": "30e7cab8f6975eb33e3e00f256ad0d155b2edae9", "size": 135, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "src/Objects/Occupation.idr", "max_stars_repo_name": "GrandArchTemplar/IdrisVKAPI", "max_stars_repo_head_hexsha": "bcbb27d2591588c04709d83808deb864fe9e4bb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Objects/Occupation.idr", "max_issues_repo_name": "GrandArchTemplar/IdrisVKAPI", "max_issues_repo_head_hexsha": "bcbb27d2591588c04709d83808deb864fe9e4bb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Objects/Occupation.idr", "max_forks_repo_name": "GrandArchTemplar/IdrisVKAPI", "max_forks_repo_head_hexsha": "bcbb27d2591588c04709d83808deb864fe9e4bb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.5, "max_line_length": 30, "alphanum_fraction": 0.7703703704, "num_tokens": 31}
import random import hashlib import glob import os, sys import pickle import json import itertools import torch import numpy as np from codae.dataset import ConcatenatedEmbeddingDataset def get_mask_transformation(observation_mask, loss_mask): """ Create a boolean transformation matrix T to go from an observation mask matrix to a loss mask matrix. input observation_mask : list(bool) loss_mask : list(bool) output T : torch.Tensor((len(observation_mask), len(loss_mask))) """ T = torch.zeros((len(observation_mask), len(loss_mask))) b = True c = 0 for i in range(len(observation_mask)): if observation_mask[i] == 1: if not b: b = True T[i, c] = 1 c += 1 else: if b: b = False T[i, c] = 1 c += 1 return T class Normalizer: def __init__(self, normalizer, device, normalization_type="min_max"): """ input normalizer : MinMAxScaler object to extract attributes from device : str device on which to store tensors normalization_type : str how to normalize (min_max/...) """ self.normalization_type = normalization_type self.device = device self.min = torch.Tensor(normalizer.data_min_).to(device) self.max = torch.Tensor(normalizer.data_max_).to(device) self.scale = torch.Tensor(normalizer.data_range_).to(device) def do(self, data): """ normalize data using previously fitted model input data : (np.ndarray/torch.Tensor) data to normalize loss : bool use loss normalizer or not """ return (data - self.min) / self.scale def undo(self, data): """ denormalize data using previously fitted model input data : (np.ndarray/torch.Tensor) data to denormalize loss : bool use loss normalizer or not """ return (data * self.scale) + self.min def collate_embedding(batch): """ unzip and merge list of torch.Tensor into single Tensor by stacking them """ batch, indices = zip(*batch) return torch.stack(batch), indices def simple_collate(batch): """ merge list of torch.Tensor into single Tensor by stacking them """ return torch.stack(batch) def load_dataset_of_embeddings(embedding_path, config, cache_dir="tmp/"): using_cache = False dataset_cache = glob.glob(os.path.join(cache_dir, "*_dataset.bin")) if len(dataset_cache) > 0: # check for cached dataset dataset_cache_path = dataset_cache[0] dataset_hash = dataset_cache_path.split("/")[-1].split("_")[0] # load cached dataset if corresponding embedding file hasn't changed if dataset_hash == hashlib.sha1( str( os.stat( embedding_path)[9]).encode('utf-8') ).hexdigest(): #args.log.info("Using cached dataset.") using_cache = True try: with open(dataset_cache_path, 'rb') as f: dataset = pickle.load(f) except: raise Exception("Error while reading embedding json file.") else: # delete old cached dataset os.remove(dataset_cache_path) if not using_cache: # then load from json of embeddings try: with open(embedding_path, 'r') as f: embeddings = json.load(f) except: raise Exception("Error while reading embedding json file.") dataset = ConcatenatedEmbeddingDataset( embeddings=embeddings, used_category=config["DATASET"]["USED_CATEGORY"]) # write new cache to disk if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(os.path.join(cache_dir, "new_dataset_tmp.bin"), "wb") as f: pickle.dump( dataset, f ) dataset_cache_name = os.path.join(cache_dir, hashlib.sha1( str(\ os.stat(embedding_path)[9])\ .encode('utf-8') ).hexdigest() + "_dataset.bin") os.rename(os.path.join(cache_dir, "new_dataset_tmp.bin"), dataset_cache_name) return dataset class Corrupter: """ Create, handle, and keep track of corruption masks. """ def __init__(self, nb_observation, arch, k_max, device): """ input nb_observation : int arch : int k_max : int device : torch.device """ self.nb_observation = nb_observation self.arch = arch self.k_max = k_max self.device = device if (k_max < 0) | (k_max > len(self.arch)-1): raise Exception("Invalid k_max number. k_max > 0 && k_max < nb_predictor - 1") self.io_size = sum([v["size"] for v in self.arch]) self.nb_predictor = len(self.arch) # build binary mask tensors ############################################ # get binomial coefficent indexes indices = [i for i in range(self.nb_predictor)] binomial_coef_indices = [] self.nb_corruption_per_k = [0 for x in range(0, k_max)] for k in range(0, k_max): binomial_coef_indices.append( torch.LongTensor(list(itertools.combinations(indices, k+1))) ) self.nb_corruption_per_k[k] = len(binomial_coef_indices[-1]) self.nb_run = sum(self.nb_corruption_per_k) # dictionnary of stacked binary masks tensors binary_masks = [] for k, bci in enumerate(binomial_coef_indices): # for k missing variable for subset in bci: # for subset of indices of size k tmp = torch.ones((self.io_size)) for idx in subset: # for each corrupted variable tmp[self.arch[idx]["position"]:self.arch[idx]["position"]+self.arch[idx]["size"]] = 0 binary_masks.append(tmp) self.binary_masks = torch.stack(binary_masks) # build randomized index of mask to use per observation ################ # nb missing variable to pick at each augmentation run self.nb_missing_per_run = [0 for x in range(self.nb_run)] j = 0 for k in range(k_max): for i in range(self.nb_corruption_per_k[k]): self.nb_missing_per_run[j] = k+1 j += 1 # shuffle nb of missing variable to pick at each run per observation mask_to_use = [] self.corrupted_index = [x for x in range(self.nb_run)] for i in range(nb_observation): mask_to_use.append( torch.LongTensor(random.sample(self.corrupted_index, self.nb_run)) ) self.mask_to_use = torch.stack(mask_to_use) # compute nb subset where any variable is seen given k self.nb_subset_per_variable = [] for i in range(1, self.k_max+1): k_subset = 1 for j in range(1, i): k_subset *= (self.nb_predictor-j)/2 self.nb_subset_per_variable.append( k_subset ) #print(self.nb_subset_per_variable) def get_masks(self, batch_indices, run): """ Return list of [k_max] corruption masks as boolean tensors. input batch_indices : list(int) list of observation index run : int current augmentation run output masks : list(torch.Tensor) list of binary mask tensor for each [nb_missing_variable] """ masks = [torch.zeros((len(batch_indices), self.io_size), device=self.device) for x in range(self.k_max)] for i, idx in enumerate(batch_indices): # for each observation in batch # get nb missing variable k = self.nb_missing_per_run[self.mask_to_use[idx][run]]-1 # extract masks masks[k][i, :] = self.binary_masks[self.mask_to_use[idx][run]] return masks, torch.sum(torch.stack(masks, dim=0), dim=0)
{"hexsha": "a57f2ab94e6138333b7158187a2d92f41a8f9cb0", "size": 8169, "ext": "py", "lang": "Python", "max_stars_repo_path": "codae/tool/data_tool.py", "max_stars_repo_name": "victordeleau/MUI-DeepAutoEncoder", "max_stars_repo_head_hexsha": "d5b8351334df64b30b3f6929c934a936b66ed963", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "codae/tool/data_tool.py", "max_issues_repo_name": "victordeleau/MUI-DeepAutoEncoder", "max_issues_repo_head_hexsha": "d5b8351334df64b30b3f6929c934a936b66ed963", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codae/tool/data_tool.py", "max_forks_repo_name": "victordeleau/MUI-DeepAutoEncoder", "max_forks_repo_head_hexsha": "d5b8351334df64b30b3f6929c934a936b66ed963", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.179389313, "max_line_length": 112, "alphanum_fraction": 0.58697515, "include": true, "reason": "import numpy", "num_tokens": 1797}
import pytorch_lightning as pl from torch.utils.data import DataLoader, random_split, ConcatDataset, Subset from transformers import T5ForConditionalGeneration, Adafactor, T5Tokenizer from utils import * import dataloading as dl import warnings import datasets import torch import numpy as np # set seeds pl.seed_everything(73) # just here for cleaner console output warnings.filterwarnings("ignore") # text quality metrics sacrebleu = datasets.load_metric('sacrebleu') rouge = datasets.load_metric('rouge') meteor = datasets.load_metric('meteor') # pytorch-lightning module to fine-tune model on scores class LitScoreFineT5(pl.LightningModule): def __init__(self, batch_size): super(LitScoreFineT5, self).__init__() # Load model and tokenizer self.model = T5ForConditionalGeneration.from_pretrained('t5-base') self.tokenizer = T5Tokenizer.from_pretrained('t5-base') self.batch_size = batch_size # Load and split data data = dl.T5Dataset('preprocessed/score_kn1_train.npy') self.train_data, self.val_data = random_split(data, split(len(data)), generator=torch.Generator().manual_seed(42)) self.test_data = dl.T5Dataset('preprocessed/score_kn1_ua.npy') self.save_hyperparameters() def forward(self, tok_seq, attn_seq): # force min length of prediction return self.tokenizer.decode(self.model.generate(input_ids=tok_seq, attention_mask=attn_seq, min_length=11, max_length=128)[0], skip_special_tokens=True) def training_step(self, batch, batch_idx): text, text_attn, answer, lab = batch loss = self.model(input_ids=text, attention_mask=text_attn, labels=answer)[0].mean() self.log('Cross-Entropy-Loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, batch, batch_idx): text, text_attn, answer, lab = batch return {'prediction': self(text, text_attn), 'truth': self.tokenizer.decode(answer.squeeze(), skip_special_tokens=True), 'label': self.tokenizer.decode(lab.squeeze(), skip_special_tokens=True), 'original': self.tokenizer.decode(text.squeeze(), skip_special_tokens=True), } def validation_epoch_end(self, outputs): # validation array: first entry are all full text predictions, second entry gold standard, third entry label # and fourth entry label prediction val_data = [[x['prediction'] for x in outputs], [x['truth'] for x in outputs], [x['label'] for x in outputs], [x['prediction'].split(' ', 1)[0] for x in outputs]] pred = extract_model_pred(val_data[0]) truth = [x.split(' ', 2)[2] for x in val_data[1]] # calculate model selection metrics acc_data = np.array(val_data[2:]) sacrebleu_score = sacrebleu.compute(predictions=pred, references=[[x] for x in truth])['score'] rouge_score = rouge.compute(predictions=pred, references=truth)['rouge2'].mid.fmeasure meteor_score = meteor.compute(predictions=pred, references=truth)['meteor'] if len(acc_data[1]) > 0: mse_val, invalid = mse(acc_data[1], acc_data[0]) self.log('my_metric', (sacrebleu_score / 100 + rouge_score + meteor_score) / 3 * (1 - mse_val) * (1 - invalid / len(acc_data[1]))) else: print('\nInvalid mse') mse_val, invalid = 1 self.log('my_metric', 0) self.log('sacreBleu', sacrebleu_score) self.log('ROUGE', rouge_score) self.log('METEOR', meteor_score) print('MSE = {:.4f}, BLEU = {:.4f}, Rouge = {:.4f}, Meteor = {:.4f}' .format(mse_val, sacrebleu_score, rouge_score, meteor_score)) def test_step(self, batch, batch_idx): text, text_attn, answer, lab = batch return {'prediction': self(text, text_attn), 'truth': self.tokenizer.decode(answer.squeeze(), skip_special_tokens=True), 'label': self.tokenizer.decode(lab.squeeze(), skip_special_tokens=True), 'original': self.tokenizer.decode(text.squeeze(), skip_special_tokens=True), } def test_epoch_end(self, outputs): test_data = [[x['prediction'] for x in outputs], [x['truth'] for x in outputs], [x['original'] for x in outputs], [x['label'] for x in outputs], [x['prediction'].split(' ', 1)[0] for x in outputs]] pred = extract_model_pred(test_data[0]) truth = [x.split(' ', 2)[2] for x in test_data[1]] # calculate model metrics acc_data = np.array(test_data[3:]) sacrebleu_score = sacrebleu.compute(predictions=pred, references=[[x] for x in truth])['score'] rouge_score = rouge.compute(predictions=pred, references=truth)['rouge2'].mid.fmeasure meteor_score = meteor.compute(predictions=pred, references=truth)['meteor'] # Check if there are no empty predictions for mse if len(acc_data[1]) > 0: mse_val, invalid = mse(acc_data[1], acc_data[0]) self.log('mse', mse_val) else: print('\nInvalid mse') self.log('mse', 0) self.log('bleu', sacrebleu_score) self.log('rouge', rouge_score) self.log('meteor', meteor_score) print('MSE = {:.4f}, BLEU = {:.4f}, Rouge = {:.4f}, Meteor = {:.4f}' .format(mse_val, sacrebleu_score, rouge_score, meteor_score)) # FIXME: Currently recognizes different test sets by length, not safe! if acc_data.shape[1] == 252: save('models/score/kn1_ua_bertscore', test_data[:3]) else: save('models/score/kn1_uq_bertscore.npy', test_data[:3]) def configure_optimizers(self): return Adafactor(self.model.parameters(), lr=None, warmup_init=True, relative_step=True) def train_dataloader(self): return DataLoader(self.train_data, batch_size=self.batch_size, num_workers=0, shuffle=False) def val_dataloader(self): return DataLoader(self.val_data, batch_size=1, num_workers=0, shuffle=False) def test_dataloader(self): return DataLoader(self.test_data, batch_size=1, num_workers=0, shuffle=False) # pytorch-lightning module to fine-tune model on verification feedback class LitVerFineT5(pl.LightningModule): def __init__(self, batch_size, model=None): super(LitVerFineT5, self).__init__() if model != None: self.model = model else: self.model = T5ForConditionalGeneration.from_pretrained('t5-base') self.tokenizer = T5Tokenizer.from_pretrained('t5-base') self.batch_size = batch_size data = dl.T5Dataset('preprocessed/ver_kn1_train.npy') self.train_data, self.val_data = random_split(data, split(len(data)), generator=torch.Generator().manual_seed(42)) self.test_data = dl.T5Dataset('preprocessed/ver_kn1_ua.npy') self.save_hyperparameters() def forward(self, tok_seq, attn_seq): return self.tokenizer.decode(self.model.generate(input_ids=tok_seq, attention_mask=attn_seq, min_length=10, max_length=128)[0], skip_special_tokens=True) def training_step(self, batch, batch_idx): text, text_attn, answer, lab = batch loss = self.model(input_ids=text, attention_mask=text_attn, labels=answer)[0].mean() self.log('Cross-Entropy-Loss', loss, on_epoch=True) return loss def validation_step(self, batch, batch_idx): text, text_attn, answer, lab = batch return {'prediction': self(text, text_attn), 'truth': self.tokenizer.decode(answer.squeeze(), skip_special_tokens=True), 'label': self.tokenizer.decode(lab.squeeze(), skip_special_tokens=True), 'original': self.tokenizer.decode(text.squeeze(), skip_special_tokens=True), } def validation_epoch_end(self, outputs): # validation array, first entry are all full text predictions, second entry gold standard, third entry label # and fourth entry label prediction val_data = [[x['prediction'] for x in outputs], [x['truth'] for x in outputs], [x['label'] for x in outputs]] pred = extract_pred(val_data[0]) truth = [x.split(':', 1)[1] for x in val_data[1]] label_pred = extract_label(val_data[0]) acc_data = np.array([val_data[2], label_pred]) val_acc = np.sum(acc_data[0] == acc_data[1]) / acc_data.shape[1] val_weighted = weighted_f1(acc_data[1], acc_data[0]) val_macro = macro_f1(acc_data[1], acc_data[0]) sacrebleu_score = sacrebleu.compute(predictions=pred, references=[[x] for x in truth])['score'] rouge_score = rouge.compute(predictions=pred, references=truth)['rouge2'].mid.fmeasure meteor_score = meteor.compute(predictions=pred, references=truth)['meteor'] self.log('my_metric', (sacrebleu_score / 100 + rouge_score + meteor_score) / 3 * val_macro) self.log('sacreBleu', sacrebleu_score) self.log('val_macro', val_macro) self.log('ROUGE', rouge_score) self.log('METEOR', meteor_score) print('Acc = {:.4f}, M-F1 = {:.4f}, W-F1 = {:.4f}, BLEU = {:.4f}, Rouge = {:.4f}, Meteor = {:.4f}' .format(val_acc, val_macro, val_weighted, sacrebleu_score, rouge_score, meteor_score)) def test_step(self, batch, batch_idx): text, text_attn, answer, lab = batch return {'prediction': self(text, text_attn), 'truth': self.tokenizer.decode(answer.squeeze(), skip_special_tokens=True), 'label': self.tokenizer.decode(lab.squeeze(), skip_special_tokens=True), 'original': self.tokenizer.decode(text.squeeze(), skip_special_tokens=True), } def test_epoch_end(self, outputs): # validation array, first entry are all full text predictions, second entry gold standard, third entry label # and fourth entry label prediction test_data = [[x['prediction'] for x in outputs], [x['truth'] for x in outputs], [x['original'] for x in outputs], [x['label'] for x in outputs]] pred = extract_pred(test_data[0]) truth = [x.split(':', 1)[1] for x in test_data[1]] label_pred = extract_label(test_data[0]) acc_data = np.array([test_data[3], label_pred]) val_acc = np.sum(acc_data[0] == acc_data[1]) / acc_data.shape[1] val_weighted = weighted_f1(acc_data[1], acc_data[0]) val_macro = macro_f1(acc_data[1], acc_data[0]) sacrebleu_score = sacrebleu.compute(predictions=pred, references=[[x] for x in truth])['score'] rouge_score = rouge.compute(predictions=pred, references=truth)['rouge2'].mid.fmeasure meteor_score = meteor.compute(predictions=pred, references=truth)['meteor'] self.log('bleu', sacrebleu_score) self.log('macro_f1', val_macro) self.log('rouge', rouge_score) self.log('meteor', meteor_score) self.log('acc', val_acc) self.log('weighted', val_weighted) print('Acc = {:.4f}, M-F1 = {:.4f}, W-F1 = {:.4f}, BLEU = {:.4f}, Rouge = {:.4f}, Meteor = {:.4f}' .format(val_acc, val_macro, val_weighted, sacrebleu_score, rouge_score, meteor_score)) # FIXME: Currently recognizes different test sets by length, not safe! if acc_data.shape[1] == 252: save('models/ver/kn1_ua_bertscore.npy', test_data[:3]) else: save('models/ver/kn1_uq_bertscore.npy', test_data[:3]) def configure_optimizers(self): return Adafactor(self.model.parameters(), lr=None, warmup_init=True, relative_step=True) def train_dataloader(self): return DataLoader(self.train_data, batch_size=self.batch_size, num_workers=0, shuffle=False) def val_dataloader(self): return DataLoader(self.val_data, batch_size=1, num_workers=0, shuffle=False) def test_dataloader(self): return DataLoader(self.test_data, batch_size=1, num_workers=0, shuffle=False) # pytorch-lightning module to fine-tune model on verification feedback class LitMultiT5(pl.LightningModule): def __init__(self, batch_size): super(LitMultiT5, self).__init__() self.model = T5ForConditionalGeneration.from_pretrained('t5-base') self.tokenizer = T5Tokenizer.from_pretrained('t5-base') # KN1 dataset self.batch_size = batch_size kn1_normal = dl.T5Dataset('preprocessed/ver_kn1_train.npy') kn1_label = dl.T5Dataset('preprocessed/label_only_kn1.npy') self.kn1_train = random_split(kn1_label, split(len(kn1_label)), generator=torch.Generator().manual_seed(42))[0] self.val_data = random_split(kn1_normal, split(len(kn1_normal)), generator=torch.Generator().manual_seed(42))[1] self.seb = dl.T5Dataset("preprocessed/seb_train.npy") self.cose = dl.T5Dataset('preprocessed/cose_train.npy') self.esnli = dl.T5Dataset('preprocessed/esnli_train.npy') self.test_data = dl.T5Dataset('preprocessed/ver_kn1_ua.npy') self.save_hyperparameters() def forward(self, tok_seq, attn_seq): return self.tokenizer.decode(self.model.generate(input_ids=tok_seq, attention_mask=attn_seq, min_length=10, max_length=128)[0], skip_special_tokens=True) def training_step(self, batch, batch_idx): text, text_attn, answer, lab = batch return self.model(input_ids=text, attention_mask=text_attn, labels=answer)[0].mean() def validation_step(self, batch, batch_idx): text, text_attn, answer, lab = batch return {'prediction': self(text, text_attn), 'truth': self.tokenizer.decode(answer.squeeze(), skip_special_tokens=True), 'label': self.tokenizer.decode(lab.squeeze(), skip_special_tokens=True), 'original': self.tokenizer.decode(text.squeeze(), skip_special_tokens=True), } def validation_epoch_end(self, outputs): # validation array, first entry are all full text predictions, second entry gold standard, third entry label # and fourth entry label prediction val_data = [[x['prediction'] for x in outputs], [x['truth'] for x in outputs], [x['label'] for x in outputs]] pred = extract_pred(val_data[0]) truth = [x.split(':', 1)[1] for x in val_data[1]] label_pred = extract_label(val_data[0]) acc_data = np.array([val_data[2], label_pred]) val_acc = np.sum(acc_data[0] == acc_data[1]) / acc_data.shape[1] val_weighted = weighted_f1(acc_data[1], acc_data[0]) val_macro = macro_f1(acc_data[1], acc_data[0]) sacrebleu_score = sacrebleu.compute(predictions=pred, references=[[x] for x in truth])['score'] rouge_score = rouge.compute(predictions=pred, references=truth)['rouge2'].mid.fmeasure meteor_score = meteor.compute(predictions=pred, references=truth)['meteor'] self.log('my_metric', (sacrebleu_score / 100 + rouge_score + meteor_score) / 3 * val_macro) self.log('bleu', sacrebleu_score) self.log('val_macro', val_macro) self.log('rouge', rouge_score) self.log('meteor', meteor_score) print('Acc = {:.4f}, M-F1 = {:.4f}, W-F1 = {:.4f}, BLEU = {:.4f}, Rouge = {:.4f}, Meteor = {:.4f}' .format(val_acc, val_macro, val_weighted, sacrebleu_score, rouge_score, meteor_score)) def test_step(self, batch, batch_idx): text, text_attn, answer, lab = batch return {'prediction': self(text, text_attn), 'truth': self.tokenizer.decode(answer.squeeze(), skip_special_tokens=True), 'label': self.tokenizer.decode(lab.squeeze(), skip_special_tokens=True), 'original': self.tokenizer.decode(text.squeeze(), skip_special_tokens=True), } def test_epoch_end(self, outputs): # validation array, first entry are all full text predictions, second entry gold standard, third entry label # and fourth entry label prediction test_data = [[x['prediction'] for x in outputs], [x['truth'] for x in outputs], [x['original'] for x in outputs], [x['label'] for x in outputs]] pred = extract_pred(test_data[0]) truth = [x.split(':', 1)[1] for x in test_data[1]] label_pred = extract_label(test_data[0]) acc_data = np.array([test_data[3], label_pred]) val_acc = np.sum(acc_data[0] == acc_data[1]) / acc_data.shape[1] val_weighted = weighted_f1(acc_data[1], acc_data[0]) val_macro = macro_f1(acc_data[1], acc_data[0]) sacrebleu_score = sacrebleu.compute(predictions=pred, references=[[x] for x in truth])['score'] rouge_score = rouge.compute(predictions=pred, references=truth)['rouge2'].mid.fmeasure meteor_score = meteor.compute(predictions=pred, references=truth)['meteor'] self.log('bleu', sacrebleu_score) self.log('macro_f1', val_macro) self.log('rouge', rouge_score) self.log('meteor', meteor_score) self.log('acc', val_acc) self.log('weighted', val_weighted) print('Acc = {:.4f}, M-F1 = {:.4f}, W-F1 = {:.4f}, BLEU = {:.4f}, Rouge = {:.4f}, Meteor = {:.4f}' .format(val_acc, val_macro, val_weighted, sacrebleu_score, rouge_score, meteor_score)) # FIXME: Currently recognizes different test sets by length, not safe! if acc_data.shape[1] == 252: np.save('ver_kn1_ua_bertscore.npy', np.array(test_data[:3]), allow_pickle=True) else: np.save('ver_kn1_uq_bertscore.npy', np.array(test_data[:3]), allow_pickle=True) def configure_optimizers(self): return Adafactor(self.model.parameters(), lr=None, warmup_init=True, relative_step=True) def train_dataloader(self): train_length = len(self.kn1_train) train_set = ConcatDataset( [ get_subset(self.esnli, train_length), get_subset(self.cose, train_length), get_subset(self.seb, train_length), self.kn1_train ] ) return DataLoader(train_set, batch_size=self.batch_size, num_workers=0, shuffle=True) def val_dataloader(self): return DataLoader(self.val_data, batch_size=1, num_workers=0, shuffle=False) def test_dataloader(self): return DataLoader(self.test_data, batch_size=1, num_workers=0, shuffle=False)
{"hexsha": "7b96e887c52d02aededc79c55a6c522332582e10", "size": 19388, "ext": "py", "lang": "Python", "max_stars_repo_path": "litT5.py", "max_stars_repo_name": "SebOchs/KN1-baseline", "max_stars_repo_head_hexsha": "17b9bb724ac2d372b04ee6f629d213ff930220ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "litT5.py", "max_issues_repo_name": "SebOchs/KN1-baseline", "max_issues_repo_head_hexsha": "17b9bb724ac2d372b04ee6f629d213ff930220ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "litT5.py", "max_forks_repo_name": "SebOchs/KN1-baseline", "max_forks_repo_head_hexsha": "17b9bb724ac2d372b04ee6f629d213ff930220ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-12T11:55:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T20:32:10.000Z", "avg_line_length": 50.4895833333, "max_line_length": 116, "alphanum_fraction": 0.6244068496, "include": true, "reason": "import numpy", "num_tokens": 4591}
! ! AtmProfile_netCDF_IO ! ! Module containing routines to read and write AtmProfile netCDF ! format files. ! ! ! CREATION HISTORY: ! Written by: Paul van Delst, 08-Jul-2002 ! paul.vandelst@noaa.gov ! MODULE AtmProfile_netCDF_IO ! ----------------- ! Environment setup ! ----------------- ! Module use USE Type_Kinds , ONLY: Long, Double USE File_Utility , ONLY: File_Exists USE Message_Handler, ONLY: SUCCESS, FAILURE, INFORMATION, Display_Message USE String_Utility , ONLY: StrClean USE AtmProfile_Define, ONLY: AtmProfile_type , & AtmProfile_Associated , & AtmProfile_Destroy , & AtmProfile_Create , & AtmProfile_Inspect , & AtmProfile_ValidRelease , & AtmProfile_Info , & AtmProfile_DefineVersion , & AtmProfile_Absorber_Name , & AtmProfile_Absorber_Units_Name, & AtmProfile_Absorber_Units_LBL USE netcdf ! Disable implicit typing IMPLICIT NONE ! ------------ ! Visibilities ! ------------ ! Everything private by default PRIVATE ! Procedures PUBLIC :: AtmProfile_netCDF_InquireFile PUBLIC :: AtmProfile_netCDF_ReadFile PUBLIC :: AtmProfile_netCDF_ReadGroup PUBLIC :: AtmProfile_netCDF_WriteFile PUBLIC :: AtmProfile_netCDF_WriteGroup PUBLIC :: AtmProfile_netCDF_IOVersion ! ----------------- ! Module parameters ! ----------------- ! Module Version Id CHARACTER(*), PARAMETER :: MODULE_VERSION_ID = & ! Default message string length INTEGER, PARAMETER :: ML = 512 ! Literal constants REAL(Double), PARAMETER :: ZERO = 0.0_Double REAL(Double), PARAMETER :: ONE = 1.0_Double ! Extra parameters not in netCDF(?) INTEGER, PARAMETER :: MAX_N_GROUPS = 8096 ! Global attribute names. Case sensitive CHARACTER(*), PARAMETER :: RELEASE_GATTNAME = 'Release' CHARACTER(*), PARAMETER :: VERSION_GATTNAME = 'Version' CHARACTER(*), PARAMETER :: TITLE_GATTNAME = 'Title' CHARACTER(*), PARAMETER :: HISTORY_GATTNAME = 'History' CHARACTER(*), PARAMETER :: COMMENT_GATTNAME = 'Comment' CHARACTER(*), PARAMETER :: PROFILE_SET_GATTNAME = 'Profile_Set' ! Dimension names CHARACTER(*), PARAMETER :: LEVEL_DIMNAME = 'n_Levels' CHARACTER(*), PARAMETER :: LAYER_DIMNAME = 'n_Layers' CHARACTER(*), PARAMETER :: ABSORBER_DIMNAME = 'n_Absorbers' CHARACTER(*), PARAMETER :: PROFILE_DIMNAME = 'n_Profiles' CHARACTER(*), PARAMETER :: DESCRIPTION_DIMNAME = 'pl_strlen' ! Variable names CHARACTER(*), PARAMETER :: PROFILE_VARNAME = 'Profile_Number' CHARACTER(*), PARAMETER :: DESCRIPTION_VARNAME = 'Profile_Description' CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_VARNAME = 'Climatology_Model' CHARACTER(*), PARAMETER :: YEAR_VARNAME = 'Year' CHARACTER(*), PARAMETER :: MONTH_VARNAME = 'Month' CHARACTER(*), PARAMETER :: DAY_VARNAME = 'Day' CHARACTER(*), PARAMETER :: HOUR_VARNAME = 'Hour' CHARACTER(*), PARAMETER :: LATITUDE_VARNAME = 'Latitude' CHARACTER(*), PARAMETER :: LONGITUDE_VARNAME = 'Longitude' CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_VARNAME = 'Surface_Altitude' CHARACTER(*), PARAMETER :: ABSORBER_ID_VARNAME = 'Absorber_Id' CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_VARNAME = 'Absorber_Units_Id' CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_VARNAME = 'Level_Pressure' CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_VARNAME = 'Level_Temperature' CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_VARNAME = 'Level_Absorber' CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_VARNAME = 'Level_Altitude' CHARACTER(*), PARAMETER :: LAYER_PRESSURE_VARNAME = 'Layer_Pressure' CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_VARNAME = 'Layer_Temperature' CHARACTER(*), PARAMETER :: LAYER_ABSORBER_VARNAME = 'Layer_Absorber' CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_VARNAME = 'Layer_Delta_Z' ! Variable long name attribute. CHARACTER(*), PARAMETER :: LONGNAME_ATTNAME = 'long_name' CHARACTER(*), PARAMETER :: PROFILE_LONGNAME = 'Profile Number' CHARACTER(*), PARAMETER :: DESCRIPTION_LONGNAME = 'Profile Description' CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_LONGNAME = 'Climatology Model' CHARACTER(*), PARAMETER :: YEAR_LONGNAME = 'Year' CHARACTER(*), PARAMETER :: MONTH_LONGNAME = 'Month' CHARACTER(*), PARAMETER :: DAY_LONGNAME = 'Day' CHARACTER(*), PARAMETER :: HOUR_LONGNAME = 'Hour' CHARACTER(*), PARAMETER :: LATITUDE_LONGNAME = 'Latitude' CHARACTER(*), PARAMETER :: LONGITUDE_LONGNAME = 'Longitude' CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_LONGNAME = 'Surface Altitude' CHARACTER(*), PARAMETER :: ABSORBER_ID_LONGNAME = 'Absorber ID' CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_LONGNAME = 'Absorber Units ID' CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_LONGNAME = 'Level pressure' CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_LONGNAME = 'Level temperature' CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_LONGNAME = 'Level absorber' CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_LONGNAME = 'Level altitude' CHARACTER(*), PARAMETER :: LAYER_PRESSURE_LONGNAME = 'Layer pressure' CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_LONGNAME = 'Layer temperature' CHARACTER(*), PARAMETER :: LAYER_ABSORBER_LONGNAME = 'Layer absorber' CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_LONGNAME = 'Layer thickness' ! Variable description attribute. CHARACTER(*), PARAMETER :: DESCRIPTION_ATTNAME = 'description' CHARACTER(*), PARAMETER :: PROFILE_DESCRIPTION = 'The number of the profile in the dataset' CHARACTER(*), PARAMETER :: DESCRIPTION_DESCRIPTION = 'Description of atmospheric profile and modification' CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_DESCRIPTION = 'Climatology model associated with profile date/time/location.' CHARACTER(*), PARAMETER :: YEAR_DESCRIPTION = 'Year associated with profile date' CHARACTER(*), PARAMETER :: MONTH_DESCRIPTION = 'Month of year associated with profile date' CHARACTER(*), PARAMETER :: DAY_DESCRIPTION = 'Day of month associated with profile date' CHARACTER(*), PARAMETER :: HOUR_DESCRIPTION = 'Hour of day associated with profile time' CHARACTER(*), PARAMETER :: LATITUDE_DESCRIPTION = 'Latitude of profile location' CHARACTER(*), PARAMETER :: LONGITUDE_DESCRIPTION = 'Longitude of profile location' CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_DESCRIPTION = 'Surface altitude of profile' CHARACTER(*), PARAMETER :: ABSORBER_ID_DESCRIPTION = 'HITRAN/LBLRTM/MonoRTM absorber ID for atmospheric absorbers' CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_DESCRIPTION = 'LBLRTM/MonoRTM absorber units ID' CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_DESCRIPTION = 'Level pressure' CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_DESCRIPTION = 'Level temperature' CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_DESCRIPTION = 'Level absorber amount' CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_DESCRIPTION = 'Level geopotential altitude' CHARACTER(*), PARAMETER :: LAYER_PRESSURE_DESCRIPTION = 'Average layer pressure' CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_DESCRIPTION = 'Average layer temperature' CHARACTER(*), PARAMETER :: LAYER_ABSORBER_DESCRIPTION = 'Average layer absorber amount' CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_DESCRIPTION = 'Layer thickness' ! Variable units attribute. CHARACTER(*), PARAMETER :: UNITS_ATTNAME = 'units' CHARACTER(*), PARAMETER :: PROFILE_UNITS = 'N/A' CHARACTER(*), PARAMETER :: DESCRIPTION_UNITS = 'N/A' CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_UNITS = 'N/A' CHARACTER(*), PARAMETER :: YEAR_UNITS = 'Year (C.E.)' CHARACTER(*), PARAMETER :: MONTH_UNITS = 'Month of year' CHARACTER(*), PARAMETER :: DAY_UNITS = 'Day of month' CHARACTER(*), PARAMETER :: HOUR_UNITS = 'Hour of day (24H)' CHARACTER(*), PARAMETER :: LATITUDE_UNITS = 'degress North (-90->+90)' CHARACTER(*), PARAMETER :: LONGITUDE_UNITS = 'degress East (0->360)' CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_UNITS = 'metres (m)' CHARACTER(*), PARAMETER :: ABSORBER_ID_UNITS = 'N/A' CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_UNITS = 'N/A' CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_UNITS = 'hectoPascals (hPa)' CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_UNITS = 'Kelvin (K)' CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_UNITS = 'Variable (see Absorber_Units_Id)' CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_UNITS = 'metres (m)' CHARACTER(*), PARAMETER :: LAYER_PRESSURE_UNITS = 'hectoPascals (hPa)' CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_UNITS = 'Kelvin (K)' CHARACTER(*), PARAMETER :: LAYER_ABSORBER_UNITS = 'Variable (see Absorber_Units_Id)' CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_UNITS = 'metres (m)' ! Variable fill value attribute CHARACTER(*), PARAMETER :: FILLVALUE_ATTNAME = '_FillValue' INTEGER(Long), PARAMETER :: PROFILE_FILLVALUE = 0 CHARACTER(*) , PARAMETER :: DESCRIPTION_FILLVALUE = NF90_FILL_CHAR INTEGER(Long), PARAMETER :: CLIMATOLOGY_MODEL_FILLVALUE = 0 INTEGER(Long), PARAMETER :: YEAR_FILLVALUE = 0 INTEGER(Long), PARAMETER :: MONTH_FILLVALUE = 0 INTEGER(Long), PARAMETER :: DAY_FILLVALUE = 0 INTEGER(Long), PARAMETER :: HOUR_FILLVALUE = 0 REAL(Double) , PARAMETER :: LATITUDE_FILLVALUE = -999.0_Double REAL(Double) , PARAMETER :: LONGITUDE_FILLVALUE = -999.0_Double REAL(Double) , PARAMETER :: SURFACE_ALTITUDE_FILLVALUE = -999.0_Double INTEGER(Long), PARAMETER :: ABSORBER_ID_FILLVALUE = 0 INTEGER(Long), PARAMETER :: ABSORBER_UNITS_ID_FILLVALUE = 0 REAL(Double) , PARAMETER :: LEVEL_PRESSURE_FILLVALUE = ZERO REAL(Double) , PARAMETER :: LEVEL_TEMPERATURE_FILLVALUE = ZERO REAL(Double) , PARAMETER :: LEVEL_ABSORBER_FILLVALUE = ZERO REAL(Double) , PARAMETER :: LEVEL_ALTITUDE_FILLVALUE = ZERO REAL(Double) , PARAMETER :: LAYER_PRESSURE_FILLVALUE = ZERO REAL(Double) , PARAMETER :: LAYER_TEMPERATURE_FILLVALUE = ZERO REAL(Double) , PARAMETER :: LAYER_ABSORBER_FILLVALUE = ZERO REAL(Double) , PARAMETER :: LAYER_DELTA_Z_FILLVALUE = ZERO ! Variable datatypes INTEGER, PARAMETER :: PROFILE_TYPE = NF90_INT INTEGER, PARAMETER :: DESCRIPTION_TYPE = NF90_CHAR INTEGER, PARAMETER :: CLIMATOLOGY_MODEL_TYPE = NF90_INT INTEGER, PARAMETER :: YEAR_TYPE = NF90_INT INTEGER, PARAMETER :: MONTH_TYPE = NF90_INT INTEGER, PARAMETER :: DAY_TYPE = NF90_INT INTEGER, PARAMETER :: HOUR_TYPE = NF90_INT INTEGER, PARAMETER :: LATITUDE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LONGITUDE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: SURFACE_ALTITUDE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: ABSORBER_ID_TYPE = NF90_INT INTEGER, PARAMETER :: ABSORBER_UNITS_ID_TYPE = NF90_INT INTEGER, PARAMETER :: LEVEL_PRESSURE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LEVEL_TEMPERATURE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LEVEL_ABSORBER_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LEVEL_ALTITUDE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LAYER_PRESSURE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LAYER_TEMPERATURE_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LAYER_ABSORBER_TYPE = NF90_DOUBLE INTEGER, PARAMETER :: LAYER_DELTA_Z_TYPE = NF90_DOUBLE CONTAINS !################################################################################## !################################################################################## !## ## !## ## PUBLIC MODULE ROUTINES ## ## !## ## !################################################################################## !################################################################################## !------------------------------------------------------------------------------ !:sdoc+: ! ! NAME: ! AtmProfile_netCDF_InquireFile ! ! PURPOSE: ! Function to inquire AtmProfile object netCDF format files. ! ! CALLING SEQUENCE: ! Error_Status = AtmProfile_netCDF_InquireFile( & ! Filename , & ! n_Profiles = n_Profiles , & ! Release = Release , & ! Title = Title , & ! History = History , & ! Comment = Comment , & ! Profile_Set = Profile_Set ) ! ! INPUTS: ! Filename: Character string specifying the name of the ! AtmProfile data file to inquire. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN) ! ! OPTIONAL OUTPUTS: ! n_Profiles: The number of profiles contained in the AtmProfile ! file. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! Release: The release number of the AtmProfile file. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! Title: Character string written into the TITLE global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! History: Character string written into the HISTORY global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! Comment: Character string written into the COMMENT global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! Profile_Set: Character string written into the PROFILE_SET global ! attribute field of the netCDF AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! FUNCTION RESULT: ! Error_Status: The return value is an integer defining the error ! status. The error codes are defined in the ! Message_Handler module. ! If == SUCCESS the file inquiry was successful ! == FAILURE an error occurred. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! !:sdoc-: !------------------------------------------------------------------------------ FUNCTION AtmProfile_netCDF_InquireFile( & Filename , & ! Input n_Profiles , & ! Optional output Release , & ! Optional Output Title , & ! Optional output History , & ! Optional output Comment , & ! Optional output Profile_Set) & ! Optional output RESULT( err_stat ) ! Arguments CHARACTER(*), INTENT(IN) :: Filename INTEGER , OPTIONAL, INTENT(OUT) :: n_Profiles INTEGER , OPTIONAL, INTENT(OUT) :: Release CHARACTER(*), OPTIONAL, INTENT(OUT) :: Title CHARACTER(*), OPTIONAL, INTENT(OUT) :: History CHARACTER(*), OPTIONAL, INTENT(OUT) :: Comment CHARACTER(*), OPTIONAL, INTENT(OUT) :: Profile_Set ! Function result INTEGER :: err_stat ! Function parameters CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'AtmProfile_netCDF_IO::InquireFile' ! Function variables CHARACTER(ML) :: msg LOGICAL :: close_file INTEGER :: nf90_stat INTEGER :: fileid INTEGER :: groupid(MAX_N_GROUPS) INTEGER :: n_groups ! Set up err_stat = SUCCESS close_file = .FALSE. ! Open the file nf90_stat = NF90_OPEN( Filename,NF90_NOWRITE,fileid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error opening '//TRIM(Filename)//' for read access - '// & TRIM(NF90_STRERROR( nf90_stat )) CALL Inquire_Cleanup(); RETURN END IF ! ...Close the file if any error from here on close_file = .TRUE. ! Get the number of profiles nf90_stat = NF90_INQ_GRPS( fileid,n_groups,groupid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error inquiring group IDs in '//TRIM(Filename)//' - '// & TRIM(NF90_STRERROR( nf90_stat )) CALL Inquire_Cleanup(); RETURN END IF ! Get the global attributes err_stat = ReadGAtts( & fileid , & Release = Release , & Title = Title , & History = History , & Comment = Comment , & Profile_Set = Profile_Set ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error reading global attributes from '//TRIM(Filename) CALL Inquire_Cleanup(); RETURN END IF ! Close the file nf90_stat = NF90_CLOSE( fileid ) close_file = .FALSE. IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error closing input file - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Inquire_Cleanup(); RETURN END IF ! Set the return values IF ( PRESENT(n_Profiles) ) n_Profiles = n_groups CONTAINS SUBROUTINE Inquire_CleanUp() IF ( close_file ) THEN nf90_stat = NF90_CLOSE( fileid ) IF ( nf90_stat /= NF90_NOERR ) & msg = TRIM(msg)//'; Error closing input file during error cleanup.' END IF err_stat = FAILURE CALL Display_Message( ROUTINE_NAME,msg,err_stat ) END SUBROUTINE Inquire_CleanUp END FUNCTION AtmProfile_netCDF_InquireFile !------------------------------------------------------------------------------ !:sdoc+: ! ! NAME: ! AtmProfile_netCDF_WriteFile ! ! PURPOSE: ! Function to write AtmProfile object files in netCDF format. ! ! CALLING SEQUENCE: ! Error_Status = AtmProfile_netCDF_WriteFile( & ! AtmProfile, & ! Filename , & ! Quiet = Quiet , & ! Clobber = Clobber , & ! Title = Title , & ! History = History , & ! Comment = Comment , & ! Profile_Set = Profile_Set ) ! ! OBJECTS: ! AtmProfile: AtmProfile object array containing the data to write ! to file. ! UNITS: N/A ! TYPE: AtmProfile_type ! DIMENSION: Rank-1 ! ATTRIBUTES: INTENT(IN) ! ! INPUTS: ! Filename: Character string specifying the name of the ! AtmProfile data file to write. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN) ! ! OPTIONAL INPUTS: ! Quiet: Set this logical argument to suppress INFORMATION ! messages being printed to stdout ! If == .FALSE., INFORMATION messages are OUTPUT [DEFAULT]. ! == .TRUE., INFORMATION messages are SUPPRESSED. ! If not specified, default is .FALSE. ! UNITS: N/A ! TYPE: LOGICAL ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! Clobber: Set this logical argument to overwrite an existing filename ! If == .FALSE., an existing file is NOT overwritten, and the ! function returns with an error [DEFAULT]. ! == .TRUE., an existing file is overwritten with the new data. ! If not specified, default is .FALSE. ! UNITS: N/A ! TYPE: LOGICAL ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! Title: Character string written into the TITLE global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! History: Character string written into the HISTORY global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! Comment: Character string written into the COMMENT global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! Profile_Set: Character string written into the PROFILE_SET global ! attribute field of the netCDF AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! FUNCTION RESULT: ! Error_Status: The return value is an integer defining the error status. ! The error codes are defined in the Message_Handler module. ! If == SUCCESS the data write was successful ! == FAILURE an unrecoverable error occurred. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! !:sdoc-: !------------------------------------------------------------------------------ FUNCTION AtmProfile_netCDF_WriteFile( & AtmProfile , & ! Input Filename , & ! Input Quiet , & ! Optional input Clobber , & ! Optional input Title , & ! Optional input History , & ! Optional input Comment , & ! Optional input Profile_Set, & ! Optional input Debug ) & ! Optional input (Debug output control) RESULT( err_stat ) ! Arguments TYPE(AtmProfile_type), INTENT(IN) :: AtmProfile(:) CHARACTER(*), INTENT(IN) :: Filename LOGICAL, OPTIONAL, INTENT(IN) :: Quiet LOGICAL, OPTIONAL, INTENT(IN) :: Clobber CHARACTER(*), OPTIONAL, INTENT(IN) :: Title CHARACTER(*), OPTIONAL, INTENT(IN) :: History CHARACTER(*), OPTIONAL, INTENT(IN) :: Comment CHARACTER(*), OPTIONAL, INTENT(IN) :: Profile_Set LOGICAL, OPTIONAL, INTENT(IN) :: Debug ! Function result INTEGER :: err_stat ! Local parameters CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'AtmProfile_netCDF_IO::WriteFile' ! Local variables CHARACTER(ML) :: msg LOGICAL :: noisy LOGICAL :: no_clobber LOGICAL :: new_file INTEGER :: nf90_stat INTEGER :: clobber_flag INTEGER :: fileid INTEGER :: n TYPE(AtmProfile_type) :: dummy ! Set up err_stat = SUCCESS ! ...Check structure IF ( .NOT. ALL(AtmProfile_Associated( AtmProfile )) ) THEN msg = 'AtmProfile structure is empty. Nothing to do!' CALL Write_CleanUp(); RETURN END IF ! ...Check Quiet argument noisy = .TRUE. IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet ! ...Check Clobber argument no_clobber = .TRUE. IF ( PRESENT(Clobber) ) no_clobber = .NOT. Clobber ! ...Override Quiet settings if debug set. IF ( PRESENT(Debug) ) THEN IF ( Debug ) noisy = .TRUE. END IF ! Open the file new_file = .TRUE. ! ...Set the clobber flag IF ( no_clobber ) THEN clobber_flag = NF90_NOCLOBBER ELSE clobber_flag = NF90_CLOBBER END IF ! ...Create the file nf90_stat = NF90_CREATE( & Filename, & clobber_flag+NF90_NETCDF4, & fileid ) IF ( nf90_stat /= NF90_NOERR ) THEN ! Was creation failure due to exisitng file? IF ( nf90_stat == NF90_EEXIST ) THEN ! ...Yes, so just open it nf90_stat = NF90_OPEN( & Filename , & NF90_WRITE, & ! Test with NF90_SHARE? fileid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error opening existing file, '//TRIM(Filename)//', for write access - '//& TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF new_file = .FALSE. ELSE ! ...No, so toss an error msg = 'Error creating '//TRIM(Filename)//' - '//& TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF END IF ! Write the global attributes IF ( new_file ) THEN err_stat = WriteGAtts( & fileid, & Release = dummy%Release, & Title = Title , & History = History , & Comment = Comment , & Profile_Set = Profile_Set ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error writing global attribute to '//TRIM(Filename) CALL Write_Cleanup(); RETURN END IF END IF ! Write each entry as a separate group Profile_loop: DO n = 1, SIZE(AtmProfile) IF ( .NOT. AtmProfile_Associated( AtmProfile(n) ) ) CYCLE Profile_loop err_stat = AtmProfile_netCDF_WriteGroup( & AtmProfile(n), & fileid, & Quiet = Quiet, & Debug = Debug ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error writing group to '//TRIM(Filename) CALL Write_Cleanup(); RETURN END IF END DO Profile_loop ! Close the file nf90_stat = NF90_CLOSE( fileid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error closing output file - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF CONTAINS SUBROUTINE Write_CleanUp() nf90_stat = NF90_CLOSE( fileid ) err_stat = FAILURE CALL Display_Message( ROUTINE_NAME,msg,err_stat ) END SUBROUTINE Write_CleanUp END FUNCTION AtmProfile_netCDF_WriteFile !------------------------------------------------------------------------------ !:sdoc+: ! ! NAME: ! AtmProfile_netCDF_WriteGroup ! ! PURPOSE: ! Function to write a AtmProfile object group to a netCDF file. ! ! CALLING SEQUENCE: ! Error_Status = AtmProfile_netCDF_WriteGroup( & ! AtmProfile , & ! FileId , & ! GroupName = GroupName, & ! Quiet = Quiet ) ! ! OBJECTS: ! AtmProfile: AtmProfile object containing the group data to write ! to file. ! UNITS: N/A ! TYPE: AtmProfile_type ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN) ! ! INPUTS: ! FileId: The netCDF Id for the file to contain the AtmProfile ! group. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN) ! ! OPTIONAL INPUTS: ! GroupName: The name of the group to write to file. If not specified ! the default group name is "atmprofile-X" where "X" is the ! profile number associated with the AtmProfile object. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! Quiet: Set this logical argument to suppress INFORMATION ! messages being printed to stdout ! If == .FALSE., INFORMATION messages are OUTPUT [DEFAULT]. ! == .TRUE., INFORMATION messages are SUPPRESSED. ! If not specified, default is .FALSE. ! UNITS: N/A ! TYPE: LOGICAL ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! FUNCTION RESULT: ! Error_Status: The return value is an integer defining the error status. ! The error codes are defined in the Message_Handler module. ! If == SUCCESS the group data write was successful ! == FAILURE an unrecoverable error occurred. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! !:sdoc-: !------------------------------------------------------------------------------ FUNCTION AtmProfile_netCDF_WriteGroup( & AtmProfile, & ! Input FileId , & ! Input GroupName , & ! Optional input Quiet , & ! Optional input Debug ) & ! Optional input (Debug output control) RESULT( err_stat ) ! Arguments TYPE(AtmProfile_type), INTENT(IN) :: AtmProfile INTEGER, INTENT(IN) :: FileId CHARACTER(*), OPTIONAL, INTENT(IN) :: GroupName LOGICAL, OPTIONAL, INTENT(IN) :: Quiet LOGICAL, OPTIONAL, INTENT(IN) :: Debug ! Function result INTEGER :: err_stat ! Local parameters CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'AtmProfile_netCDF_IO::WriteGroup' ! Local variables CHARACTER(ML) :: msg CHARACTER(ML) :: group_name LOGICAL :: noisy INTEGER :: nf90_stat INTEGER :: groupid INTEGER :: n_levels_dimid INTEGER :: n_layers_dimid INTEGER :: n_absorbers_dimid INTEGER :: pl_strlen_dimid ! Setup err_stat = SUCCESS ! ...Check structure IF ( .NOT. (AtmProfile_Associated( AtmProfile )) ) THEN msg = 'AtmProfile structure is empty. Nothing to do!' CALL Write_CleanUp(); RETURN END IF IF ( .NOT. AtmProfile_ValidRelease( AtmProfile ) ) THEN msg = 'AtmProfile Release check failed.' CALL Write_Cleanup(); RETURN END IF ! ...Check GroupName argument, defining default. WRITE(group_name,'("atmprofile-",i0)') AtmProfile%Profile IF ( PRESENT(GroupName) ) THEN group_name = ADJUSTL(GroupName) END IF ! ...Check Quiet argument noisy = .TRUE. IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet ! ...Override Quiet settings if debug set. IF ( PRESENT(Debug) ) THEN IF ( Debug ) noisy = .TRUE. END IF ! Create a new group for the AtmProfile data nf90_stat = NF90_DEF_GRP( & fileid, & group_name, & groupid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error creating '//TRIM(group_name)//' group - '//& ' - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF ! Write the group attributes err_stat = WriteGAtts( & groupid, & Version = AtmProfile%Version ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error writing Version attribute for the '//TRIM(group_name)//& ' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF ! Define the dimensions for the group err_stat = DefineDimensions( & AtmProfile , & groupid , & n_levels_dimid , & n_layers_dimid , & n_absorbers_dimid, & pl_strlen_dimid ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error defining dimensions for the '//TRIM(group_name)//& ' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF ! Define the variables for the group err_stat = DefineVariables( & groupid , & n_levels_dimid , & n_layers_dimid , & n_absorbers_dimid, & pl_strlen_dimid ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error defining variables for the '//TRIM(group_name)//& ' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF ! Take netCDF file out of define mode nf90_stat = NF90_ENDDEF( fileid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error taking file out of define mode to write the '//& TRIM(group_name)//' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF ! Write the variables for the group err_stat = WriteVariables( AtmProfile, groupid ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error writing variables for the '//TRIM(group_name)//& ' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF ! Put netCDF file back into define mode nf90_stat = NF90_REDEF( fileid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error putting file back into define mode after writing the '//& TRIM(group_name)//' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Write_Cleanup(); RETURN END IF ! Output an info message IF ( noisy ) THEN CALL AtmProfile_Info( AtmProfile,msg ) CALL Display_Message( ROUTINE_NAME,msg,INFORMATION ) END IF CONTAINS SUBROUTINE Write_CleanUp() nf90_stat = NF90_CLOSE( fileid ) err_stat = FAILURE CALL Display_Message( ROUTINE_NAME,msg,err_stat ) END SUBROUTINE Write_CleanUp END FUNCTION AtmProfile_netCDF_WriteGroup !------------------------------------------------------------------------------ !:sdoc+: ! ! NAME: ! AtmProfile_netCDF_ReadFile ! ! PURPOSE: ! Function to read AtmProfile object files in netCDF format. ! ! CALLING SEQUENCE: ! Error_Status = AtmProfile_netCDF_ReadFile( & ! AtmProfile, & ! Filename , & ! Quiet = Quiet , & ! Title = Title , & ! History = History , & ! Comment = Comment , & ! Profile_Set = Profile_Set ) ! ! OBJECTS: ! AtmProfile: AtmProfile object array to contain the data read ! from file. ! UNITS: N/A ! TYPE: AtmProfile_type ! DIMENSION: Rank-1 ! ATTRIBUTES: INTENT(OUT), ALLOCATABLE ! ! INPUTS: ! Filename: Character string specifying the name of the ! AtmProfile data file to write. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN) ! ! OPTIONAL INPUTS: ! Quiet: Set this logical argument to suppress INFORMATION ! messages being printed to stdout ! If == .FALSE., INFORMATION messages are OUTPUT [DEFAULT]. ! == .TRUE., INFORMATION messages are SUPPRESSED. ! If not specified, default is .FALSE. ! UNITS: N/A ! TYPE: LOGICAL ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! OPTIONAL OUTPUTS: ! Title: Character string written into the TITLE global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! History: Character string written into the HISTORY global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! Comment: Character string written into the COMMENT global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! Profile_Set: Character string written into the PROFILE_SET global ! attribute field of the AtmProfile file. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT), OPTIONAL ! ! FUNCTION RESULT: ! Error_Status: The return value is an integer defining the error status. ! The error codes are defined in the Message_Handler module. ! If == SUCCESS the data write was successful ! == FAILURE an unrecoverable error occurred. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! !:sdoc-: !------------------------------------------------------------------------------ FUNCTION AtmProfile_netCDF_ReadFile( & AtmProfile , & ! Output Filename , & ! Input Quiet , & ! Optional input Title , & ! Optional output History , & ! Optional output Comment , & ! Optional output Profile_Set, & ! Optional output Debug ) & ! Optional input (Debug output control) RESULT( err_stat ) ! Arguments TYPE(AtmProfile_type), ALLOCATABLE, INTENT(OUT) :: AtmProfile(:) CHARACTER(*), INTENT(IN) :: Filename LOGICAL, OPTIONAL, INTENT(IN) :: Quiet CHARACTER(*), OPTIONAL, INTENT(OUT) :: Title CHARACTER(*), OPTIONAL, INTENT(OUT) :: History CHARACTER(*), OPTIONAL, INTENT(OUT) :: Comment CHARACTER(*), OPTIONAL, INTENT(OUT) :: Profile_Set LOGICAL, OPTIONAL, INTENT(IN) :: Debug ! Function result INTEGER :: err_stat ! Function parameters CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'AtmProfile_netCDF_IO::ReadFile' ! Function variables CHARACTER(ML) :: msg CHARACTER(ML) :: groupname LOGICAL :: close_file LOGICAL :: noisy INTEGER :: alloc_stat INTEGER :: nf90_stat INTEGER :: fileid INTEGER :: n, n_profiles INTEGER, ALLOCATABLE :: groupid(:) TYPE(AtmProfile_type) :: dummy ! Set up err_stat = SUCCESS close_file = .FALSE. ! ...Check that the file exists IF ( .NOT. File_Exists(Filename) ) THEN msg = 'File '//TRIM(Filename)//' not found.' CALL Read_Cleanup(); RETURN END IF ! ...Check Quiet argument noisy = .TRUE. IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet ! Inquire the file to get the number of profiles err_stat = AtmProfile_netCDF_InquireFile( & Filename, & n_Profiles = n_profiles ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error obtaining profile count from '//TRIM(Filename) CALL Read_Cleanup(); RETURN END IF ! Allocate the output and group id structure ALLOCATE( AtmProfile(n_profiles), groupid(n_profiles), & STAT=alloc_stat ) IF ( alloc_stat /= 0 ) THEN msg = 'Error allocating arrays' CALL Read_Cleanup(); RETURN END IF ! Open the file for reading nf90_stat = NF90_OPEN( Filename,NF90_NOWRITE,fileid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error opening '//TRIM(Filename)//' for read access - '//& TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF ! ...Close the file if any error from here on close_file = .TRUE. ! Read the global attributes err_stat = ReadGAtts( & fileid , & Release = dummy%Release, & Title = Title , & History = History, & Comment = Comment, & Profile_Set = Profile_Set ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error reading global attribute from '//TRIM(Filename) CALL Read_Cleanup(); RETURN END IF ! ...Check if release is valid IF ( .NOT. AtmProfile_ValidRelease( dummy ) ) THEN msg = 'Release check failed.' CALL Read_Cleanup(); RETURN END IF ! Get the list of groups and their id nf90_stat = NF90_INQ_GRPS( fileid, n, groupid ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error inquiring '//TRIM(Filename)//' for group ids - '//& TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF ! Begin main profile loop Profile_loop: DO n = 1, n_profiles ! Get the current group's name nf90_stat = NF90_INQ_GRPNAME( groupid(n), groupname ) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error inquiring '//TRIM(Filename)//' for the current group name - '//& TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF ! Read the current group's data err_stat = AtmProfile_netCDF_ReadGroup( & AtmProfile(n), & fileid , & GroupName = groupname, & Quiet = Quiet , & Debug = Debug ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error reading '//TRIM(groupname)//' group from '//TRIM(Filename) CALL Read_Cleanup(); RETURN END IF END DO Profile_loop ! Close the file nf90_stat = NF90_CLOSE( fileid ); close_file = .FALSE. IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error closing output file - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF CONTAINS SUBROUTINE Read_CleanUp() IF ( close_file ) THEN nf90_stat = NF90_CLOSE( fileid ) IF ( nf90_stat /= NF90_NOERR ) & msg = TRIM(msg)//'; Error closing input file during error cleanup- '//& TRIM(NF90_STRERROR( nf90_stat )) END IF IF ( ALLOCATED(AtmProfile) ) THEN CALL AtmProfile_Destroy( AtmProfile ) DEALLOCATE(AtmProfile) END IF IF ( ALLOCATED(groupid) ) DEALLOCATE(groupid) err_stat = FAILURE CALL Display_Message( ROUTINE_NAME,msg,err_stat ) END SUBROUTINE Read_CleanUp END FUNCTION AtmProfile_netCDF_ReadFile !------------------------------------------------------------------------------ !:sdoc+: ! ! NAME: ! AtmProfile_netCDF_ReadGroup ! ! PURPOSE: ! Function to read a AtmProfile object group from a netCDF file. ! ! CALLING SEQUENCE: ! Error_Status = AtmProfile_netCDF_ReadGroup( & ! AtmProfile , & ! FileId , & ! GroupName = GroupName, & ! Quiet = Quiet ) ! ! OBJECTS: ! AtmProfile: AtmProfile object to contain the group data read ! from file. ! UNITS: N/A ! TYPE: AtmProfile_type ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT) ! ! INPUTS: ! FileId: The netCDF Id for the file containing the AtmProfile ! group. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN) ! ! OPTIONAL INPUTS: ! GroupName: The name of the group to read from file. If not specified ! the default group name is 'atmprofile' ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! Quiet: Set this logical argument to suppress INFORMATION ! messages being printed to stdout ! If == .FALSE., INFORMATION messages are OUTPUT [DEFAULT]. ! == .TRUE., INFORMATION messages are SUPPRESSED. ! If not specified, default is .FALSE. ! UNITS: N/A ! TYPE: LOGICAL ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(IN), OPTIONAL ! ! FUNCTION RESULT: ! Error_Status: The return value is an integer defining the error status. ! The error codes are defined in the Message_Handler module. ! If == SUCCESS the group data read was successful ! == FAILURE an unrecoverable error occurred. ! UNITS: N/A ! TYPE: INTEGER ! DIMENSION: Scalar ! !:sdoc-: !------------------------------------------------------------------------------ FUNCTION AtmProfile_netCDF_ReadGroup( & AtmProfile, & ! Output FileId , & ! Input GroupName , & ! Optional input Quiet , & ! Optional input Debug ) & ! Optional input (Debug output control) RESULT( err_stat ) ! Arguments TYPE(AtmProfile_type), INTENT(OUT) :: AtmProfile INTEGER, INTENT(IN) :: FileId CHARACTER(*), OPTIONAL, INTENT(IN) :: GroupName LOGICAL, OPTIONAL, INTENT(IN) :: Quiet LOGICAL, OPTIONAL, INTENT(IN) :: Debug ! Function result INTEGER :: err_stat ! Local parameters CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'AtmProfile_netCDF_IO::ReadGroup' ! Local variables CHARACTER(ML) :: msg CHARACTER(ML) :: group_name LOGICAL :: noisy INTEGER :: nf90_stat INTEGER :: groupid INTEGER :: n_levels INTEGER :: n_layers INTEGER :: n_absorbers INTEGER :: pl_strlen ! Setup err_stat = SUCCESS ! ...Check GroupName argument, defining default. group_name = 'atmprofile' IF ( PRESENT(GroupName) ) THEN group_name = ADJUSTL(GroupName) END IF ! ...Check Quiet argument noisy = .TRUE. IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet ! ...Override Quiet settings if debug set. IF ( PRESENT(Debug) ) THEN IF ( Debug ) noisy = .TRUE. END IF ! Get the group id nf90_stat = NF90_INQ_GRP_NCID(fileid, group_name, groupid) IF ( nf90_stat /= NF90_NOERR ) THEN msg = 'Error inquiring '//TRIM(group_name)//' group for its group id - '//& TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF ! Get the group dimensions err_stat = ReadDimensions( & GroupId , & n_levels , & n_layers , & n_absorbers, & pl_strlen ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error reading dimensions for the '//TRIM(group_name)//& ' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF ! Allocate the current AtmProfile object CALL AtmProfile_Create( & AtmProfile , & n_layers , & n_absorbers ) IF ( .NOT. AtmProfile_Associated(AtmProfile) ) THEN msg = 'Error allocating output AtmProfile for group '//TRIM(group_name) CALL Read_Cleanup(); RETURN END IF ! Read the group attributes err_stat = ReadGAtts( & groupid, & Version = AtmProfile%Version ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error reading Version attribute for the '//TRIM(group_name)//& ' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF ! Read the variables for the group err_stat = ReadVariables( AtmProfile, groupid ) IF ( err_stat /= SUCCESS ) THEN msg = 'Error reading variables for the '//TRIM(group_name)//& ' group - '//TRIM(NF90_STRERROR( nf90_stat )) CALL Read_Cleanup(); RETURN END IF ! Convert absorber id info CALL AtmProfile_Absorber_Name(AtmProfile) CALL AtmProfile_Absorber_Units_Name(AtmProfile) CALL AtmProfile_Absorber_Units_LBL(AtmProfile) ! Output an info message IF ( noisy ) THEN CALL AtmProfile_Info( AtmProfile,msg ) CALL Display_Message( ROUTINE_NAME,msg,INFORMATION ) END IF CONTAINS SUBROUTINE Read_CleanUp() CALL AtmProfile_Destroy(AtmProfile) err_stat = FAILURE CALL Display_Message( ROUTINE_NAME,msg,err_stat ) END SUBROUTINE Read_CleanUp END FUNCTION AtmProfile_netCDF_ReadGroup !-------------------------------------------------------------------------------- !:sdoc+: ! ! NAME: ! AtmProfile_netCDF_IOVersion ! ! PURPOSE: ! Subroutine to return the module version information. ! ! CALLING SEQUENCE: ! CALL AtmProfile_netCDF_IOVersion( Id ) ! ! OUTPUT ARGUMENTS: ! Id: Character string containing the version Id information ! for the module. ! UNITS: N/A ! TYPE: CHARACTER(*) ! DIMENSION: Scalar ! ATTRIBUTES: INTENT(OUT) ! !:sdoc-: !-------------------------------------------------------------------------------- SUBROUTINE AtmProfile_netCDF_IOVersion( Id ) CHARACTER(*), INTENT(OUT) :: Id Id = MODULE_VERSION_ID END SUBROUTINE AtmProfile_netCDF_IOVersion !################################################################################## !################################################################################## !## ## !## ## PRIVATE MODULE ROUTINES ## ## !## ## !################################################################################## !################################################################################## INCLUDE 'AtmProfile_netCDF_IO.inc' END MODULE AtmProfile_netCDF_IO
{"hexsha": "848530aec7eac537d3f68bf47964451655109e92", "size": 51877, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/TauProd/AtmProfile/AtmProfile_netCDF_IO.f90", "max_stars_repo_name": "hsbadr/crtm", "max_stars_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-19T10:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:42:18.000Z", "max_issues_repo_path": "src/TauProd/AtmProfile/AtmProfile_netCDF_IO.f90", "max_issues_repo_name": "hsbadr/crtm", "max_issues_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-05T21:04:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T18:23:10.000Z", "max_forks_repo_path": "src/TauProd/AtmProfile/AtmProfile_netCDF_IO.f90", "max_forks_repo_name": "hsbadr/crtm", "max_forks_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:42:45.000Z", "avg_line_length": 38.9466966967, "max_line_length": 126, "alphanum_fraction": 0.5420321144, "num_tokens": 12037}
(* Copyright 2018 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. *) theory spinlock_lock_mem imports spinlock begin text \<open>Up to two locales per function in the binary.\<close> locale spinlock_lock_function = spinlock_context + fixes rsp\<^sub>0 rbp\<^sub>0 current_task tid a spinlock_lock_ret :: \<open>64 word\<close> and v\<^sub>0 :: \<open>8 word\<close> and blocks :: \<open>(nat \<times> 64 word \<times> nat) set\<close> assumes seps: \<open>seps blocks\<close> and current_task: \<open>the (label_to_address assembly ''current_task'') = current_task\<close> and masters: \<open>master blocks (a, 1) 0\<close> \<open>master blocks (rsp\<^sub>0, 8) 1\<close> \<open>master blocks (rsp\<^sub>0-8, 8) 2\<close> \<open>master blocks (rsp\<^sub>0-16, 8) 3\<close> \<open>master blocks (rsp\<^sub>0-24, 8) 4\<close> \<open>master blocks (rsp\<^sub>0-32, 8) 5\<close> \<open>master blocks (current_task, 8) 6\<close> \<open>master blocks (tid, 4) 7\<close> and ret_address: \<open>outside spinlock_lock_ret 308 468\<close> \<comment> \<open>Only works for non-recursive functions.\<close> begin text \<open> The Floyd invariant expresses for some locations properties that are invariably true. Simply expresses that a byte in the memory remains untouched. \<close> definition pp_\<Theta> :: \<open>_ \<Rightarrow> floyd_invar\<close> where \<open>pp_\<Theta> s \<equiv> [ \<comment> \<open>precondition\<close> boffset+308 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0 \<and> regs \<sigma> rbp = rbp\<^sub>0 \<and> regs \<sigma> rdi = s \<and> \<sigma> \<turnstile> *[current_task,8] = tid \<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+spinlock_lock_ret \<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0, boffset+402 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-32 \<and> regs \<sigma> rbp = rsp\<^sub>0-8 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-32,8] = s \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+spinlock_lock_ret \<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0, boffset+407 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-32 \<and> regs \<sigma> rbp = rsp\<^sub>0-8 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-32,8] = s \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+spinlock_lock_ret \<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0, boffset+426 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-32 \<and> regs \<sigma> rbp = rsp\<^sub>0-8 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-32,8] = s \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+spinlock_lock_ret \<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0, boffset+431 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-32 \<and> regs \<sigma> rbp = rsp\<^sub>0-8 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-32,8] = s \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+spinlock_lock_ret \<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0, boffset+466 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-32 \<and> regs \<sigma> rbp = rsp\<^sub>0-8 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0 \<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+spinlock_lock_ret \<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0, \<comment> \<open>postcondition\<close> boffset+spinlock_lock_ret \<mapsto> \<lambda>\<sigma>. \<sigma> \<turnstile> *[a,1] = v\<^sub>0 \<and> regs \<sigma> rsp = rsp\<^sub>0+8 \<and> regs \<sigma> rbp = rbp\<^sub>0 ]\<close> text \<open>Adding some rules to the simplifier to simplify proofs.\<close> schematic_goal pp_\<Theta>_zero[simp]: \<open>pp_\<Theta> s boffset = ?x\<close> unfolding pp_\<Theta>_def by simp schematic_goal pp_\<Theta>_numeral_l[simp]: \<open>pp_\<Theta> s (n + boffset) = ?x\<close> unfolding pp_\<Theta>_def by simp schematic_goal pp_\<Theta>_numeral_r[simp]: \<open>pp_\<Theta> s (boffset + n) = ?x\<close> unfolding pp_\<Theta>_def by simp end locale spinlock_lock_function_calls = spinlock_lock_function + fixes atomic_int64_read atomic_int64_inc :: \<open>state \<Rightarrow> state\<close> assumes atomic_int64_read_def: \<open>exec_instr assembly semantics tasks_flag_annotation (Unary (IS_8088 Call) (Immediate (ImmLabel ''atomic_int64_read''))) 5 = atomic_int64_read\<close> and atomic_int64_read_incr: \<open>regs (atomic_int64_read \<sigma>) rip = regs \<sigma> rip + 5\<close> and atomic_int64_read426: \<open>the (pp_\<Theta> s (boffset+426)) \<sigma> \<Longrightarrow> the (pp_\<Theta> s (boffset+431)) (atomic_int64_read \<sigma>)\<close> and atomic_int64_inc_def: \<open>exec_instr assembly semantics tasks_flag_annotation (Unary (IS_8088 Call) (Immediate (ImmLabel ''atomic_int64_inc''))) 5 = atomic_int64_inc\<close> and atomic_int64_inc_incr: \<open>regs (atomic_int64_inc \<sigma>) rip = regs \<sigma> rip + 5\<close> and atomic_int64_inc402: \<open>the (pp_\<Theta> s (boffset+402)) \<sigma> \<Longrightarrow> the (pp_\<Theta> s (boffset+407)) (atomic_int64_inc \<sigma>)\<close> begin lemma rewrite_spinlock_lock_mem: assumes \<open>master blocks (s + 16, 4) 8\<close> \<open>master blocks (s + 20, 4) 9\<close> shows \<open>is_std_invar spinlock_lock_ret (floyd.invar spinlock_lock_ret (pp_\<Theta> s))\<close> proof - note masters = masters assms show ?thesis text \<open>Boilerplate code to start the VCG\<close> apply (rule floyd_invarI) apply (rewrite at \<open>floyd_vcs spinlock_lock_ret \<hole> _\<close> pp_\<Theta>_def) apply (intro floyd_vcsI) text \<open>Subgoal for rip = boffset+308\<close> subgoal premises prems for \<sigma> text \<open>Insert relevant knowledge\<close> apply (insert prems seps ret_address current_task) text \<open>Apply VCG/symb.\ execution\<close> apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+ done text \<open>Subgoal for rip = boffset+402\<close> subgoal premises prems for \<sigma> text \<open>Insert relevant knowledge\<close> apply (insert prems seps ret_address current_task) text \<open>Discharge function call\<close> apply (rule wps_rls) apply (simp (no_asm_simp) add: lookup_table_def instr_index_def entry_size_def) apply (rewrite_one_let' add: assembly_def) apply (rewrite_one_let' add: atomic_int64_inc_def) apply (rule floyd_invarI'[of _ _ \<open>the (pp_\<Theta> s (boffset+407))\<close>]) apply (simp add: atomic_int64_inc_incr) using atomic_int64_inc402 apply simp done text \<open>Subgoal for rip = boffset+407\<close> subgoal premises prems for \<sigma> text \<open>Insert relevant knowledge\<close> apply (insert prems seps ret_address current_task) text \<open>Apply VCG/symb.\ execution\<close> apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+ done text \<open>Subgoal for rip = boffset+426\<close> subgoal premises prems for \<sigma> text \<open>Insert relevant knowledge\<close> apply (insert prems seps ret_address current_task) text \<open>Discharge function call\<close> apply (rule wps_rls) apply (simp (no_asm_simp) add: lookup_table_def instr_index_def entry_size_def) apply (rewrite_one_let' add: assembly_def) apply (rewrite_one_let' add: atomic_int64_read_def) apply (rule floyd_invarI'[of _ _ \<open>the (pp_\<Theta> s (boffset+431))\<close>]) apply (simp add: atomic_int64_read_incr) using atomic_int64_read426 apply simp done text \<open>Subgoal for rip = boffset+431\<close> subgoal premises prems for \<sigma> proof - have x: \<open>regs \<sigma> rbp = rsp\<^sub>0 - 8\<close> using prems by simp show ?thesis text \<open>Insert relevant knowledge\<close> apply (insert prems seps ret_address current_task) text \<open>Apply VCG/symb.\ execution\<close> apply ((restart_symbolic_execution add: x)?, (symbolic_execution add: x masters: masters)+, (finish_symbolic_execution add: x masters: masters)?)+ done qed text \<open>Subgoal for rip = boffset+466\<close> subgoal premises prems for \<sigma> text \<open>Insert relevant knowledge\<close> apply (insert prems seps ret_address current_task) text \<open>Apply VCG/symb.\ execution\<close> apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+ done text \<open>Trivial ending subgoal.\<close> subgoal by simp done qed end end
{"author": "ssrg-vt", "repo": "Luce-src", "sha": "f7f1ef0fd07bba48bcb3d5e32404db6013a5f1bc", "save_path": "github-repos/isabelle/ssrg-vt-Luce-src", "path": "github-repos/isabelle/ssrg-vt-Luce-src/Luce-src-f7f1ef0fd07bba48bcb3d5e32404db6013a5f1bc/safecomp2019_artifact/current_work/examples/hermitcore/spinlock/spinlock_lock_mem.thy"}
import os, cv2 import pickle import torch import torchvision from libs.Loader import Dataset import numpy as np from Networks.StyleNet import StyleAugmentation from torchvision import transforms if __name__ == "__main__": os.environ["CUDA_VISIBLE_DEVICES"]="0" print_idx = True batch_size = 8 num_examples = 4 alpha_list = np.arange(0.1,1,0.2) imsz = [256, 128, 96] for sz in imsz: for alpha in alpha_list: content_dataset = Dataset('Database/COCO/2017/train2017/',sz,sz,test=True) content_loader = torch.utils.data.DataLoader(dataset = content_dataset, batch_size = batch_size, shuffle = False, num_workers = 1, drop_last = True) train__setting = torchvision.datasets.STL10(root='./Database/', split='train', transform=transforms.Compose([ transforms.Resize(sz, interpolation=2), transforms.ToTensor(), ]), target_transform=None, download=True) content_loader = torch.utils.data.DataLoader(train__setting, batch_size=batch_size, shuffle=False, num_workers=1) Stylenet = StyleAugmentation(layer="r41", alpha=[alpha], prob=1.0, pseudo1=True, Noise=False, std=1.,mean=0.).cuda() output_img = np.zeros([batch_size*sz,(num_examples+1)*sz,3], dtype = np.uint8) for it, (content,_) in enumerate(content_loader): Image = np.uint8(content.permute(0,2,3,1).cpu().detach().numpy()*255) for n in range(batch_size): output_img[n*sz:(n+1)*sz, 0:sz] = Image[n] if print_idx: output_img[n*sz:(n+1)*sz, 0:sz] = cv2.putText(output_img[n*sz:(n+1)*sz, 0:sz],'Image', (0,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1) for j in range(num_examples): styled = Stylenet(content.cuda()) # Manual index. Edit the StyleNet file idx = Stylenet.idx[0] styled = np.uint8(styled.permute(0,2,3,1).cpu().data.numpy()*255) for n in range(styled.shape[0]): output_img[n*sz:(n+1)*sz, (j+1)*sz:(j+2)*sz] = styled[n] if print_idx: output_img[n*sz:(n+1)*sz, (j+1)*sz:(j+2)*sz] = cv2.putText(output_img[n*sz:(n+1)*sz, (j+1)*sz:(j+2)*sz], str(idx), (0,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1) break; output_img = np.array(output_img) print(sz, alpha, output_img.shape) cv2.imwrite('image_test_{0}_{1:.1f}.png'.format(sz, alpha),cv2.cvtColor( output_img ,cv2.COLOR_BGR2RGB))
{"hexsha": "26d6086ee03a4827a4073a6d8cc76cc2b28679d8", "size": 3451, "ext": "py", "lang": "Python", "max_stars_repo_path": "AugmentedTest.py", "max_stars_repo_name": "emedinac/UnderstandingSA", "max_stars_repo_head_hexsha": "a234631e99f2979396fef9e24f54865e63147ef4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AugmentedTest.py", "max_issues_repo_name": "emedinac/UnderstandingSA", "max_issues_repo_head_hexsha": "a234631e99f2979396fef9e24f54865e63147ef4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AugmentedTest.py", "max_forks_repo_name": "emedinac/UnderstandingSA", "max_forks_repo_head_hexsha": "a234631e99f2979396fef9e24f54865e63147ef4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 59.5, "max_line_length": 196, "alphanum_fraction": 0.4555201391, "include": true, "reason": "import numpy", "num_tokens": 736}
//============================================================================== // Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II // Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI // // Distributed under the Boost Software License, Version 1.0. // See accompanying file LICENSE.txt or copy at // http://www.boost.org/LICENSE_1_0.txt //============================================================================== #ifndef BOOST_SIMD_TOOLBOX_IEEE_FUNCTIONS_SIMD_COMMON_EPS_HPP_INCLUDED #define BOOST_SIMD_TOOLBOX_IEEE_FUNCTIONS_SIMD_COMMON_EPS_HPP_INCLUDED #include <boost/simd/toolbox/ieee/functions/eps.hpp> #include <boost/simd/include/constants/one.hpp> #include <boost/dispatch/meta/as_integer.hpp> #include <boost/simd/include/constants/smallestposval.hpp> #include <boost/simd/include/constants/mindenormal.hpp> #include <boost/simd/include/constants/nbmantissabits.hpp> #include <boost/simd/include/constants/nan.hpp> #include <boost/simd/include/functions/simd/ldexp.hpp> #include <boost/simd/include/functions/simd/seladd.hpp> #include <boost/simd/include/functions/simd/if_else.hpp> #include <boost/simd/include/functions/simd/is_less.hpp> #include <boost/simd/include/functions/simd/is_invalid.hpp> #include <boost/simd/include/functions/simd/exponent.hpp> #include <boost/simd/include/functions/simd/abs.hpp> namespace boost { namespace simd { namespace ext { BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::eps_, tag::cpu_ , (A0)(X) , ((simd_<arithmetic_<A0>,X>)) ) { typedef A0 result_type; inline result_type operator()(const A0&)const { return One<A0>(); } }; BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::eps_, tag::cpu_ , (A0)(X) , ((simd_<floating_<A0>,X>)) ) { typedef A0 result_type; BOOST_SIMD_FUNCTOR_CALL(1) { typedef typename dispatch::meta::as_integer<A0, signed>::type int_type; const A0 a = boost::simd::abs(a0); return seladd(is_invalid(a), select(boost::simd::is_less(a, Smallestposval<A0>()), Mindenormal<A0>(), ldexp(One<A0>(), exponent(a)-Nbmantissabits<A0>()) ), Nan<A0>()); } }; } } } #endif
{"hexsha": "2919004c08480725092d8e3dec7ad65dbce9c6e6", "size": 2480, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/boost/simd/ieee/include/boost/simd/toolbox/ieee/functions/simd/common/eps.hpp", "max_stars_repo_name": "timblechmann/nt2", "max_stars_repo_head_hexsha": "6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2016-09-14T00:23:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-14T12:51:18.000Z", "max_issues_repo_path": "modules/boost/simd/ieee/include/boost/simd/toolbox/ieee/functions/simd/common/eps.hpp", "max_issues_repo_name": "timblechmann/nt2", "max_issues_repo_head_hexsha": "6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/boost/simd/ieee/include/boost/simd/toolbox/ieee/functions/simd/common/eps.hpp", "max_forks_repo_name": "timblechmann/nt2", "max_forks_repo_head_hexsha": "6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3650793651, "max_line_length": 84, "alphanum_fraction": 0.5907258065, "num_tokens": 566}
[STATEMENT] theorem prover_complete_refutation: "prover N \<longleftrightarrow> satisfiable (RP.grounded_N0 N)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. prover N = satisfiable (RP.grounded_N0 N) [PROOF STEP] unfolding prover_def St0_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (case deterministic_RP (RP.St0 N 0) of None \<Rightarrow> True | Some R \<Rightarrow> [] \<notin> set R) = satisfiable (RP.grounded_N0 N) [PROOF STEP] using RP.deterministic_RP_complete[of N 0] RP.deterministic_RP_refutation[of N 0] [PROOF STATE] proof (prove) using this: deterministic_RP (RP.St0 N 0) = None \<Longrightarrow> satisfiable (RP.grounded_N0 N) deterministic_RP (RP.St0 N 0) = Some ?R \<Longrightarrow> (\<not> satisfiable (RP.grounded_N0 N)) = ({#} \<in> RP.grounded_R ?R) goal (1 subgoal): 1. (case deterministic_RP (RP.St0 N 0) of None \<Rightarrow> True | Some R \<Rightarrow> [] \<notin> set R) = satisfiable (RP.grounded_N0 N) [PROOF STEP] by (force simp: grounding_of_clss_def grounding_of_cls_def ex_ground_subst split: option.splits if_splits)
{"llama_tokens": 448, "file": "Functional_Ordered_Resolution_Prover_Executable_FO_Ordered_Resolution_Prover", "length": 3}
import numpy from clpy.creation import basic from clpy.creation import from_data from clpy.creation import ranges from clpy.math import trigonometric def blackman(M): """Returns the Blackman window. The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + 0.08 \\cos\\left(\\frac{4\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 Args: M (:class:`~int`): Number of points in the output window. If zero or less, an empty array is returned. Returns: ~clpy.ndarray: Output ndarray. .. seealso:: :func:`numpy.blackman` """ if M < 1: return from_data.array([]) if M == 1: return basic.ones(1, float) n = ranges.arange(0, M) return 0.42 - 0.5 * trigonometric.cos(2.0 * numpy.pi * n / (M - 1))\ + 0.08 * trigonometric.cos(4.0 * numpy.pi * n / (M - 1)) def hamming(M): """Returns the Hamming window. The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 Args: M (:class:`~int`): Number of points in the output window. If zero or less, an empty array is returned. Returns: ~clpy.ndarray: Output ndarray. .. seealso:: :func:`numpy.hamming` """ if M < 1: return from_data.array([]) if M == 1: return basic.ones(1, float) n = ranges.arange(0, M) return 0.54 - 0.46 * trigonometric.cos(2.0 * numpy.pi * n / (M - 1)) def hanning(M): """Returns the Hanning window. The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 Args: M (:class:`~int`): Number of points in the output window. If zero or less, an empty array is returned. Returns: ~clpy.ndarray: Output ndarray. .. seealso:: :func:`numpy.hanning` """ if M < 1: return from_data.array([]) if M == 1: return basic.ones(1, float) n = ranges.arange(0, M) return 0.5 - 0.5 * trigonometric.cos(2.0 * numpy.pi * n / (M - 1))
{"hexsha": "8fb2a2737ce6eb38f4a0f8d37fd264006ef6e7c5", "size": 2247, "ext": "py", "lang": "Python", "max_stars_repo_path": "clpy/math/window.py", "max_stars_repo_name": "fixstars/clpy", "max_stars_repo_head_hexsha": "693485f85397cc110fa45803c36c30c24c297df0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 142, "max_stars_repo_stars_event_min_datetime": "2018-06-07T07:43:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-30T21:06:32.000Z", "max_issues_repo_path": "clpy/math/window.py", "max_issues_repo_name": "fixstars/clpy", "max_issues_repo_head_hexsha": "693485f85397cc110fa45803c36c30c24c297df0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 282, "max_issues_repo_issues_event_min_datetime": "2018-06-07T08:35:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-31T03:14:32.000Z", "max_forks_repo_path": "clpy/math/window.py", "max_forks_repo_name": "fixstars/clpy", "max_forks_repo_head_hexsha": "693485f85397cc110fa45803c36c30c24c297df0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-06-19T11:07:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-13T20:57:04.000Z", "avg_line_length": 24.9666666667, "max_line_length": 76, "alphanum_fraction": 0.5429461504, "include": true, "reason": "import numpy", "num_tokens": 709}
# -*- encoding: utf-8 -*- import numpy as np def rmse(prof_ref,prof_seg): dif_curv = [] for shift in range(prof_seg.shape[1]): dif_curv.append(np.abs(np.sum((prof_ref[0] - np.roll(prof_seg[0],shift))**2))) prof_seg_shift = np.apply_along_axis(np.roll, 1, prof_seg, np.argmin(dif_curv)) return np.sqrt(np.sum((prof_ref - prof_seg_shift)**2,axis=1)/(prof_ref.shape[1]))
{"hexsha": "aac9465f6f2fd6788492f65d860377cf66980269", "size": 395, "ext": "py", "lang": "Python", "max_stars_repo_path": "deliver/functions_will/rmse.py", "max_stars_repo_name": "mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso", "max_stars_repo_head_hexsha": "f094c706db815f91cf61d1d501c2a9030b9b54d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deliver/functions_will/rmse.py", "max_issues_repo_name": "mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso", "max_issues_repo_head_hexsha": "f094c706db815f91cf61d1d501c2a9030b9b54d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deliver/functions_will/rmse.py", "max_forks_repo_name": "mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso", "max_forks_repo_head_hexsha": "f094c706db815f91cf61d1d501c2a9030b9b54d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9166666667, "max_line_length": 86, "alphanum_fraction": 0.6658227848, "include": true, "reason": "import numpy", "num_tokens": 122}
# Copyright 2020 Novartis Institutes for BioMedical Research Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pandas as pd import numpy as np import os import math import time import itertools from sklearn.decomposition import PCA as sk_PCA from sklearn.manifold import TSNE as sk_TSNE from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score, confusion_matrix from sklearn import metrics from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn import preprocessing from sklearn.metrics import calinski_harabasz_score, silhouette_score from sklearn.metrics import completeness_score, adjusted_rand_score, fowlkes_mallows_score, adjusted_mutual_info_score from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import cophenet from scipy.spatial.distance import pdist from hdbscan import HDBSCAN np.random.seed(seed=42) from correction import do_batch_correction from plot import plot_confusion_matrix, plot_consistency_matrix, plot_clustermap, plot_cluster_assignment from plot import plot_distance_heatmaps, plot_dmso_pca, plot_embeddings, plot_DMSO_3PCA def unique(list1): x = np.array(list1) return list(np.unique(x)) def number_of_components_95(df, embeds_cols): # PCA on all embeddings pca = sk_PCA().fit(df[embeds_cols]) # Find the number of dimensions to explain 95% of variance i = 0 s = 0 for j in range(100): s += pca.explained_variance_ratio_[j] if s > 0.95: i = j break # There should be at least 8 dimensions if i < 8: return 8 else: return i # --------------------------------------------------- collapse methods --------------------------------------------------- def collapse_domain(df, headers, do_median=True, remove_dmso=False): if do_median: avg_df = df.groupby(headers).median() else: avg_df = df.groupby(headers).mean() avg_df = avg_df.reset_index(drop=False) if 'field' in avg_df.columns: avg_df = avg_df.drop(columns=['field', 'image_nr']) if remove_dmso: avg_df = avg_df[avg_df['compound'] != 'DMSO'] avg_df = avg_df.reset_index(drop=True) return avg_df def collapse_well_level(df, do_median=True, remove_dmso=False): avg_df = collapse_domain(df, ['batch', 'plate', 'well', 'compound', 'compound_uM', 'pseudoclass', 'moa'], do_median, remove_dmso) return avg_df def collapse_treatment_level(df, do_median=True, remove_dmso=False): avg_df = collapse_domain(df, ['compound', 'compound_uM', 'pseudoclass', 'moa'], do_median, remove_dmso) #avg_df = avg_df.drop(columns=['table_nr', 'replicate']) return avg_df def collapse_plate_level(df, do_median=True, remove_dmso=False): avg_df = collapse_domain(df, ['batch', 'plate', 'compound', 'compound_uM', 'pseudoclass', 'moa'], do_median, remove_dmso) return avg_df def collapse_batch_level(df, do_median=True, remove_dmso=False): avg_df = collapse_domain(df, ['batch', 'compound', 'compound_uM', 'pseudoclass', 'moa'], do_median, remove_dmso) avg_df = avg_df.drop(columns=['replicate']) return avg_df # --------------------------------------------------- eval methods --------------------------------------------------- def calc_theoretical_number_of_clusters(df_well): n_cluster_dict = dict() # number of clusters according prior knowledge n_cluster_dict['n_cluster_comp'] = len(df_well['compound'].unique()) - 1 n_cluster_dict['n_cluster_treat'] = len(df_well['pseudoclass'].unique()) # number of clusters n_cluster_dict['n_cluster_treat'] = math.sqrt(len(df_well) / 2) return pd.DataFrame(n_cluster_dict) def jaccard(labels1, labels2): n11 = n10 = n01 = 0 n = len(labels1) # TODO: Throw exception if len(labels1) != len(labels2) for i, j in itertools.combinations(range(n), 2): comembership1 = labels1[i] == labels1[j] comembership2 = labels2[i] == labels2[j] if comembership1 and comembership2: n11 += 1 elif comembership1 and not comembership2: n10 += 1 elif not comembership1 and comembership2: n01 += 1 return float(n11) / (n11 + n10 + n01) def internal_partitional_validation(org_emb, tsne_emb, moa_, pred_, random_): metrices_dict = dict() # Calisnki-Harabasz coefficient metrices_dict['cal-har_moa'] = [calinski_harabasz_score(org_emb, moa_)] metrices_dict['cal-har_pred'] = [calinski_harabasz_score(org_emb, pred_)] metrices_dict['cal-har_rand'] = [calinski_harabasz_score(org_emb, random_)] # Calisnki-Harabasz coefficient on TSNE metrices_dict['cal-har_moa_tsne'] = [calinski_harabasz_score(tsne_emb, moa_)] metrices_dict['cal-har_pred_tsne'] = [calinski_harabasz_score(tsne_emb, pred_)] metrices_dict['cal-har_rand_tsne'] = [calinski_harabasz_score(tsne_emb, random_)] # Silhouette Coefficient (Cosine) metrices_dict['silhou_moa'] = [silhouette_score(org_emb, moa_)] metrices_dict['silhou_pred'] = [silhouette_score(org_emb, pred_)] metrices_dict['silhou_rand'] = [silhouette_score(org_emb, random_)] # Silhouette Coefficient on TSNE (Cosine) metrices_dict['silhou_moa_tsne'] = [silhouette_score(tsne_emb, moa_)] metrices_dict['silhou_pred_tsne'] = [silhouette_score(tsne_emb, pred_)] metrices_dict['silhou_rand_tsne'] = [silhouette_score(tsne_emb, random_)] return pd.DataFrame(metrices_dict) def internal_hierarchical_validation(org_emb, tsne_emb): metrices_dict = dict() # Cophenetic distance X = pdist(org_emb, metric='cosine') Z = linkage(X, 'average') c, _ = cophenet(Z, X) metrices_dict['cophenet'] = [c] # Cophenetic distance on TSNE X = pdist(tsne_emb, metric='cosine') Z = linkage(X, 'average') c, _ = cophenet(Z, X) metrices_dict['cophenet_tsne'] = [c] return pd.DataFrame(metrices_dict) def external_validation(pred_, moa_, treat_, comp_, random_, same_): metrices_dict = dict() # Completeness metrices_dict['comple_moa-pred'] = [completeness_score(moa_, pred_)] metrices_dict['comple_treat-moa'] = [completeness_score(treat_, moa_)] metrices_dict['comple_treat-pred'] = [completeness_score(treat_, pred_)] metrices_dict['comple_treat-rand'] = [completeness_score(treat_, random_)] metrices_dict['comple_treat-same'] = [completeness_score(treat_, same_)] metrices_dict['comple_comp-moa'] = [completeness_score(comp_, moa_)] metrices_dict['comple_comp_pred'] = [completeness_score(comp_, pred_)] metrices_dict['comple_comp_rand'] = [completeness_score(comp_, random_)] metrices_dict['comple_comp_same'] = [completeness_score(comp_, same_)] #  Jaccard similarity coefficient metrices_dict['jaccard_moa-pred'] = [jaccard(moa_, pred_)] metrices_dict['jaccard_treat-moa'] = [jaccard(treat_, moa_)] metrices_dict['jaccard_treat-pred'] = [jaccard(treat_, pred_)] metrices_dict['jaccard_treat-rand'] = [jaccard(treat_, random_)] metrices_dict['jaccard_treat-same'] = [jaccard(treat_, same_)] metrices_dict['jaccard_comp-moa'] = [jaccard(comp_, moa_)] metrices_dict['jaccard_comp_pred'] = [jaccard(comp_, pred_)] metrices_dict['jaccard_comp_rand'] = [jaccard(comp_, random_)] metrices_dict['jaccard_comp_same'] = [jaccard(comp_, same_)] # Adjusted Rand index metrices_dict['adj-rand_moa-pred'] = [adjusted_rand_score(moa_, pred_)] metrices_dict['adj-rand_treat-moa'] = [adjusted_rand_score(treat_, moa_)] metrices_dict['adj-rand_treat-pred'] = [adjusted_rand_score(treat_, pred_)] metrices_dict['adj-rand_treat-rand'] = [adjusted_rand_score(treat_, random_)] metrices_dict['adj-rand_treat-same'] = [adjusted_rand_score(treat_, same_)] metrices_dict['adj-rand_comp-moa'] = [adjusted_rand_score(comp_, moa_)] metrices_dict['adj-rand_comp_pred'] = [adjusted_rand_score(comp_, pred_)] metrices_dict['adj-rand_comp_rand'] = [adjusted_rand_score(comp_, random_)] metrices_dict['adj-rand_comp_same'] = [adjusted_rand_score(comp_, same_)] # Fowlkes-Mallows index metrices_dict['fow-mal_moa-pred'] = [fowlkes_mallows_score(moa_, pred_)] metrices_dict['fow-mal_treat-moa'] = [fowlkes_mallows_score(treat_, moa_)] metrices_dict['fow-mal_treat-pred'] = [fowlkes_mallows_score(treat_, pred_)] metrices_dict['fow-mal_treat-rand'] = [fowlkes_mallows_score(treat_, random_)] metrices_dict['fow-mal_treat-same'] = [fowlkes_mallows_score(treat_, same_)] metrices_dict['fow-mal_comp-moa'] = [fowlkes_mallows_score(comp_, moa_)] metrices_dict['fow-mal_comp_pred'] = [fowlkes_mallows_score(comp_, pred_)] metrices_dict['fow-mal_comp_rand'] = [fowlkes_mallows_score(comp_, random_)] metrices_dict['fow-mal_comp_same'] = [fowlkes_mallows_score(comp_, same_)] # Adjusted mutual information metrices_dict['adj-mut_moa-pred'] = [adjusted_mutual_info_score(moa_, pred_)] metrices_dict['adj-mut_treat-moa'] = [adjusted_mutual_info_score(treat_, moa_)] metrices_dict['adj-mut_treat-pred'] = [adjusted_mutual_info_score(treat_, pred_)] metrices_dict['adj-mut_treat-rand'] = [adjusted_mutual_info_score(treat_, random_)] metrices_dict['adj-mut_treat-same'] = [adjusted_mutual_info_score(treat_, same_)] metrices_dict['adj-mut_comp-moa'] = [adjusted_mutual_info_score(comp_, moa_)] metrices_dict['adj-mut_comp_pred'] = [adjusted_mutual_info_score(comp_, pred_)] metrices_dict['adj-mut_comp_rand'] = [adjusted_mutual_info_score(comp_, random_)] metrices_dict['adj-mut_comp_same'] = [adjusted_mutual_info_score(comp_, same_)] return pd.DataFrame(metrices_dict) def batch_classification_accuracy(df_dmso, embeds_cols): clf = LogisticRegression(random_state=42, max_iter=1000) X = preprocessing.StandardScaler().fit_transform(df_dmso[embeds_cols]) y_batch = df_dmso['batch'] y_plate = df_dmso['plate'] scores_batch = cross_val_score(clf, X, y_batch, cv=3) scores_plate = cross_val_score(clf, X, y_plate, cv=3) return pd.DataFrame([[scores_batch.mean(), scores_batch.std() * 2, scores_plate.mean(), scores_plate.std() * 2]], columns=["batch_class_acc", "batch_class_std", "plate_class_acc", "plate_class_std"]) def NSC_k_NN(df_treatment, embeds_cols, plot_conf=False, savepath=None): # Create classes for each moa class_dict = dict(zip(df_treatment['moa'].unique(), np.arange(len(df_treatment['moa'].unique())))) df_treatment['moa_class'] = df_treatment['moa'].map(class_dict) # Create nearest neighbors classifier predictions = list() labels = list() label_names = list() for comp in df_treatment['compound'].unique(): df_ = df_treatment.loc[df_treatment['compound'] != comp, :] knn = KNeighborsClassifier(n_neighbors=4, algorithm='brute', metric='cosine') knn.fit(df_.loc[:, embeds_cols], df_.loc[:, 'moa_class']) nn = knn.kneighbors(df_treatment.loc[df_treatment['compound'] == comp, embeds_cols]) for p in range(nn[1].shape[0]): predictions.append(list(df_.iloc[nn[1][p]]['moa_class'])) labels.extend(df_treatment.loc[df_treatment['compound'] == comp, 'moa_class']) label_names.extend(df_treatment.loc[df_treatment['compound'] == comp, 'moa']) predictions = np.asarray(predictions) k_nn_acc = [accuracy_score(labels, predictions[:, 0]), accuracy_score(labels, predictions[:, 1]), accuracy_score(labels, predictions[:, 2]), accuracy_score(labels, predictions[:, 3])] if plot_conf: print('There are {} treatments'.format(len(df_treatment))) print('NSC is: {:.2f}%'.format(accuracy_score(labels, predictions[:, 0]) * 100)) plot_confusion_matrix(labels, predictions[:, 0], class_dict, 'NSC', savepath) return k_nn_acc def NSB_k_NN(df_treatment, embeds_cols, plot_conf=False, savepath=None): # Remove moa with only 1 plate df_treatment = df_treatment[df_treatment['moa'] != 'Cholesterol-lowering'] df_treatment = df_treatment[df_treatment['moa'] != 'Kinase inhibitors'] df_treatment = df_treatment.reset_index(drop=True) class_dict = dict(zip(df_treatment['moa'].unique(), np.arange(len(df_treatment['moa'].unique())))) df_treatment['moa_class'] = df_treatment['moa'].map(class_dict) predictions = list() labels = list() label_names = list() for batch in df_treatment['table_nr'].unique(): for comp in df_treatment.loc[df_treatment['table_nr'] == batch, 'compound'].unique(): df_ = df_treatment.loc[(df_treatment['compound'] != comp) & (df_treatment['table_nr'] != batch), :] knn = KNeighborsClassifier(n_neighbors=4, algorithm='brute', metric='cosine') knn.fit(df_.loc[:, embeds_cols], df_.loc[:, 'moa_class']) nn = knn.kneighbors( df_treatment.loc[(df_treatment['compound'] == comp) & (df_treatment['table_nr'] == batch), embeds_cols]) for p in range(nn[1].shape[0]): predictions.append(list(df_.iloc[nn[1][p]]['moa_class'])) labels.extend( df_treatment.loc[(df_treatment['compound'] == comp) & (df_treatment['table_nr'] == batch), 'moa_class']) label_names.extend( df_treatment.loc[(df_treatment['compound'] == comp) & (df_treatment['table_nr'] == batch), 'moa']) predictions = np.asarray(predictions) k_nn_acc = [accuracy_score(labels, predictions[:, 0]), accuracy_score(labels, predictions[:, 1]), accuracy_score(labels, predictions[:, 2]), accuracy_score(labels, predictions[:, 3])] if plot_conf: print('There are {} treatments'.format(len(df_treatment))) print('NSCB is: {:.2f}%'.format(accuracy_score(labels, predictions[:, 0]) * 100)) plot_confusion_matrix(labels, predictions[:, 0], class_dict, 'NSCB', savepath) return k_nn_acc def NSC(df_well, df_plate, df_batch, embeds_cols): nsc_well = NSC_k_NN(df_well, embeds_cols) nsc_plate = NSC_k_NN(df_plate, embeds_cols) nsc_batch = NSC_k_NN(df_batch, embeds_cols) nsc_average = np.asarray([nsc_well, nsc_plate, nsc_batch]).mean(axis=0) nsc_list = list() nsc_list.extend(nsc_well) nsc_list.extend(nsc_plate) nsc_list.extend(nsc_batch) nsc_list.extend(nsc_average) return pd.DataFrame([nsc_list], columns=['NSC_1-NN_well', 'NSC_2-NN_well', 'NSC_3-NN_well', 'NSC_4-NN_well', 'NSC_1-NN_plate', 'NSC_2-NN_plate', 'NSC_3-NN_plate', 'NSC_4-NN_plate', 'NSC_1-NN_batch', 'NSC_2-NN_batch', 'NSC_3-NN_batch', 'NSC_4-NN_batch', 'NSC_1-NN_avg', 'NSC_2-NN_avg', 'NSC_3-NN_avg', 'NSC_4-NN_avg']) def NSB(df_well, df_plate, df_batch, embeds_cols): nsb_well = NSB_k_NN(df_well, embeds_cols) nsb_plate = NSB_k_NN(df_plate, embeds_cols) nsb_batch = NSB_k_NN(df_batch, embeds_cols) nsb_average = np.asarray([nsb_well, nsb_plate, nsb_batch]).mean(axis=0) nsb_list = list() nsb_list.extend(nsb_well) nsb_list.extend(nsb_plate) nsb_list.extend(nsb_batch) nsb_list.extend(nsb_average) return pd.DataFrame([nsb_list], columns=['NSB_1-NN_well', 'NSB_2-NN_well', 'NSB_3-NN_well', 'NSB_4-NN_well', 'NSB_1-NN_plate', 'NSB_2-NN_plate', 'NSB_3-NN_plate', 'NSB_4-NN_plate', 'NSB_1-NN_batch', 'NSB_2-NN_batch', 'NSB_3-NN_batch', 'NSB_4-NN_batch', 'NSB_1-NN_avg', 'NSB_2-NN_avg', 'NSB_3-NN_avg', 'NSB_4-NN_avg']) def create_consistency_matrix(df_well, predictions, savepath): # create mappers pred_mapper = {"cluster_{}".format(i): i for i in sorted(list(predictions))} moa_mapper = dict(zip(sorted(df_well['moa'].unique()), range(len(df_well['moa'].unique())))) treatment_mapper = dict(zip(sorted(df_well['pseudoclass'].unique()), range(len(df_well['pseudoclass'].unique())))) compound_mapper = dict(zip(sorted(df_well['compound'].unique()), range(len(df_well['compound'].unique())))) pred_ = list(predictions) moa_ = list(df_well['moa'].map(moa_mapper)) treat_ = list(df_well['pseudoclass'].map(treatment_mapper)) comp_ = list(df_well['compound'].map(compound_mapper)) # prediction vs moa conti_pred_moa = metrics.cluster.contingency_matrix(pred_, moa_) df_conti_pred_moa = pd.DataFrame(conti_pred_moa, columns=list(moa_mapper.keys()), index=list(pred_mapper.keys())) plot_consistency_matrix(df_conti_pred_moa, "prediction-moa", savepath) # moa vs treatment conti_moa_treat = metrics.cluster.contingency_matrix(moa_, treat_) df_conti_moa_treat = pd.DataFrame(conti_moa_treat, columns=list(treatment_mapper.keys()), index=list(moa_mapper.keys())) plot_consistency_matrix(df_conti_moa_treat, "moa-treatment", savepath) # prediction vs treatment conti_pred_treat = metrics.cluster.contingency_matrix(pred_, treat_) df_conti_pred_treat = pd.DataFrame(conti_pred_treat, columns=list(treatment_mapper.keys()), index=list(pred_mapper.keys())) plot_consistency_matrix(df_conti_pred_treat, "prediction-treatment", savepath) # prediction vs compound conti_pred_comp = metrics.cluster.contingency_matrix(pred_, comp_) df_conti_pred_comp = pd.DataFrame(conti_pred_comp, columns=list(compound_mapper.keys()), index=list(pred_mapper.keys())) plot_consistency_matrix(df_conti_pred_comp, "prediction-compound", savepath) # --------------------------------------------------- clustering assignment --------------------------------------------------- def assign_clusters(df_well, embeds_cols, min_cluster_size=10, min_samples=3): pca_image = sk_PCA(n_components=number_of_components_95(df_well, embeds_cols)).fit_transform(df_well[embeds_cols]) tsne_image = sk_TSNE(metric='cosine', n_jobs=1).fit_transform(pca_image) clusterer = HDBSCAN(min_cluster_size=min_cluster_size, metric='manhattan', min_samples=min_samples).fit(tsne_image) # tsne_image = sk_TSNE(metric='cosine', n_jobs=1).fit_transform(df_well[embeds_cols]) # clusterer = HDBSCAN(min_cluster_size=min_cluster_size, metric='manhattan', min_samples=min_samples).fit(tsne_image) return clusterer.labels_, clusterer.labels_.max(), tsne_image # --------------------------------------------------- MAIN EVALUATION --------------------------------------------------- def evaluate_epoch(df_tile, embeds_cols, verbose=False): if verbose: print('Start evaluating of the features') end = time.time() # ----------- Well level ----------- # Create well collapse dataframe df_well = collapse_well_level(df_tile.copy(), remove_dmso=True) # clustering predictions, n_clusters, pca_tsne_image = assign_clusters(df_well, embeds_cols, min_cluster_size=10, min_samples=3) n_clus_df = pd.DataFrame([n_clusters], columns=['n_clusters_well']) # create mappers moa_mapper = dict(zip(sorted(df_well['moa'].unique()), range(len(df_well['moa'].unique())))) treatment_mapper = dict(zip(sorted(df_well['pseudoclass'].unique()), range(len(df_well['pseudoclass'].unique())))) compound_mapper = dict(zip(sorted(df_well['compound'].unique()), range(len(df_well['compound'].unique())))) # create assignment lists pred_ = list(predictions) moa_ = list(df_well['moa'].map(moa_mapper)) treat_ = list(df_well['pseudoclass'].map(treatment_mapper)) comp_ = list(df_well['compound'].map(compound_mapper)) random_ = list(np.random.randint(12, size=len(moa_))) same_ = list(np.ones(len(moa_))) if verbose: print('Run validation methods') # validation int_par_df = internal_partitional_validation(df_well[embeds_cols], pca_tsne_image, moa_, pred_, random_) int_hier_df = internal_hierarchical_validation(df_well[embeds_cols], pca_tsne_image) ext_df = external_validation(pred_, moa_, treat_, comp_, random_, same_) # Remove undefined clusters df_labeled = df_tile[df_tile['moa'] != 'undefined'].copy() df_labeled = df_labeled.reset_index(drop=True) # Create batch and plate collapse dataframe df_well = collapse_well_level(df_labeled.copy(), remove_dmso=True) df_plate = collapse_plate_level(df_labeled.copy(), remove_dmso=True) df_batch = collapse_batch_level(df_labeled.copy(), remove_dmso=True) # Nearest Neighborhood NSC_df = NSC(df_well, df_plate, df_batch, embeds_cols) NSB_df = NSB(df_well, df_plate, df_batch, embeds_cols) # ----------- Treatment level ----------- # Average per treatment per plate and median per treatment per batch avg_df = collapse_plate_level(df_labeled.copy(), do_median=False) df_treatment = collapse_treatment_level(avg_df, do_median=True, remove_dmso=True) NSC_treatment_df = pd.DataFrame([NSC_k_NN(df_treatment, embeds_cols)], columns=['NSC_1-NN_treatment', 'NSC_2-NN_treatment', 'NSC_3-NN_treatment', 'NSC_4-NN_treatment']) NSCB_treatment_df = pd.DataFrame([NSB_k_NN(df_treatment, embeds_cols)], columns=['NSB_1-NN_treatment', 'NSB_2-NN_treatment', 'NSB_3-NN_treatment', 'NSB_4-NN_treatment']) # Create well DMSO dataframe df_dmso = df_tile.loc[(df_tile['compound'] == 'DMSO'), :].copy() df_dmso = df_dmso.reset_index(drop=True) # Batch effect batch_acc_df = batch_classification_accuracy(df_dmso, embeds_cols) if verbose: print('Evaluation time: {0:.2f} s'.format(time.time() - end)) #  Return DataFrame with all metrices return pd.concat([n_clus_df, int_par_df, int_hier_df, ext_df, NSC_df, NSB_df, NSC_treatment_df, NSCB_treatment_df, batch_acc_df], axis=1) def evaluate_training(df_tile, embeds_cols, savepath=None, verbose=False): if verbose: print('Start evaluating of best features') if not os.path.isdir(savepath): os.makedirs(savepath) # ----------- Well level ----------- # Create well collapse dataframe df_well_with_dmso = collapse_well_level(df_tile.copy(), remove_dmso=False) df_save_well = df_well_with_dmso.copy() # Plot embeddings with ground truth labels and assigned labeles moa_unique_list = sorted(unique(list(df_well_with_dmso['moa']))) pca_well, pca_tsne_well, tsne_well, umap_well = plot_embeddings(df_well_with_dmso, embeds_cols, moa_unique_list, savepath) # Save well values df_save_well['PCA1'] = pca_well[:, 0] df_save_well['PCA2'] = pca_well[:, 1] df_save_well['TSNE1'] = tsne_well[:, 0] df_save_well['TSNE2'] = tsne_well[:, 1] df_save_well['PCA_TSNE1'] = pca_tsne_well[:, 0] df_save_well['PCA_TSNE2'] = pca_tsne_well[:, 1] df_save_well['UMAP1'] = umap_well[:, 0] df_save_well['UMAP2'] = umap_well[:, 1] # Create well DMSO dataframe df_tile_dmso = df_tile.loc[(df_tile['compound'] == 'DMSO'), :].copy() df_tile_dmso = df_tile_dmso.reset_index(drop=True) df_well_dmso = df_well_with_dmso.loc[(df_well_with_dmso['compound'] == 'DMSO'), :].copy() df_well_dmso = df_well_dmso.reset_index(drop=True) # Plot DMSO embeddings batch_unique_list = sorted(unique(list(df_well_with_dmso['batch']))) plot_dmso_pca(df_tile_dmso, df_well_dmso, embeds_cols, batch_unique_list, savepath) plot_distance_heatmaps(df_tile_dmso, df_well_dmso, embeds_cols, savepath) plot_DMSO_3PCA(df_tile_dmso, embeds_cols, savepath) # clustering wells df_well = collapse_well_level(df_tile.copy(), remove_dmso=True) predictions, n_clusters, pca_tsne_image = assign_clusters(df_well, embeds_cols, min_cluster_size=10, min_samples=3) plot_cluster_assignment(pca_tsne_image, predictions, list(df_well['moa']), savepath, prefix="Well_") # Save clustering assignment df_well['cluster_nr'] = predictions df_well['PCA_TSNE1'] = pca_tsne_image[:, 0] df_well['PCA_TSNE2'] = pca_tsne_image[:, 1] # Plot consistency_matrix create_consistency_matrix(df_well, predictions, savepath) # ----------- Treatment level ----------- # Average per treatment per plate and median per treatment per batch avg_df = collapse_plate_level(df_tile.copy(), do_median=False) df_treatment = collapse_treatment_level(avg_df, do_median=True, remove_dmso=True) # clustering treatments predictions2, n_clusters2, pca_tsne_image2 = assign_clusters(df_treatment, embeds_cols, min_cluster_size=5, min_samples=3) plot_cluster_assignment(pca_tsne_image2, predictions2, list(df_treatment['moa']), savepath, prefix="Treatment_") # Save clustering assignment df_treatment['cluster_nr'] = predictions2 df_treatment['PCA_TSNE1'] = pca_tsne_image2[:, 0] df_treatment['PCA_TSNE2'] = pca_tsne_image2[:, 1] # Labeled evaluation df_treatment = df_treatment[df_treatment['moa'] != 'undefined'].copy() df_treatment = df_treatment.reset_index(drop=True) plot_clustermap(df_treatment, embeds_cols, savepath) # NSC and NSCB NSC_k_NN(df_treatment, embeds_cols, plot_conf=True, savepath=savepath) NSB_k_NN(df_treatment, embeds_cols, plot_conf=True, savepath=savepath) return df_save_well
{"hexsha": "b4ed671691940d14196af4e43195bd9388eef56d", "size": 26396, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation.py", "max_stars_repo_name": "andrew-xu-monash/UMM-Modified", "max_stars_repo_head_hexsha": "18729dc34733c203e8cd3873fec2b9f7d0b56dba", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-12T17:46:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T23:36:38.000Z", "max_issues_repo_path": "evaluation.py", "max_issues_repo_name": "Novartis/UMM-Discovery", "max_issues_repo_head_hexsha": "18729dc34733c203e8cd3873fec2b9f7d0b56dba", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluation.py", "max_forks_repo_name": "Novartis/UMM-Discovery", "max_forks_repo_head_hexsha": "18729dc34733c203e8cd3873fec2b9f7d0b56dba", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.0516934046, "max_line_length": 141, "alphanum_fraction": 0.6827170783, "include": true, "reason": "import numpy,from scipy", "num_tokens": 7121}
#!/usr/bin/env python3 # coding: utf-8 import os.path as osp import numpy as np from .io import _load def make_abs_path(d): return osp.join(osp.dirname(osp.realpath(__file__)), d) d = make_abs_path('../train.configs') keypoints = _load(osp.join(d, 'keypoints_sim.npy')) w_shp = _load(osp.join(d, 'w_shp_sim.npy')) w_exp = _load(osp.join(d, 'w_exp_sim.npy')) # simplified version meta = _load(osp.join(d, 'param_whitening.pkl')) # param_mean and param_std are used for re-whitening param_mean = meta.get('param_mean') param_std = meta.get('param_std') u_shp = _load(osp.join(d, 'u_shp.npy')) u_exp = _load(osp.join(d, 'u_exp.npy')) u = u_shp + u_exp w = np.concatenate((w_shp, w_exp), axis=1) w_base = w[keypoints] w_norm = np.linalg.norm(w, axis=0) w_base_norm = np.linalg.norm(w_base, axis=0) # for inference dim = w_shp.shape[0] // 3 u_base = u[keypoints].reshape(-1, 1) w_shp_base = w_shp[keypoints] w_exp_base = w_exp[keypoints] # std_size = 120 std_size = 120 # for paf (pac) paf = _load(osp.join(d, 'Model_PAF.pkl')) u_filter = paf.get('mu_filter') w_filter = paf.get('w_filter') w_exp_filter = paf.get('w_exp_filter') # pncc code (mean shape) pncc_code = _load(osp.join(d, 'pncc_code.npy'))
{"hexsha": "43d67be08c6888005852ea01e6f72e3f25cc9ccd", "size": 1211, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/params.py", "max_stars_repo_name": "xqterry/3DDFA", "max_stars_repo_head_hexsha": "3b8f7bb4cfafa349e628d6433d7a4edc55627243", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/params.py", "max_issues_repo_name": "xqterry/3DDFA", "max_issues_repo_head_hexsha": "3b8f7bb4cfafa349e628d6433d7a4edc55627243", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/params.py", "max_forks_repo_name": "xqterry/3DDFA", "max_forks_repo_head_hexsha": "3b8f7bb4cfafa349e628d6433d7a4edc55627243", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9111111111, "max_line_length": 65, "alphanum_fraction": 0.7076796036, "include": true, "reason": "import numpy", "num_tokens": 401}
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # ## Read data into dataframe data_name = 'metr-la' data_path = '../data/' + data_name + '.h5' df = pd.read_hdf(data_path) print(df.shape) print(df) # ## Get the critical info sensor_id_index = 0 sensor_id = list(df.keys())[sensor_id_index] date_index = 0 time_start = 12 * 24 * date_index time_end = 12 * 24 * (date_index + 1) a_day_speed = df.iloc[time_start:time_end, sensor_id_index] time_stamp = list(a_day_speed.index) time_stamp_info = [x.strftime('%Y-%m-%d %H:%M:%S').split(' ')[1][0:5] for x in time_stamp] date = time_stamp[0].strftime('%Y-%m-%d %H:%M:%S').split(' ')[0] a_day_speed_value = a_day_speed.values # ## Draw the figure plt.figure(figsize=(25, 2)) plt.plot(time_stamp_info, a_day_speed_value, color='red', linewidth=1.0, linestyle='--') plt.title(f'Traffic Speed of sensor {sensor_id} in {date}') plt.xlabel('Time') plt.ylabel('Average Speed') show_ticks = np.arange(0, 289, 12) plt.xticks(show_ticks) plt.savefig(f'figures/{data_name}_{sensor_id}_{date}') plt.show() print('Test over'.center(50, '-'))
{"hexsha": "38df9f3e28219657e1a75bbaef763073ae716278", "size": 1129, "ext": "py", "lang": "Python", "max_stars_repo_path": "display_data.py", "max_stars_repo_name": "KarlDenken/Traffic-YX", "max_stars_repo_head_hexsha": "086346350ccc7a8c6751790f263a0dc73a5d0b78", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "display_data.py", "max_issues_repo_name": "KarlDenken/Traffic-YX", "max_issues_repo_head_hexsha": "086346350ccc7a8c6751790f263a0dc73a5d0b78", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "display_data.py", "max_forks_repo_name": "KarlDenken/Traffic-YX", "max_forks_repo_head_hexsha": "086346350ccc7a8c6751790f263a0dc73a5d0b78", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7105263158, "max_line_length": 90, "alphanum_fraction": 0.7041629761, "include": true, "reason": "import numpy", "num_tokens": 337}
""" @alias new = original Define `const new = original` and attach `original`'s docstring to `new`. """ macro alias(expr) expr.head == :(=) || error("must be an assignment expression") new, original = expr.args return quote @doc (@doc $original) const $(esc(new)) = $(esc(original)) end end """ @withfb "Reticulating splines" slow_function() Give user feedback: what is happening when the program hangs; and when is it done. """ macro withfb(description, expr) return quote print($(esc(description)), " … ") flush(stdout) t0 = time() $(esc(expr)) dt = time() - t0 print("done") if dt > 0.1 # seconds @printf " (%.1f s)" dt end println() end end
{"hexsha": "0c832fc3467c62efc9245daa6eec0ced3980316e", "size": 783, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/macros.jl", "max_stars_repo_name": "tfiers/MyToolbox.jl", "max_stars_repo_head_hexsha": "c96c0eb2e93d98c6e8a6fe11729bb737cef3d963", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/macros.jl", "max_issues_repo_name": "tfiers/MyToolbox.jl", "max_issues_repo_head_hexsha": "c96c0eb2e93d98c6e8a6fe11729bb737cef3d963", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/macros.jl", "max_forks_repo_name": "tfiers/MyToolbox.jl", "max_forks_repo_head_hexsha": "c96c0eb2e93d98c6e8a6fe11729bb737cef3d963", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.75, "max_line_length": 82, "alphanum_fraction": 0.558109834, "num_tokens": 204}
import numpy as np import cv2 import matplotlib.pyplot as plt import torch from tqdm import tqdm from pathlib import Path import os import warnings import argparse import torch import sys import json sys.path.append('./src/utils') from openpose_utils import create_label_full, create_face_label from matplotlib import pyplot as plt from functools import cmp_to_key def gao(idx): # if os.path.exists(test_label_dir.joinpath('%s.torch' % idx)): # return anno_path = anno_dir.joinpath('%s_%s_keypoints.json' % (args.name, idx)) anno = json.load(open(str(anno_path)))['people'] if len(anno) == 0: print("warnings: %s no people"% idx) return [-1, -1, -1, -1] anno = anno[0] for key in anno: anno[key] = np.array(anno[key]).reshape(-1, 3).astype(np.float) anno[key][:, 0] -= 420 anno[key][:, :2] = (anno[key][:, :2] / 1080. * 512).clip(min=0) x = anno['pose_keypoints_2d'][:,:2] s = np.linalg.norm(x[1,:] - x[8,:]) y = max(x[21,1], x[24,1]) if y > 10 and x[1,:].min() > 5 and x[8, :].min() > 5: w = x[:,0].max() - x[:,0].min() b = (y - ymin)/(ymax-ymin) * (fmax - fmin) + fmin l = (y - ymin)/(ymax-ymin) * (tmax / smax - tmin / smin) + tmin / smin left = w * (1 - l) * x[:,1].min() / (512 - w) for key in anno: anno[key] *= l d = max(x[21,1], x[24,1]) - b # print(l, d) for key in anno: anno[key][:, 0] += left anno[key][:, 1] -= d # print(max(x[21,1], x[24,1]) - b) # img_path = img_dir.joinpath('%s.png'%idx) # img = cv2.imread(str(img_path))[:, 420: -420] # img = cv2.resize(img, (512, 512)) # cv2.imwrite(str(test_image_dir.joinpath('%s.png'%idx)), img) # label = create_label_full((512, 512), anno) # s = label.max(axis = 2)[:,:, np.newaxis] # fig = plt.figure(1) # ax = fig.add_subplot(111) # ax.imshow((img * .8 + s * 255 * .2 ).astype(np.uint8)) # ax.imshow((s[:,:, 0] * 255).astype(np.uint8)) # plt.show() # label = torch.tensor(label).byte() # label_path = test_label_dir.joinpath('%s.torch'% idx) # torch.save(label, str(label_path)) # print(str(test_image_dir.joinpath('%s.png'%idx))) # ================ Crop Face===================== face = anno['face_keypoints_2d'] if face[:, 2].min() < 0.001: print(face[:, 2].min()) return [-1, -1, -1, -1] minx, maxx = int(max(face[:, 1].min() - 20, 0)), int(min(face[:, 1].max() + 10, 512)) miny, maxy = int(max(face[:, 0].min() - 15, 0)), int(min(face[:, 0].max() + 15, 512)) face[:, 0] = (face[:, 0] - miny) / (maxy - miny + 1.) * 128. face[:, 1] = (face[:, 1] - minx) / (maxx - minx + 1.) * 128. face_label = create_face_label((128, 128), face) # fig = plt.figure(1) # ax = fig.add_subplot(111) # s = face_label.max(axis = 2, keepdims = False) # print(s.shape) # ax.imshow(s) # plt.show() face_label = torch.tensor(face_label).byte() face_label_dir = test_face_label_dir.joinpath('%s.torch'% idx) torch.save(face_label, str(face_label_dir)) return [minx, maxx, miny, maxy] parser = argparse.ArgumentParser() parser.add_argument('--name', metavar = '-n', type = str, help = 'name of the datset') parser.add_argument('--which_train', metavar = '-a', type = str, help = 'name of the corresponding training set') args = parser.parse_args() save_dir = Path('./data/%s/'%(args.name)) anno_dir = save_dir.joinpath('anno') save_dir.mkdir(exist_ok=True) img_dir = save_dir.joinpath('images') img_dir.mkdir(exist_ok=True) test_dir = save_dir.joinpath('test') test_dir.mkdir(exist_ok=True) test_label_dir = test_dir.joinpath('test_label') test_label_dir.mkdir(exist_ok=True) test_image_dir = test_dir.joinpath('test_img') test_image_dir.mkdir(exist_ok=True) test_face_label_dir = test_dir.joinpath('test_face_label') test_face_label_dir.mkdir(exist_ok=True) all_index = [] scale = [] for anno_name in sorted(os.listdir(anno_dir))[: 1800]: all_index.append(anno_name.split('_')[1]) x = json.load(open(anno_dir.joinpath(anno_name)))['people'] if len(x) == 0: continue x = x[0] x = np.array(x['pose_keypoints_2d']).reshape(-1, 3)[:,:2] x[:, 0] -= 420 x = (x / 1080. * 512).clip(min=0) y = max(x[21,1], x[24,1]) s = np.linalg.norm(x[1,:] - x[8,:]) if x[1,:].min() < 5 or x[8, :].min() < 5: continue if y < 10: continue scale.append([y, s, int(all_index[-1])]) def xcmp(x, y): return x[0] - y[0] scale = sorted(scale, key = cmp_to_key(xcmp)) scale = np.array(scale) median = np.median(scale[:, 0]) xlen = int(scale.shape[0] * 0.05) d = (scale[-1, 0] - scale[0,0]) * 0.1 print(scale.shape, d) idx = np.searchsorted(scale[:, 0], scale[-1, 0] - d) smax = scale[-idx:, 1].max() midx = scale[-idx:, 1].argmax() print (scale[-idx:, -1][midx]) idx = np.searchsorted(scale[:, 0], scale[0, 0] + d, side = 'right') smin = scale[:idx, 1].max() midx = scale[:idx, 1].argmax() print (scale[:idx, -1][midx]) ymin, ymax = scale[0,0], scale[-1,0] print(smin, smax, ymin, ymax) f = open("./data/%s/train/scale.txt"%args.which_train,'r') fmin, fmax, tmin, tmax = list(map(float,f.readline().split(' '))) # for index in sorted(all_index)[:2000]: # gao(index) from multiprocessing import Pool pool = Pool(10) head_coor = pool.map(gao, sorted(all_index)[:2000]) head_coor = torch.tensor(head_coor) print (head_coor) torch.save(head_coor, str(test_dir.joinpath('face_crop_coor.torch')))
{"hexsha": "2a9f0416736fceb9b76d747ef6768bddd9a4ef7b", "size": 5239, "ext": "py", "lang": "Python", "max_stars_repo_path": "source.py", "max_stars_repo_name": "kjzju/EverybodyDanceNow-Temporal-FaceGAN", "max_stars_repo_head_hexsha": "b61e628ac3f4be351dc0bc80445049394ce500a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2019-05-15T06:18:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-16T16:25:07.000Z", "max_issues_repo_path": "source.py", "max_issues_repo_name": "kjzju/EverybodyDanceNow-Temporal-FaceGAN", "max_issues_repo_head_hexsha": "b61e628ac3f4be351dc0bc80445049394ce500a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-06-15T09:14:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-04T03:04:25.000Z", "max_forks_repo_path": "source.py", "max_forks_repo_name": "kjzju/EverybodyDanceNow-Temporal-FaceGAN", "max_forks_repo_head_hexsha": "b61e628ac3f4be351dc0bc80445049394ce500a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2019-05-16T17:42:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T13:22:06.000Z", "avg_line_length": 30.4593023256, "max_line_length": 113, "alphanum_fraction": 0.6291276961, "include": true, "reason": "import numpy", "num_tokens": 1804}
[STATEMENT] lemma order_bal_nonempty_lasttreebal: "\<lbrakk>k > 0; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t [PROOF STEP] proof(induction k t rule: order.induct) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] case (2 k ts t) [PROOF STATE] proof (state) this: \<lbrakk>?x \<in> set (subtrees ts); 0 < k; root_order k ?x; bal ?x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal ?x \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t 0 < k root_order k (Node ts t) bal (Node ts t) goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<lbrakk>?x \<in> set (subtrees ts); 0 < k; root_order k ?x; bal ?x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal ?x \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t 0 < k root_order k (Node ts t) bal (Node ts t) [PROOF STEP] have "length ts > 0" [PROOF STATE] proof (prove) using this: \<lbrakk>?x \<in> set (subtrees ts); 0 < k; root_order k ?x; bal ?x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal ?x \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t 0 < k root_order k (Node ts t) bal (Node ts t) goal (1 subgoal): 1. 0 < length ts [PROOF STEP] by auto [PROOF STATE] proof (state) this: 0 < length ts goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: 0 < length ts [PROOF STEP] obtain ls tsub tsep where ts_split: "ts = (ls@[(tsub,tsep)])" [PROOF STATE] proof (prove) using this: 0 < length ts goal (1 subgoal): 1. (\<And>ls tsub tsep. ts = ls @ [(tsub, tsep)] \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (metis eq_fst_iff length_greater_0_conv snoc_eq_iff_butlast) [PROOF STATE] proof (state) this: ts = ls @ [(tsub, tsep)] goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] moreover [PROOF STATE] proof (state) this: ts = ls @ [(tsub, tsep)] goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] have "height tsub = height t" [PROOF STATE] proof (prove) goal (1 subgoal): 1. height tsub = height t [PROOF STEP] using "2.prems"(3) ts_split [PROOF STATE] proof (prove) using this: bal (Node ts t) ts = ls @ [(tsub, tsep)] goal (1 subgoal): 1. height tsub = height t [PROOF STEP] by auto [PROOF STATE] proof (state) this: height tsub = height t goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] moreover [PROOF STATE] proof (state) this: height tsub = height t goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] have "nonempty_lasttreebal t" [PROOF STATE] proof (prove) goal (1 subgoal): 1. nonempty_lasttreebal t [PROOF STEP] using 2 order_impl_root_order [PROOF STATE] proof (prove) using this: \<lbrakk>?x \<in> set (subtrees ts); 0 < k; root_order k ?x; bal ?x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal ?x \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t 0 < k root_order k (Node ts t) bal (Node ts t) \<lbrakk>0 < ?k; order ?k ?t\<rbrakk> \<Longrightarrow> root_order ?k ?t goal (1 subgoal): 1. nonempty_lasttreebal t [PROOF STEP] by auto [PROOF STATE] proof (state) this: nonempty_lasttreebal t goal (2 subgoals): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf 2. \<And>k ts t. \<lbrakk>\<And>x. \<lbrakk>x \<in> set (subtrees ts); 0 < k; root_order k x; bal x\<rbrakk> \<Longrightarrow> nonempty_lasttreebal x; \<lbrakk>0 < k; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t; 0 < k; root_order k (Node ts t); bal (Node ts t)\<rbrakk> \<Longrightarrow> nonempty_lasttreebal (Node ts t) [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: ts = ls @ [(tsub, tsep)] height tsub = height t nonempty_lasttreebal t [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: ts = ls @ [(tsub, tsep)] height tsub = height t nonempty_lasttreebal t goal (1 subgoal): 1. nonempty_lasttreebal (Node ts t) [PROOF STEP] by simp [PROOF STATE] proof (state) this: nonempty_lasttreebal (Node ts t) goal (1 subgoal): 1. \<And>k. \<lbrakk>0 < k; root_order k Leaf; bal Leaf\<rbrakk> \<Longrightarrow> nonempty_lasttreebal Leaf [PROOF STEP] qed simp
{"llama_tokens": 3083, "file": "BTree_BTree_Set", "length": 20}
""" Implementation of DDPG - Deep Deterministic Policy Gradient Algorithm and hyperparameter details can be found here: http://arxiv.org/pdf/1509.02971v2.pdf The algorithm is tested on the Pendulum-v0 OpenAI gym task and developed with tflearn + Tensorflow Author: Patrick Emami """ import tensorflow as tf import numpy as np import tflearn import actor import critic from replay_buffer import ReplayBuffer # ========================== # Training Parameters # ========================== # Max training steps MAX_EPISODES = 50000 # Max episode length MAX_EP_STEPS = 1000 # Base learning rate for the Actor network ACTOR_LEARNING_RATE = 0.0001 # Base learning rate for the Critic Network CRITIC_LEARNING_RATE = 0.001 # Discount factor GAMMA = 0.99 # Soft target update param TAU = 0.001 # =========================== # Utility Parameters # =========================== # Render gym env during training RENDER_ENV = True # Use Gym Monitor GYM_MONITOR_EN = True # Gym environment ENV_NAME = 'Pendulum-v0' # Directory for storing gym results MONITOR_DIR = './results/gym_ddpg' # Directory for storing tensorboard summary results SUMMARY_DIR = './results/tf_ddpg' RANDOM_SEED = 1234 # Size of replay buffer BUFFER_SIZE = 10000 MINIBATCH_SIZE = 64 # =========================== # Actor and Critic DNNs # =========================== # =========================== # Tensorflow Summary Ops # =========================== def build_summaries(): episode_reward = tf.Variable(0.) tf.summary.scalar("Reward", episode_reward) episode_ave_max_q = tf.Variable(0.) tf.summary.scalar("Qmax Value", episode_ave_max_q) summary_vars = [episode_reward, episode_ave_max_q] summary_ops = tf.summary.merge_all() return summary_ops, summary_vars # =========================== # Agent Training # =========================== def train(sess, env, actor, critic): # Set up summary Ops summary_ops, summary_vars = build_summaries() sess.run(tf.global_variables_initializer()) writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph) # Initialize target network weights actor.update_target_network() critic.update_target_network() # Initialize replay memory replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED) for i in range(MAX_EPISODES): s = env.reset() ep_reward = 0 ep_ave_max_q = 0 for j in range(MAX_EP_STEPS): # Added exploration noise a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. + i)) s2, r, terminal, info = env.step(a[0]) replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r, terminal, np.reshape(s2, (actor.s_dim,))) # Keep adding experience to the memory until # there are at least minibatch size samples if replay_buffer.size() > MINIBATCH_SIZE: s_batch, a_batch, r_batch, t_batch, s2_batch = \ replay_buffer.sample_batch(MINIBATCH_SIZE) # Calculate targets target_q = critic.predict_target( s2_batch, actor.predict_target(s2_batch)) y_i = [] for k in range(MINIBATCH_SIZE): if t_batch[k]: y_i.append(r_batch[k]) else: y_i.append(r_batch[k] + GAMMA * target_q[k]) # Update the critic given the targets predicted_q_value, _ = critic.train( s_batch, a_batch, np.reshape(y_i, (MINIBATCH_SIZE, 1))) ep_ave_max_q += np.amax(predicted_q_value) # Update the actor policy using the sampled gradient a_outs = actor.predict(s_batch) grads = critic.action_gradients(s_batch, a_outs) actor.train(s_batch, grads[0]) # Update target networks actor.update_target_network() critic.update_target_network() s = s2 ep_reward += r if terminal: summary_str = sess.run(summary_ops, feed_dict={ summary_vars[0]: ep_reward, summary_vars[1]: ep_ave_max_q / float(j) }) writer.add_summary(summary_str, i) writer.flush() print ('| Reward: %.2i' % int(ep_reward), " | Episode", i, \ '| Qmax: %.4f' % (ep_ave_max_q / float(j))) break def main(_): with tf.Session() as sess: env = gym.make(ENV_NAME) np.random.seed(RANDOM_SEED) tf.set_random_seed(RANDOM_SEED) env.seed(RANDOM_SEED) state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] action_bound = env.action_space.high # Ensure action bound is symmetric assert (env.action_space.high == -env.action_space.low) actor = ActorNetwork(sess, state_dim, action_dim, action_bound, ACTOR_LEARNING_RATE, TAU) critic = CriticNetwork(sess, state_dim, action_dim, CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars()) if GYM_MONITOR_EN: if not RENDER_ENV: env = wrappers.Monitor( env, MONITOR_DIR, video_callable=False, force=True) else: env = wrappers.Monitor(env, MONITOR_DIR, force=True) train(sess, env, actor, critic) if GYM_MONITOR_EN: env.monitor.close() if __name__ == '__main__': tf.app.run()
{"hexsha": "28ce5e00c6e845acf5974c84f46764cbb334ba14", "size": 5695, "ext": "py", "lang": "Python", "max_stars_repo_path": "python_code/ddpg.py", "max_stars_repo_name": "ssriramana93/EnvClassify", "max_stars_repo_head_hexsha": "56c1b1965403d08fd3c1213a2bc67a27a9c1ebf8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python_code/ddpg.py", "max_issues_repo_name": "ssriramana93/EnvClassify", "max_issues_repo_head_hexsha": "56c1b1965403d08fd3c1213a2bc67a27a9c1ebf8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python_code/ddpg.py", "max_forks_repo_name": "ssriramana93/EnvClassify", "max_forks_repo_head_hexsha": "56c1b1965403d08fd3c1213a2bc67a27a9c1ebf8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7626262626, "max_line_length": 94, "alphanum_fraction": 0.5806848112, "include": true, "reason": "import numpy", "num_tokens": 1284}
from random import randint as rand import numpy as np from scipy.io import wavfile as wf import function_melody_generator as fmg samplerate = 44100 #Frequecy in Hz tempo = fmg.tempo_ke_detik(200) ketukan = 8 scale=[2,1,2,2,1,3,1] nada = 0 chord = [0,3,7] #chordx = fmg.to_chord(0,chord) #scalex = fmg.to_scale(0,scale) #ob = fmg.onebar(chordx,scalex,ketukan) ch = [0,7,9,5] chx = [[0,4,7],[0,4,7],[0,3,7],[0,4,7]] ach = len(ch) ch_a = [] sc = [[2,2,1,2,2,2,1],[2,2,1,2,2,1,2],[2,1,2,2,1,2,2],[2,2,2,1,2,2,1]] sc_a= [] for c in range(ach): ch_a.append(fmg.to_chord(ch[c],chx[c])) sc_a.append(fmg.to_scale(ch[c],sc[c])) ob = fmg.onebait(ch_a,sc_a,ketukan) result = fmg.get_song_data(ob,tempo) #untuk menyimpan lagu print(ob) wf.write('test.wav', samplerate,result.astype(np.int16)) x = input()
{"hexsha": "aa24870c54f68e41cb88748233e247a45cbd7dc0", "size": 852, "ext": "py", "lang": "Python", "max_stars_repo_path": "app 2.py", "max_stars_repo_name": "zemetia/random-automatic-melody-generator", "max_stars_repo_head_hexsha": "4ffaa855d7bf98b63e077209726abdba70d9de1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-09-05T14:22:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T13:14:31.000Z", "max_issues_repo_path": "app 2.py", "max_issues_repo_name": "zemetia/random-automatic-melody-generator", "max_issues_repo_head_hexsha": "4ffaa855d7bf98b63e077209726abdba70d9de1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app 2.py", "max_forks_repo_name": "zemetia/random-automatic-melody-generator", "max_forks_repo_head_hexsha": "4ffaa855d7bf98b63e077209726abdba70d9de1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.027027027, "max_line_length": 71, "alphanum_fraction": 0.6279342723, "include": true, "reason": "import numpy,from scipy", "num_tokens": 347}
import matplotlib.pyplot as plt import numpy as np import scipy.io as scio from skimage import io from skimage import img_as_float import runkMeans as km import findClosestCentroids as fc import computeCentroids as cc import kMeansInitCentroids as kmic plt.ion() np.set_printoptions(formatter={'float': '{: 0.6f}'.format}) # ===================== Part 1: Find Closest Centroids ===================== # To help you implement K-means, we have divided the learning algorithm # into two functions -- find_closest_centroids and compute_centroids. In this # part, you should complete the code in the findClosestCentroids.py # print('Finding closest centroids.') # Load an example dataset that we will be using data = scio.loadmat('ex7data2.mat') X = data['X'] # Select an initial set of centroids k = 3 # Three centroids initial_centroids = np.array([[3, 3], [6, 2], [8, 5]]) # Find the closest centroids for the examples using the # initial_centroids idx = fc.find_closest_centroids(X, initial_centroids) print('Closest centroids for the first 3 examples: ') print('{}'.format(idx[0:3])) print('(the closest centroids should be 0, 2, 1 respectively)') input('Program paused. Press ENTER to continue') # ===================== Part 2: Compute Means ===================== # After implementing the closest centroids function, you should now # complete the compute_centroids function. # print('Computing centroids means.') # Compute means based on the closest centroids found in the previous part. centroids = cc.compute_centroids(X, idx, k) print('Centroids computed after initial finding of closest centroids: \n{}'.format(centroids)) print('the centroids should be') print('[[ 2.428301 3.157924 ]') print(' [ 5.813503 2.633656 ]') print(' [ 7.119387 3.616684 ]]') input('Program paused. Press ENTER to continue') # ===================== Part 3: K-Means Clustering ===================== # After you have completed the two functions compute_centroids and # find_closest_centroids, you will have all the necessary pieces to run the # kMeans algorithm. In this part, you will run the K-Means algorithm on # the example dataset we have provided. # print('Running K-Means Clustering on example dataset.') # Load an example dataset data = scio.loadmat('ex7data2.mat') X = data['X'] # Settings for running K-Means K = 3 max_iters = 10 # For consistency, here we set centroids to specific values # but in practice you want to generate them automatically, such as by # settings them to be random examples (as can be seen in # kMeansInitCentroids). initial_centroids = np.array([[3, 3], [6, 2], [8, 5]]) # Run K-Means algorithm. The 'true' at the end tells our function to plot # the progress of K-Means centroids, idx = km.run_kmeans(X, initial_centroids, max_iters, True) print('K-Means Done.') input('Program paused. Press ENTER to continue') # ===================== Part 4: K-Means Clustering on Pixels ===================== # In this exercise, you will use K-Means to compress an image. To do this, # you will first run K-Means on the colors of the pixels in the image and # then you will map each pixel onto its closest centroid. # # You should now complete the code in kMeansInitCentroids.m # print('Running K-Means clustering on pixels from an image') # Load an image of a bird image = io.imread('bird_small.png') image = img_as_float(image) # Size of the image img_shape = image.shape # Reshape the image into an Nx3 matrix where N = number of pixels. # Each row will contain the Red, Green and Blue pixel values # This gives us our dataset matrix X that we will use K-Means on. X = image.reshape(img_shape[0] * img_shape[1], 3) # Run your K-Means algorithm on this data # You should try different values of K and max_iters here K = 16 max_iters = 10 # When using K-Means, it is important the initialize the centroids # randomly. # You should complete the code in kMeansInitCentroids.py before proceeding initial_centroids = kmic.kmeans_init_centroids(X, K) # Run K-Means centroids, idx = km.run_kmeans(X, initial_centroids, max_iters, False) print('K-Means Done.') input('Program paused. Press ENTER to continue') # ===================== Part 5: Image Compression ===================== # In this part of the exercise, you will use the clusters of K-Means to # compress an image. To do this, we first find the closest clusters for # each example. print('Applying K-Means to compress an image.') # Find closest cluster members idx = fc.find_closest_centroids(X, centroids) # Essentially, now we have represented the image X as in terms of the # indices in idx. # We can now recover the image from the indices (idx) by mapping each pixel # (specified by its index in idx) to the centroid value X_recovered = centroids[idx] # Reshape the recovered image into proper dimensions X_recovered = np.reshape(X_recovered, (img_shape[0], img_shape[1], 3)) plt.subplot(2, 1, 1) plt.imshow(image) plt.title('Original') plt.subplot(2, 1, 2) plt.imshow(X_recovered) plt.title('Compressed, with {} colors'.format(K)) input('ex7 Finished. Press ENTER to exit')
{"hexsha": "8926759096ef0fa88172e5b419c1e0a78e1001f5", "size": 5077, "ext": "py", "lang": "Python", "max_stars_repo_path": "machine-learning-ex7/ex7/ex7.py", "max_stars_repo_name": "ShawnT4ever/coursera-ml-py", "max_stars_repo_head_hexsha": "ede0f259ed5ac6ed0c0d7b4d6f999cad5c07aafb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1333, "max_stars_repo_stars_event_min_datetime": "2017-03-24T05:51:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T14:20:55.000Z", "max_issues_repo_path": "machine-learning-ex7/ex7/ex7.py", "max_issues_repo_name": "ShawnT4ever/coursera-ml-py", "max_issues_repo_head_hexsha": "ede0f259ed5ac6ed0c0d7b4d6f999cad5c07aafb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-15T10:03:44.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-15T10:03:44.000Z", "max_forks_repo_path": "machine-learning-ex7/ex7/ex7.py", "max_forks_repo_name": "ShawnT4ever/coursera-ml-py", "max_forks_repo_head_hexsha": "ede0f259ed5ac6ed0c0d7b4d6f999cad5c07aafb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 531, "max_forks_repo_forks_event_min_datetime": "2017-03-25T14:08:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T07:01:30.000Z", "avg_line_length": 32.9675324675, "max_line_length": 94, "alphanum_fraction": 0.7187315344, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1307}
from PIL import Image import numpy as np from paddle.io import Dataset, DataLoader import scipy.io import os # A = TrainDataset('../Data/benchmark_RELEASE/dataset') class TrainDataset(Dataset): def __init__(self,dataset_path, img_folder='img', gt_folder='cls',threshold=128,ignore_label=None): self.threshold, self.ignore_label = threshold, ignore_label dataset_path=dataset_path with open(os.path.join(dataset_path, 'train.txt'), 'r', encoding='utf-8') as file: img_files = file.readlines() img_files = [img.rstrip('\n') for img in img_files] gt_files = [os.path.join(dataset_path, gt_folder, img+'.mat') for img in img_files] img_files = [os.path.join(dataset_path, img_folder, img+'.jpg') for img in img_files] self.img_files = img_files self.gt_files = gt_files def __len__(self): return len(self.img_files) def __getitem__(self, idx): img_src = np.array(Image.open(self.img_files[idx])) gt_src = scipy.io.loadmat(self.gt_files[idx]) gt = gt_src['GTcls'][0][0][-2] return img_src, gt
{"hexsha": "df8e996d016881ac71aadfd6cdd5e1bcb6ea1262", "size": 1144, "ext": "py", "lang": "Python", "max_stars_repo_path": "fcanet/fcanet/train.py", "max_stars_repo_name": "PaddleEdu/Segmentation-models-PaddlePaddle", "max_stars_repo_head_hexsha": "478b00f260128642d5db60bd5ac0485de0e341bc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-13T09:19:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-21T06:32:29.000Z", "max_issues_repo_path": "fcanet/fcanet/train.py", "max_issues_repo_name": "PaddleEdu/Segmentation-models-PaddlePaddle", "max_issues_repo_head_hexsha": "478b00f260128642d5db60bd5ac0485de0e341bc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fcanet/fcanet/train.py", "max_forks_repo_name": "PaddleEdu/Segmentation-models-PaddlePaddle", "max_forks_repo_head_hexsha": "478b00f260128642d5db60bd5ac0485de0e341bc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-12T16:43:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-12T16:43:47.000Z", "avg_line_length": 42.3703703704, "max_line_length": 104, "alphanum_fraction": 0.6582167832, "include": true, "reason": "import numpy,import scipy", "num_tokens": 274}
# 求一个三维的椭圆体 import numpy as np from skimage.draw import ellipsoid spacing = (1., 10 / 6., 16 / 6.) ellipsoid_anisotropic = ellipsoid(6, 10, 16, spacing=spacing, levelset=True) print(ellipsoid_anisotropic) print(ellipsoid_anisotropic.shape)
{"hexsha": "568ac87233e1e0990847c16fa55eef8b5a886894", "size": 239, "ext": "py", "lang": "Python", "max_stars_repo_path": "Draw/ellipsoid.py", "max_stars_repo_name": "Joevaen/Scikit-image_On_CT", "max_stars_repo_head_hexsha": "e3bf0eeadc50691041b4b7c44a19d07546a85001", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Draw/ellipsoid.py", "max_issues_repo_name": "Joevaen/Scikit-image_On_CT", "max_issues_repo_head_hexsha": "e3bf0eeadc50691041b4b7c44a19d07546a85001", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Draw/ellipsoid.py", "max_forks_repo_name": "Joevaen/Scikit-image_On_CT", "max_forks_repo_head_hexsha": "e3bf0eeadc50691041b4b7c44a19d07546a85001", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1428571429, "max_line_length": 76, "alphanum_fraction": 0.7782426778, "include": true, "reason": "import numpy", "num_tokens": 87}
#!/usr/bin/env python from pathlib import Path import numpy as np import pytest from histutils.rawDMCreader import goRead R = Path(__file__).parent def test_rawread(): bigfn = R / "testframes.DMCdata" params = { "xy_pixel": (512, 512), "xy_bin": (1, 1), "frame_request": (1, 2, 1), "header_bytes": 4, } testframe, testind, finf = goRead(bigfn, params) # these are both tested by goRead # finf = getDMCparam(bigfn,(512,512),(1,1),None,verbose=2) # with open(bigfn,'rb') as f: # testframe,testind = getDMCframe(f,iFrm=1,finf=finf,verbose=2) # test a handful of pixels assert testind.dtype == np.int64 assert testframe.dtype == np.uint16 assert testind == 710730 assert (testframe[0, :5, 0] == [956, 700, 1031, 730, 732]).all() assert (testframe[0, -5:, -1] == [1939, 1981, 1828, 1752, 1966]).all() if __name__ == "__main__": pytest.main(["-xrsv", __file__])
{"hexsha": "2733f9f05d11c08a97b817e7f377e6a255f77a03", "size": 961, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/histutils/tests/test_all.py", "max_stars_repo_name": "space-physics/histutils", "max_stars_repo_head_hexsha": "f2add29c73be5c62d8675139cc58250ece92f477", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/histutils/tests/test_all.py", "max_issues_repo_name": "space-physics/histutils", "max_issues_repo_head_hexsha": "f2add29c73be5c62d8675139cc58250ece92f477", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-05-27T15:20:23.000Z", "max_issues_repo_issues_event_max_datetime": "2015-12-14T07:55:18.000Z", "max_forks_repo_path": "src/histutils/tests/test_all.py", "max_forks_repo_name": "space-physics/histutils", "max_forks_repo_head_hexsha": "f2add29c73be5c62d8675139cc58250ece92f477", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2894736842, "max_line_length": 74, "alphanum_fraction": 0.6181061394, "include": true, "reason": "import numpy", "num_tokens": 327}
FUNCTION zbrent(func,x1,x2,tol) INTEGER ITMAX REAL zbrent,tol,x1,x2,func,EPS EXTERNAL func PARAMETER (ITMAX=100,EPS=3.e-8) INTEGER iter REAL a,b,c,d,e,fa,fb,fc,p,q,r,s,tol1,xm a=x1 b=x2 fa=func(a) fb=func(b) if((fa.gt.0..and.fb.gt.0.).or.(fa.lt.0..and.fb.lt.0.))pause *'root must be bracketed for zbrent' c=b fc=fb do 11 iter=1,ITMAX if((fb.gt.0..and.fc.gt.0.).or.(fb.lt.0..and.fc.lt.0.))then c=a fc=fa d=b-a e=d endif if(abs(fc).lt.abs(fb)) then a=b b=c c=a fa=fb fb=fc fc=fa endif tol1=2.*EPS*abs(b)+0.5*tol xm=.5*(c-b) if(abs(xm).le.tol1 .or. fb.eq.0.)then zbrent=b return endif if(abs(e).ge.tol1 .and. abs(fa).gt.abs(fb)) then s=fb/fa if(a.eq.c) then p=2.*xm*s q=1.-s else q=fa/fc r=fb/fc p=s*(2.*xm*q*(q-r)-(b-a)*(r-1.)) q=(q-1.)*(r-1.)*(s-1.) endif if(p.gt.0.) q=-q p=abs(p) if(2.*p .lt. min(3.*xm*q-abs(tol1*q),abs(e*q))) then e=d d=p/q else d=xm e=d endif else d=xm e=d endif a=b fa=fb if(abs(d) .gt. tol1) then b=b+d else b=b+sign(tol1,xm) endif fb=func(b) 11 continue pause 'zbrent exceeding maximum iterations' zbrent=b return END
{"hexsha": "d7f4ef4d48b77c549fa7e5e5fbafaab135d23cb0", "size": 1743, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "NR-Functions/Numerical Recipes- Example & Functions/Functions/zbrent.for", "max_stars_repo_name": "DingdingLuan/nrfunctions_fortran", "max_stars_repo_head_hexsha": "37e376dab8d6b99e63f6f1398d0c33d5d6ad3f8c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NR-Functions/Numerical Recipes- Example & Functions/Functions/zbrent.for", "max_issues_repo_name": "DingdingLuan/nrfunctions_fortran", "max_issues_repo_head_hexsha": "37e376dab8d6b99e63f6f1398d0c33d5d6ad3f8c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NR-Functions/Numerical Recipes- Example & Functions/Functions/zbrent.for", "max_forks_repo_name": "DingdingLuan/nrfunctions_fortran", "max_forks_repo_head_hexsha": "37e376dab8d6b99e63f6f1398d0c33d5d6ad3f8c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5540540541, "max_line_length": 67, "alphanum_fraction": 0.3866896156, "num_tokens": 576}
import os import gc from itertools import islice from typing import Generator, List, Tuple import numpy as np import pandas as pd from tqdm import tqdm import regex as re from networks.classes.centernet.utils.BBoxesVisualizer import BBoxesVisualizer class SubmissionHandler: def __init__(self, dict_cat, log): self.__log = log self.__dict_cat: [str, str] = {str(v): k for k, v in dict_cat.items()} def test(self, max_visualizations=5): self.__log.info('Testing the submission...') # Read the submission data from csv file path_to_submission = os.path.join('datasets', 'submission.csv') try: submission = pd.read_csv(path_to_submission, usecols=['image_id', 'labels']) except FileNotFoundError: raise Exception( 'Cannot fetch data for visualization because no submission was written at {}\n' 'Probably predict_on_test param was set to False, thus no submission has been written' .format(path_to_submission)) # Initialize a bboxes visualizer object to print bboxes on images bbox_visualizer = BBoxesVisualizer( path_to_images=os.path.join('datasets', 'kaggle', 'testing', 'images')) # i counts the number of images that can be visualized i = 0 # Iterate over the images for _, sub_data in submission.iterrows(): if i == max_visualizations: break labels = [label.strip().split(' ') for label in re.findall(r"(?:\s?\S*\s){2}\S*", sub_data['labels'])] labels = [[label[0], int(label[1]), int(label[2]), 5, 5] for label in labels] img_id = sub_data['image_id'] self.__log.info('Visualizing image {}'.format(img_id)) bbox_visualizer.visualize_bboxes(image_id=img_id, labels=labels) i += 1 def __get_class(self, prediction: List[List]): """ Gets list of unicode classes from the predictions :param prediction: a list of class predictions :return: """ return [self.__dict_cat[str(k)] for k in np.argmax(prediction, axis=1)] @staticmethod def __get_center_coords(bbox: List[str]): """ Gets the coordinates of the center of the bboxes :param bbox: a string representing the coordinates of the bbox :return: """ # Get the coordinates of the bbox # ymin, xmin, ymax, xmax = bbox.split(':') coords = [(round(float(ymin)), round(float(xmin)), round(float(ymax)), round(float(xmax))) for ymin, xmin, ymax, xmax in (bb.split(':') for bb in bbox)] # ymin = round(float(ymin)) # xmin = round(float(xmin)) # ymax = round(float(ymax)) # xmax = round(float(xmax)) return [(str(xmin + ((xmax - xmin) // 2)), str(ymin + ((ymax - ymin) // 2))) for ymin, xmin, ymax, xmax in coords] def __write_img_with_chars(self, images_data, predictions_gen, path_to_submission): submission_list = [] # Iterate over all the predicted original images for img_data in tqdm(images_data, total=len(images_data)): batch_labels: List[str] = [] bboxes = list(img_data['bboxes'].split(' ')) # Iterate over all the bboxes of the current image i = 0 while i < len(bboxes): # Get a class prediction from the generator try: prediction = next(predictions_gen) except StopIteration: break batch_size = len(prediction) # Get batch of bboxes with same length bbox_batch = bboxes[i:i + batch_size] i += batch_size # Get the unicode classes from the predictions unicode: List[str] = self.__get_class(prediction) del prediction gc.collect() # Get the coordinates of the center of boxes in 'bbox_batch' coords: List[Tuple[str, str]] = self.__get_center_coords(bbox_batch) # Append the labels of current batch to the list of the labels of the current image batch_labels.extend([' '.join([u, c[0], c[1]]) for u, c in zip(unicode, coords)]) # Gather the data for the submission of the current image submission_list.append({'image_id': img_data['original_image'], 'labels': ' '.join(batch_labels)}) # Write the submission to csv img_submission = pd.DataFrame(submission_list, columns=['image_id', 'labels']) img_submission.to_csv(path_to_submission, mode='a', header=False) @staticmethod def __write_img_with_no_chars(path_to_submission): submission = pd.read_csv(path_to_submission) submitted_images = submission['image_id'].tolist() for img_path in tqdm(os.listdir(os.path.join('datasets', 'kaggle', 'testing', 'images'))): img_id = img_path.split(os.sep)[-1].split('.')[0] if img_id not in submitted_images: # Gather the data for the submission of the empty image img_submission = pd.DataFrame(data={'image_id': img_id, 'labels': ''}, columns=['image_id', 'labels'], index=[0]) # Write the submission to csv img_submission.to_csv(path_to_submission, mode='a', header=False) @staticmethod def __fetch_images_data(path_to_submission, test_list): # Delete the previous submission if os.path.isfile(path_to_submission): partial_sub = pd.read_csv(path_to_submission, usecols=['image_id', 'labels']) # Start iterating through the images from the last image inserted in the partial submission images_data = [img_data for _, img_data in islice(test_list.iterrows(), len(partial_sub.index) - 1, None)] # Remove last row (since it may be partial) partial_sub.drop(partial_sub.tail(1).index, inplace=True) partial_sub.to_csv(path_to_submission) else: # Start iterating through the images from the beginning images_data = [img_data for _, img_data in test_list.iterrows()] # Write the header pd.DataFrame(columns=['image_id', 'labels']).to_csv(path_to_submission) return images_data def write(self, predictions_gen: Generator): """ Writes a submission csv file in the format: - names of columns : image_id, labels - example of row : image_id, {label X Y} {...} :param predictions_gen: a list of class predictions for the cropped characters """ self.__log.info('Writing submission data...') # Read the test data from csv file path_to_test_list = os.path.join('datasets', 'test_list.csv') try: test_list = pd.read_csv(path_to_test_list, usecols=['original_image', 'cropped_images', 'bboxes']) except FileNotFoundError: raise Exception('Cannot write submission because non test list was written at {}\n' 'Probably predict_on_test param was set to False, thus no prediction has been made on test' .format(path_to_test_list)) # Set the path to the submission path_to_submission = os.path.join('datasets', 'submission.csv') # Fetch the data of the images images_data = self.__fetch_images_data(path_to_submission, test_list) self.__log.info('Writing images with characters...') self.__write_img_with_chars(images_data=images_data, predictions_gen=predictions_gen, path_to_submission=path_to_submission) self.__log.info('Writing images with no characters...') self.__write_img_with_no_chars(path_to_submission) self.__log.info('Written submission data at {}'.format(path_to_submission))
{"hexsha": "0f7770a211f53bd282a210189d06fb47b0fa71c6", "size": 8277, "ext": "py", "lang": "Python", "max_stars_repo_path": "networks/classes/centernet/pipeline/SubmissionHandler.py", "max_stars_repo_name": "ALIENK9/Kuzushiji-recognition", "max_stars_repo_head_hexsha": "a18c1fbfa72b6bbbcfe4004148cd0e90531acf6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-15T08:52:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-15T08:58:58.000Z", "max_issues_repo_path": "networks/classes/centernet/pipeline/SubmissionHandler.py", "max_issues_repo_name": "MatteoRizzo96/CognitiveServices", "max_issues_repo_head_hexsha": "a5efeb8f585ae2ee0465ab25e587c4db0e2b32b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "networks/classes/centernet/pipeline/SubmissionHandler.py", "max_forks_repo_name": "MatteoRizzo96/CognitiveServices", "max_forks_repo_head_hexsha": "a5efeb8f585ae2ee0465ab25e587c4db0e2b32b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-06T07:29:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-06T07:33:27.000Z", "avg_line_length": 39.6028708134, "max_line_length": 119, "alphanum_fraction": 0.5999758367, "include": true, "reason": "import numpy", "num_tokens": 1717}
import numpy as np from opytimizer.optimizers.science import aig from opytimizer.spaces import search def test_aig_params(): params = { 'alpha': np.pi, 'beta': np.pi } new_aig = aig.AIG(params=params) assert new_aig.alpha == np.pi assert new_aig.beta == np.pi def test_aig_params_setter(): new_aig = aig.AIG() try: new_aig.alpha = 'a' except: new_aig.alpha = np.pi assert new_aig.alpha == np.pi try: new_aig.alpha = -1 except: new_aig.alpha = np.pi assert new_aig.alpha == np.pi try: new_aig.beta = 'b' except: new_aig.beta = np.pi assert new_aig.beta == np.pi try: new_aig.beta = -1 except: new_aig.beta = np.pi assert new_aig.beta == np.pi def test_aig_update(): def square(x): return np.sum(x**2) search_space = search.SearchSpace(n_agents=10, n_variables=2, lower_bound=[-10, -10], upper_bound=[10, 10]) new_aig = aig.AIG() new_aig.update(search_space, square)
{"hexsha": "36d07c24550ac1368db42c77202416e116ed69f6", "size": 1100, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/opytimizer/optimizers/science/test_aig.py", "max_stars_repo_name": "anukaal/opytimizer", "max_stars_repo_head_hexsha": "5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 528, "max_stars_repo_stars_event_min_datetime": "2018-10-01T20:00:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T11:15:31.000Z", "max_issues_repo_path": "tests/opytimizer/optimizers/science/test_aig.py", "max_issues_repo_name": "anukaal/opytimizer", "max_issues_repo_head_hexsha": "5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-10-30T00:47:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T11:39:28.000Z", "max_forks_repo_path": "tests/opytimizer/optimizers/science/test_aig.py", "max_forks_repo_name": "anukaal/opytimizer", "max_forks_repo_head_hexsha": "5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2018-10-01T20:03:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T03:54:15.000Z", "avg_line_length": 17.7419354839, "max_line_length": 83, "alphanum_fraction": 0.5763636364, "include": true, "reason": "import numpy", "num_tokens": 312}
theory NimFullProofs imports NimFull begin (*************************************************************************) subsection \<open> Proving function and operation satisfiability POs \<close> text \<open> Next, we illustrate the general PO setup for all auxiliary functions. After the translation is complete, one needs to translate proof obligations to ensure pre/post are satisfiable. The theorem layout depends on whether there is an explicit definition for the auxiliary function, given explicit definitions will determine the existential witness(es). For instance, for an implicitly defined VDM function \begin{vdmsl}[breaklines=true] f(i: T1) r: T2 pre pre_f(i) post post_f(i, r) \end{vdmsl} we need to prove this satisfiability theorem in Isabelle: @{term[display] "\<forall> i . inv_T1 i \<longrightarrow> pre_f i \<longrightarrow> (\<exists> r . inv_T2 r \<and> post_f i r)"} whereas, for an explicitly defined VDM function \begin{vdmsl}[breaklines=true] f: T1 -> T2 f(i) == expr pre pre_f(i) post post_f(i, RESULT) \end{vdmsl} we need to prove this satisfiability theorem in Isabelle: @{term[display] "\<forall> i . pre_f i \<longrightarrow> post_f i expr"} That is, if the pre condition holds (\textit{i.e.},~@{term "pre_f i"}), then so ought to hold the post condition. We use a definition to declare such statements as conjectures and then try to prove them as theorems. \textcolor{red}{\textbf{Notice that if explicit definitions are given, there is no choice for witness for the proof obligation!}} That is, the commitment in the model presented by the explicit definition (\textit{e.g.}~@{term expr}) must feature in the proof. This will be particularly interesting in the proof below about @{term best_move}, where the general case is provable, whereas the one with the initial explicit definition of @{term best_move} is not. \textcolor{red}{\textbf{That is, the specification is feasible for some implementation but not the one given by the explicit definition!}} \<close> (*------------------------------------------------------------------------*) subsection \<open> Role of lemmas \<close> text \<open> Some lemmas proved in the process of discovering the proofs, a few turned out not to be necessary in the final proof, but helped in discovering the problems with the precondition of @{term play_move}. \<close> (*************************************************************************) section \<open> Satisfiability PO @{term play_move} \<close> (*------------------------------------------------------------------------*) subsection \<open> Simpler variant of @{term play_move} \<close> text \<open>A simpler (earlier) version of @{term play_move} was defined in VDM as: % \begin{vdmsl}[breaklines=true] play: Move * Moves -> Moves play(m, s) == s ^ [m] pre m <= moves_left(s) and moves_left(s) > 0 post sum_elems(s) < sum_elems(RESULT) and sum_elems(s) + m = sum_elems(RESULT) \end{vdmsl} It is useful here as it is simpler than the current version, which we will prove below. Also, we define the version of @{term inv_Moves} that doesn't take the specification (pre/post) of @{term sum_elems} below. \<close> definition inv_MovesNim0 :: "Moves \<Rightarrow> \<bool>" where "inv_MovesNim0 s \<equiv> inv_SeqElems inv_Move s \<and> (sum_elems s) \<le> MAX_PILE \<and> ((sum_elems s) = MAX_PILE \<longrightarrow> s $ (len s) = 1)" definition pre_play_moveNim0 :: "Move \<Rightarrow> Moves \<Rightarrow> \<bool>" where "pre_play_moveNim0 m s \<equiv> inv_Move m \<and> inv_MovesNim0 s \<and> m \<le> (moves_left s) \<and> (moves_left s) > 0" definition post_play_moveNim0 :: "Move \<Rightarrow> Moves \<Rightarrow> Moves \<Rightarrow> \<bool>" where "post_play_moveNim0 m s RESULT \<equiv> inv_Move m \<and> inv_MovesNim0 s \<and> inv_MovesNim0 RESULT \<and> sum_elems s < sum_elems RESULT \<and> sum_elems s + m = sum_elems RESULT" definition PO_play_moveNim0_sat_obl0 :: "\<bool>" where "PO_play_moveNim0_sat_obl0 \<equiv> \<forall> m s . inv_Move m \<longrightarrow> inv_MovesNim0 s \<longrightarrow> pre_play_moveNim0 m s \<longrightarrow> post_play_moveNim0 m s (s @ [m])" theorem PO_play_moveNim0_sat_obl0 using[[show_types=false]] unfolding PO_play_moveNim0_sat_obl0_def apply simp unfolding pre_play_moveNim0_def post_play_moveNim0_def apply simp unfolding inv_Moves_def inv_MovesNim0_def apply simp apply safe txt \<open>too many similar goals. expanding won't work.\<close> oops (*------------------------------------------------------------------------*) subsection \<open> PO for the current version of @{term play_move} \<close> definition PO_play_move_sat_obl :: "\<bool>" where "PO_play_move_sat_obl \<equiv> \<forall> p m s . inv_Move m \<longrightarrow> inv_Moves s \<longrightarrow> pre_play_move p m s \<longrightarrow> (\<exists> r . post_play_move p m s r)" theorem PO_play_move_sat_obl using[[show_types=false]] unfolding PO_play_move_sat_obl_def apply simp unfolding pre_play_move_def post_play_move_def apply simp apply (intro allI impI conjI,elim conjE) apply (rule_tac x="s @ [m]" in exI) apply (simp) apply safe txt \<open> @{subgoals[display,indent=0]} These goals will require various lemmas. \<close> oops lemma "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> inv_Moves (s @ [m])" unfolding inv_Moves_def apply safe txt \<open> important one that will be difficult to finish \<close> oops definition PO_play_move_sat_exp_obl :: "\<bool>" where "PO_play_move_sat_exp_obl \<equiv> \<forall> p m s . pre_play_move p m s \<longrightarrow> post_play_move p m s (play_move p m s)" (*-----------------------------------------------------------------------*) subsection \<open> Naive attempt with split lemmas \<close> declare [[show_types=false]] theorem PO_play_move_sat_exp_obl unfolding PO_play_move_sat_exp_obl_def apply safe unfolding post_play_move_def apply safe unfolding post_sum_elems_def thm pre_play_move_def unfolding pre_play_move_def pre_sum_elems_def apply simp_all txt \<open> too many subgoals if you apply safe\<close> apply safe oops text \<open>Lemmas based on the goals before applying safe above.\<close> lemma l1: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> inv_Moves (play_move p m s)" unfolding play_move_def pre_play_move0_def apply (safe, simp_all) txt \<open> sledgehammer failed \<close> oops lemma l1: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> inv_Moves (play_move p m s)" unfolding play_move_def pre_play_move0_def apply (safe, simp_all) unfolding moves_left_def inv_Moves_def Let_def apply (simp, safe, simp_all) unfolding post_sum_elems_def pre_sum_elems_def txt \<open> naive strategy doesn't work. You can use sorry to discover the splitting lemmas to be proved next: that is, will they help the larger proof? \<close> apply (simp, safe, simp_all) sorry lemma l2: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> pre_sum_elems s" using inv_Moves_def by blast lemma l3: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> pre_sum_elems (play_move p m s)" using inv_Moves_def l1 apply blast done text \<open> The fact this proof is the same as l2, might mean they are the same goal? \<close> lemma l4: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> inv_SeqElems inv_Move s" using inv_Moves_def by blast lemma l2_same_l4: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> pre_sum_elems s" unfolding pre_sum_elems_def by (simp add: l4) text \<open>Study @{term "l_sum_elems_nat x"}: the meson proof \<close> lemma l5: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> inv_VDMNat (sum_elems s)" (* "cvc4": Try this: by (simp add: inv_VDMNat_def l4 l_sum_elems_nat) (3 ms) "vampire": Try this: by (meson inv_Moves_def post_sum_elems_def) (12 ms) *) by (meson inv_Moves_def post_sum_elems_def) text \<open> @{term l5} seems the same as @{term l6} \<close> lemma l6: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> s \<noteq> [] \<Longrightarrow> 0 < sum_elems s" by (meson inv_Moves_def post_sum_elems_def) text \<open> @{term l7} seems similar / more general to @{term l3} \<close> lemma l7: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> inv_SeqElems inv_Move (play_move p m s)" (* "cvc4": Try this: using inv_Moves_def l1 apply blast (4 ms) "vampire": Try this: apply (simp add: inv_Moves_def play_move_def) (4 ms) *) by (simp add: l4 l_inv_SeqElems_append play_move_def) lemma l8: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> inv_VDMNat (sum_elems (play_move p m s))" (*"cvc4": Try this: using inv_Moves_def inv_VDMNat_def l1 l_sum_elems_nat apply blast (8 ms) "vampire": Try this: apply (meson inv_Moves_def l1 post_sum_elems_def) (6 ms) *) by (meson inv_Moves_def l1 post_sum_elems_def) text \<open> @{term l9} seems similar to @{term l5} \<close> lemma l9: " inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> play_move p m s \<noteq> [] \<Longrightarrow> 0 < sum_elems (play_move p m s)" (* "cvc4": Try this: by (simp add: l7 l_sum_elems_nat1) (3 ms) "vampire": Try this: using l3 l_pre_sum_elems_sat by blast (1 ms)*) using l3 l_pre_sum_elems_sat apply blast done lemma l10: " inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> sum_elems s < sum_elems (play_move p m s)" unfolding pre_play_move0_def play_move_def (*sum_elems_def *) apply (simp) apply (safe,simp_all) apply (induct s) apply simp_all sorry lemma l11: " inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move0 p m s \<Longrightarrow> sum_elems s + m = sum_elems (play_move p m s)" unfolding pre_play_move0_def play_move_def (*sum_elems_def *) apply (simp) apply (safe,simp_all) sorry lemma l12: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> fair_play p s \<Longrightarrow> \<not> fair_play p (play_move p m s)" unfolding fair_play_def who_plays_next_def play_move_def apply (simp split: if_splits) apply auto[1] by (simp add: mod_add_cong) text \<open> 12 lemmas, 5 lemmas (42%~) proofs found through sledgehammer, 2 lemmas (l1, l10) seems similar, l11 is close to l10, other unproved lemmas very much depend on those. Lemmas with ``sorry'' are dangerous: if you don't prove them, you haven't finished. I left the above lemmas in place to enable you to see how they play in the proof below (i.e. change oops for sorry to see). \<close> theorem PO_play_move_sat_exp_obl unfolding PO_play_move_sat_exp_obl_def unfolding post_play_move_def unfolding post_sum_elems_def apply (safe, simp_all)+ txt \<open> All goals below are discovered with sledgehammer.\<close> using l1 l_inv_Move_nat1 pre_play_move0_def pre_play_move_def apply fastforce apply (simp add: pre_moves_left_def pre_play_move_def) apply (simp add: inv_Moves_def l_inv_SeqElems_append play_move_def pre_sum_elems_def) apply (simp add: inv_Moves_def pre_play_move_def) using inv_VDMNat_def l_sum_elems_nat pre_sum_elems_def apply blast apply (simp add: l_pre_sum_elems_sat) apply (simp add: inv_VDMNat_def l_sum_elems_nat pre_sum_elems_def) using l_pre_sum_elems_sat apply blast using l10 l_inv_Move_nat1 pre_play_move0_def pre_play_move_def apply fastforce using l11 l_inv_Move_nat1 pre_play_move0_def pre_play_move_def apply fastforce using l12 pre_play_move_def apply blast apply (simp add: play_move_def) oops text \<open>Only l1 l10 l11 l12 are needed, but first three have ``sorried'' proofs. We need to finish their proofs for this proof to be valid:~at least we established that they are the lemmas that will help with this proof. I leave their proof as an exercise --- some of it will be redone/reorganised below anyhow.\<close> (*-----------------------------------------------------------------------*) subsection \<open> Lemmas about auxiliary function @{term sum_elems} \<close> fun nconcat :: "\<int> list \<Rightarrow> \<int> list \<Rightarrow> \<int> list" where "nconcat [] ys = ys" | "nconcat (x#xs) ys = x # (nconcat xs ys)" lemma l_concat_append : "nconcat xs ys = xs @ ys" apply (induct ys, simp_all) oops lemma l_concat_append : "nconcat xs ys = xs @ ys" by (induct xs, simp_all) text \<open>Definitions using sequence cons (@{term "x # xs"}) will need lemmas about sequence append (@{term "s @ t"}).\<close> lemma l_sum_elems_nconcat: "sum_elems (nconcat ms [m]) = (m + sum_elems ms)" (*--"using [[rule_trace,simp_trace]]"*) apply (induct ms, simp_all) done text \<open> Some interesting lemmas about @{term sum_elems} @{thm[display,indent=0] l_sum_elems_nat l_sum_elems_nat1 l_pre_sum_elems } \<close> (*<*)find_theorems "sum_elems _" name:sum(*>*) (*-----------------------------------------------------------------------*) subsection \<open> Lemma discovery through failed proof attempts \<close> text \<open> The proof attempt above for succeeded but there were lemmas missing their proofs. Let's try again, this time with the current version of @{term play_move} and without any ``sorry'' theorems. \begin{enumerate} \item[1] Naive attempt:~layered expansion followed by simplification. \end{enumerate} \<close> theorem PO_play_move_sat_exp_obl unfolding PO_play_move_sat_exp_obl_def post_play_move_def play_move_def apply (safe) txt \<open> @{subgoals[display,indent=0]} The subgoals come directly from the @{term post_play_move} for the given witness: @{thm[display] post_play_move_def[of p m ms "ms @ [m]"] } After simplifying the already parts of the input invariants, we get \<close> apply (simp_all) txt \<open> @{subgoals[display,indent=0]} We will create a lemma for each expression that is not already part of the precondition. Moreover, it is interesting that @{term fair_play} does not appear in the post condition:~it ought to. I will tackle the expressions from simplest to most complex. This is a useful tactic as simpler goals will be easier to prove. What each say: \begin{enumerate} \item @{term inv_Moves} is preserved on @{term "s @ [m]"} \item @{term "pre_sum_elems s"} is trivial from @{term pre_play_move} \item \item @{term "post_sum_elems s"} is trivial from @{term pre_play_move} \item \end{enumerate} \<close> oops (*<*)thm pre_moves_left_def[of ms] pre_play_move_def[of p m ms] post_play_move_def[of p m ms "ms @ [m]"] (* pre_play_move p m ms \<equiv> inv_Player p \<and> inv_Move m \<and> inv_Moves ms \<and> pre_moves_left ms \<and> pre_fair_play p ms \<and> post_fair_play p ms (fair_play p ms) \<and> (moves_left ms \<noteq> 1 \<longrightarrow> m < moves_left ms) \<and> (moves_left ms = m \<longrightarrow> m = 1) \<and> fair_play p ms post_play_move p m ms (ms @ [m]) \<equiv> pre_play_move p m ms \<longrightarrow> inv_Moves (ms @ [m]) \<and> pre_sum_elems ms \<and> pre_sum_elems (ms @ [m]) \<and> post_sum_elems ms (sum_elems ms) \<and> post_sum_elems (ms @ [m]) (sum_elems (ms @ [m])) \<and> sum_elems ms < sum_elems (ms @ [m]) \<and> sum_elems ms + m = sum_elems (ms @ [m]) \<and> \<not> fair_play p (ms @ [m]) \<and> ms \<sqsubseteq> ms @ [m] proof (prove) goal (8 subgoals): 1. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> inv_Moves (s @ [m]) 2. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> pre_sum_elems s 3. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> pre_sum_elems (s @ [m]) 4. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> post_sum_elems s (sum_elems s) 5. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> post_sum_elems (s @ [m]) (sum_elems (s @ [m])) 6. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> sum_elems s < sum_elems (s @ [m]) 7. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> sum_elems s + m = sum_elems (s @ [m]) 8. \<And>p m s. inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> fair_play p (s @ [m]) \<Longrightarrow> False *) (*>*) (*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*) subsubsection \<open> Lemmas per subgoal\<close> text \<open> For each subgoal above, let's try and create lemmas (and their generalisations). The first subgoal is difficult:~it relies on @{term inv_Moves}, which contains various predicates, so we start the next goal. The precondition knows about @{term pre_moves_left}, which knows about @{term pre_sum_elems}. The next lemma weakens the goal:~if you get a @{term pre_sum_elems} to handle, you can exchange it with a @{term pre_moves_left}. This fits with the necessary proof to do, but is not quite a general lemma. \<close> lemma l_moves_left_pre_sume: "pre_moves_left ms \<Longrightarrow> pre_sum_elems ms" by (simp add: pre_moves_left_def) (*SH, subgoal 2~*) lemma l_pre_sume_seqelems_move: "inv_SeqElems inv_Move ms \<Longrightarrow> pre_sum_elems ms" by (simp add: pre_sum_elems_def) (*SH, subgoal 2~*) text \<open> The next lemma helps Isabelle infer (forwardly) that, if @{term "inv_Moves ms"} holds, then so would the smaller claim that all elements within the sequence respect @{term inv_Move}. As you will see in proofs below, this lemma is useful in bridging the gap between what is needed for the lemma proof, and what is available in the goal where the lemma is to be used (i.e. the simpler the lemma conditions the better/most applicable the lemma will be). \<close> lemma l_inv_Moves_inv_SeqElems: "inv_Moves ms \<Longrightarrow> inv_SeqElems inv_Move ms" using inv_Moves_def by blast (*SH, useful for subgoal 2*) lemma l_sg2_pre_sume: "inv_Moves ms \<Longrightarrow> pre_sum_elems ms"(* "z3": Try this: using inv_Moves_def by blast (0.5 ms). "spass": Try this: using inv_Moves_def by blast (1 ms). "cvc4": Try this: by (simp add: l_inv_Moves_inv_SeqElems l_pre_sume_seqelems_move) (0.6 ms). "remote_vampire": Try this: by (simp add: l_inv_Moves_inv_SeqElems l_pre_sume_seqelems_move) (1 ms). *) using inv_Moves_def by blast (*SH, subgoal 2*) text \<open> These synonyms for lemmas/definition groups is useful not only to avoid long unfolding chains but also to help sledgehammer know bout related concepts. \<close> lemma l_sg3_pre_sume_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> pre_sum_elems (ms @ [m])" oops text \<open>Groups of definitions can be named to make their unfolding in one go.\<close> lemmas inv_Move_defs = inv_Move_def inv_VDMNat1_def max_def lemmas inv_Moves_defs = inv_Moves_def inv_SeqElems_def pre_sum_elems_def post_sum_elems_def lemma l_sg3_pre_sume_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> pre_sum_elems (ms @ [m])" unfolding inv_Moves_defs play_move_def Let_def by simp (*SH, subgoal 3*) lemma l_sg4_post_sume: "inv_SeqElems inv_Move ms \<Longrightarrow> post_sum_elems ms (sum_elems ms)" unfolding post_sum_elems_def by (simp add: inv_VDMNat_def l_pre_sum_elems l_sum_elems_nat) (*SH, subgoal 4~*) lemma l_sg5_post_sume_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> post_sum_elems (ms @ [m]) (sum_elems (ms @ [m]))" unfolding post_sum_elems_def (* "z3": Try this: by (metis l_inv_Moves_inv_SeqElems l_inv_SeqElems_append l_sg4_post_sume_append post_sum_elems_def) (128 ms). "cvc4": Try this: by (metis l_inv_Moves_inv_SeqElems l_inv_SeqElems_append l_sg4_post_sume_append post_sum_elems_def) (66 ms). "remote_vampire": Try this: using l_sg3_pre_sume_append l_sg4_post_sume_append post_sum_elems_def pre_sum_elems_def by auto (20 ms). "spass": Try this: using l_sg3_pre_sume_append l_sg4_post_sume_append post_sum_elems_def pre_sum_elems_def by auto (25 ms). *) by (metis l_inv_Moves_inv_SeqElems l_inv_SeqElems_append l_sg4_post_sume post_sum_elems_def) (*SH, subgoal5*) text \<open> This is a variation over @{thm l12}. \<close> lemma l_sg6_2_fair_play: "fair_play p s \<Longrightarrow> \<not> fair_play p (s @ [m])" unfolding fair_play_def who_plays_next_def apply (safe,simp split: if_splits) unfolding len_def by presburger+ lemma l_sg6_not_fair_play_play_move: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> \<not> fair_play p (s @ [m])" unfolding pre_play_move_def by (simp add: l_sg6_2_fair_play) (*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*) subsubsection \<open> General lemmas are easier \<close> text \<open> The actual VDM (declared) postcondition represents some of the subgoals above. Those are discharged by the most general of lemmas here. It is a nice property of @{term sum_elems}:~it distributes over concatenation and is exchanged for summation, on singleton lists as well as in general. It is often better to give general lemmas as they are more applicable, and surprisingly, easier to prove. \<close> lemma l_sum_elems_append: "sum_elems (ms @ [m]) = (m + sum_elems ms)" by (induct ms, simp_all) lemma l_sum_elems_append_gen: "sum_elems (s @ t) = (sum_elems s + sum_elems t)" by (induct s, simp_all) (*<*)thm list.induct list.induct[of "(\<lambda> s . sum_elems(s @ [m]) = sum_elems s + m)" "ms"] list.induct[of "(\<lambda> s . sum_elems(s @ t) = sum_elems s + sum_elems t)" "ms"] (*>*) text \<open> Similarly, this exercise suggested the introduction of various other lemmas for definitions in \texttt{VDMToolkit.thy}, such as: @{thm[display] l_len_nat1 l_len_append l_len_cons} @{thm[display] l_elems_append l_elems_cons} @{thm[display] l_inv_SeqElems_append l_inv_SeqElems_Cons} @{thm[display] l_applyVDMSeq_defined l_applyVDMSeq_append_last l_applyVDMSeq_cons_last} @{thm[display] l_inds_append l_len_within_inds} \<close> (*<*)find_theorems name:VDMSeq(*>*) (*-----------------------------------------------------------------------*) subsection \<open> ``Sledgehammerable proofs'' \<close> text \<open> \begin{enumerate} \item[2] Lemma-based attempt with \textsf{sledgehammer} support. \end{enumerate} Let us see if our lemmas are working:~will \textsf{sledgehammer} find the proofs? \<close> theorem PO_play_move_sat_exp_obl (*<*) unfolding PO_play_move_sat_exp_obl_def post_play_move_def play_move_def apply (safe,simp_all) (*>*) txt \<open> $\cdots$ @{subgoals[display,indent=0]} \<close> txt \<open> Goal about @{term "inv_Moves (s @ [m])"} is missing above; postpone it for now.\<close> defer apply (simp add: l_sg2_pre_sume pre_play_move_def) apply (simp add: l_sg3_pre_sume_append pre_play_move_def) apply (meson inv_Moves_def pre_play_move_def) apply (simp add: l_sg5_post_sume_append pre_play_move_def) apply (simp add: l_inv_Move_nat1 l_sum_elems_append pre_play_move_def) apply (simp add: l_sum_elems_append) apply (simp add: l_sg6_2_fair_play pre_play_move_def) txt \<open> @{subgoals[display,indent=0]} Yes! So, for the difficult case: it generates more subgoals :-(. Will avoid safe here, but otherwise would have to deal with the many it generates. \<close> (*unfolding inv_Moves_def Let_def apply (simp, safe)*) apply (simp (no_asm) add: inv_Moves_def Let_def, intro conjI impI) txt \<open> @{subgoals[display,indent=0]} As before, let us tackle each one of the sub parts in the definition @{thm[display] inv_Moves_def} \<close> (*<*)thm inv_Moves_def[of "ms @ [m]"](*>*) (* goal (5 subgoals): 1. \<And>p m s. pre_play_move p m s \<Longrightarrow> inv_SeqElems inv_Move (s @ [m]) 2. \<And>p m s. pre_play_move p m s \<Longrightarrow> pre_sum_elems (s @ [m]) 3. \<And>p m s. pre_play_move p m s \<Longrightarrow> post_sum_elems (s @ [m]) (sum_elems (s @ [m])) 4. \<And>p m s. pre_play_move p m s \<Longrightarrow> sum_elems (s @ [m]) \<le> MAX_PILE 5. \<And>p m s. pre_play_move p m s \<Longrightarrow> sum_elems (s @ [m]) = MAX_PILE \<Longrightarrow> (s @ [m]) ! nat (len s) = 1 *) oops lemma "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> inv_Moves (s @ [m])" unfolding inv_Moves_defs Let_def apply (simp,safe) using inv_VDMNat_def l_inv_Move_nat1 l_sum_elems_append apply force using l_inv_Move_nat1 l_sum_elems_append apply fastforce defer defer apply (simp add: inv_VDMNat_def l_inv_Move_nat1 le_less) apply (simp add: l_inv_Move_nat1) apply (simp add: inv_Move_def) apply (simp add: inv_Move_def) using inv_VDMNat_def l_inv_Move_nat1 l_sum_elems_append apply force using l_inv_Move_nat1 l_sum_elems_append apply fastforce defer defer apply (simp add: inv_VDMNat_def l_inv_Move_nat1 le_less) apply (simp add: l_inv_Move_nat1) apply (simp add: inv_Move_def) apply (simp add: inv_Move_def) oops text \<open> Alternative variant proof slightly simpler to the same outcome of goal on @{term inv_Moves} missing.\<close> theorem PO_play_move_sat_exp_obl unfolding PO_play_move_sat_exp_obl_def post_play_move_def play_move_def apply (simp,safe) (*using l1 play_move_def apply auto[1]*) defer apply (simp add: l_sg2_pre_sume pre_play_move_def) apply (simp add: l_sg3_pre_sume_append pre_play_move_def) apply (meson inv_Moves_def pre_play_move_def) apply (simp add: l_sg5_post_sume_append pre_play_move_def) apply (simp add: l_inv_Move_nat1 l_sum_elems_append pre_play_move_def) apply (simp add: l_sum_elems_append) apply (simp add: l_sg6_2_fair_play pre_play_move_def) oops (*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*) subsubsection \<open> Handling (last?) difficult case on @{term "inv_Moves (s @ [m])"} \<close> text \<open>For the final case, we start with the naive attempt from remaining goal as \<close> lemma l_sg1_inv_Moves_append: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> inv_Moves (s @ [m])" unfolding inv_Moves_def Let_def apply (simp,safe) txt \<open>This generates (10) new subgoals (some somewhat repeated), which some Sledgehammer already finds proofs for\<close> apply (simp add: l_inv_SeqElems_append) apply (simp add: l_inv_SeqElems_append pre_sum_elems_def) apply (metis l_inv_SeqElems_append l_sg4_post_sume l_sum_elems_append) defer using l_applyVDMSeq_append_last l_sum_elems_append moves_left_def pre_play_move_def apply force apply (simp add: l_inv_SeqElems_append) apply (simp add: l_inv_SeqElems_append pre_sum_elems_def) apply (simp add: inv_Moves_def l_sg5_post_sume_append) defer using l_applyVDMSeq_append_last l_sum_elems_append moves_left_def pre_play_move_def apply force txt \<open> Remaining (2) subgoals are about @{term "sum_elems (s @ [m])"} being within @{term "MAX_PILE"} @{subgoals[display,indent=0]} These cases have to do with normal and final play. \<close> (* proof (prove) goal (2 subgoals): 1. inv_Move m \<Longrightarrow> pre_play_move p m s \<Longrightarrow> inv_SeqElems inv_Move s \<Longrightarrow> pre_sum_elems s \<Longrightarrow> post_sum_elems s (sum_elems s) \<Longrightarrow> sum_elems s \<le> MAX_PILE \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> sum_elems (s @ [m]) \<le> MAX_PILE 2. inv_Move m \<Longrightarrow> pre_play_move p m s \<Longrightarrow> inv_SeqElems inv_Move s \<Longrightarrow> pre_sum_elems s \<Longrightarrow> post_sum_elems s (sum_elems s) \<Longrightarrow> sum_elems s \<le> MAX_PILE \<Longrightarrow> s $ len s = 1 \<Longrightarrow> sum_elems (s @ [m]) \<le> MAX_PILE *) oops lemma l_sg1_inv_Moves_append: "pre_play_move p m s \<Longrightarrow> inv_Moves (s @ [m])" unfolding inv_Moves_def Let_def apply (simp,safe) apply (simp add: l_inv_Moves_inv_SeqElems l_inv_SeqElems_append pre_play_move_def) apply (simp add: l_sg3_pre_sume_append pre_play_move_def) using l_sg5_post_sume_append pre_play_move_def apply blast defer apply (simp add: VDMSeq_defs(5) l_sum_elems_append moves_left_def pre_play_move_def) txt \<open> Last goal of interest, but when you open @{term pre_play_move}\<close> unfolding pre_play_move_def apply (safe,simp_all) oops text \<open> The remaining goals show how we are getting close. They also reveal that conditionals (through if-then-else or implication lead to case analysis (i.e. more subgoals). The choice among the sledgehammer-discovered proofs was on the minimal number of lemmas to use. This is useful to remove any cluttering lemmas at the end, something important to help sledgehammer in finding proofs later on. \<close> lemma l_sg1_4_inv_Moves_maxpile_sume_append: "inv_Move m \<Longrightarrow> inv_SeqElems inv_Move s \<Longrightarrow> sum_elems (ms @ [m]) \<le> MAX_PILE" (*nitpick quickcheck = none*) apply (simp add: l_sum_elems_append) apply (induct ms) apply (simp add: inv_Move_def) apply simp nitpick txt \<open> @{subgoals[display, indent=0]} We are stuck. Sledgehammer finds nothing, and nitpick finds a potential (but not certain) counterexample. Seems like we need more assumptions. Let us try the last subgoal. \<close> oops text \<open> We are really narrowing it down. Let us set them up with extra assumptions from @{term pre_play_move}. \<close> lemma l_sg1_4_inv_Moves_moves_left_sume_append: "pre_play_move p m ms \<Longrightarrow> sum_elems (ms @ [m]) \<le> MAX_PILE" unfolding pre_play_move_def apply (elim conjE impE) txt \<open> @{subgoals[display,indent=0]} \<close> defer apply (simp add: l_sum_elems_append moves_left_def) (*SH, subgoal 1.4.2*) defer defer apply (simp add: l_sum_elems_append moves_left_def) txt \<open> Still subgoals we can't easily discover. Try again each as a sub lemma (for third iteration time!) @{subgoals[display, indent=0]} \<close> (* proof (prove) goal (3 subgoals): 1. inv_Player p \<Longrightarrow> inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> pre_moves_left ms \<Longrightarrow> pre_fair_play p ms \<Longrightarrow> post_fair_play p ms (fair_play p ms) \<Longrightarrow> fair_play p ms \<Longrightarrow> moves_left ms = m 2. inv_Player p \<Longrightarrow> inv_Move 1 \<Longrightarrow> inv_Moves ms \<Longrightarrow> pre_moves_left ms \<Longrightarrow> pre_fair_play p ms \<Longrightarrow> post_fair_play p ms True \<Longrightarrow> fair_play p ms \<Longrightarrow> m = 1 \<Longrightarrow> sum_elems ms \<noteq> 19 3. inv_Player p \<Longrightarrow> inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> pre_moves_left ms \<Longrightarrow> pre_fair_play p ms \<Longrightarrow> post_fair_play p ms (fair_play p ms) \<Longrightarrow> fair_play p ms \<Longrightarrow> m < moves_left ms \<Longrightarrow> moves_left ms = m *) oops text \<open>Example lemmas that turn out to be superfluous are commented out in the code. \<close> (* lemma l_sg1_1_inv_Moves_seqelems_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> inv_SeqElems inv_Move (ms @ [m])" by (simp add: l_inv_Moves_inv_SeqElems l_inv_SeqElems_append) lemma l_sg1_2_inv_Moves_pre_sume_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> pre_sum_elems (ms @ [m])" by (simp add: l_sg3_pre_sume_append) (*SH, subgoal 1.2*) lemma l_sg1_3_inv_Moves_post_sume_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> post_sum_elems (ms @ [m]) (sum_elems (ms @ [m]))" by (simp add: l_sg1_1_inv_Moves_seqelems_append l_sg5_post_sume_append) (*SH, subgoal 1.3*) lemma l_sg1_5_inv_Moves_last_move_append0: "pre_play_move0 p m s \<Longrightarrow> (sum_elems (s @ [m])) = MAX_PILE \<longrightarrow> (s @ [m]) $ (len (s @ [m])) = 1" using l_applyVDMSeq_append_last l_sum_elems_append moves_left_def pre_play_move0_def by force (*SH, subgoal 1.5*) lemma l_sg1_4_2_inv_Moves_moves_left_sume_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> m < moves_left ms \<Longrightarrow> sum_elems (ms @ [m]) \<le> MAX_PILE" by (simp add: l_sum_elems_append moves_left_def) (*SH, subgoal 1.4.2*) lemma l_sg1_4_1_inv_Moves_moves_left_sume_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> 0 < moves_left ms \<Longrightarrow> moves_left ms \<noteq> 1" (*nitpick quickcheck = none*) unfolding moves_left_def inv_Move_def inv_VDMNat1_def apply (elim conjE, intro notI) oops *) (*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*) subsubsection \<open> Generalisation of terms \<close> text \<open> Understanding more general principle independent of given terms is an important step in proof. We have to prove that @{term "sum_elems (s @ [m]) \<le> MAX_PILE"} when @{term "sum_elems s \<le> MAX_PILE"} for two cases: 1) normal play (@{term "sum_elems s \<noteq> MAX_PILE"}) and 2) final play (@{term "s $ len s = 1"}).\<close> text \<open> Below we try to generalise it away\<close> lemma "0 < m \<Longrightarrow> m \<le> G_MAX_MOV \<Longrightarrow> inv_Moves ms \<Longrightarrow> 0 < G_MAX_PILE - sum_elems ms \<Longrightarrow> G_MAX_PILE - sum_elems ms \<noteq> 1" apply (rule notI,simp) oops lemma "x \<ge> (0::nat) \<Longrightarrow> x \<le> nat MAX_MOV \<Longrightarrow> list_all (\<lambda> e . e \<ge> (0::nat)) xs \<Longrightarrow> listsum xs \<ge> 0 \<Longrightarrow> listsum xs \<le> nat MAX_PILE \<Longrightarrow> x + listsum xs \<le> nat MAX_PILE" apply (induct x rule:nat_induct) apply simp_all apply (induct xs rule:list.induct) apply simp_all defer apply (subgoal_tac "(\<And>n. n + listsum x2 \<le> 20 \<Longrightarrow> n \<le> 2 )", simp) apply (subgoal_tac "(\<forall> m . m + listsum x2 \<le> 19)") apply (erule_tac x="n+x1" in allE, simp) apply (subgoal_tac "(\<forall> n . n + listsum x2 \<le> 20)") apply simp_all apply (subgoal_tac " n + listsum x2 \<le> 19") apply simp_all apply auto oops lemma "x \<ge> (0::nat) \<Longrightarrow> x \<le> nat MAX_MOV \<Longrightarrow> list_all (\<lambda> e . e \<ge> (0::nat)) xs \<Longrightarrow> listsum xs \<ge> 0 \<Longrightarrow> listsum xs \<le> nat MAX_PILE \<Longrightarrow> x + listsum xs \<le> nat MAX_PILE" apply (induct x rule:nat_induct) apply simp_all apply (simp only: le_less) apply (erule disjE) apply (rule disjI1) nitpick[user_axioms] oops lemma "x \<le> nat MAX_MOV \<Longrightarrow> y \<le> nat MAX_PILE \<Longrightarrow> x + y \<le> nat MAX_PILE" nitpick[user_axioms] apply (induct x rule:nat_induct) apply simp_all apply (simp only: le_less) apply (erule disjE) apply (rule disjI1) nitpick[user_axioms] oops text \<open> To try and understand what is the problem, we generalise the expressions to simpler terms. It is useful to discover what we know about the operators involved (e.g. @{term "sum_elems (s @ t)"}). \<close> find_theorems "sum_elems _" name:Nim find_theorems "sum_elems (_ @ _)" (* inv_Move m \<Longrightarrow> sum_elems s \<le> MAX_PILE \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> sum_elems (s @ [m]) \<le> MAX_PILE" = [via inv_Move_def and l_sum_elems_append_gen] m \<le> MAX_MOV \<Longrightarrow> sum_elems ms \<le> MAX_PILE \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> m + sum_elems ms \<le> MAX_PILE = [generalise terms] x \<le> MAX_MOV \<Longrightarrow> y \<le> MAX_PILE \<Longrightarrow> y \<noteq> MAX_PILE \<Longrightarrow> x + y \<le> MAX_PILE *) text \<open> And get to the following unprovable conjecture, which gives the hint to the missing condition of interest, and its improved (provable) version. \<close> lemma l_sg1_4_1_explore: "x \<le> MAX_MOV \<Longrightarrow> y \<le> MAX_PILE \<Longrightarrow> y \<noteq> MAX_PILE \<Longrightarrow> x + y \<le> MAX_PILE" nitpick oops text \<open> This shows the missing relationship that @{term x} (or @{term m}) has to have with @{term y} (or @{term "sum_elems s"}) in order to prove the goal. This needs to come as an assumption from somewhere.\<close> lemma l_sg1_4_1_inv_Moves_maxpile_moves_left_gen: "x \<le> MAX_MOV \<Longrightarrow> y \<le> MAX_PILE \<Longrightarrow> x + y < MAX_PILE \<Longrightarrow> x + y \<le> MAX_PILE" by auto (*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*) subsubsection \<open> Proving the missing cases for @{term "inv_Moves (s @ [m])"} subgoal \<close> text \<open>More lemmas that turn out to be superfluous are commented out in the code. \<close> (* lemma l_sg1_5_inv_Moves_last_move_append: "pre_play_move p m s \<Longrightarrow> (sum_elems (s @ [m])) = MAX_PILE \<longrightarrow> (s @ [m]) $ (len (s @ [m])) = 1" using l_applyVDMSeq_append_last l_sum_elems_append moves_left_def pre_play_move_def by force (*SH, subgoal 1.5*) text \<open> Given the change to @{term pre_play_move}, we also add a lemma that a previously state postcondition is now a direct consequence of the current post condition. \<close> lemma l_pre_play_moves_left_nat1: "pre_play_move p m s \<Longrightarrow> moves_left s > 0" using pre_play_move_def l_inv_Move_nat1 by fastforce (*SH*) lemma l_sg1_4_inv_Moves_moves_left_sume_append: "pre_play_move p m ms \<Longrightarrow> sum_elems (ms @ [m]) \<le> MAX_PILE" unfolding pre_play_move_def apply (simp only: le_less) apply (simp (no_asm) only: le_less[symmetric]) apply (elim conjE disjE) txt \<open> @{subgoals[display,indent=0]} \<close> apply (simp add: l_sg1_4_2_inv_Moves_moves_left_sume_append) (*SH, sg 1.4.2*) by (metis (full_types) inv_Moves_def l1 l_sg1_4_2_inv_Moves_moves_left_sume_append play_move_def pre_play_move0_def zero_less_one) (*SH, sg 1.4.1*) *) text \<open> The proof commented below shows the dangers of sorried lemmas: it used l1, which wasn't prooved. \<close> (* lemma l_sg1_inv_Moves_append: "pre_play_move p m s \<Longrightarrow> inv_Moves (s @ [m])" unfolding inv_Moves_def pre_play_move_def Let_def apply (elim conjE, intro conjI impI, simp_all) apply (simp add: l_inv_SeqElems_append) apply (simp add: l_sg1_1_inv_Moves_seqelems_append pre_moves_left_def) (*SH, 1.1*) apply (simp add: l_sg1_2_inv_Moves_pre_sume_append pre_moves_left_def) (*SH, 1.2*) apply (simp add: l_sg1_3_inv_Moves_post_sume_append pre_moves_left_def)(*SH, 1.3*) apply (metis (full_types) l_sg1_4_inv_Moves_moves_left_sume_append pre_fair_play_def pre_play_move_def) (*SH, 1.4*) using l_applyVDMSeq_append_last l_sum_elems_append moves_left_def by force (*SH, 1.5*) *) lemma l_sg1_1_missing_assumption: "pre_play_move p m s \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> m + (sum_elems s) < MAX_PILE" unfolding pre_play_move_def apply (safe,simp_all) txt \<open> Oh man, it generates yet four more subgoals. Let's try avoiding safe @{subgoals[display,indent=0]}\<close> oops lemma l_sg1_1_missing_assumption: "pre_play_move p m s \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> m + (sum_elems s) < MAX_PILE" unfolding pre_play_move_def pre_moves_left_def moves_left_def apply (simp) unfolding pre_fair_play_def post_fair_play_def pre_who_plays_next_def post_who_plays_next_def apply simp unfolding pre_sum_elems_def inv_Moves_def post_sum_elems_def apply simp unfolding isFirst_def inv_Player_def inv_Move_def inv_SeqElems_def inv_VDMNat1_def Let_def inv_VDMNat_def apply (elim conjE, simp) apply safe txt \<open> Oh man, it generates yet four more subgoals. Let's try avoiding safe \<close> oops lemma l_sg1_1_missing_assumption: "pre_play_move p m s \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> m + (sum_elems s) < MAX_PILE" unfolding pre_play_move_def moves_left_def pre_moves_left_def apply (simp) unfolding inv_Moves_def Let_def apply simp apply (case_tac "sum_elems s \<noteq> 19") apply (simp_all) unfolding post_sum_elems_def unfolding inv_Move_def inv_VDMNat1_def apply simp apply (elim conjE, simp, elim conjE) txt \<open> Oh man, it generates funny subgoals. Let's try generalising and simplifying pre. Where is the contradiction? \<close> unfolding pre_sum_elems_def oops lemma l_sg1_1_missing_assumption: "pre_play_move p m s \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> m + (sum_elems s) < MAX_PILE" unfolding pre_play_move_def unfolding moves_left_def pre_moves_left_def apply (simp) apply (cases "m=1", simp_all) unfolding inv_Moves_def Let_def apply simp_all apply (case_tac "sum_elems s \<noteq> 19", simp_all) apply (elim conjE, simp) defer apply (elim conjE impE, simp_all) txt \<open> different strategy on m case analysis didn't work. Generalise \<close> oops find_theorems "sum_elems (_ @ _)" lemma l_sg1_1_missing_assumption_simplified: " 0 < m \<Longrightarrow> m \<le> MAX_MOV \<Longrightarrow> (MAX_PILE - sum_elems s \<noteq> 1 \<longrightarrow> m < MAX_PILE - sum_elems s) \<Longrightarrow> (MAX_PILE - sum_elems s = m \<longrightarrow> m = 1) \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> m + sum_elems s < MAX_PILE" apply safe nitpick[user_axioms] txt \<open> Now nicpick found a counter exmple :-(. We were too aggresive in the assumption simplification. If we play in VDM the counter example we see why (assuming in NimFull.vdmsl): \begin{vdmsl}[breaklines=true] > p let s = [3,3,3,3,3,3,1], s'=play_move(<P2>, 1, s) in moves_left(s') = 0 Executed in 0.007 secs. > p let s = [3,3,3,3,3,3,1], s'=play_move(<P2>, 3, s) in moves_left(s') Error 4060: Type invariant violated for Moves in 'NimFull' (console) at line 1:29 MainThread> \end{vdmsl} @{subgoals[display,indent=0]}\<close> oops lemma l_sg1_1_missing_assumption_strengthened: "pre_play_move p m s \<Longrightarrow> m \<le> moves_left s \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> m + (sum_elems s) < MAX_PILE" unfolding pre_play_move_def unfolding moves_left_def pre_moves_left_def apply (safe,simp_all) unfolding post_fair_play_def pre_fair_play_def pre_who_plays_next_def post_who_plays_next_def isFirst_def fair_play_def who_plays_next_def apply simp unfolding inv_Moves_def inv_Move_def post_sum_elems_def pre_sum_elems_def inv_Player_def inv_VDMNat1_def inv_VDMNat_def apply (safe, simp) txt \<open> Another term is missing when m = 1? What could it be? @{subgoals[display, indent=0]}\<close> oops text \<open> \textbf{AHA!!!}: to play there must be more moves left? This means the final play specification isn't quite right: the precondition of @{term play_move} is wrong: it needs to be when = 1 not m!\<close> text \<open> Part of the difficulty in @{term play_move} is that its precondition has too many (assymetric) cases. Let's try with a simpler, more uniform scenario.\<close> thm pre_play_move_def definition pre_play_move_NEW :: "Player \<Rightarrow> Move \<Rightarrow> Moves \<Rightarrow> \<bool>" where "pre_play_move_NEW p m s \<equiv> inv_Player p \<and> inv_Move m \<and> inv_Moves s \<and> pre_moves_left s \<and> pre_fair_play p s \<and> post_fair_play p s (fair_play p s) \<and> 0 < moves_left s \<and> m \<le> moves_left s \<and> (moves_left s = m \<longrightarrow> m = 1) \<and> fair_play p s" lemma l_sg1_1_missing_assumption_strengthened: "pre_play_move_NEW p m s \<Longrightarrow> m + (sum_elems s) \<le> MAX_PILE" unfolding pre_play_move_NEW_def unfolding moves_left_def pre_moves_left_def apply (elim conjE) apply (simp only: le_less) apply (elim disjE) apply simp_all done lemma l_sg1_1_inv_Moves_sum_elems_append: "inv_Move m \<Longrightarrow> pre_play_move_NEW p m s \<Longrightarrow> inv_SeqElems inv_Move s \<Longrightarrow> pre_sum_elems s \<Longrightarrow> post_sum_elems s (sum_elems s) \<Longrightarrow> moves_left s \<noteq> 1 \<Longrightarrow> sum_elems s \<noteq> MAX_PILE \<Longrightarrow> sum_elems (s @ [m]) \<le> MAX_PILE" apply (simp add: l_sum_elems_append_gen) unfolding pre_play_move_NEW_def by (simp add: moves_left_def) text \<open> The last lemma @{thm l_sg1_1_inv_Moves_sum_elems_append} is also useful to prove a few others associated with the original @{term pre_play_move} definition.\<close> lemma l_sg1_2_inv_Moves_sum_elems_append: "pre_play_move p m s \<Longrightarrow> moves_left s \<ge> m \<Longrightarrow> sum_elems s \<le> MAX_PILE \<Longrightarrow> s $ len s = 1 \<Longrightarrow> sum_elems (s @ [m]) \<le> MAX_PILE" using l_sg1_1_inv_Moves_sum_elems_append l_sum_elems_append moves_left_def pre_play_move_def apply simp done text \<open>For the final case, we start with the naive attempt from remaining goal as \<close> lemma l_sg1_inv_Moves_append: "inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> pre_play_move p m s \<Longrightarrow> inv_Moves (s @ [m])" unfolding inv_Moves_def Let_def apply (simp,safe) txt \<open>This generates (10) new subgoals (some somewhat repeated), which some Sledgehammer already finds proofs for\<close> apply (simp add: l_inv_SeqElems_append) apply (simp add: l_inv_SeqElems_append pre_sum_elems_def) apply (metis l_inv_SeqElems_append l_sg4_post_sume l_sum_elems_append) defer using l_applyVDMSeq_append_last l_sum_elems_append moves_left_def pre_play_move_def apply force apply (simp add: l_inv_SeqElems_append) apply (simp add: l_inv_SeqElems_append pre_sum_elems_def) apply (simp add: inv_Moves_def l_sg5_post_sume_append) defer using l_applyVDMSeq_append_last l_sum_elems_append moves_left_def pre_play_move_def apply force apply (subgoal_tac "moves_left s \<ge> m") defer apply (subgoal_tac "moves_left s \<ge> m") using l_sg1_2_inv_Moves_sum_elems_append apply blast unfolding moves_left_def pre_play_move_def inv_Moves_def apply simp_all txt \<open> missing condition on precondition of @{term play_move}\<close> oops text \<open> With the sorry proof for case above, we know that there is something wrong with precondition of @{term play_move} that needs fixing. I will leave this as an exercise.\<close> lemma "pre_play_move p m s = pre_play_move_NEW p m s" apply safe unfolding pre_play_move_def pre_play_move_NEW_def apply simp_all unfolding moves_left_def apply simp_all apply (safe, simp_all) defer using l_inv_Move_nat1 apply force apply (insert l_inv_Move_nat1[of m], simp) oops text \<open> Older has a missing case: when at end of play but m is 2 or 3! \<close> lemma "pre_play_move p m s \<Longrightarrow> pre_play_move_NEW p m s" unfolding pre_play_move_def pre_play_move_NEW_def apply simp_all unfolding moves_left_def apply simp_all apply (safe, simp_all) defer using l_inv_Move_nat1 apply fastforce txt \<open> The missing case: when @{term "moves_left s"} is 1, yet nothing is said about @{term m} when it could have been 2 or 3. @{subgoals[display, indent=0]}\<close> unfolding inv_Move_def inv_VDMNat1_def apply safe apply (cases "m=1", simp_all) oops text \<open> New version is stronger than older, hence covers the case \<close> lemma "pre_play_move_NEW p m s \<Longrightarrow> pre_play_move p m s" unfolding pre_play_move_def pre_play_move_NEW_def apply simp_all unfolding moves_left_def apply simp_all by (safe, simp_all) lemma "inv_Moves s \<Longrightarrow> sum_elems s = MAX_PILE - 1 \<Longrightarrow> pre_play_move p 2 s" unfolding pre_play_move_def apply simp unfolding pre_fair_play_def pre_who_plays_next_def isFirst_def post_fair_play_def post_who_plays_next_def inv_Player_def apply simp unfolding pre_moves_left_def pre_sum_elems_def apply simp apply safe apply (simp add: inv_Move_def inv_VDMNat1_def) apply (simp add: l_inv_Moves_inv_SeqElems) apply (simp add: moves_left_def)+ unfolding fair_play_def who_plays_next_def txt \<open> The statement works for @{term P2} not @{term P1}. @{subgoals[display, indent=0]}\<close> oops lemma "inv_Moves s \<Longrightarrow> sum_elems s = MAX_PILE - 1 \<Longrightarrow> pre_play_move P2 2 s" unfolding pre_play_move_def apply simp unfolding pre_fair_play_def pre_who_plays_next_def isFirst_def post_fair_play_def post_who_plays_next_def inv_Player_def apply simp unfolding pre_moves_left_def pre_sum_elems_def apply simp apply safe apply (simp add: inv_Move_def inv_VDMNat1_def) apply (simp add: l_inv_Moves_inv_SeqElems) apply (simp add: moves_left_def)+ unfolding fair_play_def who_plays_next_def apply simp unfolding inv_Moves_def post_sum_elems_def pre_sum_elems_def Let_def inv_VDMNat_def apply simp apply safe txt \<open>@{subgoals[display, indent=0]} To prove this will be involved: we would have to show that any sequence we get that has @{term "sum_elems s = 19"} has an odd length. Perhaps restate the goal again with an extra assumption. \<close> oops lemma l_pre_play_move_OFFENDING_CASE: "fair_play p s \<Longrightarrow> inv_Moves s \<Longrightarrow> sum_elems s = MAX_PILE - 1 \<Longrightarrow> pre_play_move p 2 s" unfolding pre_play_move_def apply simp unfolding pre_fair_play_def pre_who_plays_next_def isFirst_def post_fair_play_def post_who_plays_next_def inv_Player_def pre_moves_left_def pre_sum_elems_def apply simp by (simp add: inv_Move_def inv_VDMNat1_def l_inv_Moves_inv_SeqElems moves_left_def) lemma l_pre_play_move_NEW_OFFENDING_CASE_SOLUTION: "fair_play p s \<Longrightarrow> inv_Moves s \<Longrightarrow> sum_elems s = MAX_PILE - 1 \<Longrightarrow> pre_play_move_NEW p 2 s" unfolding pre_play_move_NEW_def apply simp unfolding pre_fair_play_def pre_who_plays_next_def isFirst_def post_fair_play_def post_who_plays_next_def inv_Player_def pre_moves_left_def pre_sum_elems_def apply simp apply safe apply (simp add: inv_Move_def inv_VDMNat1_def) using l_inv_Moves_inv_SeqElems apply blast apply (simp add: moves_left_def) defer using moves_left_def apply auto[1] unfolding moves_left_def apply simp txt \<open> Finally we see that @{term pre_play_move_NEW} fixes the offending case. \<close> oops text \<open> The general case missing by the original prcondition is when we are at the end of the game but the call comes with @{term m} different from @{term 1}.\<close> lemma l_pre_play_move_NEW_OFFENDING_CASE_SOLUTION_GENERAL: "fair_play p s \<Longrightarrow> inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> sum_elems s = MAX_PILE - 1 \<Longrightarrow> m \<noteq> 1 \<Longrightarrow> \<not> pre_play_move_NEW p m s" unfolding pre_play_move_NEW_def apply simp unfolding pre_fair_play_def pre_who_plays_next_def isFirst_def post_fair_play_def post_who_plays_next_def inv_Player_def pre_moves_left_def pre_sum_elems_def apply simp using l_inv_Move_nat1 moves_left_def by force (*-----------------------------------------------------------------------*) subsection \<open> Putting it all together for satisfiability PO for @{term play_move}\<close> text \<open> \begin{enumerate} \item[3] Lemma-based attempt with \textsf{sledgehammer} support. \end{enumerate} \<close> text \<open>Other example lemmas deleted are commented in the code below. It was assuming that the lemma about append over invariant of Moves worked. \<close> (* thm PO_play_move_sat_exp_obl_def theorem PO_play_move_sat_exp_obl unfolding PO_play_move_sat_exp_obl_def unfolding post_play_move_def play_move_def apply (simp, safe) txt \<open> 8 instead of 12 subgoals from the first attempt \<close> using l_sg1_inv_Moves_append pre_play_move_def apply blast apply (simp add: l_sg2_pre_sume pre_play_move_def) apply (simp add: l_sg3_pre_sume_append pre_play_move_def) apply (meson inv_Moves_def pre_play_move_def) apply (simp add: l_sg5_post_sume_append pre_play_move_def) apply (simp add: l_inv_Move_nat1 l_sum_elems_append pre_play_move_def) apply (simp add: l_sum_elems_append) by (simp add: l_sg6_2_fair_play pre_play_move_def) text \<open> Finally, the lemmas that were useful are displayed below. \<close> thm l_sum_elems_append l_inv_Move_nat1 l_sg1_inv_Moves_append l_sg2_pre_sume l_sg3_pre_sume_append l_sg5_post_sume_append l_sg6_not_fair_play_play_move text \<open> Also here, an alternative (albeit similar) proof of the same goal. \<close> theorem PO_play_move_sat_exp_obl (*<*) unfolding PO_play_move_sat_exp_obl_def post_play_move_def play_move_def apply (intro allI impI conjI,simp_all) (*>*) txt \<open> $\cdots$ @{subgoals[display,indent=0]} \<close> apply (simp add: l_sg1_inv_Moves_append) (*SH, sg1*) apply (simp add: l_sg2_pre_sume) (*SH, sg2*) apply (simp add: l_sg3_pre_sume_append) (*SH, sg3*) apply (simp add: l_inv_Moves_inv_SeqElems l_sg4_post_sume) (*SH, sg4*) apply (simp add: l_inv_Moves_inv_SeqElems l_sg5_post_sume_append) (*SH, sg5*) apply (simp add: l_inv_Move_nat1 l_sum_elems_append) (*SH, sg6*) apply (simp add: l_sum_elems_append) (*SH, sg7*) by (simp add: l_sg6_not_fair_play_play_move) lemma l_sg6_3_fair_play: "fair_play p s \<Longrightarrow> \<not> fair_play p (m # s)" unfolding fair_play_def who_plays_next_def apply (safe,simp split: if_splits) unfolding len_def by presburger+ theorem PO_play_move_sat_obl (*<*) unfolding PO_play_move_sat_obl_def post_play_move_def play_move_def apply (intro allI impI conjI,simp_all) (*>*) txt \<open> $\cdots$ @{subgoals[display,indent=0]} \<close> apply (rule_tac x="s @ [m]" in exI, intro conjI, simp_all) using l_sg1_inv_Moves_append apply blast apply (simp add: l_sg2_pre_sume) (*SH, sg2*) apply (simp add: l_sg1_2_inv_Moves_pre_sume_append) apply (simp add: l_inv_Moves_inv_SeqElems l_sg4_post_sume) using l_sg1_3_inv_Moves_post_sume_append apply auto[1] apply (simp add: l_inv_Move_nat1 l_sum_elems_append) apply (simp add: l_sum_elems_append) using l_sg6_not_fair_play_play_move apply blast done lemma l_inv_Moves_play_move: "pre_play_move0 p m s \<Longrightarrow> inv_Moves (play_move p m s)" unfolding inv_Moves_def Let_def apply (intro conjI impI) using l_play_move_pre_sume pre_play_move0_def pre_sum_elems_def apply blast (*SH*) apply (simp add: l_play_move_pre_sume pre_play_move0_def) (*SH*) apply (simp add: l_post_sume_play_move pre_play_move0_def) (*SH*) defer apply (simp add: l_inv_Moves_last_move_append) (*SH*) apply (simp add: pre_play_move0_def play_move_def) --"expansion on local goal only" apply (elim conjE impE) defer apply (simp add: l_inv_Moves_maxpile_sume_append) unfolding moves_left_def apply simp oops lemma l_inv_Moves_play_move: "pre_play_move0 p m s \<Longrightarrow> inv_Moves (play_move p m s)" unfolding inv_Moves_def Let_def apply (intro conjI impI) using l_play_move_pre_sume pre_play_move0_def pre_sum_elems_def apply blast (*SH*) apply (simp add: l_play_move_pre_sume pre_play_move0_def) (*SH*) apply (simp add: l_post_sume_play_move pre_play_move0_def) (*SH*) apply (simp add: pre_play_move0_def play_move_def) --"expansion on local goal only" apply (elim conjE impE) defer apply (simp add: l_inv_Moves_maxpile_sume_append) find_theorems "applyVDMSeq _ _" apply (simp add: l_applyVDMSeq_append_last play_move_def l_sum_elems_append) unfolding pre_play_move0_def inv_Moves_def Let_def apply simp oops theorem PO_play_move_sat_exp_obl0 unfolding PO_play_move_sat_exp_obl0_def post_play_move_def apply simp apply (intro allI impI conjI) apply (simp add: l_inv_Moves_play_move) (*SH*) apply (simp add: pre_play_move0_def l_moves_left_pre_sume) (*SH*) apply (simp add: pre_play_move0_def l_moves_left_pre_sume) (*SH*) apply (simp add: l_play_move_pre_sume) (*SH*) apply (meson inv_Moves_def) (*SH*) (* why not apply (simp add: l_pre_sume_seqelems_move l_sum_elems_post l_moves_left_pre_sume)? *) apply (simp add: l_post_sume_play_move) (*SH*) apply (simp add: l_sum_elems_append play_move_def l_inv_Move_nat1) (*SH*) apply (simp add: l_sum_elems_append play_move_def) (*SH*) oops lemma "inv_Move x \<Longrightarrow> inv_Moves xs \<Longrightarrow> inv_Moves(x # xs)" unfolding inv_Moves_def Let_def apply simp apply (elim conjE, intro conjI impI) apply (simp add: l_inv_SeqElems_Cons) (*SH*) apply (simp add: l_inv_SeqElems_Cons l_pre_sume_seqelems_move) (*SH*) apply (metis NimFull.sum_elemsI l_inv_SeqElems_Cons l_sum_elems_post) (*SH*) oops lemma l_play_move_sat: "pre_play_move0 p m ms \<Longrightarrow> post_play_move p m ms (play_move p m ms)" unfolding pre_play_move0_def post_play_move_def apply (elim conjE, simp, intro conjI) defer apply (meson inv_Moves_def) (*SH*) apply (simp add: l_play_move_pre_sume) (*SH*) apply (meson inv_Moves_def)(*SH*) (*apply (simp add: l_sum_elems_post) (*SH*) *) apply (simp add: l_post_sume_play_move) (*SH*) apply (simp add: l_inv_Move_nat1 l_sum_elems_append play_move_def) (*SH*) apply (simp add: l_sum_elems_append play_move_def) (*SH*) oops lemma l_play_move_inv_moves: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> inv_Moves (play_move p m ms) " unfolding inv_Moves_defs play_move_def Let_def apply (simp add: l_applyVDMSeq_append_last) apply (simp add: l_sum_elems_append l_len_append) apply (elim conjE, intro conjI impI) using inv_VDMNat_def l_inv_Move_nat1 apply force (*SH*) using l_inv_Move_nat1 apply force (*SH*) find_theorems "_ \<le> _" name:le -name:Nat -name:Set thm le_less apply (simp only: le_less) apply (simp (no_asm) only: le_less[symmetric]) apply (erule disjE) apply simp_all (* ERROR! It's \<longleftrightarrow> for applyVDMSeq in the last ? No. but something else*) oops lemma l_inv_Moves_append: "inv_Move m \<Longrightarrow> inv_Moves ms \<Longrightarrow> inv_Moves (ms @ [m])" apply (simp (no_asm) add: inv_Moves_def Let_def) unfolding post_sum_elems_def apply (intro conjI) apply (simp add: inv_Moves_def l_inv_SeqElems_append)(*SH*) apply (simp add: l_inv_Moves_inv_SeqElems l_inv_SeqElems_append pre_sum_elems_def)(*SH*) apply (simp add: inv_Moves_def l_inv_SeqElems_append)(*SH*) apply (metis l_inv_Moves_inv_SeqElems l_inv_SeqElems_append l_sum_elems_post post_sum_elems_def) apply (simp add: l_inv_Moves_inv_SeqElems l_inv_SeqElems_append l_sum_elems_nat1) apply (simp_all add: l_sum_elems_append l_applyVDMSeq_append_last) defer apply (intro impI) oops (*apply (metis l_inv_SeqElems_append l_sum_elems_post post_sum_elems_def pre_sum_elems_def)(*SH*)*) (*apply (smt inv_VDMNat_def l_inv_SeqElems_append l_sum_elems_nat1 l_sum_elems_post post_sum_elems_def snoc_eq_iff_butlast)*) (*apply (smt inv_VDMNat_def l_inv_SeqElems_append l_sum_elems_nat1 l_sum_elems_post post_sum_elems_def snoc_eq_iff_butlast) (*SH*)*) lemma l_inv_Moves_play_move: "pre_play_move p m s \<Longrightarrow> inv_Moves (play_move p m s)" unfolding inv_Moves_def Let_def apply (intro conjI impI) using l_play_move_pre_sume pre_play_move_def pre_sum_elems_def apply blast (*SH*) apply (simp add: l_play_move_pre_sume pre_play_move_def) (*SH*) apply (simp add: l_post_sume_play_move pre_play_move_def) (*SH*) defer apply (simp add: l_inv_Moves_last_move_append) (*SH*) apply (simp add: pre_play_move_def play_move_def) --"expansion on local goal only" apply (elim conjE, erule impE) defer apply (simp add: l_inv_Moves_maxpile_sume_append) apply (erule impE) unfolding moves_left_def apply simp oops theorem PO_play_move_sat_exp_obl unfolding PO_play_move_sat_exp_obl_def post_play_move_def apply simp apply (intro allI impI conjI) apply (simp add: l_inv_Moves_play_move) (*SH*) apply (simp add: pre_play_move0_def l_moves_left_pre_sume) (*SH*) apply (simp add: pre_play_move0_def l_moves_left_pre_sume) (*SH*) apply (simp add: l_play_move_pre_sume) (*SH*) apply (meson inv_Moves_def) (*SH*) (* why not apply (simp add: l_pre_sume_seqelems_move l_sum_elems_post l_moves_left_pre_sume)? *) apply (simp add: l_post_sume_play_move) (*SH*) apply (simp add: l_sum_elems_append play_move_def l_inv_Move_nat1) (*SH*) by (simp add: l_sum_elems_append play_move_def) (*SH*) *) text \<open> And finally, we have all the lemmas we need to prove the satisfiability of @{term play_move}. \<close> definition post_play_move_NEW :: "Player \<Rightarrow> Move \<Rightarrow> Moves \<Rightarrow> Moves \<Rightarrow> \<bool>" where "post_play_move_NEW p m s RESULT \<equiv> pre_play_move_NEW p m s \<longrightarrow> inv_Moves RESULT \<and> pre_sum_elems s \<and> pre_sum_elems RESULT \<and> post_sum_elems s (sum_elems s) \<and> post_sum_elems RESULT (sum_elems RESULT) \<and> sum_elems s < sum_elems RESULT \<and> sum_elems s + m = sum_elems RESULT \<and> \<not> (fair_play p RESULT) \<and> s \<sqsubseteq> RESULT" definition PO_play_move_sat_exp_NEW_obl :: "\<bool>" where "PO_play_move_sat_exp_NEW_obl \<equiv> \<forall> p m s . pre_play_move_NEW p m s \<longrightarrow> post_play_move_NEW p m s (play_move p m s)" lemma l_sg1_inv_Moves_end: "(s @ [m]) ! nat (len s) = m" unfolding len_def by simp lemma "0 < moves_left s \<and> m \<le> moves_left s \<Longrightarrow> inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> inv_Moves(s @ [m])" unfolding inv_Moves_def Let_def apply simp apply (intro conjI impI) apply (simp add: l_inv_SeqElems_append) apply (simp add: l_inv_SeqElems_append pre_sum_elems_def) apply (simp add: inv_Moves_def l_sg5_post_sume_append) apply (simp add: l_sum_elems_append moves_left_def) apply (simp add: l_sg1_inv_Moves_end) unfolding moves_left_def apply (elim conjE) apply (induct s) apply (simp add: inv_Move_def) apply simp txt \<open> Looks like a similar dead end as seen before \<close> oops lemma l_sg1_1_inv_Moves_append_NEW:" inv_Move m \<Longrightarrow> inv_Moves s \<Longrightarrow> 0 < moves_left s \<Longrightarrow> m \<le> moves_left s \<Longrightarrow> moves_left s \<noteq> m \<Longrightarrow> inv_Moves (s @ [m])" unfolding moves_left_def apply simp unfolding inv_Moves_def Let_def apply simp by (metis l_inv_SeqElems_append l_sg4_post_sume l_sum_elems_append le_diff_eq less_diff_eq less_irrefl less_le pre_sum_elems_def) lemma l_sg1_2_inv_Moves_append_NEW:" inv_Moves s \<Longrightarrow> 0 < moves_left s \<Longrightarrow> 1 \<le> moves_left s \<Longrightarrow> inv_Moves (s @ [1])" unfolding inv_Moves_def Let_def apply simp txt \<open> Sledgehammer finds these lemmas, which are not used/useful \<close> thm dbl_inc_simps(5) dbl_simps(3) dbl_simps(5) txt \<open> Lemma we came up with was caught here; i.e. useful to generalise for later \<close> thm l_concat_append by (metis inv_Move_def inv_VDMNat1_def l_inv_SeqElems_append l_sg1_inv_Moves_end l_sg4_post_sume l_sum_elems_append le_diff_eq moves_left_def one_le_numeral pre_sum_elems_def zero_less_one) lemma l_sg1_inv_Moves_append_NEW: "pre_play_move_NEW p m s \<Longrightarrow> inv_Moves (s @ [m])" unfolding pre_play_move_NEW_def apply safe using l_sg1_1_inv_Moves_append_NEW apply blast using l_sg1_2_inv_Moves_append_NEW by blast theorem PO_play_move_sat_exp_NEW_obl unfolding PO_play_move_sat_exp_NEW_obl_def unfolding post_play_move_NEW_def play_move_def apply (simp, safe) txt \<open> 8 instead of 12 subgoals from the first attempt \<close> apply (simp add: l_sg1_inv_Moves_append_NEW) apply (simp add: pre_moves_left_def pre_play_move_NEW_def) apply (simp add: l_sg1_inv_Moves_append_NEW l_sg2_pre_sume) apply (meson inv_Moves_def pre_play_move_NEW_def) apply (meson inv_Moves_def l_sg1_inv_Moves_append_NEW) apply (simp add: l_inv_Move_nat1 l_sum_elems_append pre_play_move_NEW_def) apply (simp add: l_sum_elems_append) by (simp add: l_sg6_2_fair_play pre_play_move_NEW_def) text \<open> Finally, the lemmas that were useful are displayed below. \<close> thm l_sg1_inv_Moves_append_NEW l_sg2_pre_sume l_inv_Move_nat1 l_sum_elems_append l_sg6_2_fair_play text \<open> Also here, an alternative (albeit similar) proof of the same goal. \<close> theorem PO_play_move_sat_exp_NEW_obl (*<*) unfolding PO_play_move_sat_exp_NEW_obl_def post_play_move_NEW_def play_move_def apply (intro allI impI conjI,simp_all) (*>*) txt \<open> $\cdots$ @{subgoals[display,indent=0]} \<close> apply (simp add: l_sg1_inv_Moves_append_NEW) (*SH, sg1*) using l_sg2_pre_sume pre_play_move_NEW_def apply blast (*SH, sg2*) using l_sg3_pre_sume_append pre_play_move_NEW_def apply blast (*SH, sg3*) apply (meson inv_Moves_def pre_play_move_NEW_def) (*SH, sg4*) apply (simp add: l_sg5_post_sume_append pre_play_move_NEW_def) (*SH, sg5*) apply (simp add: l_inv_Move_nat1 l_sum_elems_append pre_play_move_NEW_def) (*SH, sg6*) apply (simp add: l_sum_elems_append) (*SH, sg7*) by (simp add: l_sg6_2_fair_play pre_play_move_NEW_def) (*SH, sg8*) (*************************************************************************) section \<open> VDM Operations satisfiability POs \<close> theorem PO_first_player_winning_choose_move_sat_exp_obl0 unfolding PO_first_player_winning_choose_move_sat_exp_obl0_def apply (intro allI impI) unfolding pre_first_player_winning_choose_move0_def post_first_player_winning_choose_move0_def apply (elim conjE) unfolding post_fixed_choose_move_def first_player_winning_choose_move_def apply simp apply (intro conjI) unfolding inv_Move_def max_def Let_def txt \<open> too repetitive on the various appearances of @{term inv_Move} \<close> oops find_theorems "sum_elems (_ @ _)" text \<open> Intermediate result needed for first subgoal. Also create the structured expansion as @{term lemmas} statements. \<close> lemma l_best_move_range: "best_move ms \<ge> 1 \<Longrightarrow> best_move ms \<le> MAX_MOV" unfolding best_move_def moves_left_def by simp lemma l_best_move_nat: "0 \<le> best_move ms" unfolding best_move_def by simp lemma l_best_move_nat1: "inv_Moves ms \<Longrightarrow> (0 < best_move ms) = will_first_player_win l" txt \<open>doesn't work every time;\<close> oops text \<open>You can name group of lemmas\<close> lemmas PO_first_player_winning_choose_move_sat_exp_obl0_pre_post = PO_first_player_winning_choose_move_sat_exp_obl0_def pre_first_player_winning_choose_move0_def post_first_player_winning_choose_move0_def post_fixed_choose_move_def lemmas PO_first_player_winning_choose_move_sat_exp_obl_pre_post = PO_first_player_winning_choose_move_sat_exp_obl_def pre_first_player_winning_choose_move_def post_first_player_winning_choose_move_def post_fixed_choose_move_def lemma l_first_player_win_best_move: "0 < best_move ms \<Longrightarrow> inv_Move (best_move ms)" unfolding best_move_def moves_left_def inv_Move_def inv_VDMNat1_def by simp theorem PO_first_player_winning_choose_move_sat_exp_obl0 unfolding PO_first_player_winning_choose_move_sat_exp_obl0_pre_post first_player_winning_choose_move_def apply (safe,simp) txt \<open>first goal saying that resut must be @{term inv_Move}, but that's only the case if @{term best_move} isn't zero! Given lemma above @{thm l_first_player_win_best_move}, it's a missing PRECONDITION!\<close> defer apply (simp add: l_best_move_range) txt \<open> similar to @{term inv_Move}, don't want to keep expanding @{term inv_Nim} \<close> oops text \<open> Deduce information from @{term inv_Nim} without the need to expand it \<close> lemmas inv_Nim_defs = inv_Nim_def inv_Nim_flat_def lemma f_Nim_inv_Moves: "inv_Nim st \<Longrightarrow> inv_Moves (moves st)" unfolding inv_Nim_defs by simp lemma l_isFirst: "isFirst P1" unfolding isFirst_def by simp find_theorems name:split name:"if" thm Let_def option.split split_ifs theorem PO_first_player_winning_choose_move_sat_exp_obl unfolding PO_first_player_winning_choose_move_sat_exp_obl_pre_post first_player_winning_choose_move_def apply (intro allI impI, elim conjE, intro conjI, simp_all) unfolding inv_Move_def max_def apply (simp add: l_best_move_range) unfolding pre_who_plays_next_def Let_def apply (simp add: inv_VDMNat1_def) unfolding pre_play_move_def apply (simp) txt \<open> realise that @{thm l_best_move_range} can be generalised for @{term inv_Move}! \<close> oops lemma l_best_move_inv_Move: "inv_Moves s \<Longrightarrow> inv_Move (best_move s)" unfolding best_move_def moves_left_def apply simp unfolding inv_Move_def inv_VDMNat1_def unfolding inv_Moves_def Let_def thm le_less[of "sum_elems s" MAX_PILE] apply (simp add: le_less) apply (elim conjE disjE, simp_all) find_theorems "sum_elems _ > 0" apply (cases "s = []", simp) apply (frule l_sum_elems_nat1, simp) apply (intro conjI) find_theorems (1000) name:int -name:Real -name:Code -name:Quickcheck -name:Ring -name:Topo -name:Prod -name:Rat -name:String -name:BNF -name:Set name:Int name:induct apply (cases "sum_elems s", simp_all) apply (induct_tac n rule:int_less_induct, simp_all) apply simp txt \<open> Still not entirely obvious. Let's go back to prove @{term best_move}, like we did for @{term play_move}\<close> oops definition PO_best_move_obl :: "\<bool>" where "PO_best_move_obl \<equiv> \<forall> s . pre_best_move s \<longrightarrow> post_best_move s (best_move s)" theorem PO_best_move_obl unfolding PO_best_move_obl_def apply safe unfolding pre_best_move_def post_best_move_def apply simp txt \<open> suggests to do the same with @{term moves_left} \<close> oops definition PO_moves_left_obl :: "\<bool>" where "PO_moves_left_obl \<equiv> \<forall> s . pre_moves_left s \<longrightarrow> post_moves_left s (moves_left s)" theorem PO_moves_left_obl unfolding PO_moves_left_obl_def apply safe unfolding pre_moves_left_def post_moves_left_def apply safe apply (simp add: l_inv_VDMNat_moves_left) txt \<open> this suggests the same for @{term sum_elems}, which we already proved before \<close> thm PO_sum_elems_obl using PO_sum_elems_obl apply blast done text \<open> For it to be reusable in middle of proofs, important to set it up in a way Isabelle prefers \<close> lemma l_moves_left_sat: "pre_moves_left ms \<Longrightarrow> post_moves_left ms (moves_left ms)" by (meson inv_Moves_def l_inv_VDMNat_moves_left post_moves_left_def pre_moves_left_def) (*SH*) theorem PO_best_move_obl unfolding PO_best_move_obl_def apply safe unfolding pre_best_move_def post_best_move_def apply safe apply (simp add: inv_VDMNat_def l_best_move_nat) apply (simp add: l_moves_left_sat) unfolding pre_moves_left_def pre_sum_elems_def txt \<open> Last goal suggests a couple of lemmas \<close> oops text \<open> The first lemma is that @{term "inv_Moves s"} is subsumes (is stronger) than @{term pre_moves_left} \<close> lemma l_inv_Moves_pre_moves_left: "inv_Moves s \<Longrightarrow> pre_moves_left s" by (simp add: l_sg2_pre_sume pre_moves_left_def) text \<open> Second lemma is the goal itself, but with smaller set of assumptions, given lemma 1\<close> lemma "inv_Moves s \<Longrightarrow> 0 < moves_left s \<Longrightarrow> best_move s \<le> moves_left s" unfolding best_move_def apply simp apply (cases "moves_left s", simp_all) thm l_nim_mod_prop by (simp add: l_nim_mod_prop) theorem PO_best_move_obl unfolding PO_best_move_obl_def apply safe unfolding pre_best_move_def post_best_move_def apply safe apply (simp add: inv_VDMNat_def l_best_move_nat) apply (simp add: l_moves_left_sat) using best_move_def l_nim_mod_prop by auto lemma l_fpwcm_lemma1: "inv_Nim bst \<Longrightarrow> pre_will_first_player_win (limit bst)" unfolding pre_will_first_player_win_def inv_MAX_PILE_def inv_VDMNat1_def pre_vdm_mod_def apply simp by (simp add: inv_Move_def inv_Nim_def inv_Nim_defs(2) inv_VDMNat1_def) theorem PO_first_player_winning_choose_move_sat_exp_obl unfolding PO_first_player_winning_choose_move_sat_exp_obl_pre_post first_player_winning_choose_move_def apply (intro allI impI, elim conjE, intro conjI, simp_all) apply (simp add: l_first_player_win_best_move) unfolding Let_def unfolding pre_who_plays_next_def apply (simp add: f_Nim_inv_Moves l_isFirst) unfolding pre_play_move_def apply simp apply (intro impI conjI) txt \<open> Example of reuse of previous PO proofs in the right format for Isabelle \<close> apply (simp add: l_moves_left_sat) apply (simp add: inv_Player_def post_who_plays_next_def) apply (simp add: l_fpwcm_lemma1) using inv_Player_def apply blast apply (simp add: l_first_player_win_best_move) using pre_best_move_def apply blast apply (simp add: inv_Nim_def inv_Nim_defs(2)) apply (simp add: inv_Player_def post_fair_play_def post_who_plays_next_def) txt \<open>Missed opportunity for lemma; smt solution is non-ideal\<close> apply (smt best_move_def vdmmod_mod_ge0 zmod_le_nonneg_dividend)+ using inv_Nim_def inv_Nim_defs(2) apply blast apply (smt best_move_def inv_Nim_def inv_Nim_defs(2) l12 l_first_player_win_best_move l_moves_left_pre_sume l_moves_left_sat l_seq_prefix_append l_sg1_1_inv_Moves_append_NEW l_sg3_pre_sume_append l_sg5_post_sume_append l_sum_elems_append play_move_def post_moves_left_def post_play_move_def vdmmod_mod_ge0 zmod_le_nonneg_dividend) apply (smt best_move_def l_first_player_win_best_move l_inv_Moves_pre_moves_left l_sg1_1_inv_Moves_append_NEW l_sum_elems_append moves_left_def play_move_def pre_best_move_def unique_euclidean_semiring_numeral_class.mod_less_eq_dividend vdmmod_mod_ge0) apply (smt best_move_def inv_Moves_def inv_VDMNat_def l_best_move_nat l_first_player_win_best_move l_inv_SeqElems_append l_moves_left_sat l_sg3_pre_sume_append l_sg5_post_sume_append l_sum_elems_append moves_left_def play_move_def post_best_move_def pre_moves_left_def vdmmod_mod_ge0 zmod_le_nonneg_dividend) apply (smt best_move_def vdmmod_mod_ge0 zmod_le_nonneg_dividend) txt \<open> Last goal is not discovered by sledegehammer. Let's try a lemma instead. The SMT proofs above might break over time; better to make them lemmas as well. \<close> oops lemma l_best_play_move_failed: " pre_best_move s \<Longrightarrow> 0 < moves_left s \<Longrightarrow> 0 < best_move s \<Longrightarrow> best_move (play_move m (best_move s) s) = 0" unfolding play_move_def best_move_def moves_left_def pre_best_move_def find_theorems "sum_elems (_ @ _)" apply (simp add: l_sum_elems_append_gen, safe) by (simp add: add_diff_eq) thm add_diff_eq theorem PO_first_player_winning_choose_move_sat_exp_obl unfolding PO_first_player_winning_choose_move_sat_exp_obl_pre_post first_player_winning_choose_move_def apply (intro allI impI, elim conjE, intro conjI, simp_all) apply (simp add: l_first_player_win_best_move) unfolding Let_def unfolding pre_who_plays_next_def apply (simp add: f_Nim_inv_Moves l_isFirst) unfolding pre_play_move_def apply simp apply (intro impI conjI) apply (simp add: l_moves_left_sat) apply (simp add: inv_Player_def post_who_plays_next_def) apply (simp add: l_fpwcm_lemma1) using inv_Player_def apply blast apply (simp add: l_first_player_win_best_move) using pre_best_move_def apply blast apply (simp add: inv_Nim_def inv_Nim_defs(2)) apply (simp add: inv_Player_def post_fair_play_def post_who_plays_next_def) apply (smt best_move_def vdmmod_mod_ge0 zmod_le_nonneg_dividend)+ using inv_Nim_def inv_Nim_defs(2) apply blast apply (smt best_move_def inv_Nim_def inv_Nim_defs(2) l12 l_first_player_win_best_move l_moves_left_pre_sume l_moves_left_sat l_seq_prefix_append l_sg1_1_inv_Moves_append_NEW l_sg3_pre_sume_append l_sg5_post_sume_append l_sum_elems_append play_move_def post_moves_left_def post_play_move_def vdmmod_mod_ge0 zmod_le_nonneg_dividend) apply (smt best_move_def l_first_player_win_best_move l_inv_Moves_pre_moves_left l_sg1_1_inv_Moves_append_NEW l_sum_elems_append moves_left_def play_move_def pre_best_move_def unique_euclidean_semiring_numeral_class.mod_less_eq_dividend vdmmod_mod_ge0) apply (smt best_move_def inv_Moves_def inv_VDMNat_def l_best_move_nat l_first_player_win_best_move l_inv_SeqElems_append l_moves_left_sat l_sg3_pre_sume_append l_sg5_post_sume_append l_sum_elems_append moves_left_def play_move_def post_best_move_def pre_moves_left_def vdmmod_mod_ge0 zmod_le_nonneg_dividend) apply (smt best_move_def vdmmod_mod_ge0 zmod_le_nonneg_dividend) using l_best_play_move_failed by blast lemma PO_first_player_winning_choose_move_sat_obl unfolding PO_first_player_winning_choose_move_sat_obl_def pre_first_player_winning_choose_move_def post_first_player_winning_choose_move_def apply (intro allI impI, elim conjE) unfolding max_def apply (simp add: l_first_player_win_best_move) unfolding pre_who_plays_next_def apply (simp add: l_inv_Move_nat1 l_isFirst) unfolding pre_moves_left_def apply (simp add: l_isFirst) txt \<open> Wahh! Complicated. We need more lemmas for this one \<close> oops text \<open> Let us try the lemma about @{term best_move} again, but generalise it this time. Say, take the expression: \[ @{term "best_move ms < moves_left ms"}[display=true] = @{term "((moves_left ms) - 1) mod (MAX_MOV + 1) < (moves_left ms)"} = @{term "(x - 1) mod (MAX_MOV + 1) < x"} \] Now, let us investigate known facts about @{term "x mod y"} under @{typ \<nat>}. \<close> (*<*)find_theorems "(_::nat) mod _ = _" thm Divides.zmod_eq_0_iff Divides.mod_eq_0D Divides.mod_eq_0D Divides.mod_less Divides.Suc_0_mod_numeral (*>*) text \<open> \textsf{quickcheck} immediately finds the useful counter examples, which if ruled out by suitable assumptions on involved values leads to the main result discovered by \textsf{sledgehammer}. \<close> lemma l_best_move_mov_limit_mod: "n > 0 \<Longrightarrow> m > 0 \<Longrightarrow> ((m::int) - 1) mod n < m" (* "cvc4": Try this: by (smt zmod_le_nonneg_dividend) (351 ms). "z3": Try this: by (smt semiring_numeral_div_class.mod_less_eq_dividend) (174 ms). Isar proof (70 ms): proof - assume a1: "0 < m" have f2: "(0 \<le> - 1 + m + - 1 * ((- 1 + m) mod n)) = (1 \<le> m + - 1 * ((- 1 + m) mod n))" by linarith have f3: "(0 \<le> - 1 + m) = (1 \<le> m)" by auto have f4: "\<forall>x0 x1. ((x1\<Colon>\<int>) mod x0 \<le> x1) = (0 \<le> x1 + - 1 * (x1 mod x0))" by fastforce have f5: "1 \<le> m" using a1 by force have f6: "(\<not> (m - 1) mod n < m) = (m + - 1 * ((- 1 + m) mod n) \<le> 0)" by fastforce have "1 \<le> m + - 1 * ((- 1 + m) mod n)" using f5 f4 f3 f2 semiring_numeral_div_class.mod_less_eq_dividend by blast thus ?thesis using f6 by linarith qed "spass": Try this: using zle_diff1_eq zmod_le_nonneg_dividend by blast (17 ms). *) using zle_diff1_eq zmod_le_nonneg_dividend by blast lemma l_best_move_inv: "moves_left s > 0 \<Longrightarrow> best_move s < moves_left s" unfolding best_move_def using [[rule_trace,simp_trace]] by (simp only: vdmmod_mod_ge0 l_best_move_mov_limit_mod) text \<open> Let's try and reuse the lemmas everywhere, at once; plus expanding the easy case on @{term will_first_player_win} as well as Isabelle constructs for max and let. It works: makes for two sub goals. \<close> lemma PO_first_player_winning_choose_move_sat_exp_obl unfolding PO_first_player_winning_choose_move_sat_exp_obl_pre_post apply (intro allI impI, elim conjE, intro conjI, simp_all) unfolding max_def Let_def pre_who_plays_next_def pre_moves_left_def pre_play_move0_def will_first_player_win_def first_player_winning_choose_move_def apply (simp_all add: l_first_player_win_best_move l_inv_Move_nat1 l_isFirst l_best_move_inv) apply (safe) txt \<open> Argh...! seems like we are back to where we strated. Perhaps the first goal to tackle should be the hard, last one \<close> oops lemma l_sg_1: "inv_Nim bst \<Longrightarrow> inv_Moves (moves bst) \<and> pre_sum_elems (moves bst) \<Longrightarrow> 0 < moves_left (moves bst) \<Longrightarrow> 1 < moves_left (moves bst)" unfolding inv_Nim_def inv_Nim_flat_def apply simp txt \<open> Nim bst is irrelevant; abstract \<close> oops lemma PO_first_player_winning_choose_move_sat_exp_obl unfolding PO_first_player_winning_choose_move_sat_exp_obl_pre_post Let_def apply (intro allI impI, elim conjE, intro conjI, simp_all) unfolding max_def pre_who_plays_next_def pre_moves_left_def pre_play_move0_def will_first_player_win_def first_player_winning_choose_move_def apply (simp add: l_first_player_win_best_move) txt \<open> Don't know where the offending goal is coming from yet \<close> oops lemma PO_first_player_winning_choose_move_sat_exp_obl unfolding PO_first_player_winning_choose_move_sat_exp_obl_pre_post Let_def apply (intro allI impI, elim conjE, intro conjI, simp_all) unfolding first_player_winning_choose_move_def apply (simp add: l_first_player_win_best_move l_inv_Move_nat1) unfolding pre_who_plays_next_def best_move_def inv_Move_def apply (simp add: l_isFirst f_Nim_inv_Moves inv_VDMNat1_def) apply (simp add: l_moves_left_sat) apply (simp add: l_isFirst pre_moves_left_def) apply (simp add: inv_Player_def post_who_plays_next_def) using pre_will_first_player_win_def inv_MAX_PILE_def inv_VDMNat1_def apply simp txt \<open> not via sledgehammer; try simplifying all \<close> apply simp_all unfolding play_move_def pre_play_move_def pre_best_move_def apply simp_all txt \<open> goals 2,3, 4 show we need lemmas about @{term inv_Moves} and @{term pre_moves_left} over concatenation \<close> oops lemma l_inv_Moves_Append: "inv_Moves (s @ t) = (inv_Moves s \<and> inv_Moves t)" apply (induct s) apply simp txt \<open>two cases as empty and non-empty \<close> oops lemma l_inv_Moves_Empty: "inv_Moves []" unfolding inv_Moves_defs pre_sum_elems_def elems_def inv_VDMNat_def by simp lemma l_inv_Moves_Append: "inv_Moves (s @ t) = (inv_Moves s \<and> inv_Moves t)" apply (induct s) apply (simp add: l_inv_Moves_Empty) txt \<open>for append, need lemma about cons\<close> oops lemma l_inv_Moves_Cons: "inv_Moves (a#s) = (inv_Move a \<and> inv_Moves s)" apply (rule iffI) unfolding inv_Moves_def apply auto txt \<open>needs to go slowly\<close> oops lemma l_inv_Moves_Cons: "inv_Moves (a#s) = (inv_Move a \<and> inv_Moves s)" apply (safe) apply (simp add: inv_Moves_defs(1) l_inv_SeqElems_Cons) unfolding inv_Moves_def pre_sum_elems_def Let_def apply (safe) apply (simp_all add: l_inv_SeqElems_Cons) unfolding post_sum_elems_def apply simp_all txt \<open>needs slower pace\<close> oops text \<open> Singleton version of @{term inv_Moves} equal @{term inv_Move} \<close> lemma l_inv_Moves_Singleton: "inv_Moves [m] = inv_Move m" unfolding inv_Moves_def inv_SeqElems_def apply simp unfolding pre_sum_elems_def Let_def post_sum_elems_def apply simp using inv_Move_def inv_VDMNat_def l_inv_Move_nat1 l_inv_SeqElems_Cons by fastforce text \<open> Singleton version of @{term inv_Moves} append. See also @{thm l_sg1_inv_Moves_append_NEW} \<close> lemma l_inv_Moves_Append1: "inv_Moves (s @ [m]) = (inv_Moves s \<and> inv_Move m)" find_theorems "_ @ [_]" name:List apply (induct s) apply (simp add: l_inv_Moves_Empty l_inv_Moves_Singleton) unfolding inv_Moves_def apply (simp) oops lemma l_not_inv_Move_zero: "\<not> inv_Move 0" by (simp add: inv_Move_def inv_VDMNat1_def) lemma l_inv_Moves_Cons: "inv_Moves (a#s) \<Longrightarrow> (inv_Move a \<and> inv_Moves s)" unfolding inv_Moves_def post_sum_elems_def pre_sum_elems_def Let_def apply (simp add: l_inv_SeqElems_Cons) using inv_VDMNat_def l_inv_Move_nat1 l_pre_sum_elems by fastforce theorem PO_first_player_winning_choose_move_sat_exp_obl unfolding PO_first_player_winning_choose_move_sat_exp_obl_def post_first_player_winning_choose_move_def Let_def first_player_winning_choose_move_def apply simp txt \<open> safe will be unhelpful here as it will generate manye (13) small goals \<close> apply safe apply (simp add: l_first_player_win_best_move pre_first_player_winning_choose_move_def) unfolding pre_who_plays_next_def apply (simp add: pre_first_player_winning_choose_move_def)+ unfolding pre_moves_left_def apply (simp add: l_isFirst) apply (simp add: l_moves_left_sat pre_moves_left_def) apply (simp add: f_Nim_inv_Moves isFirst_def pre_first_player_winning_choose_move_def) apply (simp add: inv_Player_def post_who_plays_next_def) txt \<open> Sledgehammer seems to have stopped being useful. Expansion of various parts for this goal is unhelpful. make it the first goal\<close> oops lemma l_sg_ml: "inv_Nim bst \<Longrightarrow> inv_Moves (moves bst) \<and> pre_sum_elems (moves bst) \<Longrightarrow> 0 < moves_left (moves bst) \<Longrightarrow> Suc 0 < moves_left (moves bst)" oops lemma l_sg_let: "inv_Nim bst \<Longrightarrow> pre_moves_left (moves bst) \<Longrightarrow> 0 < moves_left (moves bst) \<Longrightarrow> let pm = play_move (current bst) (max (Suc 0) (best_move (moves bst))) (moves bst) in pre_best_move pm \<and> post_best_move pm (best_move pm) \<and> (isFirst (who_plays_next (moves bst)) \<longrightarrow> best_move pm = 0)" find_theorems name:"let" -name:Complete_ -name:Induc -name:Set -name:List -name:Lat -name:Nim -name:Map -name:BNF -name:Predicate find_theorems name:"let" name:cong unfolding Let_def apply (intro conjI impI) unfolding pre_best_move_def apply (intro conjI) unfolding play_move_def oops lemma l_inv_Moves_Append1: "inv_Moves (s @ [m])" unfolding inv_Moves_def apply (intro conjI) find_theorems "inv_SeqElems _ _" oops lemma f_inv_Move : "inv_Move m \<Longrightarrow> m \<le> MAX_MOV" unfolding inv_Move_def by simp lemma l_MAX_rel: "MAX_MOV < MAX_PILE" by simp end
{"author": "leouk", "repo": "VDM_Toolkit", "sha": "791013909961d45949fcd96d937ae18f0174c7ec", "save_path": "github-repos/isabelle/leouk-VDM_Toolkit", "path": "github-repos/isabelle/leouk-VDM_Toolkit/VDM_Toolkit-791013909961d45949fcd96d937ae18f0174c7ec/experiments/isa/Nim/NimFullProofs.thy"}
import numpyro numpyro.enable_x64() import sys import argparse import mechbayes.util as util import numpy as onp from run_util import load_config, get_method import data_cleaning if __name__ == "__main__": parser = argparse.ArgumentParser(description='Run forecast model for one location.') parser.add_argument('place', help='location (e.g., US state)') parser.add_argument('--config_file', help='configuration file (default: config.json)', default='config.json') parser.add_argument('--start', help='start date', default='2020-03-04') parser.add_argument('--end', help='end date (i.e., forecast date)', default=None) parser.add_argument('--prefix', help='path prefix for saving results', default='results') parser.add_argument('--model_config', help='model configuration name') parser.add_argument('--run', help="run model", dest='run', action='store_true') parser.add_argument('--no-run', help="update plots without running model", dest='run', action='store_false') parser.set_defaults(run=True) args = parser.parse_args() config = load_config(args.config_file) model_config = config['model_configs'][args.model_config] model_type = get_method(model_config['model']) forecast_date = args.end data = util.load_data() clean_to_date = forecast_date if args.run else data['US']['data'].index[-1] data_cleaning.clean(data, clean_to_date) if args.run: util.run_place(data, args.place, start=args.start, end=forecast_date, prefix=args.prefix, model_type=model_type, **model_config['args']) util.gen_forecasts(data, args.place, start=args.start, prefix=args.prefix, model_type=model_type, show=False)
{"hexsha": "f558463e228eda27d37fee47baf051b7c77362d1", "size": 1959, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/run_model.py", "max_stars_repo_name": "dsheldon/mechbayes", "max_stars_repo_head_hexsha": "dc1b857e5bee6429aa18233d4f4890b2892a2e4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/run_model.py", "max_issues_repo_name": "dsheldon/mechbayes", "max_issues_repo_head_hexsha": "dc1b857e5bee6429aa18233d4f4890b2892a2e4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-23T16:26:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T15:39:46.000Z", "max_forks_repo_path": "scripts/run_model.py", "max_forks_repo_name": "dsheldon/mechbayes", "max_forks_repo_head_hexsha": "dc1b857e5bee6429aa18233d4f4890b2892a2e4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-20T21:53:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T21:53:38.000Z", "avg_line_length": 36.9622641509, "max_line_length": 117, "alphanum_fraction": 0.6278713629, "include": true, "reason": "import numpy", "num_tokens": 400}
''' use the neural network from nn.py to find evidence in a file uses cleaned files from clean_hslld.py ''' import numpy as np import pickle from bertinator import get_bert_vector def transcript_to_chunks(transcript, radius=1): transcript_lines = transcript.splitlines() chunks = [None] * len(transcript_lines) for i in range(len(transcript_lines)): chunks[i] = '\n'.join(transcript_lines[max(0, i - radius) : min(len(transcript_lines), i + radius + 1)]) return chunks def find_evidence(clf, transcript, radius=1): chunks = transcript_to_chunks(transcript, radius=radius) bert_vectors = [] for chunk in chunks: bert_vector = get_bert_vector(chunk) bert_vectors += [bert_vector.detach().numpy()] X = np.array(bert_vectors) y = clf.predict(X) for i, is_evidence in enumerate(y): if is_evidence == 1: print(chunks[i]) print('-'*48) if __name__ == "__main__": fname = 'transcripts/remmt1.cha' with open(fname, 'r') as f: transcript = f.read() # load neural network with open('clf.pickle', 'rb') as f: clf = pickle.load(f) find_evidence(clf, transcript)
{"hexsha": "d12c20c6d3eba6a598bdb50573740089b84d81a0", "size": 1217, "ext": "py", "lang": "Python", "max_stars_repo_path": "clf_evidence_finder.py", "max_stars_repo_name": "chrisraff/conversation2foods", "max_stars_repo_head_hexsha": "76948d7b302d7d9aa22d8405d13f0e63e22e5706", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clf_evidence_finder.py", "max_issues_repo_name": "chrisraff/conversation2foods", "max_issues_repo_head_hexsha": "76948d7b302d7d9aa22d8405d13f0e63e22e5706", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clf_evidence_finder.py", "max_forks_repo_name": "chrisraff/conversation2foods", "max_forks_repo_head_hexsha": "76948d7b302d7d9aa22d8405d13f0e63e22e5706", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.537037037, "max_line_length": 112, "alphanum_fraction": 0.6425636812, "include": true, "reason": "import numpy", "num_tokens": 304}
""" $(TYPEDEF) Structure to contain the density values obtained from the calculation. $(TYPEDFIELDS) """ @with_kw mutable struct Density solute::Float64 = 0.0 solvent::Float64 = 0.0 solvent_bulk::Float64 = 0.0 end function reset!(d::Density) d.solute = 0.0 d.solvent = 0.0 d.solvent_bulk = 0.0 return nothing end #function Base.show(io::IO, d::Density ) # println(" Mean solute density: $(d.solute) ") # println(" Mean solvent density: $(d.solvent) ") # println(" Mean solvent bulk density: $(d.solvent_bulk) ") #end
{"hexsha": "eb8279b1eef7124eaf8edc0c95523d6d20040e78", "size": 555, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Density.jl", "max_stars_repo_name": "m3g/MDDF", "max_stars_repo_head_hexsha": "efbc8e0dcf426c9b2246217eb9edaf4605318e84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-04T21:33:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-04T21:33:56.000Z", "max_issues_repo_path": "src/Density.jl", "max_issues_repo_name": "m3g/MDDF", "max_issues_repo_head_hexsha": "efbc8e0dcf426c9b2246217eb9edaf4605318e84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-31T00:18:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-23T20:04:16.000Z", "max_forks_repo_path": "src/Density.jl", "max_forks_repo_name": "m3g/ComplexMixtures", "max_forks_repo_head_hexsha": "efbc8e0dcf426c9b2246217eb9edaf4605318e84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.8214285714, "max_line_length": 70, "alphanum_fraction": 0.6630630631, "num_tokens": 172}
# Copyright 2019 Xilinx, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import numpy as np import keras from keras.models import Sequential from keras.layers import Dense from keras.callbacks import ModelCheckpoint from keras.datasets import mnist from keras.optimizers import RMSprop import argparse import gemx import sys sys.path.append("./examples/keras") import mlp_common #Quantization parameters to bring fp32 ranges to fit into int16; parameters are derived offline ( see quantize.py) #This parameters won't be used if using fp32 xclbin g_in_scale = 128.0 g_wgt_scale = [404.0560286512244, 473.4069784577793, 281.28154919137654] g_bias_scale = [51719.17166735672, 7762.150921617317, 4669.09545165545] g_post_scale = [[5, 14], [1, 9], [1, 12]] def train(model, x_train, y_train, x_test, y_test): num_classes = 10 y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) modelcheckpoint_callback = ModelCheckpoint("./best_mnist_model.h5", monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True) history = model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=1, validation_split=0.1, callbacks=[modelcheckpoint_callback]) score = model.evaluate(x_test, y_test, batch_size=128, verbose=1) print('Test score:', score[0]) print('Test accuracy:', score[1]) def create_keras_model(num_classes): # Generate a simple Keras model. model = Sequential() model.add(Dense(512, input_shape=(784,),activation='relu')) model.add(Dense(512, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.summary() return model if __name__ == '__main__': np.random.seed(27) parser = argparse.ArgumentParser(description='GEMX') parser.add_argument('--model', required = True, help='model') parser.add_argument('--xclbin', required = True, help='file path to FPGA bitstream') parser.add_argument('--cfg', required = True, help='file describing properties of .xclbin') parser.add_argument('--gemxlib', required = True, help='file path to GEMX host code shared library') parser.add_argument('--engine', default = 'fcn', choices=['fcn','uspmv'],help='choose fcn, uspmv engine') parser.add_argument('--train', default = False, help='set to True if retrain the model') args = parser.parse_args() xclbin_prop = gemx.parse_cfg(args.cfg) #load xclbin if args.engine == 'fcn': gemx.createFCNHandle( args, xclbin_prop ) else: gemx.createUSPMVHandle( args, xclbin_prop ) (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.reshape(60000, 784) x_test = x_test.reshape(10000, 784) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices num_classes = 10 model = create_keras_model(num_classes) model.load_weights(args.model) if args.train: train(model, x_train, y_train, x_test, y_test) cpu_out = mlp_common.predict_cpu( model, x_test) if args.engine == 'fcn': fpga_out = mlp_common.predict_fpga( model, x_test, xclbin_prop, g_in_scale, g_wgt_scale, g_bias_scale, g_post_scale) else: fpga_out = mlp_common.predict_uspmv_fpga(model, x_test, xclbin_prop) print("compare real data with cpu:") mlp_common.compare_real_results( y_test, np.argmax(cpu_out,axis=1)) print("compare real data with fpga:") mlp_common.compare_real_results( y_test, np.argmax(fpga_out,axis=1)) print("compare cpu with fpga:") mlp_common.compare_real_results( np.argmax(cpu_out,axis=1), np.argmax(fpga_out,axis=1))
{"hexsha": "932fc0bbc8b659421081be8dbe3edbd080841105", "size": 4611, "ext": "py", "lang": "Python", "max_stars_repo_path": "gemx/MLsuite_MLP/examples/keras/mnist/mlp_mnist.py", "max_stars_repo_name": "mihnea-chirila/gemx", "max_stars_repo_head_hexsha": "81e1075975908744d905fdb00175e00849b90630", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 58, "max_stars_repo_stars_event_min_datetime": "2017-12-14T12:45:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T16:01:03.000Z", "max_issues_repo_path": "gemx/MLsuite_MLP/examples/keras/mnist/mlp_mnist.py", "max_issues_repo_name": "mihnea-chirila/gemx", "max_issues_repo_head_hexsha": "81e1075975908744d905fdb00175e00849b90630", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2018-01-26T03:19:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-21T16:26:00.000Z", "max_forks_repo_path": "gemx/MLsuite_MLP/examples/keras/mnist/mlp_mnist.py", "max_forks_repo_name": "mihnea-chirila/gemx", "max_forks_repo_head_hexsha": "81e1075975908744d905fdb00175e00849b90630", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2017-12-28T08:05:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-12T00:06:23.000Z", "avg_line_length": 41.1696428571, "max_line_length": 148, "alphanum_fraction": 0.7028844069, "include": true, "reason": "import numpy", "num_tokens": 1177}
import sys sys.path.append(".") from PyQt5.QtCore import QObject, pyqtSignal, QBasicTimer import numpy as np import cv2 import os class VideoRecorder(QObject): imageData = pyqtSignal(np.ndarray) def __init__(self, camera_port=0, parent=None): super().__init__(parent) self.camera = cv2.VideoCapture(camera_port) self.timer = QBasicTimer() def startRecording(self): self.timer.start(0, self) def timerEvent(self, event): if (event.timerId() != self.timer.timerId()): return read, image = self.camera.read() if read: self.imageData.emit(image)
{"hexsha": "808f9741bbf9bf60742171589bfb9b1d74322ab4", "size": 644, "ext": "py", "lang": "Python", "max_stars_repo_path": "App/utils/video_recorder.py", "max_stars_repo_name": "yonycherkos/automatic-classroom-attendance-system-using-face-recognition", "max_stars_repo_head_hexsha": "95925d9906a0b76cadb6abac778dd3bd18f01174", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-04T07:34:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-04T07:34:45.000Z", "max_issues_repo_path": "App/utils/video_recorder.py", "max_issues_repo_name": "yonycherkos/automatic-classroom-attendance-system-using-face-recognition", "max_issues_repo_head_hexsha": "95925d9906a0b76cadb6abac778dd3bd18f01174", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "App/utils/video_recorder.py", "max_forks_repo_name": "yonycherkos/automatic-classroom-attendance-system-using-face-recognition", "max_forks_repo_head_hexsha": "95925d9906a0b76cadb6abac778dd3bd18f01174", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8518518519, "max_line_length": 57, "alphanum_fraction": 0.647515528, "include": true, "reason": "import numpy", "num_tokens": 149}
From aneris.aneris_lang.lib Require Export assert_proof network_util_proof set_proof map_proof nodup_proof coin_flip_proof inject. From aneris_examples.transaction_commit Require Import two_phase_prelude. Section transaction_manager. Context `{!network_topo}. Context `{!anerisG (TC_model RMs) Σ, !tcG Σ}. Lemma wp_transaction_manager_recv_responses R T h l rcv vRMs : is_set RMs vRMs → {{{ pending ∗ is_rcvset_log l R tm ∗ wp_nodup_rcv rcv tm (udp_socket (Some tm) true) l ∗ tm ⤳ (R, T) ∗ h ↪[ip_of_address tm] udp_socket (Some tm) true ∗ tm ⤇ tm_si }}} recv_responses rcv #(LitSocket h) vRMs @[ip_of_address tm] {{{ (b : bool) R', RET #b; pending ∗ tm ⤳ (R', T) ∗ h ↪[ip_of_address tm] udp_socket (Some tm) true ∗ is_rcvset_log l R' tm ∗ if b then [∗ set] rm ∈ RMs, rm ↦◯ PREPARED ∗ pending else ∃ rm, ⌜rm ∈ RMs⌝ ∗ rm ↦◯ ABORTED ∗ pending_discarded }}}. Proof. iIntros (HRMs Φ) "(Hpend & Hl & #Hrcv & Htm & Hh & #Hsi) HΦ". rewrite /recv_responses. do 8 wp_pure _. wp_apply (wp_set_empty socket_address); [done|]; iIntros (v Hv). iAssert (∃ prepared, ⌜is_set prepared v⌝ ∗ [∗ set] rm ∈ prepared, rm ↦◯ (PREPARED : rm_stateO) ∗ pending)%I as "-#Hloop". { iExists _. eauto. } clear Hv. iLöb as "IH" forall (v R) "Hloop". iDestruct "Hloop" as (X) "[%HX Hprepared]". wp_pures. wp_apply wp_set_equal; [done|]. iIntros ([] ?); simplify_eq. { wp_pures. iApply "HΦ". iFrame. } wp_pures. wp_apply ("Hrcv" with "[$Hh $Htm $Hl $Hsi]"). iIntros (m) "(Hh & Htm & Hm & Hl & % & %)". wp_pures. case_bool_decide. - wp_pures. wp_apply (wp_set_add $! HX). iIntros (X' HX'). wp_apply ("IH" with "Hpend Hl Htm Hh HΦ"). rewrite /tm_si. iDestruct "Hm" as "[% [(_ & Hal & Hpend) | [[% ?] | [% ?]]]]"; [|congruence..]. iExists _. iFrame (HX'). destruct (decide (m_sender m ∈ X)). { by assert ({[m_sender m]} ∪ X = X) as -> by set_solver. } iApply big_sepS_union; [set_solver|]. rewrite big_sepS_singleton. iFrame. - wp_pures. iApply "HΦ". iDestruct "Hm" as "[% [(% & Hal & Hpend') | [(% & ? & Hshot) | (% & ? & Hdisc)]]]"; [congruence| |]. { iDestruct (pending_shot with "Hpend Hshot") as %[]. } iFrame. eauto. Qed. (** * Transaction manager spec *) Lemma transaction_manager_spec A vRMs : tm ∈ A → is_set RMs vRMs → fixed A -∗ free_ports (ip_of_address tm) {[port_of_address tm]} -∗ ([∗ set] rm ∈ RMs, rm ⤇ rm_si) -∗ tm ⤇ tm_si -∗ tm ⤳ (∅, ∅) -∗ pending -∗ WP transaction_manager #tm vRMs @[ip_of_address tm] {{ v, (⌜v = #("COMMITTED")⌝ ∗ [∗ set] rm ∈ RMs, rm ↦◯ COMMITTED) ∨ (⌜v = #("ABORTED")⌝ ∗ ∃ rm, ⌜rm ∈ RMs⌝ ∗ rm ↦◯ ABORTED) }}. Proof. iIntros (? HRMs) "#HA Hp #Hrmsis #Htm_si Htm Hpend". rewrite /transaction_manager. wp_pures. wp_socket h as "Hh". wp_pures. wp_socketbind_static. wp_apply (wp_nodup_init _ (udp_socket _ _)); [done..|]. iIntros (l rcv) "[Hlog #Hrcv]". wp_let. (* sending "PREPARE" to all *) wp_apply (wp_sendto_all_set (λ _, rm_si) with "[$Hh $Htm]"); auto. { iFrame "%". iApply (big_sepS_impl with "Hrmsis"). iIntros "!#" (??) "Hsi". iFrame. by iLeft. } iIntros (?) "[Hh Htm]". wp_seq. wp_apply (wp_transaction_manager_recv_responses with "[$Hh $Hpend $Hlog $Htm $Hrcv $Htm_si]"); [done|]. iIntros ([] R') "(Hpend & Htm & Hh & Hlog & Hb)". - (* all RMs are prepared to commit *) wp_pures. iDestruct (big_sepS_sep with "Hb") as "[#Hprepared Hpends]". iMod (tm_shot_prepared with "Hpend Hpends") as "#Hshot". wp_apply (wp_sendto_all_set (λ _, rm_si) with "[$Hh $Htm]"); [done|done|..]. { iFrame "%". iApply (big_sepS_impl with "Hrmsis"). iIntros "!#" (??) "Hsi". iFrame. rewrite /rm_si /=. eauto. } iIntros (?) "(Hh & Htm)". wp_pures. wp_apply (wp_receivefrom_nodup_set with "[] Hrcv [$Hh $Htm_si $Htm $Hlog //]"); [done..| |]. { by iIntros "!#" (?) "[% ?]". } iIntros (d' vd' ?) "(%Hd' & %Hdom' & Hms & _ & Hh & Htm & Hlog)". wp_pures. iPoseProof (tm_rm_committed with "Hshot Hms") as "Hms". iDestruct (big_sepM_sep with "Hms") as "[Hb Hms]". wp_apply (wp_map_iter (λ _ b, ⌜b = "COMMITTED"⌝)%I (λ _ _, True)%I True%I _ _ d' with "[] [$Hb //]"). { iIntros (rm b Ξ) "!# [_ ->] HΞ". do 3 wp_pure _. wp_apply wp_assert. wp_pures. iSplit; [done|]. iModIntro. by iApply "HΞ". } iIntros "_". wp_seq. iLeft. iSplit; [done|]. rewrite big_sepM_dom Hdom' //. - (* someone aborted *) wp_pures. iMod (pending_discard with "Hpend") as "#Hdisc". wp_apply (wp_sendto_all_set (λ _, rm_si) with "[$Hh $Htm]"); [done|done|..]. { iFrame "%". iApply (big_sepS_impl with "Hrmsis"). iIntros "!#" (??) "Hsi". iFrame. rewrite /rm_si; eauto. } iIntros (?) "[Hh Htm]". wp_pures. iRight. iDestruct "Hb" as (?) "(?&?&?)". eauto. Qed. End transaction_manager.
{"author": "fresheed", "repo": "trillium-experiments", "sha": "a9c38a9e9566fb8057ae97ecb8d1a0c09c799aef", "save_path": "github-repos/coq/fresheed-trillium-experiments", "path": "github-repos/coq/fresheed-trillium-experiments/trillium-experiments-a9c38a9e9566fb8057ae97ecb8d1a0c09c799aef/theories/transaction_commit/two_phase_tm.v"}
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI Institute" __license__ = "MIT" """Metrics used to compare environemnts, sets of factors, etc """ import numpy as np import ot def wasserstein_distance( x1: np.ndarray, x2: np.ndarray, p: int = 2, seed: int = 0, n_projections: int = 50, ) -> float: """Wasserstein distance. Uses euclidean metric. :param x1: First set of empirical samples (from P). :param x2: Second set of empirical samples (from Q). :param p: Order of metric, e.g., 2=W-2 :param seed: Random seed to use. :param n_projections: Number of projections for slice wasserstein algorithm. :return: Estimated W-p distance. """ n1 = x1.shape[0] n2 = x2.shape[0] a, b = np.ones((n1,)) / n1, np.ones((n2,)) / n2 w = ot.sliced_wasserstein_distance(x1, x2, a, b, n_projections, seed=seed, p=p) return w
{"hexsha": "cc549b0d37b9310d5b90a93fd5df50aeb76d5d92", "size": 906, "ext": "py", "lang": "Python", "max_stars_repo_path": "segar/metrics.py", "max_stars_repo_name": "fgolemo/segar", "max_stars_repo_head_hexsha": "8e21f8ee01bc72adb84dec7998b014d11d2b1fbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2022-02-16T18:45:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:42:19.000Z", "max_issues_repo_path": "segar/metrics.py", "max_issues_repo_name": "microsoft/segar", "max_issues_repo_head_hexsha": "78463968238482ae035121504458dd0909107e10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-02-16T22:58:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T23:11:10.000Z", "max_forks_repo_path": "segar/metrics.py", "max_forks_repo_name": "fgolemo/segar", "max_forks_repo_head_hexsha": "8e21f8ee01bc72adb84dec7998b014d11d2b1fbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2022-02-17T01:46:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T21:21:19.000Z", "avg_line_length": 29.2258064516, "max_line_length": 87, "alphanum_fraction": 0.6556291391, "include": true, "reason": "import numpy", "num_tokens": 279}
""" This file contains fixtures that are used at multiple points in the tests. """ import pytest import numpy as np import pandas as pd from mokapot import LinearPsmDataset @pytest.fixture(scope="session") def psm_df_6(): """A DataFrame containing 6 PSMs""" data = { "target": [True, True, True, False, False, False], "spectrum": [1, 2, 3, 4, 5, 1], "group": [1, 1, 2, 2, 2, 1], "peptide": ["a", "b", "a", "c", "d", "e"], "protein": ["A", "B"] * 3, "feature_1": [4, 3, 2, 2, 1, 0], "feature_2": [2, 3, 4, 1, 2, 3], } return pd.DataFrame(data) @pytest.fixture() def psm_df_1000(tmp_path): """A DataFrame with 1000 PSMs from 500 spectra and a FASTA file.""" rng = np.random.Generator(np.random.PCG64(42)) targets = { "target": [True] * 500, "spectrum": np.arange(500), "group": rng.choice(2, size=500), "peptide": [_random_peptide(5, rng) for _ in range(500)], "score": np.concatenate( [rng.normal(3, size=200), rng.normal(size=300)] ), "filename": "test.mzML", "calcmass": rng.uniform(500, 2000, size=500), "expmass": rng.uniform(500, 2000, size=500), "ret_time": rng.uniform(0, 60 * 120, size=500), "charge": rng.choice([2, 3, 4], size=500), } decoys = { "target": [False] * 500, "spectrum": np.arange(500), "group": rng.choice(2, size=500), "peptide": [_random_peptide(5, rng) for _ in range(500)], "score": rng.normal(size=500), "filename": "test.mzML", "calcmass": rng.uniform(500, 2000, size=500), "expmass": rng.uniform(500, 2000, size=500), "ret_time": rng.uniform(0, 60 * 120, size=500), "charge": rng.choice([2, 3, 4], size=500), } fasta_data = "\n".join( _make_fasta(100, targets["peptide"], 10, rng) + _make_fasta(100, decoys["peptide"], 10, rng, "decoy") ) fasta = tmp_path / "test_1000.fasta" with open(fasta, "w+") as fasta_ref: fasta_ref.write(fasta_data) return (pd.concat([pd.DataFrame(targets), pd.DataFrame(decoys)]), fasta) @pytest.fixture def psms(psm_df_1000): """A small LinearPsmDataset""" df, _ = psm_df_1000 psms = LinearPsmDataset( psms=df, target_column="target", spectrum_columns="spectrum", peptide_column="peptide", feature_columns="score", filename_column="filename", scan_column="spectrum", calcmass_column="calcmass", expmass_column="expmass", rt_column="ret_time", charge_column="charge", copy_data=True, ) return psms def _make_fasta( num_proteins, peptides, peptides_per_protein, random_state, prefix="" ): """Create a FASTA string from a set of peptides Parameters ---------- num_proteins : int The number of proteins to generate. peptides : list of str A list of peptide sequences. peptides_per_protein: int The number of peptides per protein. random_state : numpy.random.Generator object The random state. prefix : str The prefix, if generating decoys Returns ------- list of str A list of lines in a FASTA file. """ lines = [] for protein in range(num_proteins): lines.append(f">{prefix}sp|test|test_{protein}") lines.append( "".join(list(random_state.choice(peptides, peptides_per_protein))) ) return lines def _random_peptide(length, random_state): """Generate a random peptide""" return "".join( list(random_state.choice(list("ACDEFGHILMNPQSTVWY"), length - 1)) + ["K"] ) @pytest.fixture def mock_proteins(): class proteins: def __init__(self): self.peptide_map = {"ABCDXYZ": "X|Y|Z"} self.shared_peptides = {"ABCDEFG": "A|B|C; X|Y|Z"} return proteins() @pytest.fixture def mock_conf(): "Create a mock-up of a LinearConfidence object" class conf: def __init__(self): self._optional_columns = { "filename": "filename", "calcmass": "calcmass", "rt": "ret_time", "charge": "charge", } self._protein_column = "protein" self._peptide_column = "peptide" self._eval_fdr = 0.5 self._proteins = None self._has_proteins = False self.peptides = pd.DataFrame( { "filename": "a/b/c.mzML", "calcmass": [1, 2], "ret_time": [60, 120], "charge": [2, 3], "peptide": ["B.ABCD[+2.817]XYZ.A", "ABCDE(shcah8)FG"], "mokapot q-value": [0.001, 0.1], "protein": ["A|B|C\tB|C|A", "A|B|C"], } ) self.confidence_estimates = {"peptides": self.peptides} self.decoy_confidence_estimates = {"peptides": self.peptides} return conf()
{"hexsha": "c490a6175952ad5a79f7015d0bc8af19d1a06ed9", "size": 5109, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/conftest.py", "max_stars_repo_name": "mobiusklein/mokapot", "max_stars_repo_head_hexsha": "5148da1c09cfc92f9b776ae8306619429d0656cb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-09-14T19:47:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-25T21:10:50.000Z", "max_issues_repo_path": "tests/conftest.py", "max_issues_repo_name": "mobiusklein/mokapot", "max_issues_repo_head_hexsha": "5148da1c09cfc92f9b776ae8306619429d0656cb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-09-29T19:38:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T05:02:56.000Z", "max_forks_repo_path": "tests/conftest.py", "max_forks_repo_name": "mobiusklein/mokapot", "max_forks_repo_head_hexsha": "5148da1c09cfc92f9b776ae8306619429d0656cb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-09-29T19:12:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T17:39:16.000Z", "avg_line_length": 29.0284090909, "max_line_length": 78, "alphanum_fraction": 0.5519671169, "include": true, "reason": "import numpy", "num_tokens": 1442}
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import os import random import numpy as np ## Adapted from https://github.com/joaomonteirof/e2e_antispoofing class SelfAttention(nn.Module): def __init__(self, hidden_size, mean_only=False): super(SelfAttention, self).__init__() #self.output_size = output_size self.hidden_size = hidden_size self.att_weights = nn.Parameter(torch.Tensor(1, hidden_size),requires_grad=True) self.mean_only = mean_only init.kaiming_uniform_(self.att_weights) def forward(self, inputs): batch_size = inputs.size(0) weights = torch.bmm(inputs, self.att_weights.permute(1, 0).unsqueeze(0).repeat(batch_size, 1, 1)) if inputs.size(0)==1: attentions = F.softmax(torch.tanh(weights),dim=1) weighted = torch.mul(inputs, attentions.expand_as(inputs)) else: attentions = F.softmax(torch.tanh(weights.squeeze()),dim=1) weighted = torch.mul(inputs, attentions.unsqueeze(2).expand_as(inputs)) if self.mean_only: return weighted.sum(1) else: noise = 1e-5*torch.randn(weighted.size()) if inputs.is_cuda: noise = noise.to(inputs.device) avg_repr, std_repr = weighted.sum(1), (weighted+noise).std(1) representations = torch.cat((avg_repr,std_repr),1) return representations class PreActBlock(nn.Module): '''Pre-activation version of the BasicBlock.''' expansion = 1 def __init__(self, in_planes, planes, stride, *args, **kwargs): super(PreActBlock, self).__init__() self.bn1 = nn.BatchNorm2d(in_planes) self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) if stride != 1 or in_planes != self.expansion*planes: self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)) def forward(self, x): out = F.relu(self.bn1(x)) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.relu(self.bn2(out))) out += shortcut return out class PreActBottleneck(nn.Module): '''Pre-activation version of the original Bottleneck module.''' expansion = 4 def __init__(self, in_planes, planes, stride, *args, **kwargs): super(PreActBottleneck, self).__init__() self.bn1 = nn.BatchNorm2d(in_planes) self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn3 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False) if stride != 1 or in_planes != self.expansion*planes: self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)) def forward(self, x): out = F.relu(self.bn1(x)) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.relu(self.bn2(out))) out = self.conv3(F.relu(self.bn3(out))) out += shortcut return out def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) RESNET_CONFIGS = {'18': [[2, 2, 2, 2], PreActBlock], '28': [[3, 4, 6, 3], PreActBlock], '34': [[3, 4, 6, 3], PreActBlock], '50': [[3, 4, 6, 3], PreActBottleneck], '101': [[3, 4, 23, 3], PreActBottleneck] } def setup_seed(random_seed, cudnn_deterministic=True): # initialization torch.manual_seed(random_seed) random.seed(random_seed) np.random.seed(random_seed) os.environ['PYTHONHASHSEED'] = str(random_seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(random_seed) torch.backends.cudnn.deterministic = cudnn_deterministic torch.backends.cudnn.benchmark = False class ResNet(nn.Module): def __init__(self, num_nodes, enc_dim, resnet_type='18', nclasses=2): self.in_planes = 16 super(ResNet, self).__init__() layers, block = RESNET_CONFIGS[resnet_type] self._norm_layer = nn.BatchNorm2d self.conv1 = nn.Conv2d(1, 16, kernel_size=(9, 3), stride=(3, 1), padding=(1, 1), bias=False) self.bn1 = nn.BatchNorm2d(16) self.activation = nn.ReLU() self.layer1 = self._make_layer(block, 64, layers[0], stride=1) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.conv5 = nn.Conv2d(512 * block.expansion, 256, kernel_size=(num_nodes, 3), stride=(1, 1), padding=(0, 1), bias=False) self.bn5 = nn.BatchNorm2d(256) self.fc = nn.Linear(256 * 2, enc_dim) self.fc_mu = nn.Linear(enc_dim, nclasses) if nclasses >= 2 else nn.Linear(enc_dim, 1) self.initialize_params() self.attention = SelfAttention(256) def initialize_params(self): for layer in self.modules(): if isinstance(layer, torch.nn.Conv2d): init.kaiming_normal_(layer.weight, a=0, mode='fan_out') elif isinstance(layer, torch.nn.Linear): init.kaiming_uniform_(layer.weight) elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d): layer.weight.data.fill_(1) layer.bias.data.zero_() def _make_layer(self, block, planes, num_blocks, stride=1): norm_layer = self._norm_layer downsample = None if stride != 1 or self.in_planes != planes * block.expansion: downsample = nn.Sequential(conv1x1(self.in_planes, planes * block.expansion, stride), norm_layer(planes * block.expansion)) layers = [] layers.append(block(self.in_planes, planes, stride, downsample, 1, 64, 1, norm_layer)) self.in_planes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block(self.in_planes, planes, 1, groups=1, base_width=64, dilation=False, norm_layer=norm_layer)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.activation(self.bn1(x)) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.conv5(x) x = self.activation(self.bn5(x)).squeeze(2) stats = self.attention(x.permute(0, 2, 1).contiguous()) feat = self.fc(stats) mu = self.fc_mu(feat) return feat, mu
{"hexsha": "ccda2be0e277d40ccd7cfaa006347b7b29ac0dba", "size": 7381, "ext": "py", "lang": "Python", "max_stars_repo_path": "resnet.py", "max_stars_repo_name": "AirLabUR/ASVspoof2021_AIR", "max_stars_repo_head_hexsha": "e63ce99ceb1827d81e306b75d09999be28a042c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2020-10-30T20:41:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T05:36:47.000Z", "max_issues_repo_path": "resnet.py", "max_issues_repo_name": "AirLabUR/ASVspoof2021_AIR", "max_issues_repo_head_hexsha": "e63ce99ceb1827d81e306b75d09999be28a042c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-11-01T16:58:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-29T16:49:36.000Z", "max_forks_repo_path": "resnet.py", "max_forks_repo_name": "AirLabUR/ASVspoof2021_AIR", "max_forks_repo_head_hexsha": "e63ce99ceb1827d81e306b75d09999be28a042c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2020-12-23T09:03:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T10:20:33.000Z", "avg_line_length": 38.4427083333, "max_line_length": 128, "alphanum_fraction": 0.6274217586, "include": true, "reason": "import numpy", "num_tokens": 1915}
#ifndef STAN_MCMC_HMC_HAMILTONIANS_UNIT_E_METRIC_HPP #define STAN_MCMC_HMC_HAMILTONIANS_UNIT_E_METRIC_HPP #include <stan/mcmc/hmc/hamiltonians/base_hamiltonian.hpp> #include <stan/mcmc/hmc/hamiltonians/unit_e_point.hpp> #include <boost/random/variate_generator.hpp> #include <boost/random/normal_distribution.hpp> namespace stan { namespace mcmc { // Euclidean manifold with unit metric template <class Model, class BaseRNG> class unit_e_metric : public base_hamiltonian<Model, unit_e_point, BaseRNG> { public: explicit unit_e_metric(const Model& model) : base_hamiltonian<Model, unit_e_point, BaseRNG>(model) {} double T(unit_e_point& z) { return 0.5 * z.p.squaredNorm(); } double tau(unit_e_point& z) { return T(z); } double phi(unit_e_point& z) { return this->V(z); } double dG_dt(unit_e_point& z, callbacks::logger& logger) { return 2 * T(z) - z.q.dot(z.g); } Eigen::VectorXd dtau_dq(unit_e_point& z, callbacks::logger& logger) { return Eigen::VectorXd::Zero(this->model_.num_params_r()); } Eigen::VectorXd dtau_dp(unit_e_point& z) { return z.p; } Eigen::VectorXd dphi_dq(unit_e_point& z, callbacks::logger& logger) { return z.g; } void sample_p(unit_e_point& z, BaseRNG& rng) { boost::variate_generator<BaseRNG&, boost::normal_distribution<> > rand_unit_gaus(rng, boost::normal_distribution<>()); for (int i = 0; i < z.p.size(); ++i) z.p(i) = rand_unit_gaus(); } }; } // mcmc } // stan #endif
{"hexsha": "d0e914bd643a299c6703838068d15a488da3ee99", "size": 1704, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "archive/stan/src/stan/mcmc/hmc/hamiltonians/unit_e_metric.hpp", "max_stars_repo_name": "alashworth/stan-monorepo", "max_stars_repo_head_hexsha": "75596bc1f860ededd7b3e9ae9002aea97ee1cd46", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-09-06T15:53:17.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-06T15:53:17.000Z", "max_issues_repo_path": "archive/stan/src/stan/mcmc/hmc/hamiltonians/unit_e_metric.hpp", "max_issues_repo_name": "alashworth/stan-monorepo", "max_issues_repo_head_hexsha": "75596bc1f860ededd7b3e9ae9002aea97ee1cd46", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2019-01-17T18:51:16.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-17T18:51:39.000Z", "max_forks_repo_path": "archive/stan/src/stan/mcmc/hmc/hamiltonians/unit_e_metric.hpp", "max_forks_repo_name": "alashworth/stan-monorepo", "max_forks_repo_head_hexsha": "75596bc1f860ededd7b3e9ae9002aea97ee1cd46", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4, "max_line_length": 76, "alphanum_fraction": 0.6179577465, "num_tokens": 453}
import torch import torch.utils.data as data from glob import glob from os.path import join, basename, exists import numpy as np import pickle as pkl from random import random np.random.seed(123) class TTSDataset(data.Dataset): def __init__(self, which_set='train', datapath='./samples'): # Load vocabulary vocab_path = datapath + '/vocab_dict.pkl' self.vocab_dict = pkl.load(open(vocab_path, 'rb')) self.vocab_size = len(self.vocab_dict) # Filelist self.txtlist = np.sort(glob(datapath+'/*.txt')) self.mellist = np.sort(glob(datapath+'/*.mel')) sent_no = lambda x: int(basename(x).split('_')[1][:5]) self.gen_lu = {'female': 0, 'male': 1} self.age_lu = {'age20': 0, 'age30': 1, 'age40': 2} self.emo_lu = {'neu': 0, 'hap': 1, 'sad': 2, 'ang': 3, 'sur': 4, 'fea': 5, 'dis': 6} assert len(self.txtlist)==len(self.mellist), \ 'mellist({}) and txtlist({}) has different length'.format(len(self.mellist), len(self.txtlist)) self.char2onehot = lambda x : self.vocab_dict[x] if x in self.vocab_dict.keys() else None def __len__(self): return len(self.txtlist) def __getitem__(self, idx): ''' Be sure that contents(input_mel) == contents(target_mel) == txt style(ref_mel) == style(target_mel) ''' # Text read with open(self.txtlist[idx], 'r') as f: txt = f.readline() txt_feat = list(filter(None, [self.char2onehot(xx) for xx in txt])) # load mel/lin of x_o mellin = pkl.load(open(self.mellist[idx], 'rb')) mel = mellin['mel'] lin = mellin['lin'] # Get path of x_s mel_emo = basename(self.mellist[idx])[:3] emo_set = sorted(self.emo_lu.keys()) emo_set.remove(mel_emo) emo_set = np.random.permutation(emo_set) for input_emo in emo_set: input_path = self.mellist[idx].replace(mel_emo, input_emo) if exists(input_path): break # Get path of x_c while True: sent_no = '{:05d}'.format(np.random.randint(3000)) ref_path = self.mellist[idx] ref_path = ref_path.replace(ref_path[-9:-4], sent_no) if exists(ref_path): break input_mel = pkl.load(open(input_path, 'rb'))['mel'] ref_mel = pkl.load(open(ref_path, 'rb'))['mel'] style = self.getstyle(self.txtlist[idx]) return {'txt': np.asarray(txt_feat), 'style': style, 'lin': np.asarray(lin), 'target_mel': np.asarray(mel), 'ref_mel': np.asarray(ref_mel), 'input_mel': np.asarray(input_mel), 'filename': {'target':self.mellist[idx], 'ref':ref_path, 'input':input_path} } def getstyle(self, filename): filename = basename(filename) gender = self.gen_lu['male'] age = self.age_lu['age30'] emotion = self.emo_lu[filename[:3]] return {'age': age, 'gender': gender,'emotion': emotion} def get_vocab_size(self): return self.vocab_size
{"hexsha": "084ed86d7ae7071ba4426d9ee4a95746c999711c", "size": 3212, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset.py", "max_stars_repo_name": "ktho22/vctts", "max_stars_repo_head_hexsha": "84e8bc6c4b5586aa319c7c21c4325f879f2cd3ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-08-31T09:36:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T03:15:09.000Z", "max_issues_repo_path": "dataset.py", "max_issues_repo_name": "Lukelluke/vctts", "max_issues_repo_head_hexsha": "84e8bc6c4b5586aa319c7c21c4325f879f2cd3ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataset.py", "max_forks_repo_name": "Lukelluke/vctts", "max_forks_repo_head_hexsha": "84e8bc6c4b5586aa319c7c21c4325f879f2cd3ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-09-19T11:50:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-19T14:57:43.000Z", "avg_line_length": 35.6888888889, "max_line_length": 111, "alphanum_fraction": 0.5663138232, "include": true, "reason": "import numpy", "num_tokens": 856}
(* Title: List2.thy Date: Oct 2006 Author: David Trachtenherz *) header {* Additional definitions and results for lists *} theory List2 imports "../CommonSet/SetIntervalCut" begin subsection {* Additional definitions and results for lists *} text {* Infix syntactical abbreviations for operators @{term take} and @{term drop}. The abbreviations resemble to the operator symbols used later for take and drop operators on infinite lists in ListInf. *} (* syntax (xsymbols) "_f_take" :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl "\<down>" 100) "_f_drop" :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl "\<up>" 100) translations "xs \<down> n" \<rightleftharpoons> "CONST take n xs" "xs \<up> n" \<rightleftharpoons> "CONST drop n xs" *) abbreviation (xsymbols) "f_take'" :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl "\<down>" 100) where "xs \<down> n \<equiv> take n xs" abbreviation (xsymbols) "f_drop'" :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl "\<up>" 100) where "xs \<up> n \<equiv> drop n xs" syntax (HTML output) "f_take'" :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl "\<down>" 100) "f_drop'" :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl "\<up>" 100) term "xs \<down> n" term "xs \<up> n" thm List.append_Cons lemma append_eq_Cons: "[x] @ xs = x # xs" by simp lemma length_Cons: "length (x # xs) = Suc (length xs)" by simp lemma length_snoc: "length (xs @ [x]) = Suc (length xs)" by simp subsubsection {* Additional lemmata about list emptiness *} lemma length_greater_imp_not_empty:"n < length xs \<Longrightarrow> xs \<noteq> []" by fastforce lemma length_ge_Suc_imp_not_empty:"Suc n \<le> length xs \<Longrightarrow> xs \<noteq> []" by fastforce thm length_take lemma length_take_le: "length (xs \<down> n) \<le> length xs" by simp lemma take_not_empty_conv:"(xs \<down> n \<noteq> []) = (0 < n \<and> xs \<noteq> [])" by simp lemma drop_not_empty_conv:"(xs \<up> n \<noteq> []) = (n < length xs)" by fastforce lemma zip_not_empty_conv: "(zip xs ys \<noteq> []) = (xs \<noteq> [] \<and> ys \<noteq> [])" by (simp add: zip_eq_Nil) subsubsection {* Additional lemmata about @{term take}, @{term drop}, @{term hd}, @{term last}, @{text nth} and @{text filter} *} lemma nth_tl_eq_nth_Suc: " Suc n \<le> length xs \<Longrightarrow> (tl xs) ! n = xs ! Suc n" thm hd_Cons_tl[OF length_ge_Suc_imp_not_empty, THEN subst] by (rule hd_Cons_tl[OF length_ge_Suc_imp_not_empty, THEN subst], simp+) corollary nth_tl_eq_nth_Suc2: " n < length xs \<Longrightarrow> (tl xs) ! n = xs ! Suc n" by (simp add: nth_tl_eq_nth_Suc) lemma hd_eq_first: "xs \<noteq> [] \<Longrightarrow> xs ! 0 = hd xs" by (induct xs, simp_all) corollary take_first:"xs \<noteq> [] \<Longrightarrow> xs \<down> (Suc 0) = [xs ! 0]" by (induct xs, simp_all) corollary take_hd:"xs \<noteq> [] \<Longrightarrow> xs \<down> (Suc 0) = [hd xs]" by (simp add: take_first hd_eq_first) thm last_conv_nth theorem last_nth: "xs \<noteq> [] \<Longrightarrow> last xs = xs ! (length xs - Suc 0)" by (simp add: last_conv_nth) lemma last_take: "n < length xs \<Longrightarrow> last (xs \<down> Suc n) = xs ! n" by (simp add: last_nth length_greater_imp_not_empty min_eqR) corollary last_take2:" \<lbrakk> 0 < n; n \<le> length xs \<rbrakk> \<Longrightarrow> last (xs \<down> n) = xs ! (n - Suc 0)" thm diff_Suc_less[THEN order_less_le_trans] apply (frule diff_Suc_less[THEN order_less_le_trans, of _ "length xs" 0], assumption) thm last_take[of "n - Suc 0" xs] apply (drule last_take[of "n - Suc 0" xs]) apply simp done thm List.nth_drop corollary nth_0_drop: "n \<le> length xs \<Longrightarrow> (xs \<up> n) ! 0 = xs ! n" by (cut_tac nth_drop[of n 0 xs], simp+) corollary hd_drop: "n < length xs \<Longrightarrow> hd (xs \<up> n) = xs ! n" apply (frule drop_not_empty_conv[THEN iffD2]) apply (simp add: hd_eq_first[symmetric]) done lemma drop_eq_tl: "xs \<up> (Suc 0) = tl xs" by (simp add: drop_Suc) lemma drop_take_1: " n < length xs \<Longrightarrow> xs \<up> n \<down> (Suc 0) = [xs ! n]" thm take_hd hd_drop by (simp add: take_hd hd_drop) lemma upt_append: "m \<le> n \<Longrightarrow> [0..<m] @ [m..<n] = [0..<n]" thm upt_add_eq_append[of 0 m "n - m"] by (insert upt_add_eq_append[of 0 m "n - m"], simp) thm nth_append lemma nth_append1: "n < length xs \<Longrightarrow> (xs @ ys) ! n = xs ! n" by (simp add: nth_append) lemma nth_append2: "length xs \<le> n \<Longrightarrow> (xs @ ys) ! n = ys ! (n - length xs)" by (simp add: nth_append) lemma list_all_conv: "list_all P xs = (\<forall>i<length xs. P (xs ! i))" by (rule list_all_length) lemma expand_list_eq: " \<And>ys. (xs = ys) = (length xs = length ys \<and> (\<forall>i<length xs. xs ! i = ys ! i))" by (rule list_eq_iff_nth_eq) lemmas list_eq_iff = expand_list_eq lemma list_take_drop_imp_eq: " \<lbrakk> xs \<down> n = ys \<down> n; xs \<up> n = ys \<up> n \<rbrakk> \<Longrightarrow> xs = ys" apply (rule subst[OF append_take_drop_id[of n xs]]) apply (rule subst[OF append_take_drop_id[of n ys]]) apply simp done lemma list_take_drop_eq_conv: " (xs = ys) = (\<exists>n. (xs \<down> n = ys \<down> n \<and> xs \<up> n = ys \<up> n))" by (blast intro: list_take_drop_imp_eq) lemma list_take_eq_conv: "(xs = ys) = (\<forall>n. xs \<down> n = ys \<down> n)" apply (rule iffI, simp) apply (drule_tac x="max (length xs) (length ys)" in spec) apply simp done abbreviation (xsymbols) "replicate'" :: "'a \<Rightarrow> nat \<Rightarrow> 'a list" ("_\<^bsup>_\<^esup>" [1000,65]) where "x\<^bsup>n\<^esup> \<equiv> replicate n x" term "length x\<^bsup>(a+b)\<^esup>" thm List.replicate_Suc thm List.replicate_app_Cons_same lemma replicate_snoc: "x\<^bsup>n\<^esup> @ [x] = x\<^bsup>Suc n\<^esup>" by (simp add: replicate_app_Cons_same) thm List.nth_replicate lemma eq_replicate_conv: "(\<forall>i<length xs. xs ! i = m) = (xs = m\<^bsup>length xs\<^esup>)" apply (rule iffI) apply (simp add: expand_list_eq) apply clarsimp apply (rule ssubst[of xs "replicate (length xs) m"], assumption) apply (rule nth_replicate, simp) done lemma replicate_Cons_length: "length (x # a\<^bsup>n\<^esup>) = Suc n" by simp lemma replicate_pred_Cons_length: "0 < n \<Longrightarrow> length (x # a\<^bsup>n - Suc 0\<^esup>) = n" by simp thm replicate_add lemma replicate_le_diff: "m \<le> n \<Longrightarrow> x\<^bsup>m\<^esup> @ x\<^bsup>n - m\<^esup> = x\<^bsup>n\<^esup>" by (simp add: replicate_add[symmetric]) lemma replicate_le_diff2: "\<lbrakk> k \<le> m; m \<le> n \<rbrakk> \<Longrightarrow> x\<^bsup>m - k\<^esup> @ x\<^bsup>n - m\<^esup> = x\<^bsup>n - k\<^esup>" by (subst replicate_add[symmetric], simp) thm list.induct lemma append_constant_length_induct_aux: "\<And>xs. \<lbrakk> length xs div k = n; \<And>ys. k = 0 \<or> length ys < k \<Longrightarrow> P ys; \<And>xs ys. \<lbrakk> length xs = k; P ys \<rbrakk> \<Longrightarrow> P (xs @ ys) \<rbrakk> \<Longrightarrow> P xs" apply (case_tac "k = 0", blast) apply simp apply (induct n) apply (simp add: div_eq_0_conv') apply (subgoal_tac "k \<le> length xs") prefer 2 apply (rule div_gr_imp_gr_divisor[of 0], simp) apply (simp only: atomize_all atomize_imp, clarsimp) apply (erule_tac x="drop k xs" in allE) apply (simp add: div_diff_self2) apply (erule_tac x=undefined in allE) apply (erule_tac x="take k xs" in allE) apply (simp add: min_eqR) apply (erule_tac x="drop k xs" in allE) apply simp done lemma append_constant_length_induct: " \<lbrakk> \<And>ys. k = 0 \<or> length ys < k \<Longrightarrow> P ys; \<And>xs ys. \<lbrakk> length xs = k; P ys \<rbrakk> \<Longrightarrow> P (xs @ ys) \<rbrakk> \<Longrightarrow> P xs" by (simp add: append_constant_length_induct_aux[of _ _ "length xs div k"]) lemma zip_swap: "map (\<lambda>(y,x). (x,y)) (zip ys xs) = (zip xs ys)" by (simp add: expand_list_eq) lemma zip_takeL: "(zip xs ys) \<down> n = zip (xs \<down> n) ys" by (simp add: expand_list_eq) lemma zip_takeR: "(zip xs ys) \<down> n = zip xs (ys \<down> n)" thm zip_swap[of ys] apply (subst zip_swap[of ys, symmetric]) apply (subst take_map) apply (subst zip_takeL) apply (simp add: zip_swap) done lemma zip_take: "(zip xs ys) \<down> n = zip (xs \<down> n) (ys \<down> n)" by (rule take_zip) thm nth_zip lemma hd_zip: "\<lbrakk> xs \<noteq> []; ys \<noteq> [] \<rbrakk> \<Longrightarrow> hd (zip xs ys) = (hd xs, hd ys)" by (simp add: hd_conv_nth zip_not_empty_conv) lemma map_id: "map id xs = xs" by (simp add: id_def) lemma map_id_subst: "P (map id xs) \<Longrightarrow> P xs" by (subst map_id[symmetric]) lemma map_one: "map f [x] = [f x]" by simp lemma map_last: "xs \<noteq> [] \<Longrightarrow> last (map f xs) = f (last xs)" by (rule last_map) lemma filter_list_all: "list_all P xs \<Longrightarrow> filter P xs = xs" by (induct xs, simp+) lemma filter_snoc: "filter P (xs @ [x]) = (if P x then (filter P xs) @ [x] else filter P xs)" by (case_tac "P x", simp+) lemma filter_filter_eq: "list_all (\<lambda>x. P x = Q x) xs \<Longrightarrow> filter P xs = filter Q xs" by (induct xs, simp+) lemma filter_nth: "\<And>n. n < length (filter P xs) \<Longrightarrow> (filter P xs) ! n = xs ! (LEAST k. k < length xs \<and> n < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)})" apply (induct xs rule: rev_induct, simp) apply (rename_tac x xs n) thm filter_snoc apply (simp only: filter_snoc) apply (simp split del: split_if) apply (case_tac "xs = []") apply (simp split del: split_if) apply (rule_tac t = "\<lambda>k i. i = 0 \<and> i \<le> k \<and> P ([x] ! i)" and s = "\<lambda>k i. i = 0 \<and> P x" in subst) apply (simp add: fun_eq_iff) apply fastforce apply (fastforce simp: Least_def) apply (rule_tac t = "\<lambda>k. card {i. i \<le> k \<and> i < Suc (length xs) \<and> P ((xs @ [x]) ! i)}" and s = "\<lambda>k. (card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)} + (if k \<ge> length xs \<and> P x then Suc 0 else 0))" in subst) apply (clarsimp simp: fun_eq_iff split del: split_if, rename_tac k) apply (simp split del: split_if add: less_Suc_eq conj_disj_distribL conj_disj_distribR Collect_disj_eq) apply (subst card_Un_disjoint) apply (rule_tac n="length xs" in bounded_nat_set_is_finite, blast) apply (rule_tac n="Suc (length xs)" in bounded_nat_set_is_finite, blast) apply blast apply (rule_tac t = "\<lambda>i. i < length xs \<and> P ((xs @ [x]) ! i)" and s = "\<lambda>i. i < length xs \<and> P (xs ! i)" in subst) apply (rule fun_eq_iff[THEN iffD2]) apply (fastforce simp: nth_append1) apply (rule nat_add_left_cancel[THEN iffD2]) apply (rule_tac t = "\<lambda>i. i = length xs \<and> i \<le> k \<and> P ((xs @ [x]) ! i)" and s = "\<lambda>i. i = length xs \<and> i \<le> k \<and> P x" in subst) apply (rule fun_eq_iff[THEN iffD2]) apply fastforce apply (case_tac "length xs \<le> k") apply clarsimp apply (rule_tac t = "\<lambda>i. i = length xs \<and> i \<le> k" and s = "\<lambda>i. i = length xs" in subst) apply (rule fun_eq_iff[THEN iffD2]) apply fastforce apply simp apply simp apply (simp split del: split_if add: less_Suc_eq conj_disj_distribL conj_disj_distribR) apply (rule_tac t = "\<lambda>k. k < length xs \<and> n < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)} + (if length xs \<le> k \<and> P x then Suc 0 else 0)" and s = "\<lambda>k. k < length xs \<and> n < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)}" in subst) apply (simp add: fun_eq_iff) apply (rule_tac t = "\<lambda>k. k = length xs \<and> n < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)} + (if length xs \<le> k \<and> P x then Suc 0 else 0)" and s = "\<lambda>k. k = length xs \<and> n < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)} + (if P x then Suc 0 else 0)" in subst) apply (simp add: fun_eq_iff) apply (case_tac "n < length (filter P xs)") apply (rule_tac t = "(if P x then filter P xs @ [x] else filter P xs) ! n" and s = "(filter P xs) ! n" in subst) apply (simp add: nth_append1) apply (simp split del: split_if) apply (subgoal_tac "\<exists>k<length xs. n < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)}") prefer 2 apply (rule_tac x="length xs - Suc 0" in exI) apply (simp add: length_filter_conv_card less_eq_le_pred[symmetric]) apply (subgoal_tac "\<exists>k\<le>length xs. n < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)}") prefer 2 apply (blast intro: less_imp_le) thm Least_le_imp_le_disj apply (subst Least_le_imp_le_disj) apply simp apply simp thm nth_append1 apply (rule sym, rule nth_append1) apply (rule LeastI2_ex, assumption) apply blast apply (simp add: linorder_not_less) apply (subgoal_tac "P x") prefer 2 apply (rule ccontr, simp) apply (simp add: length_snoc) apply (drule less_Suc_eq_le[THEN iffD1], drule_tac x=n in order_antisym, assumption) apply (simp add: nth_append2) thm length_filter_conv_card apply (simp add: length_filter_conv_card) apply (rule_tac t = "\<lambda>k. card {i. i < length xs \<and> P (xs ! i)} < card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)}" and s = "\<lambda>k. False" in subst) apply (rule fun_eq_iff[THEN iffD2], rule allI, rename_tac k) apply (simp add: linorder_not_less) apply (rule card_mono) apply fastforce apply blast apply simp apply (rule_tac t = "(LEAST k. k = length xs \<and> card {i. i < length xs \<and> P (xs ! i)} < Suc (card {i. i \<le> k \<and> i < length xs \<and> P (xs ! i)}))" and s = "length xs" in subst) apply (rule sym, rule Least_equality) apply simp apply (rule le_imp_less_Suc) apply (rule card_mono) apply fastforce apply fastforce apply simp apply simp done subsubsection {* Ordered lists *} fun list_ord :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> ('a::ord) list \<Rightarrow> bool" where "list_ord ord (x1 # x2 # xs) = (ord x1 x2 \<and> list_ord ord (x2 # xs))" | "list_ord ord xs = True" thm list_ord.simps definition list_asc :: "('a::ord) list \<Rightarrow> bool" where "list_asc xs \<equiv> list_ord (op \<le>) xs" definition list_strict_asc :: "('a::ord) list \<Rightarrow> bool" where "list_strict_asc xs \<equiv> list_ord (op <) xs" value "list_asc [1::nat, 2, 2]" value "list_strict_asc [1::nat, 2, 2]" definition list_desc :: "('a::ord) list \<Rightarrow> bool" where "list_desc xs \<equiv> list_ord (op \<ge>) xs" definition list_strict_desc :: "('a::ord) list \<Rightarrow> bool" where "list_strict_desc xs \<equiv> list_ord (op >) xs" lemma list_ord_Nil: "list_ord ord []" by simp lemma list_ord_one: "list_ord ord [x]" by simp lemma list_ord_Cons: " list_ord ord (x # xs) = (xs = [] \<or> (ord x (hd xs) \<and> list_ord ord xs))" by (induct xs, simp+) lemma list_ord_Cons_imp: "\<lbrakk> list_ord ord xs; ord x (hd xs) \<rbrakk> \<Longrightarrow> list_ord ord (x # xs)" by (induct xs, simp+) lemma list_ord_append: "\<And>ys. list_ord ord (xs @ ys) = (list_ord ord xs \<and> (ys = [] \<or> (list_ord ord ys \<and> (xs = [] \<or> ord (last xs) (hd ys)))))" apply (induct xs, fastforce) apply (case_tac xs, case_tac ys, fastforce+) done lemma list_ord_snoc: " list_ord ord (xs @ [x]) = (xs = [] \<or> (ord (last xs) x \<and> list_ord ord xs))" by (fastforce simp: list_ord_append) lemma list_ord_all_conv: " (list_ord ord xs) = (\<forall>n < length xs - 1. ord (xs ! n) (xs ! Suc n))" apply (rule iffI) apply (induct xs, simp) apply clarsimp apply (simp add: list_ord_Cons) apply (erule disjE, simp) apply clarsimp apply (case_tac n) apply (simp add: hd_conv_nth) apply simp apply (induct xs, simp) apply (simp add: list_ord_Cons) apply (case_tac "xs = []", simp) apply (drule meta_mp) apply (intro allI impI, rename_tac n) apply (drule_tac x="Suc n" in spec, simp) apply (drule_tac x=0 in spec) apply (simp add: hd_conv_nth) done lemma list_ord_imp: " \<lbrakk> \<And>x y. ord x y \<Longrightarrow> ord' x y; list_ord ord xs \<rbrakk> \<Longrightarrow> list_ord ord' xs" apply (induct xs, simp) apply (simp add: list_ord_Cons) apply fastforce done corollary list_strict_asc_imp_list_asc: " list_strict_asc (xs::'a::preorder list) \<Longrightarrow> list_asc xs" by (unfold list_strict_asc_def list_asc_def, rule list_ord_imp[of "op <"], rule order_less_imp_le) corollary list_strict_desc_imp_list_desc: " list_strict_desc (xs::'a::preorder list) \<Longrightarrow> list_desc xs" by (unfold list_strict_desc_def list_desc_def, rule list_ord_imp[of "op >"], rule order_less_imp_le) lemma list_ord_trans_imp: "\<And>i. \<lbrakk> transP ord; list_ord ord xs; j < length xs; i < j \<rbrakk> \<Longrightarrow> ord (xs ! i) (xs ! j)" apply (simp add: list_ord_all_conv) apply (induct j, simp) apply (case_tac "j < i", simp) apply (simp add: linorder_not_less) apply (case_tac "i = j", simp) thm trans_def apply (drule_tac x=i in meta_spec, simp) apply (drule_tac x=j in spec, simp add: Suc_less_pred_conv) apply (unfold trans_def) apply (drule_tac x="xs ! i" in spec, drule_tac x="xs ! j" in spec, drule_tac x="xs ! Suc j" in spec) apply simp done lemma list_ord_trans: " transP ord \<Longrightarrow> (list_ord ord xs) = (\<forall>j < length xs. \<forall>i < j. ord (xs ! i) (xs ! j))" apply (rule iffI) apply (simp add: list_ord_trans_imp) apply (simp add: list_ord_all_conv) done lemma list_ord_trans_refl_le: " \<lbrakk> transP ord; reflP ord \<rbrakk> \<Longrightarrow> (list_ord ord xs) = (\<forall>j < length xs. \<forall>i \<le> j. ord (xs ! i) (xs ! j))" apply (subst list_ord_trans, simp) apply (rule iffI) apply clarsimp apply (case_tac "i = j") apply (simp add: refl_on_def) apply simp+ done lemma list_ord_trans_refl_le_imp: " \<lbrakk> transP ord; \<And>x y. ord x y \<Longrightarrow> ord' x y; reflP ord'; list_ord ord xs \<rbrakk> \<Longrightarrow> (\<forall>j < length xs. \<forall>i \<le> j. ord' (xs ! i) (xs ! j))" apply clarify apply (case_tac "i = j") apply (simp add: refl_on_def) thm list_ord_trans_imp apply (simp add: list_ord_trans_imp) done corollary list_asc_trans: " (list_asc (xs::'a::preorder list)) = (\<forall>j < length xs. \<forall>i < j. xs ! i \<le> xs ! j)" and list_strict_asc_trans: " (list_strict_asc (xs::'a::preorder list)) = (\<forall>j < length xs. \<forall>i < j. xs ! i < xs ! j)" and list_desc_trans: " (list_desc (xs::'a::preorder list)) = (\<forall>j < length xs. \<forall>i < j. xs ! j \<le> xs ! i)" and list_strict_desc_trans: " (list_strict_desc (xs::'a::preorder list)) = (\<forall>j < length xs. \<forall>i < j. xs ! j < xs ! i)" apply (unfold list_asc_def list_strict_asc_def list_desc_def list_strict_desc_def) apply (rule list_ord_trans, unfold trans_def, blast intro: order_trans order_less_trans)+ done corollary list_asc_trans_le: " (list_asc (xs::'a::preorder list)) = (\<forall>j < length xs. \<forall>i \<le> j. xs ! i \<le> xs ! j)" and list_desc_trans_le: " (list_desc (xs::'a::preorder list)) = (\<forall>j < length xs. \<forall>i \<le> j. xs ! j \<le> xs ! i)" apply (unfold list_asc_def list_strict_asc_def list_desc_def list_strict_desc_def) apply (rule list_ord_trans_refl_le, unfold trans_def, blast intro: order_trans, simp add: refl_on_def)+ done corollary list_strict_asc_trans_le: " (list_strict_asc (xs::'a::preorder list)) \<Longrightarrow> (\<forall>j < length xs. \<forall>i \<le> j. xs ! i \<le> xs ! j)" apply (unfold list_strict_asc_def) thm list_ord_trans_refl_le_imp apply (rule list_ord_trans_refl_le_imp[where ord="op \<le>"]) apply (unfold trans_def, blast intro: order_trans) apply assumption apply (unfold refl_on_def, clarsimp) thm list_ord_imp apply (rule list_ord_imp[where ord="op <"], simp_all add: less_imp_le) done lemma list_ord_le_sorted_eq: "list_asc xs = sorted xs" apply (rule sym) apply (simp add: list_asc_def) apply (induct xs, simp) apply (rename_tac x xs) apply (simp add: list_ord_Cons sorted_Cons) apply (case_tac "xs = []", simp_all) apply (case_tac "list_ord op \<le> xs", simp_all) apply (rule iffI) apply (drule_tac x="hd xs" in bspec, simp_all) apply clarify apply (drule in_set_conv_nth[THEN iffD1], clarsimp, rename_tac i1) apply (simp add: hd_conv_nth) apply (case_tac i1, simp) apply (rename_tac i2) apply simp apply (fold list_asc_def) thm list_asc_trans apply (fastforce simp: list_asc_trans) done corollary list_asc_upto: "list_asc [m..n]" by (simp add: list_ord_le_sorted_eq) lemma list_strict_asc_upt: "list_strict_asc [m..<n]" by (simp add: list_strict_asc_def list_ord_all_conv) thm list_strict_asc_imp_list_asc[OF list_strict_asc_upt] lemma list_ord_distinct_aux: " \<lbrakk> irrefl {(a, b). ord a b}; transP ord; list_ord ord xs; i < length xs; j < length xs; i < j \<rbrakk> \<Longrightarrow> xs ! i \<noteq> xs ! j" apply (subgoal_tac "\<And>x y. ord x y \<Longrightarrow> x \<noteq> y") prefer 2 apply (rule ccontr) apply (simp add: irrefl_def) thm list_ord_trans apply (simp add: list_ord_trans) done lemma list_ord_distinct: " \<lbrakk> irrefl {(a,b). ord a b}; transP ord; list_ord ord xs \<rbrakk> \<Longrightarrow> distinct xs" thm distinct_conv_nth apply (simp add: distinct_conv_nth, intro allI impI, rename_tac i j) apply (drule neq_iff[THEN iffD1], erule disjE) thm list_ord_distinct_aux apply (simp add: list_ord_distinct_aux) thm list_ord_distinct_aux[THEN not_sym] apply (simp add: list_ord_distinct_aux[THEN not_sym]) done lemma list_strict_asc_distinct: "list_strict_asc (xs::'a::preorder list) \<Longrightarrow> distinct xs" apply (rule_tac ord="op <" in list_ord_distinct) apply (unfold irrefl_def list_strict_asc_def trans_def) apply (blast intro: less_trans)+ done lemma list_strict_desc_distinct: "list_strict_desc (xs::'a::preorder list) \<Longrightarrow> distinct xs" apply (rule_tac ord="op >" in list_ord_distinct) apply (unfold irrefl_def list_strict_desc_def trans_def) apply (blast intro: less_trans)+ done subsubsection {* Additional definitions and results for sublists *} primrec sublist_list :: "'a list \<Rightarrow> nat list \<Rightarrow> 'a list" where "sublist_list xs [] = []" | "sublist_list xs (y # ys) = (xs ! y) # (sublist_list xs ys)" value "sublist_list [0::int,10::int,20,30,40,50] [1::nat,2,3]" value "sublist_list [0::int,10::int,20,30,40,50] [1::nat,1,2,3]" value "sublist_list [0::int,10::int,20,30,40,50] [1::nat,1,2,3,10]" thm sublist_def term "map fst (filter (\<lambda>p. snd p \<in> A) (zip xs [0..<length xs]))" term "map fst ([p\<leftarrow>(zip xs [0..<length xs]). (snd p \<in> A)])" lemma sublist_list_length: "length (sublist_list xs ys) = length ys" by (induct ys, simp_all) lemma sublist_list_append: " \<And>zs. sublist_list xs (ys @ zs) = sublist_list xs ys @ sublist_list xs zs" by (induct ys, simp_all) lemma sublist_list_Nil: "sublist_list xs [] =[]" by simp lemma sublist_list_is_Nil_conv: " (sublist_list xs ys = []) = (ys = [])" apply (rule iffI) apply (rule ccontr) apply (clarsimp simp: neq_Nil_conv) apply simp done lemma sublist_list_nth: " \<And>n. n < length ys \<Longrightarrow> sublist_list xs ys ! n = xs ! (ys ! n)" apply (induct ys, simp) apply (case_tac n, simp_all) done lemma take_drop_eq_sublist_list: " m + n \<le> length xs \<Longrightarrow> xs \<up> m \<down> n = sublist_list xs [m..<m+n]" apply (insert length_upt[of m "m+n"]) apply (simp add: expand_list_eq) apply (simp add: sublist_list_length) apply (frule add_le_imp_le_diff2) apply (simp add: min_eqR) apply (clarsimp, rename_tac i) thm sublist_list_nth apply (simp add: sublist_list_nth) done primrec sublist_list_if :: "'a list \<Rightarrow> nat list \<Rightarrow> 'a list" where "sublist_list_if xs [] = []" | "sublist_list_if xs (y # ys) = (if y < length xs then (xs ! y) # (sublist_list_if xs ys) else (sublist_list_if xs ys))" value "sublist_list_if [0::int,10::int,20,30,40,50] [1::nat,2,3]" value "sublist_list_if [0::int,10::int,20,30,40,50] [1::nat,1,2,3]" value "sublist_list_if [0::int,10::int,20,30,40,50] [1::nat,1,2,3,10]" lemma sublist_list_if_sublist_list_filter_conv: "\<And>xs. sublist_list_if xs ys = sublist_list xs (filter (\<lambda>i. i < length xs) ys)" by (induct ys, simp+) corollary sublist_list_if_sublist_list_eq: "\<And>xs. list_all (\<lambda>i. i < length xs) ys \<Longrightarrow> sublist_list_if xs ys = sublist_list xs ys" by (simp add: sublist_list_if_sublist_list_filter_conv filter_list_all) corollary sublist_list_if_sublist_list_eq2: "\<And>xs. \<forall>n<length ys. ys ! n < length xs \<Longrightarrow> sublist_list_if xs ys = sublist_list xs ys" thm list_all_conv[THEN iffD2] by (rule sublist_list_if_sublist_list_eq, rule list_all_conv[THEN iffD2]) lemma sublist_list_if_Nil_left: "sublist_list_if [] ys = []" by (induct ys, simp+) lemma sublist_list_if_Nil_right: "sublist_list_if xs [] = []" by simp lemma sublist_list_if_length: " length (sublist_list_if xs ys) = length (filter (\<lambda>i. i < length xs) ys)" by (simp add: sublist_list_if_sublist_list_filter_conv sublist_list_length) lemma sublist_list_if_append: " sublist_list_if xs (ys @ zs) = sublist_list_if xs ys @ sublist_list_if xs zs" by (simp add: sublist_list_if_sublist_list_filter_conv sublist_list_append) lemma sublist_list_if_snoc: " sublist_list_if xs (ys @ [y]) = sublist_list_if xs ys @ (if y < length xs then [xs ! y] else [])" by (simp add: sublist_list_if_append) lemma sublist_list_if_is_Nil_conv: " (sublist_list_if xs ys = []) = (list_all (\<lambda>i. length xs \<le> i) ys)" by (simp add: sublist_list_if_sublist_list_filter_conv sublist_list_is_Nil_conv filter_empty_conv list_all_iff linorder_not_less) lemma sublist_list_if_nth: " n < length ((filter (\<lambda>i. i < length xs) ys)) \<Longrightarrow> sublist_list_if xs ys ! n = xs ! ((filter (\<lambda>i. i < length xs) ys) ! n)" by (simp add: sublist_list_if_sublist_list_filter_conv sublist_list_nth) lemma take_drop_eq_sublist_list_if: " m + n \<le> length xs \<Longrightarrow> xs \<up> m \<down> n = sublist_list_if xs [m..<m+n]" thm take_drop_eq_sublist_list by (simp add: sublist_list_if_sublist_list_filter_conv take_drop_eq_sublist_list) lemma sublist_empty_conv: "(sublist xs I = []) = (\<forall>i\<in>I. length xs \<le> i)" using [[hypsubst_thin = true]] by (fastforce simp: set_empty[symmetric] set_sublist linorder_not_le[symmetric]) thm sublist_singleton lemma sublist_singleton2: "sublist xs {y} = (if y < length xs then [xs ! y] else [])" apply (unfold sublist_def) apply (induct xs rule: rev_induct, simp) apply (simp add: nth_append) done lemma sublist_take_eq: " \<lbrakk> finite I; Max I < n \<rbrakk> \<Longrightarrow> sublist (xs \<down> n) I = sublist xs I" apply (case_tac "I = {}", simp) apply (case_tac "n < length xs") prefer 2 apply simp thm append_take_drop_id apply (rule_tac t = "sublist xs I" and s = "sublist (xs \<down> n @ xs \<up> n) I" in subst) apply simp apply (subst sublist_append) apply (simp add: min_eqR) apply (rule_tac t="{j. j + n \<in> I}" and s="{}" in subst) apply blast apply simp done lemma sublist_drop_eq: " n \<le> iMin I \<Longrightarrow> sublist (xs \<up> n) {j. j + n \<in> I} = sublist xs I" apply (case_tac "I = {}", simp) apply (case_tac "n < length xs") prefer 2 apply (simp add: sublist_def filter_empty_conv linorder_not_less) apply (clarsimp, rename_tac a b) thm set_zip_rightD apply (drule set_zip_rightD) apply fastforce apply (rule_tac t = "sublist xs I" and s = "sublist (xs \<down> n @ xs \<up> n) I" in subst) apply simp apply (subst sublist_append) apply (fastforce simp: sublist_empty_conv min_eqR) done lemma sublist_cut_less_eq: " length xs \<le> n \<Longrightarrow> sublist xs (I \<down>< n) = sublist xs I" apply (simp add: sublist_def cut_less_mem_iff) apply (rule_tac f="\<lambda>xs. map fst xs" in arg_cong) thm filter_filter_eq apply (rule filter_filter_eq) apply (simp add: list_all_conv) done lemma sublist_disjoint_Un: " \<lbrakk> finite A; Max A < iMin B \<rbrakk> \<Longrightarrow> sublist xs (A \<union> B) = sublist xs A @ sublist xs B" apply (case_tac "A = {}", simp) apply (case_tac "B = {}", simp) apply (case_tac "length xs \<le> iMin B") thm sublist_cut_less_eq apply (subst sublist_cut_less_eq[of xs "iMin B", symmetric], assumption) apply (simp (no_asm_simp) add: cut_less_Un cut_less_Min_empty cut_less_Max_all) apply (simp add: sublist_empty_conv iMin_ge_iff) apply (simp add: linorder_not_le) thm sublist_append apply (rule_tac t = "sublist xs (A \<union> B)" and s = "sublist (xs \<down> (iMin B) @ xs \<up> (iMin B)) (A \<union> B)" in subst) apply simp apply (subst sublist_append) apply (simp add: min_eqR) thm sublist_cut_less_eq apply (subst sublist_cut_less_eq[where xs="xs \<down> iMin B" and n="iMin B", symmetric], simp) apply (simp add: cut_less_Un cut_less_Min_empty cut_less_Max_all) thm sublist_take_eq apply (simp add: sublist_take_eq) apply (rule_tac t = "\<lambda>j. j + iMin B \<in> A \<or> j + iMin B \<in> B" and s = "\<lambda>j. j + iMin B \<in> B" in subst) apply (force simp: fun_eq_iff) thm sublist_drop_eq apply (simp add: sublist_drop_eq) done corollary sublist_disjoint_insert_left: " \<lbrakk> finite I; x < iMin I \<rbrakk> \<Longrightarrow> sublist xs (insert x I) = sublist xs {x} @ sublist xs I" apply (rule_tac t="insert x I" and s="{x} \<union> I" in subst, simp) apply (subst sublist_disjoint_Un) apply simp_all done corollary sublist_disjoint_insert_right: " \<lbrakk> finite I; Max I < x \<rbrakk> \<Longrightarrow> sublist xs (insert x I) = sublist xs I @ sublist xs {x}" apply (rule_tac t="insert x I" and s="I \<union> {x}" in subst, simp) apply (subst sublist_disjoint_Un) apply simp_all done lemma sublist_all: "{..<length xs} \<subseteq> I \<Longrightarrow> sublist xs I = xs" apply (case_tac "xs = []", simp) apply (rule_tac t = "I" and s = "I \<down>< (length xs) \<union> I \<down>\<ge> (length xs)" in subst) apply (simp add: cut_less_cut_ge_ident) apply (rule_tac t = "I \<down>< length xs" and s = "{..<length xs}" in subst) apply blast apply (case_tac "I \<down>\<ge> (length xs) = {}", simp) apply (subst sublist_disjoint_Un[OF finite_lessThan]) apply (rule less_imp_Max_less_iMin[OF finite_lessThan]) apply blast apply blast apply (blast intro: less_le_trans) apply (fastforce simp: sublist_empty_conv) done corollary sublist_UNIV: "sublist xs UNIV = xs" by (rule sublist_all[OF subset_UNIV]) lemma sublist_list_sublist_eq: "\<And>xs. list_strict_asc ys \<Longrightarrow> sublist_list_if xs ys = sublist xs (set ys)" apply (case_tac "xs = []") apply (simp add: sublist_list_if_Nil_left) apply (induct ys rule: rev_induct, simp) apply (rename_tac y ys xs) apply (case_tac "ys = []") apply (simp add: sublist_singleton2) apply (unfold list_strict_asc_def) apply (simp add: sublist_list_if_snoc split del: split_if) thm list_ord_append apply (frule list_ord_append[THEN iffD1]) apply (clarsimp split del: split_if) apply (subst sublist_disjoint_insert_right) apply simp apply (clarsimp simp: in_set_conv_nth, rename_tac i) thm list_strict_asc_trans[unfolded list_strict_asc_def, THEN iffD1, rule_format] apply (drule_tac i=i and j="length ys" in list_strict_asc_trans[unfolded list_strict_asc_def, THEN iffD1, rule_format]) apply (simp add: nth_append split del: split_if)+ apply (simp add: sublist_singleton2) done lemma set_sublist_list_if: "\<And>xs. set (sublist_list_if xs ys) = {xs ! i |i. i < length xs \<and> i \<in> set ys}" apply (induct ys, simp_all) apply blast done lemma set_sublist_list: " list_all (\<lambda>i. i < length xs) ys \<Longrightarrow> set (sublist_list xs ys) = {xs ! i |i. i < length xs \<and> i \<in> set ys}" by (simp add: sublist_list_if_sublist_list_eq[symmetric] set_sublist_list_if) subsubsection {* Natural set images with lists *} definition f_image :: "'a list \<Rightarrow> nat set \<Rightarrow> 'a set" (infixr "`\<^sup>f" 90) where "xs `\<^sup>f A \<equiv> {y. \<exists>n\<in>A. n < length xs \<and> y = xs ! n }" abbreviation f_range :: "'a list \<Rightarrow> 'a set" where "f_range xs \<equiv> f_image xs UNIV" thm Set.image_eqI lemma f_image_eqI[simp, intro]: " \<lbrakk> x = xs ! n; n \<in> A; n < length xs \<rbrakk> \<Longrightarrow> x \<in> xs `\<^sup>f A" by (unfold f_image_def, blast) thm Set.imageI lemma f_imageI: "\<lbrakk> n \<in> A; n < length xs \<rbrakk> \<Longrightarrow> xs ! n \<in> xs `\<^sup>f A" by blast thm Set.rev_image_eqI lemma rev_f_imageI: "\<lbrakk> n \<in> A; n < length xs; x = xs ! n \<rbrakk> \<Longrightarrow> x \<in> xs `\<^sup>f A" by (rule f_image_eqI) thm Set.imageE lemma f_imageE[elim!]: " \<lbrakk> x \<in> xs `\<^sup>f A; \<And>n. \<lbrakk> x = xs ! n; n \<in> A; n < length xs \<rbrakk> \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P" by (unfold f_image_def, blast) thm Set.image_Un lemma f_image_Un: "xs `\<^sup>f (A \<union> B) = xs `\<^sup>f A \<union> xs `\<^sup>f B" by blast thm Set.image_mono lemma f_image_mono: "A \<subseteq> B ==> xs `\<^sup>f A \<subseteq> xs `\<^sup>f B" by blast thm Set.image_iff lemma f_image_iff: "(x \<in> xs `\<^sup>f A) = (\<exists>n\<in>A. n < length xs \<and> x = xs ! n)" by blast thm Set.image_subset_iff lemma f_image_subset_iff: " (xs `\<^sup>f A \<subseteq> B) = (\<forall>n\<in>A. n < length xs \<longrightarrow> xs ! n \<in> B)" by blast thm Set.subset_image_iff lemma subset_f_image_iff: "(B \<subseteq> xs `\<^sup>f A) = (\<exists>A'\<subseteq>A. B = xs `\<^sup>f A')" apply (rule iffI) apply (rule_tac x="{ n. n \<in> A \<and> n < length xs \<and> xs ! n \<in> B }" in exI) apply blast apply (blast intro: f_image_mono) done thm image_subsetI lemma f_image_subsetI: " \<lbrakk> \<And>n. n \<in> A \<and> n < length xs \<Longrightarrow> xs ! n \<in> B \<rbrakk> \<Longrightarrow> xs `\<^sup>f A \<subseteq> B" by blast thm Set.image_empty lemma f_image_empty: "xs `\<^sup>f {} = {}" by blast thm Set.image_insert lemma f_image_insert_if: " xs `\<^sup>f (insert n A) = ( if n < length xs then insert (xs ! n) (xs `\<^sup>f A) else (xs `\<^sup>f A))" by (split split_if, blast) lemma f_image_insert_eq1: " n < length xs \<Longrightarrow> xs `\<^sup>f (insert n A) = insert (xs ! n) (xs `\<^sup>f A)" by (simp add: f_image_insert_if) lemma f_image_insert_eq2: " length xs \<le> n \<Longrightarrow> xs `\<^sup>f (insert n A) = (xs `\<^sup>f A)" by (simp add: f_image_insert_if) thm Set.insert_image lemma insert_f_image: " \<lbrakk> n \<in> A; n < length xs \<rbrakk> \<Longrightarrow> insert (xs ! n) (xs `\<^sup>f A) = (xs `\<^sup>f A)" by blast thm Set.image_is_empty lemma f_image_is_empty: "(xs `\<^sup>f A = {}) = ({x. x \<in> A \<and> x < length xs} = {})" by blast thm Set.image_Collect lemma f_image_Collect: "xs `\<^sup>f {n. P n} = {xs ! n |n. P n \<and> n < length xs}" by blast lemma f_image_eq_set: "\<forall>n<length xs. n \<in> A \<Longrightarrow> xs `\<^sup>f A = set xs" by (fastforce simp: in_set_conv_nth) lemma f_range_eq_set: "f_range xs = set xs" by (simp add: f_image_eq_set) lemma f_image_eq_set_sublist: "xs `\<^sup>f A = set (sublist xs A)" by (unfold set_sublist, blast) lemma f_image_eq_set_sublist_list_if: "xs `\<^sup>f (set ys) = set (sublist_list_if xs ys)" by (simp add: set_sublist_list_if_eq_set_sublist f_image_eq_set_sublist) lemma f_image_eq_set_sublist_list: " list_all (\<lambda>i. i < length xs) ys \<Longrightarrow> xs `\<^sup>f (set ys) = set (sublist_list xs ys)" by (simp add: sublist_list_if_sublist_list_eq f_image_eq_set_sublist_list_if) thm Set.range_eqI lemma f_range_eqI: "\<lbrakk> x = xs ! n; n < length xs \<rbrakk> \<Longrightarrow> x \<in> f_range xs" by blast thm Set.rangeI lemma f_rangeI: "n < length xs \<Longrightarrow> xs ! n \<in> f_range xs" by blast thm Set.rangeE lemma f_rangeE[elim?]: " \<lbrakk> x \<in> f_range xs; \<And>n. \<lbrakk> n < length xs; x = xs ! n \<rbrakk> \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P" by blast subsubsection {* Mapping lists of functions to lists *} primrec map_list :: "('a \<Rightarrow> 'b) list \<Rightarrow> 'a list \<Rightarrow> 'b list" where "map_list [] xs = []" | "map_list (f # fs) xs = f (hd xs) # map_list fs (tl xs)" lemma map_list_Nil: "map_list [] xs = []" by simp lemma map_list_Cons_Cons: " map_list (f # fs) (x # xs) = (f x) # map_list fs xs" by simp lemma map_list_length: "\<And>xs. length (map_list fs xs) = length fs" by (induct fs, simp+) corollary map_list_empty_conv: " (map_list fs xs = []) = (fs = [])" by (simp del: length_0_conv add: length_0_conv[symmetric] map_list_length) corollary map_list_not_empty_conv: " (map_list fs xs \<noteq> []) = (fs \<noteq> [])" by (simp add: map_list_empty_conv) lemma map_list_nth: "\<And>n xs. \<lbrakk> n < length fs; n < length xs \<rbrakk> \<Longrightarrow> (map_list fs xs ! n) = (fs ! n) (xs ! n)" apply (induct fs, simp+) apply (case_tac n) apply (simp add: hd_conv_nth) apply (simp add: nth_tl_eq_nth_Suc2) done lemma map_list_xs_take: "\<And>n xs. length fs \<le> n \<Longrightarrow> map_list fs (xs \<down> n) = map_list fs xs" apply (induct fs, simp+) apply (rename_tac f fs n xs) apply (simp add: tl_take) thm arg_cong apply (rule_tac f=f in arg_cong) apply (case_tac "xs = []", simp) apply (simp add: hd_conv_nth) done lemma map_list_take: "\<And>n xs. (map_list fs xs) \<down> n = (map_list (fs \<down> n) xs)" apply (induct fs, simp) apply (case_tac n, simp+) done lemma map_list_take_take: "\<And>n xs. (map_list fs xs) \<down> n = (map_list (fs \<down> n) (xs \<down> n))" by (simp add: map_list_take map_list_xs_take) lemma map_list_drop: "\<And>n xs. (map_list fs xs) \<up> n = (map_list (fs \<up> n) (xs \<up> n))" apply (induct fs, simp) apply (case_tac n) apply (simp add: drop_Suc)+ done lemma map_list_append_append: "\<And>xs1 . length fs1 = length xs1 \<Longrightarrow> map_list (fs1 @ fs2) (xs1 @ xs2) = map_list fs1 xs1 @ map_list fs2 xs2" apply (induct fs1, simp+) apply (case_tac "xs1", simp+) done lemma map_list_snoc_snoc: " length fs = length xs \<Longrightarrow> map_list (fs @ [f]) (xs @ [x]) = map_list fs xs @ [f x]" by (simp add: map_list_append_append) lemma map_list_snoc: "\<And>xs. length fs < length xs \<Longrightarrow> map_list (fs @ [f]) xs = map_list fs xs @ [f (xs ! (length fs))]" apply (induct fs) apply (simp add: hd_conv_nth) apply (simp add: nth_tl_eq_nth_Suc2) done lemma map_list_Cons_if: " map_list fs (x # xs) = (if (fs = []) then [] else ( ((hd fs) x) # map_list (tl fs) xs))" by (case_tac "fs", simp+) lemma map_list_Cons_not_empty: " fs \<noteq> [] \<Longrightarrow> map_list fs (x # xs) = ((hd fs) x) # map_list (tl fs) xs" by (simp add: map_list_Cons_if) lemma map_eq_map_list_take: "\<And>xs. \<lbrakk> length fs \<le> length xs; list_all (\<lambda>x. x = f) fs \<rbrakk> \<Longrightarrow> map_list fs xs = map f (xs \<down> length fs)" apply (induct fs, simp+) apply (case_tac xs, simp+) done lemma map_eq_map_list_take2: " \<lbrakk> length fs = length xs; list_all (\<lambda>x. x = f) fs \<rbrakk> \<Longrightarrow> map_list fs xs = map f xs" by (simp add: map_eq_map_list_take) lemma map_eq_map_list_replicate: " map_list (f\<^bsup>length xs\<^esup>) xs = map f xs" by (induct xs, simp+) subsubsection {* Mapping functions with two arguments to lists *} primrec map2 :: " (* Function taking two parameters *) ('a \<Rightarrow> 'b \<Rightarrow> 'c) \<Rightarrow> (* Lists of parameters *) 'a list \<Rightarrow> 'b list \<Rightarrow> 'c list" where "map2 f [] ys = []" | "map2 f (x # xs) ys = f x (hd ys) # map2 f xs (tl ys)" lemma map2_map_list_conv: "\<And>ys. map2 f xs ys = map_list (map f xs) ys" by (induct xs, simp+) lemma map2_Nil: "map2 f [] ys = []" by simp lemma map2_Cons_Cons: " map2 f (x # xs) (y # ys) = (f x y) # map2 f xs ys" by simp lemma map2_length: "\<And>ys. length (map2 f xs ys) = length xs" by (induct xs, simp+) corollary map2_empty_conv: " (map2 f xs ys = []) = (xs = [])" by (simp del: length_0_conv add: length_0_conv[symmetric] map2_length) corollary map2_not_empty_conv: " (map2 f xs ys \<noteq> []) = (xs \<noteq> [])" by (simp add: map2_empty_conv) lemma map2_nth: "\<And>n ys. \<lbrakk> n < length xs; n < length ys \<rbrakk> \<Longrightarrow> (map2 f xs ys ! n) = f (xs ! n) (ys ! n)" thm map_list_nth by (simp add: map2_map_list_conv map_list_nth) lemma map2_ys_take: "\<And>n ys. length xs \<le> n \<Longrightarrow> map2 f xs (ys \<down> n) = map2 f xs ys" thm map_list_xs_take by (simp add: map2_map_list_conv map_list_xs_take) lemma map2_take: "\<And>n ys. (map2 f xs ys) \<down> n = (map2 f (xs \<down> n) ys)" thm map_list_take by (simp add: map2_map_list_conv take_map map_list_take) lemma map2_take_take: "\<And>n ys. (map2 f xs ys) \<down> n = (map2 f (xs \<down> n) (ys \<down> n))" by (simp add: map2_take map2_ys_take) lemma map2_drop: "\<And>n ys. (map2 f xs ys) \<up> n = (map2 f (xs \<up> n) (ys \<up> n))" thm map_list_drop by (simp add: map2_map_list_conv map_list_drop drop_map) lemma map2_append_append: "\<And>ys1 . length xs1 = length ys1 \<Longrightarrow> map2 f (xs1 @ xs2) (ys1 @ ys2) = map2 f xs1 ys1 @ map2 f xs2 ys2" thm map_list_append_append by (simp add: map2_map_list_conv map_list_append_append) lemma map2_snoc_snoc: " length xs = length ys \<Longrightarrow> map2 f (xs @ [x]) (ys @ [y]) = map2 f xs ys @ [f x y]" by (simp add: map2_append_append) lemma map2_snoc: "\<And>ys. length xs < length ys \<Longrightarrow> map2 f (xs @ [x]) ys = map2 f xs ys @ [f x (ys ! (length xs))]" thm map_list_snoc by (simp add: map2_map_list_conv map_list_snoc) lemma map2_Cons_if: " map2 f xs (y # ys) = (if (xs = []) then [] else ( (f (hd xs) y) # map2 f (tl xs) ys))" by (case_tac "xs", simp+) lemma map2_append1_take_drop: " length xs1 \<le> length ys \<Longrightarrow> map2 f (xs1 @ xs2) ys = map2 f xs1 (ys \<down> length xs1) @ map2 f xs2 (ys \<up> length xs1)" thm map2_append_append thm append_take_drop_id apply (rule_tac t = "map2 f (xs1 @ xs2) ys" and s = "map2 f (xs1 @ xs2) (ys \<down> length xs1 @ ys \<up> length xs1)" in subst) apply simp apply (simp add: map2_append_append del: append_take_drop_id) done lemma map2_append2_take_drop: " length ys1 \<le> length xs \<Longrightarrow> map2 f xs (ys1 @ ys2) = map2 f (xs \<down> length ys1) ys1 @ map2 f (xs \<up> length ys1) ys2" apply (rule_tac t = "map2 f xs (ys1 @ ys2)" and s = "map2 f (xs \<down> length ys1 @ xs \<up> length ys1) (ys1 @ ys2)" in subst) apply simp apply (simp add: map2_append_append del: append_take_drop_id) done thm List.map_cong lemma map2_cong: " \<lbrakk> xs1 = xs2; ys1 = ys2; length xs2 \<le> length ys2; \<And>x y. \<lbrakk> x \<in> set xs2; y \<in> set ys2 \<rbrakk> \<Longrightarrow> f x y = g x y \<rbrakk> \<Longrightarrow> map2 f xs1 ys1 = map2 g xs2 ys2" by (simp (no_asm_simp) add: expand_list_eq map2_length map2_nth) thm List.map_eq_conv lemma map2_eq_conv: " length xs \<le> length ys \<Longrightarrow> (map2 f xs ys = map2 g xs ys) = (\<forall>i<length xs. f (xs ! i) (ys ! i) = g (xs ! i) (ys ! i))" by (simp add: expand_list_eq map2_length map2_nth) thm List.map_replicate lemma map2_replicate: "map2 f x\<^bsup>n\<^esup> y\<^bsup>n\<^esup> = (f x y)\<^bsup>n\<^esup>" by (induct n, simp+) lemma map2_zip_conv: "\<And>ys. length xs \<le> length ys \<Longrightarrow> map2 f xs ys = map (\<lambda>(x,y). f x y) (zip xs ys)" apply (induct xs, simp) apply (case_tac ys, simp+) done lemma map2_rev: "\<And>ys. length xs = length ys \<Longrightarrow> rev (map2 f xs ys) = map2 f (rev xs) (rev ys)" apply (induct xs, simp) apply (case_tac ys, simp) apply (simp add: map2_Cons_Cons map2_snoc_snoc) done end
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/List-Infinite/ListInf/List2.thy"}
function alpha = complexNormalAngle(varargin) %COMPLEXNORMALANGLE compute normal angle of a vertex of a cellular complex % % ALPHA = complexNormalAngle(NODES, EDGES, FACES, INDEX) % ALPHA = complexNormalAngle(NODES, EDGES, FACES, CELLS, INDEX) % Compute the nortmal angle of the polyhedral reconstruction defined be % nodes NODES, edges EDGES and faces FACES. For 3D reconstructions, it % can also contain cells CELLS. INDEX is the index of NODES for which the % normal angle ALPHA is computed. % Result is normalised between 0 and 2*PI. % % ALPHA = complexNormalAngle(GRAPH, INDEX) % Internal data are stored in a structure GRAPH, with fields : 'nodes', % 'edges', 'faces', and eventually 'cells'. % % % ALPHA = complexNormalAngle(..., INDICES) % If INDICES is an array of indices, the normal angle is computed for % each element of NODES(INDICES,:). The result ALPHA has the same size % than INDICES. % % % ------ % Author: David Legland % e-mail: david.legland@jouy.inra.fr % Created: 2005-12-19 % Copyright 2005 INRA - CEPIA Nantes - MIAJ (Jouy-en-Josas). % 2006-04-19 fix bug for small number of faces % 2006-04-26 returns 0 and not [] for null complexes % 2006-10-25 revert to return value=[] for null complex % 2008-08-11 code clean up cells = []; if length(varargin)==4 % no cells in cellular complex nodes = varargin{1}; edges = varargin{2}; faces = varargin{3}; ind = varargin{4}; elseif length(varargin)==5 % cells are given nodes = varargin{1}; edges = varargin{2}; faces = varargin{3}; cells = varargin{4}; ind = varargin{5}; elseif length(varargin)==2 % data stored as structure graph = varargin{1}; nodes = graph.nodes; edges = graph.edges; faces = graph.faces; if isfield(graph, 'cells') cells = graph.cells; end ind = varargin{2}; else error('wrong number of arguments'); end alpha0 = zeros([length(ind) 1]); alpha1 = zeros([length(ind) 1]); alpha2 = zeros([length(ind) 1]); alpha3 = zeros([length(ind) 1]); alpha = []; if size(nodes, 2)==2 % 2 dimensions if iscell(faces) % process faces as cell array for i=1:length(ind) % check that vertex is contained in the complex if ind(i)>size(nodes, 1) continue; end % normal angle of vertex alpha0(i) = 2*pi; % normal angle of edges alpha1(i) = length(find(sum(edges==ind(i), 2)))*pi; % normal angle of faces alpha2(i) = 0; for j=1:length(faces) face = faces{j}; indf = find(face==ind(i)); if ~isempty(indf) alpha2(i) = alpha2(i) + polygonNormalAngle(nodes(face,:), indf); end end end else % process faces as arrays for i=1:length(ind) % check that vertex is contained in the complex if ind(i)>size(nodes, 1) continue; end % normal angle of vertex alpha0(i) = 2*pi; % normal angle of edges alpha1(i) = length(find(sum(edges==ind(i), 2)))*pi; % normal angle of faces alpha2(i) = 0; for j=1:size(faces, 1) face = faces(j,:); indf = find(face==ind(i)); if ~isempty(indf) alpha2(i) = alpha2(i) + polygonNormalAngle(nodes(face,:), indf); end end end end % compute total normal angle of reconstruction alpha = alpha0 - alpha1 + alpha2; elseif size(nodes, 2)==3 % 3 dimensions for i=1:length(ind) % check that vertex is contained in the complex if ind(i)>size(nodes, 1) continue; end % normal angle of vertex alpha0(i) = 4*pi; % normal angle of edges alpha1(i) = length(find(sum(edges==ind(i), 2)))*2*pi; % normal angle of faces alpha2(i) = 0; if iscell(faces) % process faces as cell array for j=1:length(faces) face = faces{j}; indf = find(face==ind(i)); if ~isempty(indf) alpha2(i) = alpha2(i) + polygon3dNormalAngle(nodes(face,:), indf); end end else % process faces as array of double for j=1:size(faces, 1) face = faces(j,:); indf = find(face==ind(i)); if ~isempty(indf) alpha2(i) = alpha2(i) + polygon3dNormalAngle(nodes(face,:), indf); end end end % normal angle of cells alpha3(i) = 0; for j=1:length(cells) cell = cells{j}; if iscell(faces) cellFaces = faces(cell); else cellFaces = faces(cell, :); end alpha3(i) = alpha3(i) + polyhedronNormalAngle(nodes, cellFaces, ind(i)); end % compute total normal angle of reconstruction alpha = alpha0 - alpha1 + alpha2 - alpha3; end end %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % set of internal functions, for angle computations in 2D and 3D function theta = polygonNormalAngle(points, ind) %POLYGONNORMALANGLE compute normal angle at a vertex of the polygon % number of points np = size(points, 1); % number of angles to compute nv = length(ind); theta = zeros(nv, 1); for i=1:nv p0 = points(ind(i), :); % previous vertex if ind(i)==1 p1 = points(np, :); else p1 = points(ind(i)-1, :); end % next vertex if ind(i)==np p2 = points(1, :); else p2 = points(ind(i)+1, :); end % compute angles theta1 = mod(atan2(p1(2)-p0(2), p1(1)-p0(1)) + 2*pi, 2*pi); theta2 = mod(atan2(p2(2)-p0(2), p2(1)-p0(1)) + 2*pi, 2*pi); dtheta = mod(theta2-theta1+2*pi, 2*pi); % use simplification due to the fact that cells are convex dtheta = min(dtheta, 2*pi-dtheta); theta(i)= pi - dtheta; end return; function theta = polyhedronNormalAngle(nodes, faces, ind) %POLYHEDRONNORMALANGLE compute normal angle at a vertex of a 3D polyhedron % % THETA = polyhedronNormalAngle(NODES, FACES, IND); % where NODES is a set of 3D points, and FACES a set of faces, whose % elements are indices to NODES array, compute the normal angle at the % vertex whose index is given by IND. % number of angles to compute na = length(ind); theta = zeros(na, 1); for i=1:na % find faces containing given vertex, % and compute normal angle at each face containing vertex if iscell(faces) for j=1:length(faces) if ismember(ind(i), faces{j}) % create 3D polygon face = nodes(faces{j}, :); % index of point in polygon indp = find(faces{j}==ind(i)); % compute face angle thetaf = [thetaf polygon3dInnerAngle(face, indp)]; end end else indf = find(sum(ismember(faces, ind(i)), 2)); thetaf = zeros(length(indf), 1); for j=1:length(indf) ind2 = faces(indf(j), :); face = nodes(ind2, :); indp = find(ind2==ind(i)); thetaf(j) = polygon3dInnerAngle(face, indp); end end % compute normal angle of polyhedron, by use of angle defect formula if ~isempty(thetaf) theta(i) = 2*pi - sum(thetaf); end end return; function theta = polygon3dNormalAngle(points, ind) %POLYGON3DNORMALANGLE compute normal angle at a vertex of the 3D polygon theta = 2*pi - 2*polygon3dInnerAngle(points, ind); return; function theta = polygon3dInnerAngle(points, ind) %POLYGON3DNORMALANGLE compute normal angle at a vertex of the 3D polygon % number of points np = size(points, 1); % number of angles to compute nv = length(ind); theta = zeros(nv, 1); for i=1:nv p0 = points(ind(i), :); % previous vertex if ind(i)==1 p1 = points(np, :); else p1 = points(ind(i)-1, :); end % next vertex if ind(i)==np p2 = points(1, :); else p2 = points(ind(i)+1, :); end theta(i) = angle3d(p1, p0, p2); theta(i) = min(theta(i), 2*pi-theta(i)); % todo: solve case for CW oriented polygons end return;
{"author": "mattools", "repo": "matImage", "sha": "94d892c7beac0db32daadf2646ce37f58e894caf", "save_path": "github-repos/MATLAB/mattools-matImage", "path": "github-repos/MATLAB/mattools-matImage/matImage-94d892c7beac0db32daadf2646ce37f58e894caf/matImage/imMinkowski/private/complexNormalAngle.m"}
import argparse from torch.utils.data.sampler import SequentialSampler import sys import numpy as np import os import sys import pandas as pd import pickle from apex import amp sys.path.insert(1,'./') from train.zoo.models import * from train.zoo.surgery import * from train.datafeeding.retriever import * from train.tools.torch_utils import * from train.tools.fitter import * from train.optim import get_optimizer def main(): DATA_ROOT_PATH = os.environ.get('DATA_ROOT_PATH') parser = argparse.ArgumentParser("Train imagenet pretrained model using pytorch") arg = parser.add_argument arg('--model', type=str, default='mixnet_s', help='model name') arg('--experiment', type=str, default='test', help='specific model experiment name') arg('--surgery', type=int, default=1, help='modification level') arg('--optimizer-name', type=str, default='adamw', help='optimizer name') arg('--start-lr', type=float, default=1e-3, help='starting learning rate') arg('--weight-decay', type=float, default=1e-2, help='weight decay') arg('--num-epochs', type=int, default=40, help='number of training epochs') arg('--batch-size', type=int, default=16, help='batch size') arg('--load-checkpoint', type=str, default='' , help='path to checkpoint to load') arg('--fine-tune', type=int, default=0, help='Finetune? ie reset optimization parameters and load only weights') arg('--output', type=str, default='weights/', help='output folder') arg('--random-seed', type=int, default=0, help='random seed') arg('--fp16', type=int, default=1, help='use AMP?') arg('--decoder', type=str, default='NR', help='jpeg decoder, R or NR') arg('--device', type=str, default='0', help='device id') args = parser.parse_args() os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' os.environ['CUDA_VISIBLE_DEVICES'] = args.device #torch.cuda.set_device(int(args.device.split(':')[-1])) seed_everything(args.random_seed) device = 'cuda:0' #torch.device(args.device) QFs = ['75','90', '95'] Classes = ['Cover', 'JMiPOD', 'JUNIWARD', 'UERD'] IL_train = [] IL_val = [] for QF in QFs: with open('./IL_train_'+QF+'.p', 'rb') as handle: IL_train.extend(pickle.load(handle)) with open('./IL_val_'+QF+'.p', 'rb') as handle: IL_val.extend(pickle.load(handle)) dataset = [] for label, kind in enumerate(Classes): for path in IL_train: dataset.append({ 'kind': kind, 'image_name': path, 'label': label, 'fold':1, }) for label, kind in enumerate(Classes): for path in IL_val: dataset.append({ 'kind': kind, 'image_name': path, 'label': label, 'fold':0, }) random.shuffle(dataset) dataset = pd.DataFrame(dataset) class TrainGlobalConfig: base_dir = args.output+args.experiment num_workers = 5 batch_size = args.batch_size n_epochs = args.num_epochs optimizer = get_optimizer(args.optimizer_name) lr = args.start_lr weight_decay = args.weight_decay keep_top = 3 if args.fp16: loss_scale = 'dynamic' opt_level = 'O1' else: loss_scale = 1.0 opt_level = 'O0' fine_tune = args.fine_tune verbose = True verbose_step = 1 SchedulerClass = torch.optim.lr_scheduler.ReduceLROnPlateau scheduler_params = dict( mode='min', factor=0.5, patience=1, verbose=False, threshold=0.0001, threshold_mode='abs', cooldown=0, min_lr=1e-8, eps=1e-08 ) os.makedirs(TrainGlobalConfig.base_dir, exist_ok=True) log_args(args.__dict__, os.path.join(args.output+args.experiment, 'run_hyper_params.txt')) net = get_net(args.model) if args.surgery == 2: net = to_InPlaceABN(net) source = 'timm' if args.model.startswith('mixnet') else 'efficientnet-pytorch' net = to_MishME(net, source=source) elif args.surgery == 1: source = 'timm' if args.model.startswith('mixnet') else 'efficientnet-pytorch' net = to_MishME(net, source=source) elif args.surgery == 3: net = remove_stride(net) net = add_pooling(net) elif args.surgery == 4: net = add_pooling(net) net = net.to(device) train_dataset = TrainRetriever( kinds=dataset[dataset['fold'] != 0].kind.values, image_names=dataset[dataset['fold'] != 0].image_name.values, labels=dataset[dataset['fold'] != 0].label.values, transforms=get_train_transforms(), decoder=args.decoder ) validation_dataset = TrainRetriever( kinds=dataset[dataset['fold'] == 0].kind.values, image_names=dataset[dataset['fold'] == 0].image_name.values, labels=dataset[dataset['fold'] == 0].label.values, transforms=get_valid_transforms(), decoder=args.decoder ) train_loader = torch.utils.data.DataLoader( train_dataset, shuffle=True, batch_size=TrainGlobalConfig.batch_size, pin_memory=False, drop_last=True, num_workers=TrainGlobalConfig.num_workers, ) validation_loader = torch.utils.data.DataLoader( validation_dataset, batch_size=TrainGlobalConfig.batch_size, num_workers=TrainGlobalConfig.num_workers, shuffle=False, sampler=SequentialSampler(validation_dataset), pin_memory=False, ) fitter = Fitter(net, train_loader, validation_loader, device, TrainGlobalConfig) if args.fp16: fitter.model, fitter.optimizer = amp.initialize(fitter.model, fitter.optimizer, opt_level=fitter.config.opt_level, loss_scale=fitter.config.loss_scale,verbosity=0) if args.load_checkpoint != '': fitter.load(args.load_checkpoint) fitter.fit() if __name__ == "__main__": main()
{"hexsha": "99b7b9281e0b8cf6c01c5e0d94b9d5f3a31b8252", "size": 6273, "ext": "py", "lang": "Python", "max_stars_repo_path": "abba/train/train_pytorch.py", "max_stars_repo_name": "simphide/Kaggle-2020-Alaska2", "max_stars_repo_head_hexsha": "3c1f5e8e564c9f04423beef69244fc74168f88ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2020-08-09T11:30:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-28T14:15:08.000Z", "max_issues_repo_path": "abba/train/train_pytorch.py", "max_issues_repo_name": "simphide/Kaggle-2020-Alaska2", "max_issues_repo_head_hexsha": "3c1f5e8e564c9f04423beef69244fc74168f88ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-08-09T15:30:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T07:34:39.000Z", "max_forks_repo_path": "abba/train/train_pytorch.py", "max_forks_repo_name": "simphide/Kaggle-2020-Alaska2", "max_forks_repo_head_hexsha": "3c1f5e8e564c9f04423beef69244fc74168f88ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-09T14:29:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-27T13:07:12.000Z", "avg_line_length": 35.4406779661, "max_line_length": 123, "alphanum_fraction": 0.6094372708, "include": true, "reason": "import numpy", "num_tokens": 1500}
""" File: create_grid.py Author: David Solanas Sanz TFG """ import argparse import csv import os import keras import numpy as np from scipy.ndimage import rotate, measurements from skimage.transform import resize def create_grid(src_image): """ Creates 16 brain sections from center of mass Parameters ---------- src_image: 3D image Returns ------- 16 slices of original image """ slices = [] _, _, z = measurements.center_of_mass(src_image) z = int(np.round(z)) for i in range(z - 10, z + 22, 2): slices.append(rotate(src_image[:, :, i], -90)) return slices def copy_from_to(src, dst, i1, i2, j1, j2): """ Copies region in src to dst Parameters ---------- src: source image dst: dest image i1: Begining row i2: End row j1: Begining column j2: End column Returns ------- """ i0 = 0 for id in range(i1, i2): j0 = 0 for jd in range(j1, j2): dst[id, jd] = src[i0, j0] j0 += 1 i0 += 1 def create_data_matrix(slices): """ Creates a matrix with the 16 brain sections Parameters ---------- slices: 2D images Returns ------- 512x512 matrix """ matrix = np.zeros(shape=(512, 512)) for s1 in range(0, 4): for s2 in range(0, 4): ns = 4 * s1 + s2 i1 = 128 * s1 i2 = 128 * (s1 + 1) j1 = 128 * s2 j2 = 128 * (s2 + 1) data = resize(slices[ns], (128, 128), order=3) copy_from_to(data, matrix, i1, i2, j1, j2) return matrix def create_dictionary(filename): """ Creates a dict that contains, patientid as key and diagnosis + month as value Parameters ---------- filename: str, path to csv file Returns ------- dictionary with all patient data """ dictionary = {} with open(filename) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') next(csv_reader) for row in csv_reader: phase = row[0] key = row[3] if phase == 'ADNI1': dx = row[11] elif phase == 'ADNIGO' or phase == 'ADNI2': dx = row[10] else: dx = row[-2] if dx == '1' or dx == '7' or dx == '9': dx = 'CN' elif dx == '2' or dx == '4' or dx == '8': dx = 'MCI' elif dx == '3' or dx == '5' or dx == '6': dx = 'AD' date = row[7] date = date.split('-')[0] + date.split('-')[1] key = key + date date2 = int(date[-2:]) date3 = date2 - 1 date2 = (date2 + 1) % 13 year = int(date[:4]) year2 = year if date2 == 0: date2 = '01' year += 1 elif date2 < 10: date2 = '0' + str(date2) else: date2 = str(date2) if date3 == 0: date3 = '12' year2 -= 1 elif date3 < 10: date3 = '0' + str(date3) else: date3 = str(date3) date2 = str(year) + date2 date3 = str(year2) + date3 key2 = key[:10] + date2 key3 = key[:10] + date3 dictionary[key] = dx dictionary[key2] = dx dictionary[key3] = dx dictionary['051_S_1123201202'] = 'MCI' dictionary['051_S_1072201202'] = 'MCI' dictionary['041_S_4014201107'] = 'CN' return dictionary def label_data(dictionary, images): """ Labels the data depending on patient's diagnosis Parameters ---------- dictionary: Dict with patient information images: Names of images to label Returns ------- Labeled data """ data = [] last_patient = '' aux = [] for img in images: patientid = img[5:15] if last_patient == '': last_patient = patientid aux.append(img) continue if patientid == last_patient: aux.append(img) else: last_date = aux[-1][16:22] if last_patient + last_date in dictionary: dx = dictionary[last_patient + last_date] for a in aux: data.append((a, dx)) aux = [img] last_patient = patientid return data if __name__ == "__main__": ap = argparse.ArgumentParser() ap.add_argument("-i", "--input_directory", default=None, help="path to the directory where the images are stored") ap.add_argument("-d", "--dictionary", default=None, help="path to the csv file where the patient data is stored") ap.add_argument("-o", "--output_directory", default=None, help="path to the directory where the preprocessed images will be stored") args = ap.parse_args() base = None dest = None dict_path = None if args.input_directory is not None: if not os.path.isdir(args.input_directory): print("Directory \'%s\' does not exist" % args.input_directory) exit(1) base = args.input_directory else: print("You must specify the directory where the images are stored (see help).") exit(1) if args.output_directory is not None: if not os.path.isdir(args.output_directory): print("Directory \'%s\' does not exist" % args.output_directory) exit(1) dest = args.output_directory else: print("You must specify the directory where the resampled images will be stored (see help).") exit(1) if args.dictionary is not None: if not os.path.isfile(args.dictionary): print("File \'%s\' does not exist" % args.dictionary) exit(1) dict_path = args.dictionary else: print("You must specify the csv file where the patient data is stored (see help).") exit(1) images = os.listdir(base) d = create_dictionary(dict_path) data = label_data(d, images) print(len(data)) test_size = int(len(data) * 0.9) val_size = int(test_size * .9) # train_images, val_images, test_images = split_images(images, val_size, test_size) train_images = data[:val_size + 7] val_images = data[val_size + 7:test_size - 3] test_images = data[test_size - 3:] print(len(train_images), len(val_images), len(test_images), len(train_images) + len(val_images) + len(test_images)) train_dir = os.path.join(dest, 'train') validation_dir = os.path.join(dest, 'validation') test_dir = os.path.join(dest, 'test') # Creates output directories os.makedirs(train_dir, exist_ok=True) os.makedirs(validation_dir, exist_ok=True) os.makedirs(test_dir, exist_ok=True) os.makedirs(os.path.join(train_dir, 'AD'), exist_ok=True) os.makedirs(os.path.join(train_dir, 'MCI'), exist_ok=True) os.makedirs(os.path.join(train_dir, 'CN'), exist_ok=True) os.makedirs(os.path.join(validation_dir, 'AD'), exist_ok=True) os.makedirs(os.path.join(validation_dir, 'MCI'), exist_ok=True) os.makedirs(os.path.join(validation_dir, 'CN'), exist_ok=True) os.makedirs(os.path.join(test_dir, 'AD'), exist_ok=True) os.makedirs(os.path.join(test_dir, 'MCI'), exist_ok=True) os.makedirs(os.path.join(test_dir, 'CN'), exist_ok=True) for image, dx in train_images: path = os.path.join(base, image) img = np.load(path) print(img.shape) slices = create_grid(img) matrix = create_data_matrix(slices) matrix = (matrix - matrix.min()) / (matrix.max() - matrix.min()) img_name = image.split(".npy")[0] # Save to train directories if dx == 'CN': cn_train_dir = os.path.join(train_dir, 'CN') file = os.path.join(cn_train_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) if dx == 'AD': ad_train_dir = os.path.join(train_dir, 'AD') file = os.path.join(ad_train_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) if dx == 'MCI' or dx == 'LMCI': mci_train_dir = os.path.join(train_dir, 'MCI') file = os.path.join(mci_train_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) print('Stored train images.') for image, dx in val_images: path = os.path.join(base, image) img = np.load(path) slices = create_grid(img) matrix = create_data_matrix(slices) matrix = (matrix - matrix.min()) / (matrix.max() - matrix.min()) img_name = image.split(".npy")[0] # Save to train directories if dx == 'CN': cn_val_dir = os.path.join(validation_dir, 'CN') file = os.path.join(cn_val_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) if dx == 'AD': ad_val_dir = os.path.join(validation_dir, 'AD') file = os.path.join(ad_val_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) if dx == 'MCI' or dx == 'LMCI': mci_val_dir = os.path.join(validation_dir, 'MCI') file = os.path.join(mci_val_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) print('Stored validation images.') for image, dx in test_images: path = os.path.join(base, image) img = np.load(path) slices = create_grid(img) matrix = create_data_matrix(slices) matrix = (matrix - matrix.min()) / (matrix.max() - matrix.min()) img_name = image.split(".npy")[0] # Save to train directories if dx == 'CN': cn_test_dir = os.path.join(test_dir, 'CN') file = os.path.join(cn_test_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) if dx == 'AD': ad_test_dir = os.path.join(test_dir, 'AD') file = os.path.join(ad_test_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) if dx == 'MCI' or dx == 'LMCI': mci_test_dir = os.path.join(test_dir, 'MCI') file = os.path.join(mci_test_dir, img_name + '.tif') matrix = np.stack((matrix,) * 3, axis=-1) keras.preprocessing.image.save_img(file, matrix) # np.save(file, matrix) print('Stored test images.')
{"hexsha": "070a9f98228d272ca8ec6331819ee62668257878", "size": 11380, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/preprocessing/create_grid.py", "max_stars_repo_name": "DavidSolanas/TFG", "max_stars_repo_head_hexsha": "dc84a8b11cfcc53e13ba6080793e3550bea6a48b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/preprocessing/create_grid.py", "max_issues_repo_name": "DavidSolanas/TFG", "max_issues_repo_head_hexsha": "dc84a8b11cfcc53e13ba6080793e3550bea6a48b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/preprocessing/create_grid.py", "max_forks_repo_name": "DavidSolanas/TFG", "max_forks_repo_head_hexsha": "dc84a8b11cfcc53e13ba6080793e3550bea6a48b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4364640884, "max_line_length": 119, "alphanum_fraction": 0.5474516696, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2854}
[STATEMENT] lemma comparator_of_rep [simp]: "comparator_of (rep_nat x) (rep_nat y) = comparator_of x y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. comparator_of (rep_nat x) (rep_nat y) = comparator_of x y [PROOF STEP] by (simp add: comparator_of_def linorder_class.comparator_of_def ord_iff rep_inj)
{"llama_tokens": 126, "file": "Polynomials_Term_Order", "length": 1}
[STATEMENT] lemma card_of_empty4: "|{}::'b set| <o |A::'a set| = (A \<noteq> {})" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (|{}| <o |A|) = (A \<noteq> {}) [PROOF STEP] proof(intro iffI notI) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<lbrakk>|{}| <o |A|; A = {}\<rbrakk> \<Longrightarrow> False 2. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] assume *: "|{}::'b set| <o |A|" and "A = {}" [PROOF STATE] proof (state) this: |{}| <o |A| A = {} goal (2 subgoals): 1. \<lbrakk>|{}| <o |A|; A = {}\<rbrakk> \<Longrightarrow> False 2. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] hence "|A| =o |{}::'b set|" [PROOF STATE] proof (prove) using this: |{}| <o |A| A = {} goal (1 subgoal): 1. |A| =o |{}| [PROOF STEP] using card_of_ordIso [PROOF STATE] proof (prove) using this: |{}| <o |A| A = {} (\<exists>f. bij_betw f ?A ?B) = (|?A| =o |?B|) goal (1 subgoal): 1. |A| =o |{}| [PROOF STEP] unfolding bij_betw_def inj_on_def [PROOF STATE] proof (prove) using this: |{}| <o |A| A = {} (\<exists>f. (\<forall>x\<in>?A. \<forall>y\<in>?A. f x = f y \<longrightarrow> x = y) \<and> f ` ?A = ?B) = (|?A| =o |?B|) goal (1 subgoal): 1. |A| =o |{}| [PROOF STEP] by blast [PROOF STATE] proof (state) this: |A| =o |{}| goal (2 subgoals): 1. \<lbrakk>|{}| <o |A|; A = {}\<rbrakk> \<Longrightarrow> False 2. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] hence "|{}::'b set| =o |A|" [PROOF STATE] proof (prove) using this: |A| =o |{}| goal (1 subgoal): 1. |{}| =o |A| [PROOF STEP] using ordIso_symmetric [PROOF STATE] proof (prove) using this: |A| =o |{}| ?r =o ?r' \<Longrightarrow> ?r' =o ?r goal (1 subgoal): 1. |{}| =o |A| [PROOF STEP] by blast [PROOF STATE] proof (state) this: |{}| =o |A| goal (2 subgoals): 1. \<lbrakk>|{}| <o |A|; A = {}\<rbrakk> \<Longrightarrow> False 2. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] with * [PROOF STATE] proof (chain) picking this: |{}| <o |A| |{}| =o |A| [PROOF STEP] show False [PROOF STATE] proof (prove) using this: |{}| <o |A| |{}| =o |A| goal (1 subgoal): 1. False [PROOF STEP] using not_ordLess_ordIso[of "|{}::'b set|" "|A|"] [PROOF STATE] proof (prove) using this: |{}| <o |A| |{}| =o |A| |{}| <o |A| \<Longrightarrow> (|{}|, |A|) \<notin> ordIso goal (1 subgoal): 1. False [PROOF STEP] by blast [PROOF STATE] proof (state) this: False goal (1 subgoal): 1. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] assume "A \<noteq> {}" [PROOF STATE] proof (state) this: A \<noteq> {} goal (1 subgoal): 1. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] hence "(\<not> (\<exists>f. inj_on f A \<and> f ` A \<subseteq> {}))" [PROOF STATE] proof (prove) using this: A \<noteq> {} goal (1 subgoal): 1. \<nexists>f. inj_on f A \<and> f ` A \<subseteq> {} [PROOF STEP] unfolding inj_on_def [PROOF STATE] proof (prove) using this: A \<noteq> {} goal (1 subgoal): 1. \<nexists>f. (\<forall>x\<in>A. \<forall>y\<in>A. f x = f y \<longrightarrow> x = y) \<and> f ` A \<subseteq> {} [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<nexists>f. inj_on f A \<and> f ` A \<subseteq> {} goal (1 subgoal): 1. A \<noteq> {} \<Longrightarrow> |{}| <o |A| [PROOF STEP] thus "| {} | <o | A |" [PROOF STATE] proof (prove) using this: \<nexists>f. inj_on f A \<and> f ` A \<subseteq> {} goal (1 subgoal): 1. |{}| <o |A| [PROOF STEP] using card_of_ordLess [PROOF STATE] proof (prove) using this: \<nexists>f. inj_on f A \<and> f ` A \<subseteq> {} (\<nexists>f. inj_on f ?A \<and> f ` ?A \<subseteq> ?B) = (|?B| <o |?A|) goal (1 subgoal): 1. |{}| <o |A| [PROOF STEP] by blast [PROOF STATE] proof (state) this: |{}| <o |A| goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1867, "file": null, "length": 22}
""" Unit tests for computing density, baroclinic head and the internal pressure gradient from a temperature field. Runs MES convergence tests against a non-trivial analytical solution in a deformed geometry. NOTE currently only linear equation of state is tested TODO test full nonlinear equation of state """ from thetis import * from thetis.momentum_eq import InternalPressureGradientCalculator from scipy import stats import pytest def compute_l2_error(refinement=1, quadratic_pressure=False, quadratic_density=False, project_density=False, full_eos=False, no_exports=True): """ Computes pressure gradient in a setting where bathymetry, mesh surface elevation, and pressure are analytical, non-trivial functions. """ print_output(' ---- running refinement {:}'.format(refinement)) # create mesh rho_0 = 1000.0 physical_constants['rho0'] = rho_0 g_grav = physical_constants['g_grav'] delta_x = 120e3/refinement lx = 360e3 ly = 360e3 nx = int(lx/delta_x) ny = int(ly/delta_x) mesh2d = RectangleMesh(nx, ny, lx, ly) layers = 3*refinement # bathymetry P1_2d = get_functionspace(mesh2d, 'CG', 1) bathymetry_2d = Function(P1_2d, name='Bathymetry') elev_warp_fact = 0.3 bath_warp_fact = 0.6 xy = SpatialCoordinate(mesh2d) depth = 3600. bath_expr = 0.5*(depth + depth)*(1 - bath_warp_fact*tanh(4*(xy[1]-ly/2)/ly)*sin(1.5*xy[0]/ly+0.2)) bathymetry_2d.project(bath_expr) mesh = extrude_mesh_sigma(mesh2d, layers, bathymetry_2d) bnd_len = compute_boundary_length(mesh2d) mesh2d.boundary_len = bnd_len mesh.boundary_len = bnd_len # make function spaces and fields p1 = get_functionspace(mesh, 'CG', 1) p1dg = get_functionspace(mesh, 'DG', 1) if quadratic_density: fs_density = get_functionspace(mesh, 'DG', 2, vfamily='CG', vdegree=2) else: fs_density = p1dg if quadratic_pressure: # NOTE for 3rd order convergence both the scalar and grad must be p2 fs_bhead = get_functionspace(mesh, 'DG', 2, vfamily='CG', vdegree=2) fs_pg = get_functionspace(mesh, 'DG', 2, 'CG', 2, vector=True, dim=2) else: # the default function spaces in Thetis fs_bhead = get_functionspace(mesh, 'DG', 1, vfamily='CG', vdegree=2) fs_pg = get_functionspace(mesh, 'DG', 1, 'CG', 2, vector=True, dim=2) temp_3d = Function(p1dg, name='temperature') density_3d = Function(fs_density, name='density') baroc_head_3d = Function(fs_bhead, name='baroclinic head') int_pg_3d = Function(fs_pg, name='pressure gradient') elev_3d = Function(p1, name='elevation') bathymetry_3d = Function(p1, name='elevation') ExpandFunctionTo3d(bathymetry_2d, bathymetry_3d).solve() # deform mesh by elevation xyz = SpatialCoordinate(mesh) elev_expr = elev_warp_fact*depth*cos(3*(xyz[0]/lx-0.3))*sin(2*xyz[1]/ly+0.3) elev_3d.project(elev_expr) z_ref = mesh.coordinates.dat.data[:, 2] bath = bathymetry_3d.dat.data[:] eta = elev_3d.dat.data[:] new_z = eta*(z_ref + bath)/bath + z_ref mesh.coordinates.dat.data[:, 2] = new_z # project initial temperature, range ~ [0, 10] deg C temp_expr = 5*cos((2*xyz[0] + xyz[1])/lx)*cos((xyz[2]/depth)) + 15 temp_3d.project(temp_expr) # compute density alpha = 0.2 # thermal expansion coeff beta = 0.0 # haline contraction coeff temp_ref = 15.0 salt_const = 10.0 salt = Constant(salt_const) if not full_eos: eos_params = { 'rho_ref': rho_0, 's_ref': salt_const, 'th_ref': temp_ref, 'alpha': alpha, 'beta': beta, } equation_of_state = LinearEquationOfState(**eos_params) else: equation_of_state = JackettEquationOfState() if project_density: density_solver = DensitySolverWeak(salt, temp_3d, density_3d, equation_of_state) else: density_solver = DensitySolver(salt, temp_3d, density_3d, equation_of_state) density_solver.solve() # solve baroclinic head VerticalIntegrator(density_3d, baroc_head_3d, bottom_to_top=False, average=False).solve() baroc_head_3d *= -physical_constants['rho0_inv'] # solve pressure gradient fields = FieldDict() fields.baroc_head_3d = baroc_head_3d fields.int_pg_3d = int_pg_3d bnd_functions = {} int_pg_solver = InternalPressureGradientCalculator( fields, bathymetry_3d, bnd_functions, solver_parameters=None) int_pg_solver.solve() # analytical solution if not full_eos: density_expr = - alpha*(temp_expr - temp_ref) else: # just use the nonlin expression density_expr = equation_of_state.eval(salt, temp_expr, p=0.0, rho0=rho_0) assert not full_eos # need to integrate f(z) = -alpha*temp_expr(z) # f(z) = -alpha*5*cos((2*xyz[0] + xyz[1])/lx)*cos((z/depth)) # F(z) = -alpha*5*cos((2*xyz[0] + xyz[1])/lx)*depth*sin((z/depth)) # int(f, eta, z) = F(eta) - F(z) a = -physical_constants['rho0_inv']*alpha*5 b = cos((2*xyz[0] + xyz[1])/lx) c_xy = depth*sin((elev_expr/depth)) c_z = -depth*sin((xyz[2]/depth)) baroc_head_expr = a*b*(c_xy + c_z) # compute Dx Dy of the above b_dx = -sin((2*xyz[0] + xyz[1])/lx)*2/lx b_dy = -sin((2*xyz[0] + xyz[1])/lx)/lx elev_expr_dx = -elev_warp_fact*depth*3/lx*sin(3*(xyz[0]/lx-0.3))*sin(2*xyz[1]/ly+0.3) elev_expr_dy = elev_warp_fact*depth*2/ly*cos(3*(xyz[0]/lx-0.3))*cos(2*xyz[1]/ly+0.3) c_xy_dx = elev_expr_dx*cos((elev_expr/depth)) c_xy_dy = elev_expr_dy*cos((elev_expr/depth)) bhead_dx_expr = a*b_dx*(c_xy + c_z) + a*b*c_xy_dx bhead_dy_expr = a*b_dy*(c_xy + c_z) + a*b*c_xy_dy int_pg_expr = g_grav*as_vector((bhead_dx_expr, bhead_dy_expr)) # error norms volume = comp_volume_3d(mesh) l2_err_density = errornorm(density_expr, density_3d, degree_rise=2)/np.sqrt(volume) print_output('Density L2 error: {:}'.format(l2_err_density)) l2_err_bhead = errornorm(baroc_head_expr, baroc_head_3d, degree_rise=2)/np.sqrt(volume) print_output('B.head L2 error: {:}'.format(l2_err_bhead)) l2_err_pg = errornorm(int_pg_expr, int_pg_3d, degree_rise=2)/np.sqrt(volume) print_output('Int.PG L2 error: {:}'.format(l2_err_pg)) if not no_exports: out_temp = File('temperature.pvd') out_density = File('density.pvd') out_bhead = File('baroc_head.pvd') out_pg = File('int_pg.pvd') # export numerical solution out_temp.write(temp_3d) out_density.write(density_3d) out_bhead.write(baroc_head_3d) out_pg.write(int_pg_3d) # export projected analytical solution out_temp.write(temp_3d) out_density.write(density_3d.project(density_expr)) out_bhead.write(baroc_head_3d.project(baroc_head_expr)) out_pg.write(int_pg_3d.project(int_pg_expr)) return l2_err_density, l2_err_density, l2_err_pg def run_convergence(ref_list, save_plot=False, **options): """Runs test for a list of refinements and computes error convergence rate""" l2_err = [] for r in ref_list: l2_err.append(compute_l2_error(r, **options)) x_log = np.log10(np.array(ref_list, dtype=float)**-1) y_log = np.log10(np.array(l2_err)) y_log_density = y_log[:, 0] y_log_bhead = y_log[:, 1] y_log_intpg = y_log[:, 2] setup_name = 'intpg-stack' order = 1 def check_convergence(x_log, y_log, expected_slope, field_str, save_plot): slope_rtol = 0.2 slope, intercept, r_value, p_value, std_err = stats.linregress(x_log, y_log) if save_plot: import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(5, 5)) # plot points ax.plot(x_log, y_log, 'k.') x_min = x_log.min() x_max = x_log.max() offset = 0.05*(x_max - x_min) n = 50 xx = np.linspace(x_min - offset, x_max + offset, n) yy = intercept + slope*xx # plot line ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k') ax.text(xx[2*n/3], yy[2*n/3], '{:4.2f}'.format(slope), verticalalignment='top', horizontalalignment='left') ax.set_xlabel('log10(dx)') ax.set_ylabel('log10(L2 error)') ax.set_title(field_str) ref_str = 'ref-' + '-'.join([str(r) for r in ref_list]) order_str = 'o{:}'.format(order) imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, order_str]) imgfile += '.png' img_dir = create_directory('plots') imgfile = os.path.join(img_dir, imgfile) print_output('saving figure {:}'.format(imgfile)) plt.savefig(imgfile, dpi=200, bbox_inches='tight') if expected_slope is not None: err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope) assert abs(slope - expected_slope)/expected_slope < slope_rtol, err_msg print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope)) else: print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope)) return slope check_convergence(x_log, y_log_density, 2, 'density', save_plot) check_convergence(x_log, y_log_bhead, 2, 'bhead', save_plot) check_convergence(x_log, y_log_intpg, 1, 'intpg', save_plot) @pytest.mark.parametrize(('quad_p'), [True, False], ids=['p2_pressure', 'p1_pressure']) def test_int_pg(quad_p): run_convergence([1, 2, 3], quadratic_pressure=quad_p, quadratic_density=False, full_eos=False, project_density=False, no_exports=True, save_plot=False) if __name__ == '__main__': run_convergence([1, 2, 3, 4], quadratic_pressure=True, quadratic_density=False, full_eos=False, project_density=False, no_exports=True, save_plot=True)
{"hexsha": "1ce4b1f374333614de010d4dfb6d9af39f0d9392", "size": 10277, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/pressure_grad/test_pg-stack_mes.py", "max_stars_repo_name": "LawrenceDior/thetis", "max_stars_repo_head_hexsha": "fa4b14eeac1063f922ba24f03ebf7ecdf80b82ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/pressure_grad/test_pg-stack_mes.py", "max_issues_repo_name": "LawrenceDior/thetis", "max_issues_repo_head_hexsha": "fa4b14eeac1063f922ba24f03ebf7ecdf80b82ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/pressure_grad/test_pg-stack_mes.py", "max_forks_repo_name": "LawrenceDior/thetis", "max_forks_repo_head_hexsha": "fa4b14eeac1063f922ba24f03ebf7ecdf80b82ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3470149254, "max_line_length": 117, "alphanum_fraction": 0.6364697869, "include": true, "reason": "from scipy", "num_tokens": 2967}
import numpy as np import pytest from scipy.spatial.transform import Rotation from nao_gestures.nao_kinematics import InverseKinematics, ForwardKinematics, isclose_angles def test_right_shoulder_forward_kinematics_zero(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Zero control input: position_right_elbow_standard, rotation_right_elbow_standard = \ ForwardKinematics.forward_kinematics_right_shoulder( theta_right_shoulder_pitch=0, theta_right_shoulder_roll=0, position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, right_arm_length=1.0, ) position_right_elbow_standard_expected = rotation_right_shoulder_standard.apply(np.array([0, -1, 0])) + position_right_shoulder_standard assert np.allclose(position_right_elbow_standard, position_right_elbow_standard_expected) assert np.allclose(rotation_right_elbow_standard.as_rotvec(), rotation_right_shoulder_standard.as_rotvec()) def test_right_shoulder_forward_kinematics_shoulder_roll_only(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Shoulder roll only: position_right_elbow_standard, rotation_right_elbow_standard = \ ForwardKinematics.forward_kinematics_right_shoulder( theta_right_shoulder_pitch=0, theta_right_shoulder_roll=-np.pi/2, position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, right_arm_length=1.0, ) position_right_elbow_standard_expected = rotation_right_shoulder_standard.apply(np.array([0, 0, 1])) + position_right_shoulder_standard assert np.allclose(position_right_elbow_standard, position_right_elbow_standard_expected) assert np.allclose(rotation_right_elbow_standard.as_rotvec(), (rotation_right_shoulder_standard * Rotation.from_euler('zxy', [0, -np.pi/2, 0])).as_rotvec()) def test_right_shoulder_forward_kinematics_shoulder_pitch_only(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Shoulder pitch only: position_right_elbow_standard, rotation_right_elbow_standard = \ ForwardKinematics.forward_kinematics_right_shoulder( theta_right_shoulder_pitch=np.pi/2, theta_right_shoulder_roll=0, position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, right_arm_length=1.0, ) position_right_elbow_standard_expected = rotation_right_shoulder_standard.apply(np.array([-1, 0, 0])) + position_right_shoulder_standard assert np.allclose(position_right_elbow_standard, position_right_elbow_standard_expected) assert np.allclose(rotation_right_elbow_standard.as_rotvec(), (rotation_right_shoulder_standard * Rotation.from_euler('zxy', [-np.pi/2, 0, 0])).as_rotvec()) def test_right_shoulder_forward_kinematics_shoulder_roll_and_pitch(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Shoulder roll and pitch: position_right_elbow_standard, rotation_right_elbow_standard = \ ForwardKinematics.forward_kinematics_right_shoulder( theta_right_shoulder_pitch=np.pi / 4, theta_right_shoulder_roll=-np.pi / 4, position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, right_arm_length=1.0, ) position_right_elbow_standard_expected = rotation_right_shoulder_standard.apply(np.array([-0.5, -0.5, 1/np.sqrt(2)])) + position_right_shoulder_standard assert np.allclose(position_right_elbow_standard, position_right_elbow_standard_expected) assert np.allclose(rotation_right_elbow_standard.as_rotvec(), (rotation_right_shoulder_standard * Rotation.from_euler('xzy', [-np.pi / 4, -1 * np.pi / 4, 0])).as_rotvec()) # pitch (-y axis) then roll (+z axis) def test_right_shoulder_inverse_kinematics_zero(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Zero control input: theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_right_shoulder( position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, position_right_elbow_inertial=rotation_right_shoulder_standard.apply(np.array([0, -1, 0])) + position_right_shoulder_standard, ) assert np.allclose(theta_r, 0) assert np.allclose(theta_p, 0) def test_right_shoulder_inverse_kinematics_shoulder_roll_only(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Shoulder roll only: theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_right_shoulder( position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, position_right_elbow_inertial=rotation_right_shoulder_standard.apply(np.array([0, -1/np.sqrt(2), 1/np.sqrt(2)])) + position_right_shoulder_standard, ) assert np.allclose(theta_r, -np.pi/4) assert np.allclose(theta_p, 0) def test_right_shoulder_inverse_kinematics_shoulder_pitch_only(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Shoulder pitch only: theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_right_shoulder( position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, position_right_elbow_inertial=rotation_right_shoulder_standard.apply(np.array([-1, 0, 0])) + position_right_shoulder_standard, ) assert np.allclose(theta_r, 0) assert np.allclose(theta_p, np.pi/2) def test_right_shoulder_inverse_kinematics_shoulder_roll_and_pitch(): np.random.seed(42) position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) # Shoulder roll and pitch: theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_right_shoulder( position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, position_right_elbow_inertial=rotation_right_shoulder_standard.apply(np.array([-0.5, -0.5, 1/np.sqrt(2)])) + position_right_shoulder_standard, ) assert np.allclose(theta_r, -np.pi / 4) assert np.allclose(theta_p, np.pi / 4) def test_right_shoulder_forward_then_inverse_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary shoulder pose: position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) theta_right_shoulder_pitch = np.random.random() * 2 * np.pi - np.pi theta_right_shoulder_roll = np.random.random() * np.pi - np.pi / 2 fk_arm_length = 10 * np.random.random() position_right_elbow_standard, rotation_right_elbow_standard = \ ForwardKinematics.forward_kinematics_right_shoulder( theta_right_shoulder_pitch=theta_right_shoulder_pitch, theta_right_shoulder_roll=theta_right_shoulder_roll, position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, right_arm_length=fk_arm_length, ) theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_right_shoulder( position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, position_right_elbow_inertial=position_right_elbow_standard, ) assert np.isclose(theta_p, theta_right_shoulder_pitch) assert np.isclose(theta_r, theta_right_shoulder_roll) def test_right_shoulder_inverse_then_forward_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary shoulder pose: position_right_shoulder_standard = np.random.random([3]) rotation_right_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) position_right_elbow_standard_initial = np.random.random([3]) initial_arm_length = np.linalg.norm(position_right_elbow_standard_initial - position_right_shoulder_standard) fk_arm_length = 10 * np.random.random() theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_right_shoulder( position_right_shoulder_standard=np.array(position_right_shoulder_standard), rotation_right_shoulder_standard=rotation_right_shoulder_standard, position_right_elbow_inertial=position_right_elbow_standard_initial, ) position_right_elbow_standard, rotation_right_elbow_standard = \ ForwardKinematics.forward_kinematics_right_shoulder( theta_right_shoulder_pitch=theta_p, theta_right_shoulder_roll=theta_r, position_right_shoulder_standard=position_right_shoulder_standard, rotation_right_shoulder_standard=rotation_right_shoulder_standard, right_arm_length=fk_arm_length, ) # Un-normalize length: position_right_elbow_standard = position_right_shoulder_standard + (position_right_elbow_standard - position_right_shoulder_standard) * (initial_arm_length / fk_arm_length) # Give a relatively large tolerance as small errors in IK can add to relatively large differences here assert np.allclose(position_right_elbow_standard, position_right_elbow_standard_initial, atol=0.01) @pytest.mark.xfail def test_left_shoulder_fk(): raise NotImplementedError() @pytest.mark.xfail def test_left_shoulder_ik(): raise NotImplementedError() def test_left_shoulder_forward_then_inverse_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary shoulder pose: position_left_shoulder_standard = np.random.random([3]) rotation_left_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) theta_left_shoulder_pitch = np.random.random() * 2 * np.pi - np.pi theta_left_shoulder_roll = np.random.random() * np.pi - np.pi / 2 fk_arm_length = 10 * np.random.random() position_left_elbow_standard, rotation_left_elbow_standard = \ ForwardKinematics.forward_kinematics_left_shoulder( theta_left_shoulder_pitch=theta_left_shoulder_pitch, theta_left_shoulder_roll=theta_left_shoulder_roll, position_left_shoulder_standard=position_left_shoulder_standard, rotation_left_shoulder_standard=rotation_left_shoulder_standard, left_arm_length=fk_arm_length, ) theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_left_shoulder( position_left_shoulder_standard=position_left_shoulder_standard, rotation_left_shoulder_standard=rotation_left_shoulder_standard, position_left_elbow_inertial=position_left_elbow_standard, ) assert np.isclose(theta_p, theta_left_shoulder_pitch) assert np.isclose(theta_r, theta_left_shoulder_roll) def test_left_shoulder_inverse_then_forward_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary shoulder pose: position_left_shoulder_standard = np.random.random([3]) rotation_left_shoulder_standard = Rotation.from_rotvec(np.random.random([3])) position_left_elbow_standard_initial = np.random.random([3]) initial_arm_length = np.linalg.norm(position_left_elbow_standard_initial - position_left_shoulder_standard) fk_arm_length = 10 * np.random.random() theta_r, theta_p, _, _ = InverseKinematics.inverse_kinematics_left_shoulder( position_left_shoulder_standard=np.array(position_left_shoulder_standard), rotation_left_shoulder_standard=rotation_left_shoulder_standard, position_left_elbow_inertial=position_left_elbow_standard_initial, ) position_left_elbow_standard, rotation_left_elbow_standard = \ ForwardKinematics.forward_kinematics_left_shoulder( theta_left_shoulder_pitch=theta_p, theta_left_shoulder_roll=theta_r, position_left_shoulder_standard=position_left_shoulder_standard, rotation_left_shoulder_standard=rotation_left_shoulder_standard, left_arm_length=fk_arm_length, ) # Un-normalize length: position_left_shoulder_standard = position_left_shoulder_standard + ( position_left_elbow_standard - position_left_shoulder_standard) * ( initial_arm_length / fk_arm_length) # Give a relatively large tolerance as small errors in IK can add to relatively large differences here assert np.allclose(position_left_shoulder_standard, position_left_elbow_standard_initial, atol=0.01) @pytest.mark.xfail def test_right_elbow_fk(): raise NotImplementedError() @pytest.mark.xfail def test_right_elbow_ik(): raise NotImplementedError() def test_right_elbow_forward_then_inverse_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary elbow pose: position_right_elbow_standard = np.random.random([3]) rotation_right_elbow_standard = Rotation.from_rotvec(np.random.random([3])) theta_right_elbow_roll = np.random.random() * np.pi theta_right_elbow_yaw = np.random.random() * 2 * np.pi - np.pi fk_forearm_length = 10 * np.random.random() position_right_hand_standard, rotation_right_hand_standard = \ ForwardKinematics.forward_kinematics_right_elbow( theta_right_elbow_roll=theta_right_elbow_roll, theta_right_elbow_yaw=theta_right_elbow_yaw, position_right_elbow_standard=position_right_elbow_standard, rotation_right_elbow_standard=rotation_right_elbow_standard, right_forearm_length=fk_forearm_length, ) theta_r, theta_y, _, _ = InverseKinematics.inverse_kinematics_right_elbow( position_right_elbow_standard=position_right_elbow_standard, rotation_right_elbow_standard=rotation_right_elbow_standard, position_right_hand_inertial=position_right_hand_standard, ) assert isclose_angles(theta_r, theta_right_elbow_roll) assert isclose_angles(theta_y, theta_right_elbow_yaw) def test_right_elbow_inverse_then_forward_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary elbow pose: position_right_elbow_standard = np.random.random([3]) rotation_right_elbow_standard= Rotation.from_rotvec(np.random.random([3])) # Random hand pose: position_right_hand_inertial_initial = np.random.random([3]) initial_right_forearm_length = np.linalg.norm(position_right_hand_inertial_initial - position_right_elbow_standard) fk_forearm_length = 10 * np.random.random() theta_r, theta_y, _, _ = \ InverseKinematics.inverse_kinematics_right_elbow( position_right_elbow_standard=position_right_elbow_standard, rotation_right_elbow_standard=rotation_right_elbow_standard, position_right_hand_inertial=position_right_hand_inertial_initial, ) position_right_hand_standard, rotation_right_hand_standard = \ ForwardKinematics.forward_kinematics_right_elbow( theta_right_elbow_roll=theta_r, theta_right_elbow_yaw=theta_y, position_right_elbow_standard=position_right_elbow_standard, rotation_right_elbow_standard=rotation_right_elbow_standard, right_forearm_length=fk_forearm_length, ) # Un-normalize length: position_right_hand_standard = position_right_elbow_standard + (position_right_hand_standard - position_right_elbow_standard) * (initial_right_forearm_length / fk_forearm_length) # Give a relatively large tolerance as small errors in IK can add to relatively large differences here assert np.allclose(position_right_hand_standard, position_right_hand_inertial_initial, atol=0.01) @pytest.mark.xfail def test_left_elbow_fk(): raise NotImplementedError() @pytest.mark.xfail def test_left_elbow_ik(): raise NotImplementedError() def test_left_elbow_forward_then_inverse_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary elbow pose: position_left_elbow_standard = np.random.random([3]) rotation_left_elbow_standard = Rotation.from_rotvec(np.random.random([3])) theta_left_elbow_roll = -np.random.random() * np.pi theta_left_elbow_yaw = np.random.random() * 2 * np.pi - np.pi fk_forearm_length = 10 * np.random.random() position_left_hand_standard, rotation_left_hand_standard = \ ForwardKinematics.forward_kinematics_left_elbow( theta_left_elbow_roll=theta_left_elbow_roll, theta_left_elbow_yaw=theta_left_elbow_yaw, position_left_elbow_standard=position_left_elbow_standard, rotation_left_elbow_standard=rotation_left_elbow_standard, left_forearm_length=fk_forearm_length, ) theta_r, theta_y, _, _ = InverseKinematics.inverse_kinematics_left_elbow( position_left_elbow_standard=position_left_elbow_standard, rotation_left_elbow_standard=rotation_left_elbow_standard, position_left_hand_inertial=position_left_hand_standard, ) assert isclose_angles(theta_r, theta_left_elbow_roll) assert isclose_angles(theta_y, theta_left_elbow_yaw) def test_left_elbow_inverse_then_forward_kinematics_random(): np.random.seed(12) for _ in range(10): # Arbitrary elbow pose: position_left_elbow_standard = np.random.random([3]) rotation_left_elbow_standard= Rotation.from_rotvec(np.random.random([3])) # Random hand pose: position_left_hand_inertial_initial = np.random.random([3]) initial_left_forearm_length = np.linalg.norm(position_left_hand_inertial_initial - position_left_elbow_standard) fk_forearm_length = 10 * np.random.random() theta_r, theta_y, _, _ = \ InverseKinematics.inverse_kinematics_left_elbow( position_left_elbow_standard=position_left_elbow_standard, rotation_left_elbow_standard=rotation_left_elbow_standard, position_left_hand_inertial=position_left_hand_inertial_initial, ) position_left_hand_standard, rotation_left_hand_standard = \ ForwardKinematics.forward_kinematics_left_elbow( theta_left_elbow_roll=theta_r, theta_left_elbow_yaw=theta_y, position_left_elbow_standard=position_left_elbow_standard, rotation_left_elbow_standard=rotation_left_elbow_standard, left_forearm_length=fk_forearm_length, ) # Un-normalize length: position_left_hand_standard = position_left_elbow_standard + (position_left_hand_standard - position_left_elbow_standard) * (initial_left_forearm_length / fk_forearm_length) # Give a relatively large tolerance as small errors in IK can add to relatively large differences here assert np.allclose(position_left_hand_standard, position_left_hand_inertial_initial, atol=0.01)
{"hexsha": "ef529feeb028fade4865b038c8908b8b8a7aad05", "size": 20857, "ext": "py", "lang": "Python", "max_stars_repo_path": "nao_gestures/test_nao_kinematics.py", "max_stars_repo_name": "TomKingsfordUoA/NaoGestures", "max_stars_repo_head_hexsha": "ba1cfbb2a376c02ab7bb51264e7504d6bb255c28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-12T03:35:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-12T03:35:59.000Z", "max_issues_repo_path": "nao_gestures/test_nao_kinematics.py", "max_issues_repo_name": "TomKingsfordUoA/NaoGestures", "max_issues_repo_head_hexsha": "ba1cfbb2a376c02ab7bb51264e7504d6bb255c28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-10T20:22:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-10T20:22:55.000Z", "max_forks_repo_path": "nao_gestures/test_nao_kinematics.py", "max_forks_repo_name": "TomKingsfordUoA/NaoGestures", "max_forks_repo_head_hexsha": "ba1cfbb2a376c02ab7bb51264e7504d6bb255c28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-18T06:24:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-18T06:24:11.000Z", "avg_line_length": 45.9405286344, "max_line_length": 186, "alphanum_fraction": 0.7410461715, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4383}
""" The MIT License (MIT) Copyright (c) 2017 Eduardo Henrique Vieira dos Santos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import numpy as np import copy class AnnyBee(object): """docstring for AnnBP""" def __init__(self, arg): super(AnnyBee, self).__init__() self.arg = arg self.synapses = self.randomSynapses(arg[0]) self.desiredInputs = arg[1] self.desiredOutputs = arg[2] self.min_error = arg[3] def save(name, ob): #save object pickle_out = open(name, "wb") pickle.dump(ob, pickle_out) pickle_out.close() def load(name): pickle_in = open(name, "rb") z = pickle.load(pickle_in) pickle_in.close() return z def activationFunction(self, x, d=False): if(d): return x*(1-x) return 1/(1+np.exp(-x)) def activateNet(self, inputList = None, synapsesList = None): if inputList is None: inputList = self.desiredInputs if synapsesList is None: synapsesList = self.synapses np.random.seed(1) l = [] for i in xrange(len(synapsesList)+1): if i == 0: l.append(inputList) else: l.append(self.activationFunction(np.dot(l[i-1],synapsesList[i-1]))) return l def randomSynapses(self, listOfLayerHeights): np.random.seed(1) synapses = [] for i in xrange(len(listOfLayerHeights)): if i != 0: synapses.append(2*np.random.random((listOfLayerHeights[i-1],listOfLayerHeights[i])) - 1) return synapses def rateError(self, layersActivations, desiredOutputs = None, synapsesList = None): if desiredOutputs is None: desiredOutputs = copy.deepcopy(self.desiredOutputs) if synapsesList is None: synapsesList = copy.deepcopy(self.synapses) error = [] delta = [] layersActivations.reverse() synapsesList.reverse() for i in xrange(len(synapsesList)): if i == 0: error.append(desiredOutputs - layersActivations[i]) delta.append(error[0]*self.activationFunction(layersActivations[0],True)) else: error.append(delta[i-1].dot(synapsesList[i-1].T)) delta.append(error[i]*self.activationFunction(layersActivations[i],True)) layersActivations.reverse() synapsesList.reverse() error.reverse() delta.reverse() return delta, error def assignDelta(self, synapsesList, layersActivations, deltaList): for i in xrange(len(synapsesList)): synapsesList[i] += layersActivations[i].T.dot(deltaList[i]) def learnBP(self, synapsesList = None, inputList = None, desiredOutputs = None): if synapsesList is None: synapsesList = self.synapses if inputList is None: inputList = self.desiredInputs if desiredOutputs is None: desiredOutputs = self.desiredOutputs loop = True j = 0 while loop: layersActivations = self.activateNet(inputList, synapsesList) delta = self.rateError(layersActivations, desiredOutputs,synapsesList) self.assignDelta(synapsesList, layersActivations, delta[0]) error = np.mean(np.abs(delta[-1][1])) if (j% 10000) == 0: print "Error:" + str(error) j = 0 if (self.min_error > error): loop = False j = j + 1 #Input xInput = np.array([[ 0,0 ], [ 0,1 ], [ 1,0 ], [ 1,1 ]]) #Output yTargetOutput = np.array([[ 0,1 ], [ 1,0 ], [ 1,0 ], [ 0,1 ]]) MinimalError = 0.001 #Inicialization args[]: #[len(inputs), len(hidden1), len(hidden2)... len(output) #Input numpy.array() #Output numpy.array() #Minimal error ann = AnnyBee([[2,3,3,2],xInput,yTargetOutput,MinimalError]) #Learn ann.learnBP() #Show output print ann.activateNet()[-1]
{"hexsha": "2927e3f19c035f12a26dabe97c3e201ed2b76d09", "size": 4969, "ext": "py", "lang": "Python", "max_stars_repo_path": "AnnyBee.py", "max_stars_repo_name": "EdVieira/AnnyBee", "max_stars_repo_head_hexsha": "250f5193c9242a27bf31d76dbdf6a1902c4534bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AnnyBee.py", "max_issues_repo_name": "EdVieira/AnnyBee", "max_issues_repo_head_hexsha": "250f5193c9242a27bf31d76dbdf6a1902c4534bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AnnyBee.py", "max_forks_repo_name": "EdVieira/AnnyBee", "max_forks_repo_head_hexsha": "250f5193c9242a27bf31d76dbdf6a1902c4534bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0072463768, "max_line_length": 104, "alphanum_fraction": 0.6150130811, "include": true, "reason": "import numpy", "num_tokens": 1192}
# # Class Enhancement from scipy.signal import lfilter from spectrum import pmtm from Universal import * from VAD import * class Enhancement: def simplesubspec(self, signal, wlen, inc, NIS, a, b): """ simple spectrum subtraction :param signal: noisy speech :param wlen: window length :param inc: frame shift :param NIS: leading noise segment length :param a: over subtraction factor :param b: gain factor :return output: denoise speech """ wnd = np.hamming(wlen) # window function N = len(signal) # signal length speech = Speech() y = speech.enframe(signal, list(wnd), inc).T # enframe fn = y.shape[1] # frame number y_fft = np.fft.fft(y, axis=0) # FFT y_a = np.abs(y_fft) # amplitude y_phase = np.angle(y_fft) # phase y_a2 = y_a ** 2 # energy Nt = np.mean(y_a2[:, 0: NIS], 1) # average energy in noise segment nl2 = int(wlen / 2) + 1 # positvie frequency temp = np.zeros(nl2) # energy U = np.zeros(nl2) # one frame amplitude X = np.zeros((nl2, fn)) # amplitude for i in range(fn): # spectrum subtraction for k in range(nl2): if (y_a2[k, i] > a * Nt[k]): temp[k] = y_a2[k, i] - a * Nt[k] else: temp[k] = b * y_a2[k, i] U[k] = np.sqrt(temp[k]) X[:, i] = U output = speech.OverlapAdd2(X, y_phase[0:nl2, :], wlen, inc) # synthesis Nout = len(output) # spectrum subtraction length = original length? if Nout > N: output = output[0: N] else: output = np.concatenate([output, np.zeros(N - Nout)]) output = output / np.max(np.abs(output)) # normalization return output def segment(self, siganl, W=256, SP=0.4, Window=np.hamming(256)): """ chops a signal to overlapping windowed segments :param siganl: one dimentional signal :param W: sample number per window (default = 25) :param SP: shift percent (default = 0.4) :param Window: window function (default: hamming) :return Seg: segment matrix """ if (W != 256): Window = np.hamming(W) Window = Window.reshape(-1, 1) # make it a column vector L = len(siganl) SP = int(W * SP) N = int((L - W) / SP + 1) # number of segments Index = np.tile(np.arange(0, W), (N, 1)) + np.tile(SP * np.arange(0, N).reshape(-1, 1), (1, W)) Index = Index.T hw = np.tile(Window, (1, N)) Seg = siganl[Index] * hw return Seg def SSBoll79(self, signal, fs, IS=None): """ Spectral Subtraction based on Boll 79. Amplitude spectral subtraction Includes Magnitude Averaging and Residual noise Reduction :param signal: noisy signal :param fs: sampling frequency :param IS: initial silence (noise only) length in seconds (default value is .25 sec) :return output: denoise signal """ if not IS: IS = 0.25 # seconds elif isinstance(IS, float): W = int(0.025 * fs) # window length 25ms nfft = W # overlap-add method works good with this shift value SP = 0.4 # frame shift 40% (10ms) wnd = np.hamming(W) # IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM FROM HERE..... if isinstance(IS, dict): W = IS['windowsize'] SP = IS['shiftsize'] / W nfft = IS['nfft'] wnd = IS['window'] if hasattr(IS, 'IS'): IS = IS['IS'] else: IS = 0.25 # .......IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM T0 HERE NIS = int((IS * fs - W) / (SP * W) + 1) # number of initial silence segments Gamma = 1 # 1: magnitude, 2: power spectrum y = self.segment(signal, W, SP, wnd) Y = np.fft.fft(y, axis=0) FreqResol, NumberofFrames = Y.shape YPhase = np.angle(Y[0: int(NumberofFrames / 2) + 1, :]) # noisy speech phase Y = np.abs(Y[0: int(NumberofFrames / 2) + 1, :]) ** Gamma # Spectrogram N = np.mean(Y[:, 0:NIS].T, axis=0).T # initial noise power spectrum mean NRM = np.zeros(N.shape) # Noise Residual Maximum (Initialization) NoiseCounter = 0 NoiseLength = 9 # smoothing factor for noise updating Beta = 0.03 YS = Y # Y magnitude average for i in np.arange(1, NumberofFrames - 1): YS[:, i] = (YS[:, i - 1] + YS[:, i] + YS[:, i + 1]) / 3 X = np.zeros(Y.shape) D = np.zeros(FreqResol) for i in range(NumberofFrames): # Magnitude Spectrum Distance VAD NoiseFlag, SpeechFlag, NoiseCounter, Dist = VAD().vad(Y[:, i] ** (1 / Gamma), N ** (1 / Gamma), NoiseCounter) if SpeechFlag == 0: N = (NoiseLength * N + Y[:, i]) / (NoiseLength + 1) # update and smooth noise NRM = np.maximum(NRM, YS[:, i] - N) # update maximum noise residue X[:, i] = Beta * Y[:, i] else: D = YS[:, i] - N # spectral subtraction if i > 0 and i < NumberofFrames - 1: # residual noise reduction for j in range(len(D)): if D[j] < NRM[j]: D[j] = np.min(np.array([D[j], YS[j, i - 1] - N[j], YS[j, i + 1] - N[j]])) D[D < 0] = 0 X[:, i] = D output = Speech().OverlapAdd2(X ** (1 / Gamma), YPhase, int(W), int(SP * W)) return output def SSBoll79_2(self, signal, fs, T1, IS=None): """ Spectral Subtraction based on Boll 79. Amplitude spectral subtraction Includes Magnitude Averaging and Residual noise Reduction :param signal: noisy signal :param fs: sampling frequency :param IS: initial silence (noise only) length in seconds (default value is .25 sec) :return output: denoise signal """ if not IS: IS = 0.25 # seconds W = int(0.025 * fs) # window length 25ms nfft = W # overlap-add method works good with this shift value SP = 0.4 # frame shift 40% (10ms) wnd = np.hamming(W) elif isinstance(IS, float): W = int(0.025 * fs) # window length 25ms nfft = W # overlap-add method works good with this shift value SP = 0.4 # frame shift 40% (10ms) wnd = np.hamming(W) # IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM FROM HERE..... if isinstance(IS, dict): W = IS['windowsize'] SP = IS['shiftsize'] / W nfft = IS['nfft'] wnd = IS['window'] if hasattr(IS, 'IS'): IS = IS['IS'] else: IS = 0.25 # .......IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM T0 HERE NIS = int((IS * fs - W) / (SP * W) + 1) # number of initial silence segments Gamma = 1 # 1: magnitude, 2: power spectrum y = self.segment(signal, W, SP, wnd) Y = np.fft.fft(y, axis=0) FreqResol, NumberofFrames = Y.shape YPhase = np.angle(Y[0: int(NumberofFrames / 2) + 1, :]) # noisy speech phase Y = np.abs(Y[0: int(NumberofFrames / 2) + 1, :]) ** Gamma # Spectrogram N = np.mean(Y[:, 0:NIS].T, axis=0).T # initial noise power spectrum mean NRM = np.zeros(N.shape) # Noise Residual Maximum (Initialization) NoiseCounter = 0 NoiseLength = 9 # smoothing factor for noise updating Beta = 0.03 fn = NumberofFrames miniL = 5 voiceseg, vosl, SF, Ef = VAD().pitch_vad1(y, fn, T1, miniL) YS = Y # Y magnitude average for i in np.arange(1, NumberofFrames - 1): YS[:, i] = (YS[:, i - 1] + YS[:, i] + YS[:, i + 1]) / 3 X = np.zeros(Y.shape) D = np.zeros(FreqResol) for i in range(NumberofFrames): # Magnitude Spectrum Distance VAD NoiseFlag, SpeechFlag, NoiseCounter, Dist = VAD().vad(Y[:, i] ** (1 / Gamma), N ** (1 / Gamma), NoiseCounter) SpeechFlag = SF[i] if SpeechFlag == 0: N = (NoiseLength * N + Y[:, i]) / (NoiseLength + 1) # update and smooth noise NRM = np.maximum(NRM, YS[:, i] - N) # update maximum noise residue X[:, i] = Beta * Y[:, i] else: D = YS[:, i] - N # spectral subtraction if i > 0 and i < NumberofFrames - 1: # residual noise reduction for j in range(len(D)): if D[j] < NRM[j]: D[j] = np.min(np.array([D[j], YS[j, i - 1] - N[j], YS[j, i + 1] - N[j]])) D[D < 0] = 0 X[:, i] = D output = Speech().OverlapAdd2(X ** (1 / Gamma), YPhase, int(W), int(SP * W)) output = output / np.max(np.abs(output)) # normalized return output def Mtmpsd_ssb(self, signal, wlen, inc, NIS, alpha, beta, c): """ Spectral Subtraction Multitaper Spectrum Estimation Short-term Energy Entropy Ratio :param signal: noisy speech :param wlen: frame length :param inc: frame shift :param NIS: leding unvoiced (noise) frame number :param alpha: over subtraction factor in spectral subtraction :param beta: gain compensation factor :param c: gain factor (0: power spectrum, 1: amplitude spectrum) :return output: denoise speech """ w2 = int(wlen / 2) + 1 wind = np.hamming(wlen) # hamming window y = Speech().enframe(signal, list(wind), inc).T # enframe fn = y.shape[1] # frame number N = len(signal) # signal length fft_frame = np.fft.fft(y, axis=0) # FFT abs_frame = np.abs(fft_frame[0: w2, :]) # positive frequency amplitude ang_frame = np.angle(fft_frame[0: w2, :]) # positive frequency phase # smoothing in 3 neighbour frame abs_frame_backup = abs_frame for i in range(1, fn - 1, 2): abs_frame_backup[:, i] = 0.25 * abs_frame[:, i - 1] + 0.5 * abs_frame[:, i] + 0.25 * abs_frame[:, i + 1] abs_frame = abs_frame_backup # multitaper spectrum estimation power spectrum PSDFrame = np.zeros((w2, fn)) # PSD in each frame for i in range(fn): # PSDFrame[:, i] = pmtm(y[:, i], NW = 3, NFFT=wlen) Sk_complex, weights, eigenvalues = pmtm(y[:, i], NW=3, NFFT=wlen) Sk = (np.abs(Sk_complex) ** 2).transpose() PSDTwoSide = np.mean(Sk * weights, axis=1) PSDFrame[:, i] = PSDTwoSide[0: w2] PSDFrameBackup = PSDFrame for i in range(1, fn - 1, 2): PSDFrameBackup[:, i] = 0.25 * PSDFrame[:, i - 1] + 0.5 * PSDFrame[:, i] + 0.25 * PSDFrame[:, i + 1] PSDFrame = PSDFrameBackup # average PSD of leading unvoiced segment NoisePSD = np.mean(PSDFrame[:, 0: NIS], axis=1) # spectral subtraction -> gain factor g = np.zeros((w2, fn)) # gain factor g_n = np.zeros((w2, fn)) for k in range(fn): g[:, k] = (PSDFrame[:, k] - alpha * NoisePSD) / PSDFrame[:, k] g_n[:, k] = beta * NoisePSD / PSDFrame[:, k] gix = np.where(g[:, k] < 0) g[gix, k] = g_n[gix, k] gf = g if c == 0: g = gf else: g = np.sqrt(gf) SubFrame = g * abs_frame # spectral subtraction amplitude output = Speech().OverlapAdd2(SubFrame, ang_frame, wlen, inc) # synthesis output = output / np.max(np.abs(output)) # normalized ol = len(output) if ol < N: output = np.concatenate((output, np.zeros(N - ol))) return output def WienerScalart96m_2(self, signal, fs, T1, IS): """ Wiener filter based on tracking a priori SNR usingDecision-Directed method, proposed by Scalart et al 96. In this method it is assumed that SNRpost=SNRprior +1. based on this the Wiener Filter can be adapted to a model like Ephraims model in which we have a gain function which is a function of a priori SNR and a priori SNR is being tracked using Decision Directed method. :param signal: noisy signal :param fs: sampling frequency :param IS: initial silence (noise only) length in seconds (default value is .25 sec) :param T1: threshold :return output: denoise signal """ if not IS: IS = 0.25 # seconds W = int(0.025 * fs) # window length 25ms nfft = W # overlap-add method works good with this shift value SP = 0.4 # frame shift 40% (10ms) wnd = np.hamming(W) elif isinstance(IS, float): W = int(0.025 * fs) # window length 25ms nfft = W # overlap-add method works good with this shift value SP = 0.4 # frame shift 40% (10ms) wnd = np.hamming(W) # IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM FROM HERE..... if isinstance(IS, dict): W = IS['windowsize'] SP = IS['shiftsize'] / W nfft = IS['nfft'] wnd = IS['window'] if hasattr(IS, 'IS'): IS = IS['IS'] else: IS = 0.25 # .......IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM T0 HERE pre_emph = 0 # pre_emphasis parameter signal = lfilter(np.array([1, -1 * pre_emph]), 1, signal) # pre-emphasis NIS = int((IS * fs - W) / (SP * W) + 1) # number of initial silence segments y = self.segment(signal, W, SP, wnd) # enframe Y = np.fft.fft(y, axis=0) # FFT FreqResol, NumberofFrames = Y.shape YPhase = np.angle(Y[0: int(NumberofFrames / 2) + 1, :]) # noisy speech phase Y = np.abs(Y[0: int(NumberofFrames / 2) + 1, :]) # Spectrogram LambdaD = np.mean(Y[:, 0 : NIS] ** 2).T # initial noise power spectrum variance N = np.mean(Y[:, 0:NIS].T, axis=0).T # initial average noise power spectrum alpha = 0.99 fn = NumberofFrames miniL = 5 voiceseg, vosl, SF, Ef = VAD().pitch_vad1(y, fn, T1, miniL) # vad NoiseCounter = 0 NoiseLength = 9 # smoothing factor for noise updating G = np.ones(N.shape) # power estimation initialization Gamma = G X = np.zeros(Y.shape) # Y magnitude average for i in np.arange(1, NumberofFrames - 1): SpeechFlag = SF[i] if i <= NIS: # leading unvoiced segment SpeechFlag = 0 NoiseCounter = 100 if SpeechFlag == 0: # update noise spectrum in unvoiced segment N = (NoiseLength * N + Y[:, i])/(NoiseLength + 1) LambdaD = (NoiseLength * LambdaD + Y[:, i] ** 2)/(NoiseLength + 1) # update and smoothing noise variance gammaNew = (Y[:, i] ** 2)/LambdaD # post SNR xi = alpha * (G ** 2) * Gamma + (1 - alpha) * np.max(gammaNew - 1, 0) # senior SNR Gamma = gammaNew G = (xi/(xi + 1)) # wiener spectrum estimation X[:, i] = G * Y[:, i] # wiener filter spectrum output = Speech().OverlapAdd2(X, YPhase, int(W), int(SP * W)) output = lfilter([1], np.array([1, -1 * pre_emph]), output) output = output / np.max(np.abs(output)) # normalized return output
{"hexsha": "9bd543b2e9a144bdf68645b88e2f82e008aad06d", "size": 14130, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter8_PitchDetection/Enhancement.py", "max_stars_repo_name": "SeventeenChen/Python_Speech_SZY", "max_stars_repo_head_hexsha": "0074ad1d519387a75d5eca42c77f4d6966eb0a0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-11-23T11:03:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-24T01:26:21.000Z", "max_issues_repo_path": "Chapter7_SpeechEnhancement/Enhancement.py", "max_issues_repo_name": "SeventeenChen/Python_Speech_SZY", "max_issues_repo_head_hexsha": "0074ad1d519387a75d5eca42c77f4d6966eb0a0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter7_SpeechEnhancement/Enhancement.py", "max_forks_repo_name": "SeventeenChen/Python_Speech_SZY", "max_forks_repo_head_hexsha": "0074ad1d519387a75d5eca42c77f4d6966eb0a0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-23T14:26:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-30T10:39:08.000Z", "avg_line_length": 35.7721518987, "max_line_length": 108, "alphanum_fraction": 0.5954706299, "include": true, "reason": "from scipy", "num_tokens": 4653}
import datetime import random import numpy as np from sklearn.metrics import roc_auc_score as roc_auc from cases.credit_scoring.credit_scoring_problem import get_scoring_data from fedot.core.composer.gp_composer.gp_composer import GPComposerBuilder, GPComposerRequirements from fedot.core.data.data import InputData from fedot.core.optimisers.gp_comp.gp_optimiser import GPGraphOptimiserParameters, GeneticSchemeTypesEnum from fedot.core.optimisers.gp_comp.operators.selection import SelectionTypesEnum from fedot.core.pipelines.pipeline import Pipeline from fedot.core.repository.operation_types_repository import get_operations_for_task from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum, ComplexityMetricsEnum from fedot.core.repository.tasks import Task, TaskTypesEnum from fedot.core.visualisation.opt_viz import PipelineEvolutionVisualiser random.seed(12) np.random.seed(12) def results_visualization(history, composed_pipelines): visualiser = PipelineEvolutionVisualiser() visualiser.visualise_history(history) visualiser.pareto_gif_create(history.archive_history, history.individuals) visualiser.boxplots_gif_create(history.individuals) for pipeline_evo_composed in composed_pipelines: pipeline_evo_composed.show() def calculate_validation_metric(pipeline: Pipeline, dataset_to_validate: InputData) -> float: # the execution of the obtained composite models predicted = pipeline.predict(dataset_to_validate) # the quality assessment for the simulation results roc_auc_value = roc_auc(y_true=dataset_to_validate.target, y_score=predicted.predict) return roc_auc_value def run_credit_scoring_problem(train_file_path, test_file_path, timeout: datetime.timedelta = datetime.timedelta(minutes=5), is_visualise=False): task = Task(TaskTypesEnum.classification) dataset_to_compose = InputData.from_csv(train_file_path, task=task) dataset_to_validate = InputData.from_csv(test_file_path, task=task) # the search of the models provided by the framework that can be used as nodes in a pipeline for the selected task available_model_types = get_operations_for_task(task=task, mode='model') # the choice of the metric for the pipeline quality assessment during composition quality_metric = ClassificationMetricsEnum.ROCAUC complexity_metric = ComplexityMetricsEnum.node_num metrics = [quality_metric, complexity_metric] # the choice and initialisation of the GP search composer_requirements = GPComposerRequirements( primary=available_model_types, secondary=available_model_types, max_arity=3, max_depth=3, pop_size=20, num_of_generations=20, crossover_prob=0.8, mutation_prob=0.8, timeout=timeout, start_depth=2) # GP optimiser parameters choice scheme_type = GeneticSchemeTypesEnum.parameter_free optimiser_parameters = GPGraphOptimiserParameters(genetic_scheme_type=scheme_type, selection_types=[SelectionTypesEnum.spea2]) # Create builder for composer and set composer params builder = GPComposerBuilder(task=task).with_requirements(composer_requirements).with_metrics( metrics).with_optimiser_parameters(optimiser_parameters) # Create GP-based composer composer = builder.build() # the optimal pipeline generation by composition - the most time-consuming task pipelines_evo_composed = composer.compose_pipeline(data=dataset_to_compose, is_visualise=True) composer.history.write_composer_history_to_csv() if is_visualise: results_visualization(composed_pipelines=pipelines_evo_composed, history=composer.history) pipelines_roc_auc = [] for pipeline_num, pipeline_evo_composed in enumerate(pipelines_evo_composed): pipeline_evo_composed.fine_tune_primary_nodes(input_data=dataset_to_compose, iterations=50) pipeline_evo_composed.fit(input_data=dataset_to_compose) # the quality assessment for the obtained composite models roc_on_valid_evo_composed = calculate_validation_metric(pipeline_evo_composed, dataset_to_validate) pipelines_roc_auc.append(roc_on_valid_evo_composed) if len(pipelines_evo_composed) > 1: print(f'Composed ROC AUC of pipeline {pipeline_num + 1} is {round(roc_on_valid_evo_composed, 3)}') else: print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}') return max(pipelines_roc_auc) if __name__ == '__main__': full_path_train, full_path_test = get_scoring_data() run_credit_scoring_problem(full_path_train, full_path_test, is_visualise=True)
{"hexsha": "5dff8ea6f886dccd89c1bc9c5c5d990a69429590", "size": 4943, "ext": "py", "lang": "Python", "max_stars_repo_path": "cases/credit_scoring/credit_scoring_problem_multiobj.py", "max_stars_repo_name": "rozlana-g/FEDOT", "max_stars_repo_head_hexsha": "a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 358, "max_stars_repo_stars_event_min_datetime": "2020-06-11T09:34:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:56:22.000Z", "max_issues_repo_path": "cases/credit_scoring/credit_scoring_problem_multiobj.py", "max_issues_repo_name": "rozlana-g/FEDOT", "max_issues_repo_head_hexsha": "a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 467, "max_issues_repo_issues_event_min_datetime": "2020-06-11T13:49:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:19:48.000Z", "max_forks_repo_path": "cases/credit_scoring/credit_scoring_problem_multiobj.py", "max_forks_repo_name": "rozlana-g/FEDOT", "max_forks_repo_head_hexsha": "a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 48, "max_forks_repo_forks_event_min_datetime": "2020-07-13T14:50:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T09:37:13.000Z", "avg_line_length": 45.7685185185, "max_line_length": 118, "alphanum_fraction": 0.7511632612, "include": true, "reason": "import numpy", "num_tokens": 1022}
import basevcstest import numpy import vcs class TestVCSNoXtraElts(basevcstest.VCSBaseTest): def testNoXtraElements(self): data = numpy.sin(numpy.arange(100)) data.shape = (10, 10) orig = {} new = {} for k in list(vcs.elements.keys()): new[k] = [] orig[k] = list(vcs.elements[k].keys()) self.x.plot(data, "default", "boxfill", bg=self.bg) self.x.plot(data, "default", "isofill", bg=self.bg) self.x.plot(data, "default", "isoline", bg=self.bg) self.x.plot(data, data, "default", "vector", bg=self.bg) self.x.plot(data, "default", "1d", bg=self.bg) self.x.clear() diff = False for e in list(vcs.elements.keys()): for k in list(vcs.elements[e].keys()): if k not in orig[e]: new[e].append(k) diff = True if diff: for k in list(new.keys()): if new[k] != []: print(k, new[k]) raise Exception("New elements added when it shouldn't")
{"hexsha": "41b3f026ff55fe3d0fe2fe45797cbae31979d6aa", "size": 1099, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_vcs_no_extra_elements.py", "max_stars_repo_name": "scottwittenburg/vcs", "max_stars_repo_head_hexsha": "5b9f17fb78f7ab186fc0132ab81ada043a7ba348", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-10-10T03:14:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T14:18:15.000Z", "max_issues_repo_path": "tests/test_vcs_no_extra_elements.py", "max_issues_repo_name": "scottwittenburg/vcs", "max_issues_repo_head_hexsha": "5b9f17fb78f7ab186fc0132ab81ada043a7ba348", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 196, "max_issues_repo_issues_event_min_datetime": "2018-03-21T19:44:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-21T21:56:24.000Z", "max_forks_repo_path": "tests/test_vcs_no_extra_elements.py", "max_forks_repo_name": "scottwittenburg/vcs", "max_forks_repo_head_hexsha": "5b9f17fb78f7ab186fc0132ab81ada043a7ba348", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-12-09T21:54:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T04:22:14.000Z", "avg_line_length": 31.4, "max_line_length": 67, "alphanum_fraction": 0.5131938126, "include": true, "reason": "import numpy", "num_tokens": 274}
[STATEMENT] lemma eeqButPID_F_cong: assumes "eeqButPID_F sw sw1" and "PID = PID \<Longrightarrow> eqButF uu uu1" and "pid \<noteq> PID \<Longrightarrow> uu = uu1" shows "eeqButPID_F (sw (pid := uu)) (sw1(pid := uu1))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. eeqButPID_F (sw(pid := uu)) (sw1(pid := uu1)) [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: eeqButPID_F sw sw1 PID = PID \<Longrightarrow> eqButF uu uu1 pid \<noteq> PID \<Longrightarrow> uu = uu1 goal (1 subgoal): 1. eeqButPID_F (sw(pid := uu)) (sw1(pid := uu1)) [PROOF STEP] unfolding eeqButPID_F_def [PROOF STATE] proof (prove) using this: \<forall>pid. if pid = PID then eqButF (sw PID) (sw1 PID) else sw pid = sw1 pid PID = PID \<Longrightarrow> eqButF uu uu1 pid \<noteq> PID \<Longrightarrow> uu = uu1 goal (1 subgoal): 1. \<forall>pida. if pida = PID then eqButF ((sw(pid := uu)) PID) ((sw1(pid := uu1)) PID) else (sw(pid := uu)) pida = (sw1(pid := uu1)) pida [PROOF STEP] by (auto split: if_splits)
{"llama_tokens": 461, "file": "CoSMeDis_Post_Confidentiality_Post_Unwinding_Helper_ISSUER", "length": 3}
fun(x,y) = x + y
{"hexsha": "a232acffd714d555e2598fe2e9ff40dd5c29f505", "size": 16, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/lib.jl", "max_stars_repo_name": "waseemssaeed/TetsRepo", "max_stars_repo_head_hexsha": "e8cec1b167b5b9eea113a4882452106955c3e57e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lib.jl", "max_issues_repo_name": "waseemssaeed/TetsRepo", "max_issues_repo_head_hexsha": "e8cec1b167b5b9eea113a4882452106955c3e57e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib.jl", "max_forks_repo_name": "waseemssaeed/TetsRepo", "max_forks_repo_head_hexsha": "e8cec1b167b5b9eea113a4882452106955c3e57e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.0, "max_line_length": 16, "alphanum_fraction": 0.4375, "num_tokens": 8}