program hello
!$OMP parallel
print *, "Hello world"
!$OMP end parallel
end program hello
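! A minimal sketch of building and running this (assuming gfortran):
!   gfortran -fopenmp hello-omp.f90 -o hello-omp
!   OMP_NUM_THREADS=4 ./hello-omp
! Each thread in the parallel region prints the greeting once.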
|
{"hexsha": "0b1def5b458afa554d986b01d272f2e25e67ddac", "size": 97, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "others/parallel_computting/hello-omp.f90", "max_stars_repo_name": "bt3gl/Resources-Numerical_Methods_for_Physics", "max_stars_repo_head_hexsha": "8668215f107230fafd9bdeb0061d353328cf03e8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-10-28T03:13:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-21T17:38:06.000Z", "max_issues_repo_path": "others/parallel_computting/hello-omp.f90", "max_issues_repo_name": "bt3gl/Resources-Numerical_Methods_for_Physics", "max_issues_repo_head_hexsha": "8668215f107230fafd9bdeb0061d353328cf03e8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "others/parallel_computting/hello-omp.f90", "max_forks_repo_name": "bt3gl/Resources-Numerical_Methods_for_Physics", "max_forks_repo_head_hexsha": "8668215f107230fafd9bdeb0061d353328cf03e8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-05-09T07:55:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-12T11:05:42.000Z", "avg_line_length": 12.125, "max_line_length": 24, "alphanum_fraction": 0.6907216495, "num_tokens": 25}
|
/*
* Copyright (C) 2012-2015 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <gtest/gtest.h>
#include <boost/algorithm/string/replace.hpp>
#include <boost/regex.hpp>
#include "gazebo/test/ServerFixture.hh"
using namespace gazebo;
class WirelessReceiver_TEST : public ServerFixture
{
public: static const std::string receiverSensorString;
public: WirelessReceiver_TEST();
public: void TestCreateWirelessReceiver();
public: void TestIllegalTransceiver();
public: void TestIllegalPower();
public: void TestIllegalGain();
public: void TestIllegalMinFreq();
public: void TestIllegalMaxFreq();
public: void TestIllegalMinMaxFreq();
public: void TestIllegalSensitivity();
public: void TestUpdateImpl();
private: void CheckIllegalValue(std::string _sensorString);
private: sensors::SensorManager *mgr;
private: sdf::ElementPtr sdf;
};
const std::string WirelessReceiver_TEST::receiverSensorString =
"<sdf version='1.4'>"
" <sensor name='wirelessReceiver' type='wireless_receiver'>"
" <always_on>1</always_on>"
" <visualize>0</visualize>"
" <update_rate>1.0</update_rate>"
" <transceiver>"
" <min_frequency>2412.0</min_frequency>"
" <max_frequency>2484.0</max_frequency>"
" <power>14.5</power>"
" <gain>2.5</gain>"
" <sensitivity>-90.0</sensitivity>"
" </transceiver>"
" </sensor>"
"</sdf>";
/////////////////////////////////////////////////
WirelessReceiver_TEST::WirelessReceiver_TEST()
:sdf(new sdf::Element)
{
Load("worlds/empty.world");
this->mgr = sensors::SensorManager::Instance();
sdf::initFile("sensor.sdf", this->sdf);
}
/////////////////////////////////////////////////
/// \brief Test Creation of a wireless receiver sensor
void WirelessReceiver_TEST::TestCreateWirelessReceiver()
{
sdf::readString(this->receiverSensorString, this->sdf);
// Create the wireless receiver sensor
std::string sensorName = this->mgr->CreateSensor(this->sdf, "default",
"ground_plane::link", 0);
// Make sure the returned sensor name is correct
EXPECT_EQ(sensorName,
std::string("default::ground_plane::link::wirelessReceiver"));
// Update the sensor manager so that it can process new sensors.
this->mgr->Update();
// Get a pointer to the wireless receiver sensor
sensors::WirelessReceiverPtr sensor =
boost::dynamic_pointer_cast<sensors::WirelessReceiver>(
this->mgr->GetSensor(sensorName));
// Make sure the above dynamic cast worked.
ASSERT_TRUE(sensor != NULL);
EXPECT_DOUBLE_EQ(sensor->GetMinFreqFiltered(), 2412.0);
EXPECT_DOUBLE_EQ(sensor->GetMaxFreqFiltered(), 2484.0);
EXPECT_DOUBLE_EQ(sensor->GetPower(), 14.5);
EXPECT_DOUBLE_EQ(sensor->GetGain(), 2.5);
EXPECT_DOUBLE_EQ(sensor->GetSensitivity(), -90);
EXPECT_TRUE(sensor->IsActive());
}
/////////////////////////////////////////////////
/// \brief Create a sensor with an illegal value and check that an exception
/// is thrown
void WirelessReceiver_TEST::CheckIllegalValue(std::string _sensorString)
{
sdf::readString(_sensorString, this->sdf);
// Create the wireless receiver sensor
ASSERT_ANY_THROW(this->mgr->CreateSensor(this->sdf,
"default", "ground_plane::link", 0));
}
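// Each of the illegal-value tests below copies the valid SDF string, corrupts
// or removes a single element (via boost::regex_replace or
// boost::replace_first), and delegates to CheckIllegalValue to verify that
// sensor creation throws.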
/////////////////////////////////////////////////
/// \brief Test Non-existent transceiver element
void WirelessReceiver_TEST::TestIllegalTransceiver()
{
  // Make a copy of the sdf string to avoid affecting other tests
std::string receiverSensorStringCopy = this->receiverSensorString;
boost::replace_first(receiverSensorStringCopy, "<transceiver>", "");
boost::replace_first(receiverSensorStringCopy, "</transceiver>", "");
this->CheckIllegalValue(receiverSensorStringCopy);
}
/////////////////////////////////////////////////
/// \brief Test wrong power value for the transceiver element
void WirelessReceiver_TEST::TestIllegalPower()
{
// Replace the power by an incorrect value
boost::regex re("<power>.*<\\/power>");
std::string receiverSensorStringCopy =
boost::regex_replace(this->receiverSensorString,
re, "<power>-1.0</power>");
this->CheckIllegalValue(receiverSensorStringCopy);
}
/////////////////////////////////////////////////
/// \brief Test wrong gain value for the transceiver element
void WirelessReceiver_TEST::TestIllegalGain()
{
// Replace the gain by an incorrect value
boost::regex re("<gain>.*<\\/gain>");
std::string receiverSensorStringCopy =
boost::regex_replace(this->receiverSensorString, re, "<gain>-1.0</gain>");
this->CheckIllegalValue(receiverSensorStringCopy);
}
/////////////////////////////////////////////////
/// \brief Test wrong min_frequency value for the transceiver element
void WirelessReceiver_TEST::TestIllegalMinFreq()
{
// Replace the min frequency by an incorrect value
boost::regex re("<min_frequency>.*<\\/min_frequency>");
std::string receiverSensorStringCopy =
boost::regex_replace(this->receiverSensorString, re,
"<min_frequency>-1.0</min_frequency>");
this->CheckIllegalValue(receiverSensorStringCopy);
}
/////////////////////////////////////////////////
/// \brief Test wrong max_frequency value for the transceiver element
void WirelessReceiver_TEST::TestIllegalMaxFreq()
{
// Replace the max frequency by an incorrect value
boost::regex re("<max_frequency>.*<\\/max_frequency>");
std::string receiverSensorStringCopy =
boost::regex_replace(this->receiverSensorString, re,
"<max_frequency>-1.0</max_frequency>");
this->CheckIllegalValue(receiverSensorStringCopy);
}
/////////////////////////////////////////////////
/// \brief Test min_frequency value greater than max_frequency
void WirelessReceiver_TEST::TestIllegalMinMaxFreq()
{
// Swap min_frequency and max_frequency
boost::regex re("<max_frequency>.*<\\/max_frequency>");
std::string receiverSensorStringCopy =
boost::regex_replace(this->receiverSensorString, re,
"<max_frequency>2412.0</max_frequency>");
re = "<min_frequency>.*<\\/min_frequency>";
receiverSensorStringCopy =
boost::regex_replace(receiverSensorStringCopy, re,
"<min_frequency>2484.0</min_frequency>");
this->CheckIllegalValue(receiverSensorStringCopy);
}
/////////////////////////////////////////////////
/// \brief Test wrong sensitivity value for the transceiver element
void WirelessReceiver_TEST::TestIllegalSensitivity()
{
// Replace the sensitivity by an incorrect value
boost::regex re("<sensitivity>.*<\\/sensitivity>");
std::string receiverSensorStringCopy =
boost::regex_replace(this->receiverSensorString, re,
"<sensitivity>1.0</sensitivity>");
this->CheckIllegalValue(receiverSensorStringCopy);
}
/////////////////////////////////////////////////
/// \brief Test the updateImpl() method
void WirelessReceiver_TEST::TestUpdateImpl()
{
sdf::readString(this->receiverSensorString, this->sdf);
// Create the wireless receiver sensor
std::string sensorName = this->mgr->CreateSensor(this->sdf, "default",
"ground_plane::link", 0);
// Make sure the returned sensor name is correct
EXPECT_EQ(sensorName,
std::string("default::ground_plane::link::wirelessReceiver"));
// Update the sensor manager so that it can process new sensors.
this->mgr->Update();
// Get a pointer to the wireless receiver sensor
sensors::WirelessReceiverPtr sensor =
boost::dynamic_pointer_cast<sensors::WirelessReceiver>(
this->mgr->GetSensor(sensorName));
  // Make sure the above dynamic cast worked; abort on failure, since the
  // next line dereferences the pointer.
  ASSERT_TRUE(sensor != NULL);
sensor->Update(true);
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestCreateWirelessReceiver)
{
TestCreateWirelessReceiver();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestIllegalTransceiver)
{
TestIllegalTransceiver();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestIllegalPower)
{
TestIllegalPower();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestIllegalGain)
{
TestIllegalGain();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestIllegalMinFreq)
{
TestIllegalMinFreq();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestIllegalMaxFreq)
{
TestIllegalMaxFreq();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestIllegalMinMaxFreq)
{
TestIllegalMinMaxFreq();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestIllegalSensitivity)
{
TestIllegalSensitivity();
}
/////////////////////////////////////////////////
TEST_F(WirelessReceiver_TEST, TestUpdateImpl)
{
TestUpdateImpl();
}
/////////////////////////////////////////////////
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
{"hexsha": "cdfd76f6b02af5079e3da590c68e74eb831401fb", "size": 9474, "ext": "cc", "lang": "C++", "max_stars_repo_path": "gazebo/sensors/WirelessReceiver_TEST.cc", "max_stars_repo_name": "horikawahorikawa/gazebo-PR", "max_stars_repo_head_hexsha": "bdb99bec78b8adb95f8855057aae7c028f9e78c2", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gazebo/sensors/WirelessReceiver_TEST.cc", "max_issues_repo_name": "horikawahorikawa/gazebo-PR", "max_issues_repo_head_hexsha": "bdb99bec78b8adb95f8855057aae7c028f9e78c2", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gazebo/sensors/WirelessReceiver_TEST.cc", "max_forks_repo_name": "horikawahorikawa/gazebo-PR", "max_forks_repo_head_hexsha": "bdb99bec78b8adb95f8855057aae7c028f9e78c2", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7919463087, "max_line_length": 80, "alphanum_fraction": 0.6512560692, "num_tokens": 2061}
|
// Copyright Abel Sinkovics (abel@sinkovics.hu) 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <mpllibs/metamonad/try_c.hpp>
#include <mpllibs/metamonad/exception.hpp>
#include <mpllibs/metamonad/tmp_tag.hpp>
#include <mpllibs/metamonad/tmp_value.hpp>
#include <mpllibs/metamonad/syntax.hpp>
#include <mpllibs/metamonad/is_tag.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/mpl/int.hpp>
#include <boost/mpl/equal_to.hpp>
#include <boost/mpl/always.hpp>
#include <boost/mpl/plus.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/type_traits/is_same.hpp>
#include "common.hpp"
using mpllibs::metamonad::tmp_tag;
using mpllibs::metamonad::tmp_value;
namespace
{
struct tag1 : tmp_tag<tag1> {};
struct e1 : tmp_value<e1, tag1> {};
struct tag2 : tmp_tag<tag2> {};
struct e2 : tmp_value<e2, tag2> {};
}
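// tag1/tag2 are distinct metamonad tags, and e1/e2 are values tagged with
// them; the test cases below throw these as exceptions and match them in
// catch_ clauses via is_tag.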
BOOST_AUTO_TEST_CASE(test_try_c)
{
using boost::mpl::equal_to;
using boost::mpl::tag;
using boost::mpl::plus;
using boost::mpl::true_;
using mpllibs::metamonad::exception;
using mpllibs::metamonad::try_c;
using mpllibs::metamonad::catch_;
using mpllibs::metamonad::catch_c;
using mpllibs::metamonad::syntax;
using mpllibs::metamonad::is_tag;
// test_no_exception
BOOST_MPL_ASSERT((
equal_to<
int13,
try_c<int13, catch_<e, syntax<is_tag<tag1, e> >, syntax<int11> > >::type
>
));
// test_no_exception_no_catch
BOOST_MPL_ASSERT((equal_to<int2, try_c<plus<int1, int1> >::type>));
// test_catch
BOOST_MPL_ASSERT((
equal_to<
int11,
try_c<
exception<e1>,
catch_<e, syntax<is_tag<tag1, e> >, syntax<int11> >
>::type
>
));
// test_exception_value_in_catch
BOOST_MPL_ASSERT((
equal_to<
int13,
try_c<
exception<int13>,
catch_<e, syntax<is_tag<tag<int13>::type, e> >, syntax<e> >
>::type
>
));
// test_not_catching
BOOST_MPL_ASSERT((
equal_to<
exception<int13>,
try_c<
exception<int13>,
catch_<e, syntax<is_tag<tag2, e> >, syntax<int11> >
>::type
>
));
// test_second_catch
BOOST_MPL_ASSERT((
equal_to<
int13,
try_c<
exception<e2>,
catch_<e, syntax<is_tag<tag1, e> >, syntax<int11> >,
catch_<e, syntax<is_tag<tag2, e> >, syntax<int13> >
>::type
>
));
// test_exception_propagation
BOOST_MPL_ASSERT((
equal_to<
exception<int2>,
try_c<
plus<int1, exception<int2> >,
catch_<e, syntax<is_tag<tag1, e> >, syntax<int11> >
>::type
>
));
// test_rethrowing
BOOST_MPL_ASSERT((
equal_to<
exception<int13>,
try_c<
exception<e1>,
catch_<e, syntax<true_>, syntax<exception<int13> > >
>::type
>
));
// test_rethrowing_not_caught_by_next_catch
BOOST_MPL_ASSERT((
equal_to<
exception<int13>,
try_c<
exception<e1>,
catch_<e, syntax<true_>, syntax<exception<int13> > >,
catch_<e, syntax<true_>, syntax<exception<int13> > >
>::type
>
));
}
|
{"hexsha": "8414e10acfabc0165a2d11dab4be5029d32b63be", "size": 3190, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/metamonad/test/try_c.cpp", "max_stars_repo_name": "sabel83/mpllibs", "max_stars_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 70.0, "max_stars_repo_stars_event_min_datetime": "2015-01-15T09:05:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T15:49:31.000Z", "max_issues_repo_path": "libs/metamonad/test/try_c.cpp", "max_issues_repo_name": "sabel83/mpllibs", "max_issues_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2015-06-18T19:25:34.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-13T19:49:51.000Z", "max_forks_repo_path": "libs/metamonad/test/try_c.cpp", "max_forks_repo_name": "sabel83/mpllibs", "max_forks_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2015-07-10T08:18:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T07:17:57.000Z", "avg_line_length": 22.3076923077, "max_line_length": 78, "alphanum_fraction": 0.6329153605, "num_tokens": 947}
|
(*
Copyright 2016 Luxembourg University
Copyright 2017 Luxembourg University
This file is part of Velisarios.
Velisarios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Velisarios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velisarios. If not, see <http://www.gnu.org/licenses/>.
Authors: Vincent Rahli
Ivana Vukotic
*)
Require Export PBFTreceived_prepare_like1.
Require Export PBFTprepare_like2request_data.
Require Export PBFTnew_view_util.
Require Export LearnAndKnows.
Require Export PBFTlearns_or_knows_pl_nv.
Section PBFTlearns_or_knows_nv.
Local Open Scope eo.
Local Open Scope proc.
Context { pbft_context : PBFTcontext }.
Context { pbft_auth : PBFTauth }.
Context { pbft_keys : PBFTinitial_keys }.
Context { pbft_hash : PBFThash }.
Definition pbft_nv_data := NewView.
Definition pbft_nv_info := NewView.
Definition pbft_nv_knows (d : pbft_nv_data) (s : PBFTstate) : Prop :=
new_view_in_log d (view_change_state s).
Definition pbft_nv_data2main_auth_data (d : pbft_nv_data) : AuthenticatedData :=
new_view2main_auth_data d.
Definition pbft_nv_verify (eo : EventOrdering) (e : Event) (d : pbft_nv_data) : bool :=
verify_list_auth_data (loc e) (keys e) (new_view2auth_data d).
Definition pbft_nv_data2loc (d : pbft_nv_data) : Rep :=
new_view2sender d.
Lemma pbft_nv_no_initial_memory :
forall n d, ~ pbft_nv_knows d (Process.sm_state (PBFTreplicaSM n)).
Proof.
introv h; simpl in h; auto.
Qed.
Instance PBFT_I_LearnAndKnow_nv : LearnAndKnow 2.
Proof.
exact (MkLearnAndKnow
2
pbft_nv_data
pbft_nv_info
(fun x => x)
PBFTstate
pbft_nv_knows
pbft_nv_data2loc
pbft_nv_data2main_auth_data
pbft_nv_verify
_ _ pbft_nv_no_initial_memory).
Defined.
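(* The definitions below (knows2, knew2, learns2, learned2, learns_or_knows2)
   simply instantiate the generic LearnAndKnow predicates at instance 2,
   i.e. for new-view messages, fixing all of the PBFT type-class parameters. *)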
Definition knows2
{eo : EventOrdering}
(e : Event)
(d : @lak_data PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok 2 PBFT_I_LearnAndKnow_nv) :=
@knows PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok 2 PBFT_I_LearnAndKnow_nv eo e d.
Definition knew2
{eo : EventOrdering}
(e : Event)
(d : @lak_data PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok 2 PBFT_I_LearnAndKnow_nv) :=
@knew PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok 2 PBFT_I_LearnAndKnow_nv eo e d.
Definition learns2
{eo : EventOrdering}
(e : Event)
(d : @lak_data PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok 2 PBFT_I_LearnAndKnow_nv) :=
@learns PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok PBFT_I_ContainedAuthData 2 PBFT_I_LearnAndKnow_nv eo e d.
Definition learned2
{eo : EventOrdering}
(e : Event)
(d : @lak_data PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok 2 PBFT_I_LearnAndKnow_nv) :=
@learned PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok PBFT_I_ContainedAuthData 2 PBFT_I_LearnAndKnow_nv eo e d.
Definition learns_or_knows2 (eo : EventOrdering) :=
@learns_or_knows PBFT_I_Data PBFT_I_Node PBFT_I_Key PBFT_I_Msg PBFT_I_Quorum PBFT_I_AuthTok PBFT_I_ContainedAuthData 2 PBFT_I_LearnAndKnow_nv eo.
Lemma knows_pl_nv_implies_knows_nv :
forall (eo : EventOrdering) (e : Event) pl,
knows1 e pl
->
exists nv pi,
knows2 e nv
/\ In pi (mergeP (new_view2cert nv))
/\ prepare_like_in_prepared_info pl pi.
Proof.
introv k.
unfold knows1, knows in k; simpl in *; exrepnd.
unfold pbft_pl_nv_knows in *; exrepnd.
exists nv pi; dands; auto.
exists mem n; dands; auto.
Qed.
Lemma pl_in_nv_in_get_contained_authenticated_data_implies :
forall nv pl pi trig,
In (pbft_nv_data2main_auth_data nv) (get_contained_authenticated_data trig)
-> In pi (mergeP (new_view2cert nv))
-> prepare_like_in_prepared_info pl pi
-> In (pbft_pl_data2main_auth_data pl) (get_contained_authenticated_data trig).
Proof.
introv i j k.
destruct trig, nv, v; simpl in *; repndors; tcsp; ginv;
try (complete (destruct r; simpl in *; ginv));
try (complete (destruct p; simpl in *; ginv));
try (complete (destruct c; simpl in *; ginv)).
- destruct p, b; simpl in *.
unfold pre_prepare2auth_data_req in *; simpl in *.
allrw in_map_iff; exrepnd; ginv.
destruct x; simpl in *; ginv.
- destruct v; simpl in *; repndors; ginv.
allrw in_app_iff; repndors.
+ assert False; tcsp.
induction C; simpl in *; tcsp.
repndors; tcsp.
destruct a1; ginv.
+ assert False; tcsp.
induction P; simpl in *; tcsp.
repndors; tcsp.
* destruct a1; simpl in *.
destruct prepared_info_pre_prepare; simpl in *; ginv.
* destruct a1; simpl in *.
destruct prepared_info_pre_prepare, b; simpl in *; ginv.
allrw in_app_iff; repndors; tcsp.
{
unfold pre_prepare2auth_data_req in *; simpl in *.
allrw in_map_iff; exrepnd.
destruct x; ginv.
}
{
induction prepared_info_prepares; simpl in *; tcsp.
repndors; tcsp.
destruct a2; ginv.
}
- destruct v; simpl in *; repndors; ginv.
+ unfold mergeP in *; simpl in *.
allrw in_app_iff.
right; left.
induction V; simpl in *; tcsp.
allrw in_app_iff; repndors; tcsp.
left.
clear IHV.
destruct a0, v0; simpl in *.
allrw in_app_iff.
right; right.
unfold view_change2prep in j; simpl in j.
induction P; simpl in *; tcsp.
allrw in_app_iff; repndors; subst; tcsp.
clear IHP.
destruct pi, pl; simpl in *; subst; tcsp.
right; left; right.
induction prepared_info_prepares; simpl in *; repndors; subst; tcsp.
+ allrw in_app_iff.
repndors.
* assert False; tcsp.
clear j k.
induction V; simpl in *; tcsp.
allrw in_app_iff; repndors; tcsp.
clear IHV.
destruct a1, v1; simpl in *; repndors; tcsp; ginv.
allrw in_app_iff.
repndors; tcsp.
{
induction C; simpl in *; tcsp.
destruct a2, b; simpl in *; repndors; tcsp; ginv.
}
{
induction P; simpl in *; tcsp.
destruct a2; simpl in *; repndors; tcsp; ginv.
destruct prepared_info_pre_prepare; ginv.
allrw in_app_iff; repndors; tcsp.
- destruct prepared_info_pre_prepare, b; simpl in *; ginv.
unfold pre_prepare2auth_data_req in i; simpl in *.
allrw in_map_iff; exrepnd; destruct x; ginv.
- induction prepared_info_prepares; simpl in *; repndors; tcsp.
destruct a2; ginv.
}
* assert False; tcsp; clear j k.
induction OP; simpl in *; tcsp.
destruct a1, b; repndors; simpl in *; ginv.
allrw in_app_iff; repndors; tcsp.
unfold pre_prepare2auth_data_req in *; simpl in *.
allrw in_map_iff; exrepnd; destruct x; ginv.
* assert False; tcsp; clear j k.
induction NP; simpl in *; tcsp.
destruct a1, b; repndors; simpl in *; ginv.
allrw in_app_iff; repndors; tcsp.
unfold pre_prepare2auth_data_req in *; simpl in *.
allrw in_map_iff; exrepnd; destruct x; ginv.
Qed.
Hint Resolve pl_in_nv_in_get_contained_authenticated_data_implies : pbft.
Lemma pbft_nv_verify_implies_pbft_pl_verify :
forall (eo : EventOrdering) (e : Event) nv pl pi,
pbft_nv_verify eo e nv = true
-> In pi (mergeP (new_view2cert nv))
-> prepare_like_in_prepared_info pl pi
-> pbft_pl_verify eo e pl = true.
Proof.
introv verif i j.
destruct nv, v; simpl in *.
unfold pbft_nv_verify in verif; simpl in *; smash_pbft.
allrw verify_list_auth_data_app; smash_pbft.
clear verif verif1 verif2.
induction V; simpl in *; tcsp.
allrw verify_list_auth_data_app; smash_pbft.
allrw in_app_iff; repndors; tcsp;[].
clear verif1 IHV.
destruct a0, v0; simpl in *; smash_pbft.
clear verif0.
unfold view_change2prep in *; simpl in *.
allrw verify_list_auth_data_app; smash_pbft.
clear verif1.
induction P; simpl in *; tcsp; smash_pbft.
allrw verify_list_auth_data_app; smash_pbft.
repndors; subst; tcsp; smash_pbft.
clear IHP verif1 verif2.
destruct pi, pl; simpl in *; subst; tcsp.
clear verif0.
induction prepared_info_prepares; simpl in *; tcsp; smash_pbft.
repndors; subst; tcsp.
Qed.
Hint Resolve pbft_nv_verify_implies_pbft_pl_verify : pbft.
Lemma auth_data_in_trigger_nv_implies_pl :
forall (eo : EventOrdering) (e : Event) nv pl pi,
auth_data_in_trigger (pbft_nv_data2main_auth_data nv) e
-> In pi (mergeP (new_view2cert nv))
-> prepare_like_in_prepared_info pl pi
-> auth_data_in_trigger (pbft_pl_data2main_auth_data pl) e.
Proof.
introv ad i prep.
unfold auth_data_in_trigger in *.
remember (trigger e) as trig; destruct trig; auto; eauto 3 with pbft.
Qed.
Hint Resolve auth_data_in_trigger_nv_implies_pl : pbft.
Lemma in_bind_op_list_nv_implies_pl :
forall (eo : EventOrdering) (e : Event) nv pl pi,
In (pbft_nv_data2main_auth_data nv) (bind_op_list PBFTget_contained_auth_data (trigger e))
-> In pi (mergeP (new_view2cert nv))
-> prepare_like_in_prepared_info pl pi
-> In (pbft_pl_data2main_auth_data pl) (bind_op_list PBFTget_contained_auth_data (trigger e)).
Proof.
introv ad i prep.
allrw in_bind_op_list_as_auth_data_in_trigger; eauto 3 with pbft.
Qed.
Hint Resolve in_bind_op_list_nv_implies_pl : pbft.
Lemma learns_nv_implies_learns_pl :
forall (eo : EventOrdering) (e : Event) nv pl pi,
learns2 e nv
-> In pi (mergeP (new_view2cert nv))
-> prepare_like_in_prepared_info pl pi
-> learns0 e pl.
Proof.
introv ln i j.
unfold learns2, learns0, learns in *; simpl in *.
exrepnd.
exists n; dands; auto; eauto 3 with pbft eo.
Qed.
Hint Resolve learns_nv_implies_learns_pl : pbft.
Lemma learned_nv_implies_learned_pl :
forall (eo : EventOrdering) (e : Event) nv pl pi,
learned2 e nv
-> In pi (mergeP (new_view2cert nv))
-> prepare_like_in_prepared_info pl pi
-> learned0 e pl.
Proof.
introv ln i j.
unfold learned2, learned0, learned in *; exrepnd.
eexists; dands; eauto; eauto 3 with pbft;
try (eapply learns_nv_implies_learns_pl; eauto).
Qed.
Hint Resolve learned_nv_implies_learned_pl : pbft.
Lemma verify_new_view_implies_pbft_nv_verify :
forall (eo : EventOrdering) (e : Event) n nv ks,
loc e = PBFTreplica n
-> keys e = ks
-> verify_new_view n ks nv = true
-> pbft_nv_verify eo e nv = true.
Proof.
introv eqloc eqks verif.
unfold verify_new_view in verif.
unfold pbft_nv_verify.
allrw; auto.
Qed.
Hint Resolve verify_new_view_implies_pbft_nv_verify : pbft.
Lemma pbft_nv_verify_verify_new_view :
forall (eo : EventOrdering) (e : Event) (n : Rep) (nv : NewView) (ks : local_key_map),
loc e = PBFTreplica n
-> keys e = ks
-> pbft_nv_verify eo e nv = true
-> verify_new_view n ks nv = true.
Proof.
introv eqloc eqks verif.
unfold verify_new_view.
unfold pbft_nv_verify in verif.
rw <- eqloc; subst; auto.
Qed.
Lemma pbft_nv_data2main_auth_data_in_trigger_implies :
forall (eo : EventOrdering) (e : Event) nv,
auth_data_in_trigger (pbft_nv_data2main_auth_data nv) e
-> event_triggered_by_message e (PBFTnew_view nv).
Proof.
introv j; unfold event_triggered_by_message.
unfold auth_data_in_trigger in j.
remember (trigger e) as trig; destruct trig; simpl in *; tcsp.
destruct m; simpl in *; repndors; ginv; tcsp;
try (complete (destruct r, nv; simpl in *; ginv));
try (complete (destruct p, nv; simpl in *; ginv));
try (complete (destruct c, nv; simpl in *; ginv)).
- destruct p, nv, b; simpl in *.
unfold pre_prepare2auth_data_req in j; simpl in j.
apply in_map_iff in j; exrepnd.
destruct x; simpl in *; ginv.
- destruct nv, v, v; simpl in *; repndors; ginv.
allrw in_app_iff; repndors.
+ clear Heqtrig; assert False; tcsp.
induction C; simpl in *; repndors; tcsp.
destruct a1; simpl in *; ginv.
+ clear Heqtrig; assert False; tcsp.
induction P; simpl in *; allrw in_app_iff; repndors; tcsp.
* destruct a1, prepared_info_pre_prepare; simpl in *; ginv.
* destruct a1, prepared_info_pre_prepare, b; simpl in *.
unfold pre_prepare2auth_data_req in *; simpl in *.
allrw in_map_iff; exrepnd.
destruct x; ginv.
* destruct a1; simpl in *.
clear IHP.
induction prepared_info_prepares; simpl in *; repndors; tcsp.
destruct a1; ginv.
- destruct nv, v, v; simpl in *.
repndors; ginv.
allrw in_app_iff; repndors.
+ clear Heqtrig; assert False; tcsp.
induction V; simpl in *; repndors; tcsp.
allrw in_app_iff; repndors; tcsp.
destruct a1, v1; simpl in *; repndors; ginv.
allrw in_app_iff; repndors.
* clear IHV.
induction C; simpl in *; repndors; tcsp.
destruct a2; simpl in *; ginv.
* clear IHV.
induction P; simpl in *; allrw in_app_iff; repndors; tcsp.
{ destruct a2, prepared_info_pre_prepare; simpl in *; ginv. }
{ destruct a2, prepared_info_pre_prepare, b; simpl in *.
unfold pre_prepare2auth_data_req in *; simpl in *.
allrw in_map_iff; exrepnd.
destruct x; ginv. }
{ destruct a2; simpl in *.
clear IHP.
induction prepared_info_prepares; simpl in *; repndors; tcsp.
destruct a2; ginv. }
+ clear Heqtrig; assert False; tcsp.
induction OP; simpl in *; repndors; tcsp.
* destruct a1; ginv.
* destruct a1, b; allrw in_app_iff; repndors; ginv; tcsp.
unfold pre_prepare2auth_data_req in j; allrw in_map_iff; simpl in *.
exrepnd; destruct x; simpl in *; ginv.
+ clear Heqtrig; assert False; tcsp.
induction NP; simpl in *; repndors; tcsp.
* destruct a1; ginv.
* destruct a1, b; allrw in_app_iff; repndors; ginv; tcsp.
unfold pre_prepare2auth_data_req in j; allrw in_map_iff; simpl in *.
exrepnd; destruct x; simpl in *; ginv.
Qed.
End PBFTlearns_or_knows_nv.
Ltac custom_prove_knows :=
eapply implies_knows; eauto; autorewrite with pbft eo; simpl; auto;
unfold pbft_nv_knows;
eauto 4 with pbft.
Ltac custom_smash_pbft_ind ind :=
let base_tac := (fun _ => smash_pbft3) in
let ind_tac := (fun _ => eauto 4 with pbft; try (complete custom_prove_knows)) in
smash_pbft_ind_tac ind base_tac ind_tac.
Hint Resolve pl_in_nv_in_get_contained_authenticated_data_implies : pbft.
Hint Resolve pbft_nv_verify_implies_pbft_pl_verify : pbft.
Hint Resolve learns_nv_implies_learns_pl : pbft.
Hint Resolve learned_nv_implies_learned_pl : pbft.
Hint Resolve verify_new_view_implies_pbft_nv_verify : pbft.
Hint Resolve auth_data_in_trigger_nv_implies_pl : pbft.
Hint Resolve in_bind_op_list_nv_implies_pl : pbft.
|
{"author": "vrahli", "repo": "Velisarios", "sha": "6fb353b18610cd79210755fcc90123536c367aaa", "save_path": "github-repos/coq/vrahli-Velisarios", "path": "github-repos/coq/vrahli-Velisarios/Velisarios-6fb353b18610cd79210755fcc90123536c367aaa/PBFT/PBFTlearns_or_knows_nv.v"}
|
"""This model creates the ModelInterface for PyTorch."""
from contextlib import suppress
from copy import deepcopy
from typing import Optional, Tuple
import torch
import numpy as np
from ..helpers.model_interface import ModelInterface
from ..helpers import utils
class PyTorchModel(ModelInterface):
"""Interface for torch models."""
def __init__(self, model, channel_first):
super().__init__(model, channel_first)
    def predict(self, x, softmax_act=False, **kwargs):
        """Predict on the given input."""
        if self.model.training:
            raise AttributeError("Torch model needs to be in evaluation mode.")
        device = kwargs.get("device", None)
        grad = kwargs.get("grad", False)
        # Disable gradient tracking unless the caller explicitly requests it.
        grad_context = torch.no_grad() if not grad else suppress()
        with grad_context:
            pred = self.model(torch.Tensor(x).to(device))
            if softmax_act:
                # Softmax over the last (class) dimension; omitting dim is
                # deprecated in PyTorch.
                pred = torch.nn.Softmax(dim=-1)(pred)
            if pred.requires_grad:
                return pred.detach().cpu().numpy()
            return pred.cpu().numpy()
    def shape_input(self, x: np.ndarray, shape: Tuple[int, ...],
                    channel_first: Optional[bool] = None):
        """
        Reshape input into the model's expected input shape.
        channel_first: Explicitly state if x is formatted channel first (optional).
        """
        if channel_first is None:
            channel_first = utils.infer_channel_first(x)
        x = x.reshape(1, *shape)
        if self.channel_first:
            return utils.make_channel_first(x, channel_first)
        raise ValueError("Channel first order expected for a torch model.")
def get_model(self):
"""Get the original torch/tf model."""
return self.model
def state_dict(self):
"""Get a dictionary of the model's learnable parameters."""
return self.model.state_dict()
def get_random_layer_generator(self, order: str = "top_down", seed: int = 42):
"""
In every iteration yields a copy of the model with one additional layer's parameters randomized.
Set order to top_down for cascading randomization.
Set order to independent for independent randomization.
"""
original_parameters = self.state_dict()
random_layer_model = deepcopy(self.model)
modules = [
l
for l in random_layer_model.named_modules()
if (hasattr(l[1], "reset_parameters"))
]
if order == "top_down":
modules = modules[::-1]
for module in modules:
if order == "independent":
random_layer_model.load_state_dict(original_parameters)
torch.manual_seed(seed=seed + 1)
module[1].reset_parameters()
yield module[0], random_layer_model
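# A minimal usage sketch (hypothetical two-layer network, not part of this module):
#
#     net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
#     net.eval()  # predict() raises AttributeError while the model is in training mode
#     wrapped = PyTorchModel(net, channel_first=True)
#     probs = wrapped.predict(np.random.rand(4, 1, 28, 28), softmax_act=True)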
|
{"hexsha": "05ea239f48efd6005dc9763f35e3a69f4b812612", "size": 2824, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantus/helpers/pytorch_model.py", "max_stars_repo_name": "sebastian-lapuschkin/Quantus", "max_stars_repo_head_hexsha": "c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "quantus/helpers/pytorch_model.py", "max_issues_repo_name": "sebastian-lapuschkin/Quantus", "max_issues_repo_head_hexsha": "c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quantus/helpers/pytorch_model.py", "max_forks_repo_name": "sebastian-lapuschkin/Quantus", "max_forks_repo_head_hexsha": "c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8641975309, "max_line_length": 104, "alphanum_fraction": 0.6260623229, "include": true, "reason": "import numpy", "num_tokens": 569}
|
struct InconsistentVRep{T, AT, D<:Polyhedra.FullDim} <: VRepresentation{T}
points::Polyhedra.PointsHull{T, AT, D}
rays::Polyhedra.RaysHull{T, AT, D}
function InconsistentVRep{T, AT, D}(d::Polyhedra.FullDim, points, lines,
rays) where {T, AT, D}
new{T, AT, D}(Polyhedra.PointsHull(d, points),
Polyhedra.RaysHull(d, lines, rays))
end
end
Polyhedra.FullDim(rep::InconsistentVRep) = Polyhedra.FullDim(rep.points)
Polyhedra.dualtype(::Type{InconsistentVRep{T, AT, D}}, ::Type{AT}) where {T, AT, D} = Polyhedra.Intersection{T, AT, D}
Polyhedra.hvectortype(::Type{<:InconsistentVRep{T, AT}}) where {T, AT} = AT
Polyhedra.vvectortype(::Type{<:InconsistentVRep{T, AT}}) where {T, AT} = AT
Polyhedra.similar_type(PT::Type{<:InconsistentVRep}, d::Polyhedra.FullDim, ::Type{T}) where {T} = InconsistentVRep{T, Polyhedra.similar_type(Polyhedra.hvectortype(PT), d, T), typeof(d)}
Polyhedra.fulltype(::Type{InconsistentVRep{T, AT, D}}) where {T, AT, D} = InconsistentVRep{T, AT, D}
#Polyhedra.@subrepelem InconsistentVRep SymPoint points
Polyhedra.@subrepelem InconsistentVRep Point points
Polyhedra.@subrepelem InconsistentVRep Line rays
Polyhedra.@subrepelem InconsistentVRep Ray rays
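# Test helper: as the name suggests, this V-representation is allowed to be
# inconsistent; it delegates storage to separate points and rays hulls so the
# test suite can exercise generic VRepresentation code paths.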
|
{"hexsha": "d58b775c65d94c9906e1156a68b6d9cee3f21e0d", "size": 1258, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/inconsistentvrep.jl", "max_stars_repo_name": "mforets/Polyhedra.jl", "max_stars_repo_head_hexsha": "58013aefabcc3bbc71832ae8d9af495c613ab21a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 126, "max_stars_repo_stars_event_min_datetime": "2016-12-20T13:19:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T20:40:12.000Z", "max_issues_repo_path": "test/inconsistentvrep.jl", "max_issues_repo_name": "mforets/Polyhedra.jl", "max_issues_repo_head_hexsha": "58013aefabcc3bbc71832ae8d9af495c613ab21a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 271, "max_issues_repo_issues_event_min_datetime": "2016-12-20T21:07:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T00:40:27.000Z", "max_forks_repo_path": "test/inconsistentvrep.jl", "max_forks_repo_name": "mforets/Polyhedra.jl", "max_forks_repo_head_hexsha": "58013aefabcc3bbc71832ae8d9af495c613ab21a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2016-12-20T20:23:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T01:12:07.000Z", "avg_line_length": 62.9, "max_line_length": 185, "alphanum_fraction": 0.7027027027, "num_tokens": 416}
|
import pandas as pd
import numpy as np
import copy as cp
import math
def compute_functions(index):
invocations = pd.read_csv(f"/media/soufianej/Transcend/Traces/Azure/invocations/invocations_per_function_md.anon.d0{index}.csv", index_col=False)
exec_times = pd.read_csv(f"/media/soufianej/Transcend/Traces/Azure/duration/function_durations_percentiles.anon.d0{index}.csv", index_col=False)
memory_alloc= pd.read_csv(f"/media/soufianej/Transcend/Traces/Azure/memory/app_memory_percentiles.anon.d0{index}.csv", index_col=False)
    # Merging invocations, execution time and memory alloc into a single dataframe
invoc_exec_merged = pd.merge(invocations, exec_times, on=["HashOwner","HashApp","HashFunction"])
functions = pd.merge(invoc_exec_merged, memory_alloc, on=["HashOwner","HashApp"], how="left")
functions.reset_index(inplace=True,drop=True)
    # Filtering out unwanted duplicates: drop every function whose hash appears
    # more than once
    try:
        unwanted = pd.concat(g for _, g in functions.groupby("HashFunction") if len(g) > 1).sort_values(by="HashFunction")
        functions = functions[~functions.HashFunction.isin(unwanted.HashFunction)]
        functions.reset_index(inplace=True, drop=True)
    except ValueError:
        print("no duplicates")
functions.to_csv(f'/media/soufianej/Transcend/Traces/Azure/OpenDCServerless/Functions/FunctionsDay{index}.csv', index=False)
return functions
# Filtering out mostly idle functions (low invocation frequency)
def filter_functions(functions, threshold, inverse):
to_drop = []
for index,row in enumerate(functions.values, start=0):
count = 0
for i in range(1440):
if (row[i+4] != 0):
count += 1
if (count / 1440 > threshold):
if inverse is True:
to_drop.append(index)
else:
continue
else:
if inverse is True:
continue
else:
to_drop.append(index)
functions.drop(to_drop, axis='index', inplace=True)
functions.reset_index(drop=True, inplace=True)
# Splitting the task into smaller tasks than can be executed in parallel
def split_tasks(functions):
lookup_df = functions.groupby('HashApp').count()
apps = []
for i in lookup_df.iterrows():
apps.append(i[0])
size = len(apps)
sample = math.ceil(size / 8) # task size divided by number of threads
start = 0
end = sample
for i in range(8):
if (end > size-1):
end = size
print('functions split: start = {0}, end = {1}'.format(start, end))
print(apps[start:end])
functions[functions['HashApp'].isin(apps[start:end])].to_csv(r'/media/soufianej/Transcend/Traces/Azure/OpenDCServerless/Samples/sample1/tasks/task{0}.csv'.format(i), index=False)
start += sample
end += sample
def compute_popularity(functions):
popularity_series = functions.groupby('HashApp').HashApp.count()
    popularity_df = popularity_series.to_frame()
    popularity_df = popularity_df.rename(columns={'HashApp': 'Count'})
popularity_set = np.array([])
for app_hash in popularity_series.keys():
app_functions = functions.loc[functions['HashApp'] == app_hash]
popularity_index = app_functions.apply(lambda row: row['1' : '1440'].sum(),axis=1).sum()
popularity_set = np.append(popularity_set, popularity_index)
popularity_df = popularity_df.assign(popularity = popularity_set)
popularity_df.to_csv(r'/media/soufianej/Transcend/Traces/Azure/OpenDCServerless/HashApps/HashAppsDay7.csv')
def sample_apps(n,hash_apps,seed):
return hash_apps.loc[((hash_apps['popularity'] > 1500) & (hash_apps['popularity'] < 2500))].sample(n=n, random_state=np.random.RandomState(seed), axis=0)
def get_functions(functions, hash_apps):
sample = functions.loc[functions['HashApp'].isin(hash_apps.HashApp)]
sample = sample.assign(id=0)
# Setting indexes
index_df = sample.groupby('HashFunction').count()
for index, func_key in enumerate(index_df.iterrows(), start=1):
sample.loc[sample['HashFunction'] == func_key[0], 'id'] = index
return sample
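# A minimal sketch of the intended pipeline order (an assumption inferred from
# the definitions above; the hard-coded paths are machine-specific):
#
#     functions = compute_functions(1)
#     filter_functions(functions, threshold=0.5, inverse=False)  # drop mostly idle functions
#     compute_popularity(functions)
#     hash_apps = pd.read_csv('HashAppsDay7.csv')
#     sample = get_functions(functions, sample_apps(10, hash_apps, seed=42))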
|
{"hexsha": "319b6a93ad5e05f4ea1ef32f1193fc2f83a197b3", "size": 4311, "ext": "py", "lang": "Python", "max_stars_repo_path": "trace-generation/AzurePreProcessing.py", "max_stars_repo_name": "atlarge-research/opendc-serverless", "max_stars_repo_head_hexsha": "11c772bcb3fc7a7c2590d6ed6ab979b78cb9fec9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-22T20:56:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-22T20:56:55.000Z", "max_issues_repo_path": "trace-generation/AzurePreProcessing.py", "max_issues_repo_name": "atlarge-research/opendc-serverless", "max_issues_repo_head_hexsha": "11c772bcb3fc7a7c2590d6ed6ab979b78cb9fec9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trace-generation/AzurePreProcessing.py", "max_forks_repo_name": "atlarge-research/opendc-serverless", "max_forks_repo_head_hexsha": "11c772bcb3fc7a7c2590d6ed6ab979b78cb9fec9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9083969466, "max_line_length": 186, "alphanum_fraction": 0.6798886569, "include": true, "reason": "import numpy", "num_tokens": 1048}
|
import enum
from typing import Optional
import numpy as np
import feast
class BqType(enum.Enum):
"""
    BigQuery enum types. Used when dealing with types in any BigQuery operation.
"""
FLOAT = 0
STRING = 1
DATETIME = 2
TIMESTAMP = 3
ARRAY = 4
BOOL = 5
STRUCT = 6
INTEGER = 7
class FeatureType:
"""
    Feature value type. Used to define data types in Feature Tables.
    Adapted from Feast.
"""
    @staticmethod
    def from_str_to_bq_type(type_in_str: str, format: Optional[str] = None) -> BqType:
        """Converts the string containing the type (JSONSchema types are supported)
        to a BigQuery equivalent type. Returns a BqType enum value.
        Keyword arguments:
        type_in_str -- the type name (JSONSchema)
        format -- optional JSONSchema format qualifier (e.g. "date-time")
        """
if type_in_str == "number":
return BqType.FLOAT
elif type_in_str == "string":
            if format == "date-time":
return BqType.TIMESTAMP
return BqType.STRING
elif type_in_str == "array":
return BqType.ARRAY
elif type_in_str == "object":
return BqType.STRUCT
elif type_in_str == "boolean":
return BqType.BOOL
elif type_in_str == "integer":
return BqType.INTEGER
else:
raise ValueError("Unsupported type in bigquery")
    @staticmethod
    def from_str_to_pd_type(type_in_str: str, format: Optional[str] = None) -> np.dtype:
        """Converts the string containing the type (JSONSchema types are supported)
        to a pandas/numpy equivalent type. Returns a numpy type object.
        Keyword arguments:
        type_in_str -- the type name (JSONSchema)
        format -- optional JSONSchema format qualifier (e.g. "date-time")
        """
if type_in_str == "number":
return np.float64
elif type_in_str == "string":
            if format == "date-time":
                # a concrete value is returned (rather than the bare class) so
                # that its dtype carries the millisecond datetime64 unit
                return np.datetime64('2002-02-03T13:56:03.172')
return np.unicode_
elif type_in_str == "array":
return np.ndarray
elif type_in_str == "object":
return np.bytes_
elif type_in_str == "boolean":
return np.bool_
elif type_in_str == "integer":
return np.int32
else:
raise ValueError("Unsupported type in pandas")
    @staticmethod
    def from_str_to_feature_type(type_in_str: str) -> feast.ValueType:
        """Converts the string containing the type (JSONSchema types are supported)
        to a Feast ValueType to be used in the FeatureStore. Returns a ValueType from the feast sdk.
        Keyword arguments:
        type_in_str -- the type name (JSONSchema)
        """
if type_in_str == "number":
return feast.ValueType.DOUBLE
elif type_in_str == "string":
return feast.ValueType.STRING
elif type_in_str in ["array", "object"]:
return feast.ValueType.BYTES
elif type_in_str == "boolean":
return feast.ValueType.BOOL
elif type_in_str == "integer":
return feast.ValueType.INT32
else:
return feast.ValueType.UNKNOWN
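# A minimal usage sketch (return values follow directly from the branches above):
#
#     FeatureType.from_str_to_bq_type("string", format="date-time")  # -> BqType.TIMESTAMP
#     FeatureType.from_str_to_pd_type("integer")                     # -> np.int32
#     FeatureType.from_str_to_feature_type("number")                 # -> feast.ValueType.DOUBLE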
|
{"hexsha": "d1f456aa0e717d96a6684183de1672b0b27d83f5", "size": 3095, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/elemeno_ai_sdk/features/types.py", "max_stars_repo_name": "elemeno-ai/elemeno-ai-sdk", "max_stars_repo_head_hexsha": "2737b2c9c575119bd40efcd8c5e91a98f9d8b6b1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-16T13:38:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T13:38:39.000Z", "max_issues_repo_path": "src/elemeno_ai_sdk/features/types.py", "max_issues_repo_name": "elemeno-ai/elemeno-ai-sdk", "max_issues_repo_head_hexsha": "2737b2c9c575119bd40efcd8c5e91a98f9d8b6b1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/elemeno_ai_sdk/features/types.py", "max_forks_repo_name": "elemeno-ai/elemeno-ai-sdk", "max_forks_repo_head_hexsha": "2737b2c9c575119bd40efcd8c5e91a98f9d8b6b1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6413043478, "max_line_length": 95, "alphanum_fraction": 0.6019386107, "include": true, "reason": "import numpy", "num_tokens": 703}
|
module millerlocal
use common_types, only: flux_surface_type
implicit none
public :: init_local_defaults
public :: read_local_parameters
public :: communicate_parameters_multibox
public :: get_local_geo
public :: finish_local_geo
public :: local
private
integer :: nzed_local
real :: rhoc, rmaj, shift
real :: kappa, kapprim
real :: tri, triprim
real :: betaprim, betadbprim
real :: qinp, shat, d2qdr2
real :: rgeo
real :: dpsidrho, d2psidr2, dpsidrho_psi0
real :: psitor_lcfs
real :: rhotor, drhotordrho, dIdrho, dI
real :: rhoc0
logical :: write_profile_variation, read_profile_variation
logical :: load_psi0_variables
integer :: nz, nz2pi
real :: bi, dqdr, d2Idr2
real, dimension(:), allocatable :: grho, bmag, grho_psi0, bmag_psi0, gradpar
real, dimension(:), allocatable :: gradpararc, arc
real, dimension(:), allocatable :: gds2, gds21, gds22
real, dimension(:), allocatable :: gds23, gds24
real, dimension(:), allocatable :: gbdrift0, gbdrift
real, dimension(:), allocatable :: cvdrift0, cvdrift
real, dimension(:), allocatable :: d2Rdth2, d2Zdth2, d2Rdrdth, d2Zdrdth
real, dimension(:), allocatable :: gpsi, dBdrho, d2Bdrdth
real, dimension(:), allocatable :: dgradpardrho, dgradparBdrho, dBdth, gradparb
real, dimension(:), allocatable :: dcvdrift0drho, dgbdrift0drho, theta
real, dimension(:), allocatable :: varthet, dvarthdr, gradrho_gradthet, cross, d2varthdr2
real, dimension(:), allocatable :: gradthet2, gradalph_gradthet, gradrho_gradalph, gradalph2
real, dimension(:), allocatable :: d2Bdr2, d2Rdr2, d2Zdr2, drz, drzdth
real, dimension(:), allocatable :: d2Rdr2dth, d2Zdr2dth, d2gpsidr2, dcrossdr
real, dimension(:), allocatable :: dcvdriftdrho, dgbdriftdrho
real, dimension(:), allocatable :: dgds2dr, dgds21dr, dgds22dr
real, dimension(:), allocatable :: dgr2dr, dgpsi2dr
real, dimension(:), allocatable :: dgrgt, dgt2, dgagr, dgagt, dga2
real, dimension(:, :), allocatable :: Rr, Zr
real, dimension(:), allocatable :: jacrho, delthet, djacdrho, djacrdrho
real, dimension(:), allocatable :: d2jacdr2, dRdrho, dZdrho, dRdth, dZdth
real, dimension(:), allocatable :: d2R, d2Z
type(flux_surface_type) :: local
logical :: defaults_initialized = .false.
contains
subroutine init_local_defaults
implicit none
if (defaults_initialized) return
defaults_initialized = .true.
nzed_local = 128
rhoc = 0.5
rhoc0 = 0.5
rmaj = 3.0
rgeo = 3.0
qinp = 1.4
shat = 0.8
shift = 0.0
kappa = 0.0
kapprim = 0.0
tri = 0.0
triprim = 0.0
! betaprim = -(4pi/Bref^2)*d(ptot)/drho
betaprim = 0.0
! betadbprim = -(4pi/Bref^2)*d^2ptot/drho^2
betadbprim = 0.0
d2qdr2 = 0.0
d2psidr2 = 0.0
read_profile_variation = .false.
write_profile_variation = .false.
load_psi0_variables = .true.
! only needed for sfincs when not using
! geo info from file
rhotor = rhoc
psitor_lcfs = 1.0
drhotordrho = 1.0
end subroutine init_local_defaults
subroutine read_local_parameters(nzed, nzgrid, local_out)
use file_utils, only: input_unit_exist
use common_types, only: flux_surface_type
implicit none
type(flux_surface_type), intent(out) :: local_out
integer, intent(in) :: nzed, nzgrid
real :: dum
integer :: in_file, np, j
logical :: exist
namelist /millergeo_parameters/ rhoc, rmaj, shift, qinp, shat, &
kappa, kapprim, tri, triprim, rgeo, betaprim, &
betadbprim, d2qdr2, d2psidr2, &
nzed_local, read_profile_variation, write_profile_variation
call init_local_defaults
in_file = input_unit_exist("millergeo_parameters", exist)
if (exist) read (unit=in_file, nml=millergeo_parameters)
local%rhoc = rhoc
local%rmaj = rmaj
local%rgeo = rgeo
local%shift = shift
local%kappa = kappa
local%kapprim = kapprim
local%qinp = qinp
local%shat = shat
local%tri = tri
local%triprim = triprim
local%betaprim = betaprim
local%betadbprim = betadbprim
local%d2qdr2 = d2qdr2
local%d2psidr2 = d2psidr2
local%zed0_fac = 1.0
! following two variables are not inputs
local%dr = 1.e-3 * (rhoc / rmaj)
local%rhotor = rhotor
local%psitor_lcfs = psitor_lcfs
local%drhotordrho = drhotordrho
local%dpsitordrho = 0.0
local%d2psitordrho2 = 0.0
      ! the next three variables are for multibox simulations
! with radial variation
local%rhoc_psi0 = rhoc
local%qinp_psi0 = qinp
local%shat_psi0 = shat
! first get nperiod corresponding to input number of grid points
nz2pi = nzed / 2
np = (nzgrid - nz2pi) / nzed + 1
      ! now switch to using the (possibly higher resolution) local grid
nz2pi = nzed_local / 2
! this is the equivalent of nzgrid on the local grid
nz = nz2pi + nzed_local * (np - 1)
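      ! worked example: nzed = 64 and nzgrid = 96 give nz2pi = 32 and np = 2;
      ! with nzed_local = 128, the local grid then has nz2pi = 64 and
      ! nz = 64 + 128 * (2 - 1) = 192 points on either side of zero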
! initialize to zero
! will be overwritten if reading in from file
! only relevant for profile variation tests
      ! these need to be deallocated somewhere
allocate (d2R(-nz:nz))
allocate (d2Z(-nz:nz))
allocate (bmag_psi0(-nz:nz))
allocate (grho_psi0(-nz:nz))
d2R = 0.; d2Z = 0.; dI = 0.
if (read_profile_variation) then
open (1002, file='RZ.in', status='old')
read (1002, '(12e13.5)') rhoc0, dI, qinp, shat, d2qdr2, kappa, kapprim, tri, triprim, &
betaprim, betadbprim, dpsidrho_psi0
do j = -nz, nz
read (1002, '(5e13.5)') dum, d2R(j), d2Z(j), bmag_psi0(j), grho_psi0(j)
end do
close (1002)
local%qinp = qinp + shat * qinp / rhoc0 * (local%rhoc - rhoc0) &
+ 0.5 * (local%rhoc - rhoc0)**2 * d2qdr2
local%shat = (local%rhoc / local%qinp) &
* (shat * qinp / rhoc0 + (local%rhoc - rhoc0) * d2qdr2)
local%kappa = kappa + kapprim * (local%rhoc - rhoc0)
local%tri = tri + triprim * (local%rhoc - rhoc0)
local%betaprim = betaprim + betadbprim * (local%rhoc - rhoc0)
local%rhoc_psi0 = rhoc0
local%qinp_psi0 = qinp
local%shat_psi0 = shat
load_psi0_variables = .false.
end if
local_out = local
end subroutine read_local_parameters
subroutine communicate_parameters_multibox(surf, drl, drr)
use mp, only: job, scope, mp_abort, &
crossdomprocs, subprocs, &
send, receive
use job_manage, only: njobs
use common_types, only: flux_surface_type
implicit none
real, optional, intent(in) :: drl, drr
type(flux_surface_type), intent(inout) :: surf
real :: lrhoc, lqinp, lshat, lkappa, ltri, lbetaprim
real :: rrhoc, rqinp, rshat, rkappa, rtri, rbetaprim
real :: dqdr
real :: rhoc_psi0, qinp_psi0, shat_psi0
!FLAG DSO - I think d2psidrho2 needs to be communicated, but
! I'm unsure what quantity needs to be updated
if (job == 1) then
dqdr = local%shat * local%qinp / local%rhoc
lrhoc = local%rhoc + drl
lqinp = local%qinp + drl * dqdr + 0.5 * drl**2 * local%d2qdr2
lshat = (lrhoc / lqinp) * (dqdr + drl * local%d2qdr2)
lkappa = kappa + drl * kapprim
ltri = tri + drl * triprim
lbetaprim = betaprim + drl * betadbprim
rrhoc = local%rhoc + drr
rqinp = local%qinp + drr * dqdr + 0.5 * drr**2 * local%d2qdr2
rshat = (rrhoc / rqinp) * (dqdr + drr * local%d2qdr2)
rkappa = kappa + drr * kapprim
rtri = tri + drr * triprim
rbetaprim = betaprim + drr * betadbprim
end if
call scope(crossdomprocs)
if (job == 1) then
call send(lrhoc, 0, 120)
call send(lqinp, 0, 121)
call send(lshat, 0, 122)
call send(lkappa, 0, 123)
call send(ltri, 0, 124)
call send(lbetaprim, 0, 125)
call send(local%rhoc, 0, 126)
call send(d2R, 0, 127)
call send(d2Z, 0, 128)
call send(dIdrho, 0, 129)
call send(rhoc, 0, 130)
call send(qinp, 0, 131)
call send(shat, 0, 132)
call send(dpsidrho, 0, 133)
call send(bmag, 0, 134)
call send(grho, 0, 135)
call send(rrhoc, njobs - 1, 220)
call send(rqinp, njobs - 1, 221)
call send(rshat, njobs - 1, 222)
call send(rkappa, njobs - 1, 223)
call send(rtri, njobs - 1, 224)
call send(rbetaprim, njobs - 1, 225)
call send(local%rhoc, njobs - 1, 226)
call send(d2R, njobs - 1, 227)
call send(d2Z, njobs - 1, 228)
call send(dIdrho, njobs - 1, 229)
call send(rhoc, njobs - 1, 230)
call send(qinp, njobs - 1, 231)
call send(shat, njobs - 1, 232)
call send(dpsidrho, njobs - 1, 233)
call send(bmag, njobs - 1, 234)
call send(grho, njobs - 1, 235)
rhoc_psi0 = rhoc
qinp_psi0 = qinp
shat_psi0 = shat
local%rhoc_psi0 = rhoc_psi0
local%qinp_psi0 = qinp_psi0
local%shat_psi0 = shat_psi0
elseif (job == 0) then
call receive(rhoc, 1, 120)
call receive(qinp, 1, 121)
call receive(shat, 1, 122)
call receive(kappa, 1, 123)
call receive(tri, 1, 124)
call receive(betaprim, 1, 125)
call receive(rhoc0, 1, 126)
call receive(d2R, 1, 127)
call receive(d2Z, 1, 128)
call receive(dI, 1, 129)
call receive(rhoc_psi0, 1, 130)
call receive(qinp_psi0, 1, 131)
call receive(shat_psi0, 1, 132)
call receive(dpsidrho_psi0, 1, 133)
call receive(bmag_psi0, 1, 134)
call receive(grho_psi0, 1, 135)
local%rhoc = rhoc
local%qinp = qinp
local%shat = shat
local%kappa = kappa
local%tri = tri
local%betaprim = betaprim
local%rhoc_psi0 = rhoc_psi0
local%qinp_psi0 = qinp_psi0
local%shat_psi0 = shat_psi0
load_psi0_variables = .false.
elseif (job == njobs - 1) then
call receive(rhoc, 1, 220)
call receive(qinp, 1, 221)
call receive(shat, 1, 222)
call receive(kappa, 1, 223)
call receive(tri, 1, 224)
call receive(betaprim, 1, 225)
call receive(rhoc0, 1, 226)
call receive(d2R, 1, 227)
call receive(d2Z, 1, 228)
call receive(dI, 1, 229)
call receive(rhoc_psi0, 1, 230)
call receive(qinp_psi0, 1, 231)
call receive(shat_psi0, 1, 232)
call receive(dpsidrho_psi0, 1, 233)
call receive(bmag_psi0, 1, 234)
call receive(grho_psi0, 1, 235)
local%rhoc = rhoc
local%qinp = qinp
local%shat = shat
local%kappa = kappa
local%tri = tri
local%betaprim = betaprim
local%rhoc_psi0 = rhoc_psi0
local%qinp_psi0 = qinp_psi0
local%shat_psi0 = shat_psi0
load_psi0_variables = .false.
end if
surf%rhoc = local%rhoc
surf%qinp = local%qinp
surf%shat = local%shat
surf%kappa = local%kappa
surf%tri = local%tri
surf%betaprim = local%betaprim
surf%rhoc_psi0 = rhoc_psi0
surf%qinp_psi0 = qinp_psi0
surf%shat_psi0 = shat_psi0
call scope(subprocs)
end subroutine communicate_parameters_multibox
subroutine get_local_geo(nzed, nzgrid, zed_in, zed_equal_arc, &
dpsidrho_out, dpsidrho_psi0_out, dIdrho_out, grho_out, &
bmag_out, bmag_psi0_out, &
gds2_out, gds21_out, gds22_out, gds23_out, gds24_out, gradpar_out, &
gbdrift0_out, gbdrift_out, cvdrift0_out, cvdrift_out, &
dBdrho_out, d2Bdrdth_out, dgradpardrho_out, &
btor_out, rmajor_out, &
dcvdrift0drho_out, dcvdriftdrho_out, &
dgbdrift0drho_out, dgbdriftdrho_out, &
dgds2dr_out, dgds21dr_out, &
dgds22dr_out, djacdrho_out)
use constants, only: pi
use splines, only: geo_spline
use file_utils, only: run_name
implicit none
integer, intent(in) :: nzed, nzgrid
real, dimension(-nzgrid:), intent(in) :: zed_in
logical, intent(in) :: zed_equal_arc
real, intent(out) :: dpsidrho_out, dpsidrho_psi0_out, dIdrho_out
real, dimension(-nzgrid:), intent(out) :: grho_out, &
bmag_out, bmag_psi0_out, &
gds2_out, gds21_out, gds22_out, gds23_out, gds24_out, &
gradpar_out, gbdrift0_out, &
gbdrift_out, cvdrift0_out, cvdrift_out, &
dBdrho_out, d2Bdrdth_out, dgradpardrho_out, &
btor_out, rmajor_out, &
dcvdrift0drho_out, dcvdriftdrho_out, &
dgbdrift0drho_out, dgbdriftdrho_out, &
dgds2dr_out, dgds21dr_out, &
dgds22dr_out, &
djacdrho_out
integer :: nr, np
integer :: i, j
real :: rmin, dum
real, dimension(3) :: dr
real, allocatable, dimension(:) :: zed_arc
character(len=512) :: filename
! number of grid points used for radial derivatives
nr = 3
! first get nperiod corresponding to input number of grid points
nz2pi = nzed / 2
np = (nzgrid - nz2pi) / nzed + 1
      ! now switch to using the (possibly higher resolution) local grid
nz2pi = nzed_local / 2
! this is the equivalent of nzgrid on the local grid
nz = nz2pi + nzed_local * (np - 1)
call allocate_arrays(nr, nz)
dqdr = local%shat * local%qinp / local%rhoc
dr(1) = -local%dr
dr(2) = 0.
dr(3) = local%dr
do j = -nz, nz
theta(j) = j * (2 * np - 1) * pi / real(nz)
do i = 1, 3
rmin = local%rhoc + dr(i)
Rr(i, j) = Rpos(rmin, theta(j), j)
Zr(i, j) = Zpos(rmin, theta(j), j)
end do
end do
if (.not. allocated(delthet)) allocate (delthet(-nz:nz - 1))
! get delta theta as a function of theta
delthet = theta(-nz + 1:) - theta(:nz - 1)
! get dR/drho and dZ/drho
call get_drho(Rr, dRdrho)
call get_drho(Zr, dZdrho)
! get dR/dtheta and dZ/dtheta
call get_dthet(Rr(2, :), dRdth)
call get_dthet(Zr(2, :), dZdth)
! get second derivatives of R and Z with respect to theta
call get_d2dthet2(Rr(2, :), d2Rdth2)
call get_d2dthet2(Zr(2, :), d2Zdth2)
! get mixed theta and rho derivatives of R and Z
call get_dthet(dRdrho, d2Rdrdth)
call get_dthet(dZdrho, d2Zdrdth)
! get the Jacobian of the transformation from (rho,theta,zeta) to (R,Z,zeta)
! this is what I call jacr or jacrho in following comments
! as opposed to jacobian, which is for transformation from (psi,theta,zeta) to (R,Z,zeta)
call get_jacrho
! theta_integrate returns integral from 0 -> 2*pi
! note that dpsidrho here is an intermediary
! that requires manipulation to get final dpsidrho
call theta_integrate(jacrho(-nz2pi:nz2pi) / Rr(2, -nz2pi:nz2pi)**2, dpsidrho)
dpsidrho = dpsidrho / (2.*pi)
! get dpsinorm/drho = (I/2*pi*q)*int_0^{2*pi} dthet jacrho/R**2
! if using input.profiles, we are given
! dpsitordrho and must use it to compute rgeo
if (abs(local%dpsitordrho) > epsilon(0.)) then
local%rgeo = local%dpsitordrho / dpsidrho
dpsidrho = local%dpsitordrho / local%qinp
local%d2psidr2 = (local%d2psitordrho2 - local%dpsitordrho * local%shat / local%rhoc) &
/ local%qinp
! I=Btor*R is a flux function
! bi = I/(Btor(psi,theta of Rgeo)*a) = Rgeo/a
bi = local%rgeo
else
! otherwise, we are given rgeo
! and must use it to compute dpsidrho
! I=Btor*R is a flux function
! bi = I/(Btor(psi,theta of Rgeo)*a) = Rgeo/a
bi = local%rgeo + dI * (rhoc - rhoc0)
dpsidrho = dpsidrho * bi / local%qinp
end if
! ! get dpsinorm/drho
! call get_dpsidrho (dpsidrho)
! get |grad rho| and |grad psi|
call get_gradrho(dpsidrho, grho)
! quantity needed in calculation of dI/drho and djacrho/drho
drz = (dRdrho * dRdth + dZdrho * dZdth) / jacrho
call get_dthet(drz, drzdth)
! get dI/drho
call get_dIdrho(dpsidrho, grho, dIdrho)
dIdrho_out = dIdrho
! get djacobian/drho*dpsi/drho and djacr/drho
call get_djacdrho(dpsidrho, dIdrho, grho)
! get d2R/drho2 and d2Z/drho2
call get_d2RZdr2
d2R = d2Rdr2
d2Z = d2Zdr2
! get theta derivative of d2R/drho2 and d2Z/drho2
call get_dthet(d2Rdr2, d2Rdr2dth)
call get_dthet(d2Zdr2, d2Zdr2dth)
! calculate the magnitude of B (normalized by B(psi,theta corresponding to Rgeo))
! B/B0 = sqrt(I**2 + |grad psi|**2)/R
bmag = sqrt(bi**2 + gpsi**2) / Rr(2, :)
! the next block stores the reference (psi0) quantities needed for multibox runs
if (load_psi0_variables) then
dpsidrho_psi0 = dpsidrho
bmag_psi0 = bmag
grho_psi0 = grho
end if
if (write_profile_variation) then
open (1002, file='RZ.out', status='unknown')
write (1002, '(12e13.5)') local%rhoc, dIdrho, local%qinp, local%shat, local%d2qdr2, &
local%kappa, local%kapprim, &
local%tri, local%triprim, &
local%betaprim, local%betadbprim, dpsidrho
do j = -nz, nz
write (1002, '(5e13.5)') theta(j), d2Rdr2(j), d2Zdr2(j), bmag(j), grho(j)
end do
close (1002)
end if
! get dB/dtheta
call get_dthet(bmag, dbdth)
! calculate b . grad theta
gradpar = dpsidrho / (bmag * jacrho)
! b . grad B
gradparb = gradpar * dBdth
! get d|grad rho|^2/drho and d|grad psi|^2/drho
call get_dgr2dr(dpsidrho, grho)
! get dB/drho and d2B/drho2
call get_dBdrho(bmag, dIdrho)
! d (b . grad theta) / drho
dgradpardrho = -gradpar * (dBdrho / bmag + djacdrho / jacrho)
! get d/dtheta (dB/drho)
call get_dthet(dBdrho, d2Bdrdth)
! d(b . grad B)/drho
dgradparBdrho = dgradpardrho * dBdth + gradpar * d2Bdrdth
! obtain varthet = (I/(q*(dpsi/dr)) * int_0^theta dtheta' jacrho/R^2
call get_varthet(dpsidrho)
! obtain dvarthet/drho
call get_dvarthdr(dpsidrho, dIdrho)
! get |grad theta|^2, grad r . grad theta, grad alpha . grad theta, etc.
call get_graddotgrad(dpsidrho, grho)
call get_gds(gds2, gds21, gds22, gds23, gds24)
! this is (grad alpha x B) . grad theta
cross = dpsidrho * (gradrho_gradalph * gradalph_gradthet - gradalph2 * gradrho_gradthet)
! note that the definitions of gbdrift, gbdrift0, dgbdriftdr and dgbdrift0dr
! are such that they get multiplied by vperp2, not mu. This is in contrast to
! Michael's GS3 notes
! this is bhat/B x (grad B/B) . grad alpha * 2 * dpsiN/drho
gbdrift = 2.0 * (-dBdrho + cross * dBdth * dpsidrho / bmag**2) / bmag
! this is bhat/B x (bhat . grad bhat) . grad alpha * 2 * dpsiN/drho
! this is assuming betaprim = 4*pi*ptot/B0^2 * (-d ln ptot / drho)
cvdrift = (gbdrift + 2.0 * local%betaprim / bmag**2)
! this is 2 *(bhat/B x grad B / B) . (grad q) * dpsiN/drho / (bhat . grad B)
! same as usual GS2 definition once bhat . grad B is added in below
cvdrift0 = -2.*bi * dqdr / bmag**2
! this is 2*dpsiN/drho times the rho derivative (bhat/B x grad B / B) . (grad q)
dcvdrift0drho = cvdrift0 * (dgradparbdrho + gradparb * (dIdrho / bi - 2.*dBdrho / bmag - local%d2psidr2 / dpsidrho)) &
- 2.*bi * gradparb * local%d2qdr2 / bmag**2
! this is 2*dpsiN/drho/B times the rho derivative of (bhat x gradB/B) . (grad q)
! note that there's an extra factor of 1/B that's not expanded due to v_perp -> mu
dgbdrift0drho = cvdrift0 * (dgradparbdrho + gradparb * (dIdrho / bi - dBdrho / bmag - local%d2psidr2 / dpsidrho)) &
- 2.*bi * gradparb * local%d2qdr2 / bmag**2
cvdrift0 = cvdrift0 * gradparb
! this is 2 * dpsiN/drho * (bhat/B x gradB/B) . (grad q)
gbdrift0 = cvdrift0
! get d^2I/drho^2 and d^2 Jac / dr^2
call get_d2Idr2_d2jacdr2(grho, dIdrho)
! get d^2vartheta/drho^2
call get_d2varthdr2(dpsidrho, dIdrho)
! get d2B/drho^2
call get_d2Bdr2(bmag, dIdrho)
! get d/dr [(grad alpha x B) . grad theta]
call get_dcrossdr(dpsidrho, dIdrho, grho)
! dgbdriftdrho is d/drho [(bhat/B x (grad B) . grad alpha) * 2 * dpsiN/drho] / B
! note that there's an extra factor of 1/B that's not expanded due to v_perp -> mu
dgbdriftdrho = 2.0 * (local%d2psidr2 * dBdrho / dpsidrho - d2Bdr2 &
+ dpsidrho * (dcrossdr * dBdth + cross * (d2Bdrdth - 2.*dBdth * dBdrho / bmag)) / bmag**2) / bmag
! dcvdriftdrho is d/drho (bhat/B x [bhat . grad bhat] . grad alpha) * 2 * dpsiN/drho
dcvdriftdrho = dgbdriftdrho - gbdrift * dBdrho / bmag &
+ 2.0 * local%betadbprim / bmag**2 - 4.0 * local%betaprim * dBdrho / bmag**3 &
- 2.0 * local%betaprim * local%d2psidr2 / dpsidrho
!the next two sets of lines are corrections needed for the side boxes in a multibox simulation
!gbdrift = gbdrift *(dpsidrho_psi0/dpsidrho)*(bmag/bmag_psi0)
!gbdrift0 = gbdrift0*(dpsidrho_psi0/dpsidrho)*(bmag/bmag_psi0)
gbdrift = gbdrift * (dpsidrho_psi0 / dpsidrho)
gbdrift0 = gbdrift0 * (dpsidrho_psi0 / dpsidrho)
cvdrift = cvdrift * (dpsidrho_psi0 / dpsidrho)
cvdrift0 = cvdrift0 * (dpsidrho_psi0 / dpsidrho)
!dgbdriftdrho = dgbdriftdrho *(dpsidrho_psi0/dpsidrho)*(bmag/bmag_psi0)
!dgbdrift0drho = dgbdrift0drho*(dpsidrho_psi0/dpsidrho)*(bmag/bmag_psi0)
dgbdriftdrho = dgbdriftdrho * (dpsidrho_psi0 / dpsidrho)
dgbdrift0drho = dgbdrift0drho * (dpsidrho_psi0 / dpsidrho)
dcvdriftdrho = dcvdriftdrho * (dpsidrho_psi0 / dpsidrho)
dcvdrift0drho = dcvdrift0drho * (dpsidrho_psi0 / dpsidrho)
! interpolate here
if (zed_equal_arc) then
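! build the equal-arc coordinate: gradpararc is the theta-independent value
! of b . grad (arc), and arc(theta) is the indefinite integral mapping theta
! onto the equal-arc grid used for the interpolations below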
call theta_integrate(1./gradpar, dum)
gradpararc = (theta(nz) - theta(-nz)) / ((2 * np - 1) * dum)
call theta_integrate_indef(gradpararc / gradpar, arc)
allocate (zed_arc(-nzgrid:nzgrid))
call geo_spline(arc, theta, zed_in, zed_arc)
call geo_spline(theta, grho_psi0, zed_arc, grho_out) !grho is used to normalize fluxes
call geo_spline(theta, bmag, zed_arc, bmag_out)
call geo_spline(theta, bmag_psi0, zed_arc, bmag_psi0_out)
call geo_spline(theta, gds2, zed_arc, gds2_out)
call geo_spline(theta, gds21, zed_arc, gds21_out)
call geo_spline(theta, gds22, zed_arc, gds22_out)
call geo_spline(theta, gds23, zed_arc, gds23_out)
call geo_spline(theta, gds24, zed_arc, gds24_out)
call geo_spline(theta, gradpararc, zed_arc, gradpar_out)
call geo_spline(theta, gbdrift, zed_arc, gbdrift_out)
call geo_spline(theta, gbdrift0, zed_arc, gbdrift0_out)
call geo_spline(theta, cvdrift, zed_arc, cvdrift_out)
call geo_spline(theta, cvdrift0, zed_arc, cvdrift0_out)
call geo_spline(theta, dBdrho, zed_arc, dBdrho_out)
call geo_spline(theta, d2Bdrdth, zed_arc, d2Bdrdth_out)
call geo_spline(theta, dgradpardrho, zed_arc, dgradpardrho_out)
call geo_spline(theta, Rr(2, :), zed_arc, rmajor_out)
call geo_spline(theta, dcvdriftdrho, zed_arc, dcvdriftdrho_out)
call geo_spline(theta, dgbdriftdrho, zed_arc, dgbdriftdrho_out)
call geo_spline(theta, dcvdrift0drho, zed_arc, dcvdrift0drho_out)
call geo_spline(theta, dgbdrift0drho, zed_arc, dgbdrift0drho_out)
call geo_spline(theta, dgds2dr, zed_arc, dgds2dr_out)
call geo_spline(theta, dgds21dr, zed_arc, dgds21dr_out)
call geo_spline(theta, dgds22dr, zed_arc, dgds22dr_out)
call geo_spline(theta, djacdrho / dpsidrho, zed_arc, djacdrho_out)
deallocate (zed_arc)
else
call geo_spline(theta, grho_psi0, zed_in, grho_out) !grho is used to normalize fluxes
call geo_spline(theta, bmag, zed_in, bmag_out)
call geo_spline(theta, bmag_psi0, zed_in, bmag_psi0_out)
call geo_spline(theta, gds2, zed_in, gds2_out)
call geo_spline(theta, gds21, zed_in, gds21_out)
call geo_spline(theta, gds22, zed_in, gds22_out)
call geo_spline(theta, gds23, zed_in, gds23_out)
call geo_spline(theta, gds24, zed_in, gds24_out)
call geo_spline(theta, gradpar, zed_in, gradpar_out)
call geo_spline(theta, gbdrift, zed_in, gbdrift_out)
call geo_spline(theta, gbdrift0, zed_in, gbdrift0_out)
call geo_spline(theta, cvdrift, zed_in, cvdrift_out)
call geo_spline(theta, cvdrift0, zed_in, cvdrift0_out)
call geo_spline(theta, dBdrho, zed_in, dBdrho_out)
call geo_spline(theta, d2Bdrdth, zed_in, d2Bdrdth_out)
call geo_spline(theta, dgradpardrho, zed_in, dgradpardrho_out)
call geo_spline(theta, Rr(2, :), zed_in, rmajor_out)
call geo_spline(theta, dcvdriftdrho, zed_in, dcvdriftdrho_out)
call geo_spline(theta, dgbdriftdrho, zed_in, dgbdriftdrho_out)
call geo_spline(theta, dcvdrift0drho, zed_in, dcvdrift0drho_out)
call geo_spline(theta, dgbdrift0drho, zed_in, dgbdrift0drho_out)
call geo_spline(theta, dgds2dr, zed_in, dgds2dr_out)
call geo_spline(theta, dgds21dr, zed_in, dgds21dr_out)
call geo_spline(theta, dgds22dr, zed_in, dgds22dr_out)
call geo_spline(theta, djacdrho / dpsidrho, zed_in, djacdrho_out)
end if
! get the toroidal component of the magnetic field
! btor = B_toroidal/Bref = I/R Bref = rgeo * a/R
btor_out = bi / rmajor_out
dpsidrho_out = dpsidrho
dpsidrho_psi0_out = dpsidrho_psi0
filename = "millerlocal."//trim(run_name)//".input"
open (1002, file=trim(filename), status='unknown')
write (1002, '(5a16)') '#1.rhoc', '2.rmaj', '3.rgeo', '4.shift', '5.qinp'
write (1002, '(5e16.8)') local%rhoc, local%rmaj, local%rgeo, local%shift, local%qinp
write (1002, *)
write (1002, '(5a16)') '#6.shat', '7.kappa', '8.kapprim', '9.tri', '10.triprim'
write (1002, '(5e16.8)') local%shat, local%kappa, local%kapprim, local%tri, local%triprim
write (1002, *)
write (1002, '(5a16)') '11.betaprim', '12.dpsitordrho', '13.rhotor', &
'14.drhotordrho', '15.d2qdr2'
write (1002, '(5e16.8)') local%betaprim, local%dpsitordrho, local%rhotor, &
local%drhotordrho, local%d2qdr2
write (1002, *)
write (1002, '(3a16)') '16.d2psidr2', '17.betadbprim', '18.psitor_lcfs'
write (1002, '(3e16.8)') local%d2psidr2, local%betadbprim, local%psitor_lcfs
close (1002)
filename = "millerlocal."//trim(run_name)//".output"
open (1001, file=trim(filename), status='unknown')
write (1001, '(a9,e18.9,a11,e18.9,a11,e18.9)') '#dI/dr: ', dIdrho, 'd2I/dr2: ', d2Idr2, 'dpsi/dr: ', dpsidrho
write (1001, '(58a15)') '#1.theta', '2.R', '3.dR/dr', '4.d2Rdr2', '5.dR/dth', &
'6.d2Rdrdth', '7.dZ/dr', '8.d2Zdr2', '9.dZ/dth', '10.d2Zdrdth', &
'11.bmag', '12.dBdr', '13.d2Bdr2', '14.dB/dth', '15.d2Bdrdth', &
'16.varthet', '17.dvarthdr', '18.d2varthdr2', '19.jacr', '20.djacrdr', &
'21.djacdrho', '22.d2jacdr2', '23.grho2', '24.dgr2dr', '25.gthet2', &
'26.dgt2', '27.grgthet', '28.dgrgt', '29.galphgth', '30.dgagt', &
'31.grgalph', '32.dgagr', '33.galph2', '34.dga2', '35.cross', &
'36.dcrossdr', '37.gbdrift0', '38.dgbdrift0', '39.cvdrift0', '40.dcvdrift0', &
'41.gbdrift', '42.dgbdrift', '43.cvdrift', '44.dcvdrift', '45.drzdth', &
'46.gradpar', '47.dgpardr', '48.gradparB', '49.dgparBdr', '50.gds2', &
'51.dgds2dr', '52.gds21', '53.dgds21dr', '54.gds22', '55.dgds22dr', &
'56.gds23', '57.gds24', '58.Zr'
do i = -nz, nz
write (1001, '(58e18.9)') theta(i), Rr(2, i), dRdrho(i), d2Rdr2(i), dRdth(i), &
d2Rdrdth(i), dZdrho(i), d2Zdr2(i), dZdth(i), d2Zdrdth(i), &
bmag(i), dBdrho(i), d2Bdr2(i), dBdth(i), d2Bdrdth(i), &
varthet(i), dvarthdr(i), d2varthdr2(i), jacrho(i), djacrdrho(i), &
djacdrho(i), d2jacdr2(i), grho(i)**2, dgr2dr(i), gradthet2(i), &
dgt2(i), gradrho_gradthet(i), dgrgt(i), gradalph_gradthet(i), dgagt(i), &
gradrho_gradalph(i), dgagr(i), gradalph2(i), dga2(i), cross(i), &
dcrossdr(i), gbdrift0(i), dgbdrift0drho(i), cvdrift0(i), dcvdrift0drho(i), &
gbdrift(i), dgbdriftdrho(i), cvdrift(i), dcvdriftdrho(i), drzdth(i), &
gradpar(i), dgradpardrho(i), gradparB(i), dgradparBdrho(i), gds2(i), &
dgds2dr(i), gds21(i), dgds21dr(i), gds22(i), dgds22dr(i), gds23(i), gds24(i), &
Zr(2, i)
end do
close (1001)
defaults_initialized = .false.
end subroutine get_local_geo
subroutine allocate_arrays(nr, nz)
implicit none
integer, intent(in) :: nr, nz
! periodic quantities can be computed on 2*pi grid and replicated
allocate (grho(-nz:nz), bmag(-nz:nz), gradpar(-nz:nz))
allocate (gds2(-nz:nz), gds21(-nz:nz), gds22(-nz:nz), gds23(-nz:nz), gds24(-nz:nz))
allocate (gbdrift0(-nz:nz), gbdrift(-nz:nz))
allocate (cvdrift0(-nz:nz), cvdrift(-nz:nz))
allocate (Rr(nr, -nz:nz), Zr(nr, -nz:nz))
allocate (jacrho(-nz:nz), djacdrho(-nz:nz), djacrdrho(-nz:nz), d2jacdr2(-nz:nz))
allocate (d2Rdrdth(-nz:nz), d2Zdrdth(-nz:nz), gpsi(-nz:nz))
allocate (dBdrho(-nz:nz), dgradpardrho(-nz:nz))
allocate (d2Bdrdth(-nz:nz), dgradparBdrho(-nz:nz), dBdth(-nz:nz), gradparb(-nz:nz))
allocate (dcvdrift0drho(-nz:nz), dgbdrift0drho(-nz:nz))
allocate (theta(-nz:nz))
allocate (gradpararc(-nz:nz))
allocate (arc(-nz:nz))
allocate (dRdrho(-nz:nz), dZdrho(-nz:nz), dRdth(-nz:nz), dZdth(-nz:nz))
allocate (gradrho_gradthet(-nz:nz), gradthet2(-nz:nz), dgr2dr(-nz:nz), dgpsi2dr(-nz:nz))
allocate (dgrgt(-nz:nz), dgt2(-nz:nz), dgagr(-nz:nz), dgagt(-nz:nz), dga2(-nz:nz))
allocate (d2Rdr2(-nz:nz), d2Zdr2(-nz:nz), d2Bdr2(-nz:nz))
allocate (drz(-nz:nz), drzdth(-nz:nz), d2Rdr2dth(-nz:nz), d2Zdr2dth(-nz:nz))
allocate (d2Rdth2(-nz:nz), d2Zdth2(-nz:nz))
allocate (d2gpsidr2(-nz:nz))
allocate (gradalph_gradthet(-nz:nz), gradalph2(-nz:nz), gradrho_gradalph(-nz:nz))
allocate (dgds2dr(-nz:nz), dgds21dr(-nz:nz))
allocate (dgds22dr(-nz:nz))
allocate (dcvdriftdrho(-nz:nz), dgbdriftdrho(-nz:nz))
allocate (varthet(-nz:nz), dvarthdr(-nz:nz), d2varthdr2(-nz:nz))
allocate (cross(-nz:nz))
allocate (dcrossdr(-nz:nz))
end subroutine allocate_arrays
subroutine deallocate_arrays
implicit none
deallocate (grho)
deallocate (bmag)
deallocate (gradpar)
deallocate (gds2)
deallocate (gds21)
deallocate (gds22)
deallocate (gds23)
deallocate (gds24)
deallocate (gbdrift0)
deallocate (gbdrift)
deallocate (cvdrift0)
deallocate (cvdrift)
deallocate (Rr, Zr)
deallocate (jacrho, djacdrho, djacrdrho, d2jacdr2)
deallocate (d2Rdrdth, d2Zdrdth, gpsi)
deallocate (dBdrho, dgradpardrho)
deallocate (d2Bdrdth, dgradparBdrho, dBdth, gradparb)
deallocate (dcvdrift0drho, dgbdrift0drho)
deallocate (theta)
deallocate (gradpararc)
deallocate (arc)
deallocate (dRdrho, dZdrho, dRdth, dZdth)
deallocate (gradrho_gradthet, gradthet2, dgr2dr, dgpsi2dr)
deallocate (dgrgt, dgt2, dgagr, dgagt, dga2)
deallocate (d2Rdr2, d2Zdr2, d2Bdr2)
deallocate (drz, drzdth, d2Rdr2dth, d2Zdr2dth)
deallocate (d2Rdth2, d2Zdth2)
deallocate (d2gpsidr2)
deallocate (gradalph_gradthet, gradalph2, gradrho_gradalph)
deallocate (dgds2dr, dgds21dr)
deallocate (dgds22dr)
deallocate (dcvdriftdrho, dgbdriftdrho)
deallocate (varthet, dvarthdr, d2varthdr2)
deallocate (cross)
deallocate (dcrossdr)
deallocate (d2R, d2Z)
if (allocated(delthet)) deallocate (delthet)
if (allocated(bmag_psi0)) deallocate (bmag_psi0)
if (allocated(grho_psi0)) deallocate (grho_psi0)
end subroutine deallocate_arrays
subroutine finish_local_geo
implicit none
call deallocate_arrays
end subroutine finish_local_geo
! takes in f(r), with r given at three radial locations
! and returns df = df/dr at the middle radius
subroutine get_drho(f, df)
implicit none
real, dimension(:, -nz:), intent(in) :: f
real, dimension(-nz:), intent(out) :: df
df = 0.5 * (f(3, :) - f(1, :)) / local%dr
end subroutine get_drho
! given function f(theta), calculate second derivative
! of f with respect to theta
! second order accurate, with equal grid spacing assumed
subroutine get_d2dthet2(f, d2f)
implicit none
real, dimension(-nz:), intent(in) :: f
real, dimension(-nz:), intent(out) :: d2f
! assuming equal grid spacing in theta here
d2f(-nz + 1:nz - 1) = (f(:nz - 2) - 2.*f(-nz + 1:nz - 1) + f(-nz + 2:)) / delthet(-nz + 1:nz - 1)**2
! use periodicity at boundary
d2f(-nz) = (f(nz - 1) - 2.*f(-nz) + f(-nz + 1)) / delthet(-nz + 1)**2
d2f(nz) = d2f(-nz)
end subroutine get_d2dthet2
! given function f(theta:-pi->pi), calculate theta derivative
! second order accurate, with equal grid spacing assumed
! assumes periodic in theta -- may need to change this in future
subroutine get_dthet(f, df)
implicit none
real, dimension(-nz:), intent(in) :: f
real, dimension(-nz:), intent(out) :: df
! assuming equal grid spacing in theta here
df(-nz + 1:nz - 1) = (f(-nz + 2:) - f(:nz - 2)) / (delthet(:nz - 2) + delthet(-nz + 1:))
! use periodicity at boundary
df(-nz) = (f(-nz + 1) - f(nz - 1)) / (delthet(-nz) + delthet(nz - 1))
df(nz) = df(-nz)
end subroutine get_dthet
subroutine get_jacrho
implicit none
! jacrho = R*(dR/drho * dZ/dtheta - dR/dtheta * dZ/drho)
jacrho = Rr(2, :) * (dRdrho * dZdth - dRdth * dZdrho)
end subroutine get_jacrho
! ! get dpsinorm/drho = (I/2*pi*q)*int_0^{2*pi} dthet jacrho/R**2
! subroutine get_dpsidrho (dpsidrho)
! use constants, only: pi
! implicit none
! real, intent (out) :: dpsidrho
! ! theta_integrate returns integral from 0 -> 2*pi
! call theta_integrate (jacrho(-nz2pi:nz2pi)/Rr(2,-nz2pi:nz2pi)**2, dpsidrho)
! ! integration done using trapezoidal rule
! dpsidrho = dpsidrho*bi/(2.*pi*local%qinp)
! end subroutine get_dpsidrho
subroutine get_gradrho(dpsidrho, grho)
implicit none
real, intent(in) :: dpsidrho
real, dimension(-nz:), intent(out) :: grho
grho = Rr(2, :) * sqrt(dRdth**2 + dZdth**2) / jacrho
gpsi = grho * dpsidrho
end subroutine get_gradrho
subroutine get_dIdrho(dpsidrho, grho, dIdrho)
use constants, only: pi
implicit none
real, intent(in) :: dpsidrho
real, dimension(-nz:), intent(in) :: grho
real, intent(out) :: dIdrho
real :: num1, num2, denom
real, dimension(:), allocatable :: dum
allocate (dum(-nz:nz)); dum = 0.
dum = jacrho * (1.0 + (bi / gpsi)**2) / Rr(2, :)**2
call theta_integrate(dum(-nz2pi:nz2pi), denom)
dum = jacrho * (2.*dRdrho / Rr(2, :) + dqdr / local%qinp) / Rr(2, :)**2
call theta_integrate(dum(-nz2pi:nz2pi), num1)
! betaprim below is (4*pi*ptot/B0^2)*(-d ln ptot / drho)
dum = (-2.*(dRdth * d2Rdrdth + dZdth * d2Zdrdth) / jacrho &
+ drzdth + local%betaprim * jacrho / dpsidrho**2) / grho**2
call theta_integrate(dum(-nz2pi:nz2pi), num2)
dIdrho = bi * (num1 + num2) / denom
deallocate (dum)
end subroutine get_dIdrho
subroutine get_djacdrho(dpsidrho, dIdrho, grho)
implicit none
real, intent(in) :: dpsidrho, dIdrho
real, dimension(-nz:), intent(in) :: grho
! this is dpsi/dr * d/dr (jacobian)
! betaprim below is (4*pi*ptot/B0^2)*(-d ln ptot / drho)
djacdrho = (Rr(2, :) / grho)**2 * (2.*(dRdth * d2Rdrdth + dZdth * d2Zdrdth) / jacrho &
- drzdth + jacrho * (bi * dIdrho / Rr(2, :)**2 - local%betaprim) / dpsidrho**2)
! this is d/dr (jacobian_r)
djacrdrho = djacdrho + jacrho * local%d2psidr2 / dpsidrho
end subroutine get_djacdrho
subroutine get_d2RZdr2
implicit none
! get factor common to both d2R/drho2 and d2Z/drho2
d2Rdr2 = ((djacrdrho - jacrho * dRdrho / Rr(2, :)) / Rr(2, :) &
- dRdrho * d2Zdrdth + dZdrho * d2Rdrdth) / (dRdth**2 + dZdth**2)
d2Zdr2 = -d2Rdr2 * dRdth
d2Rdr2 = d2Rdr2 * dZdth
end subroutine get_d2RZdr2
subroutine get_dgr2dr(dpsidrho, grho)
implicit none
real, intent(in) :: dpsidrho
real, dimension(-nz:), intent(in) :: grho
dgr2dr = 2.*(grho**2 * (dRdrho / Rr(2, :) - djacrdrho / jacrho) &
+ (Rr(2, :) / jacrho)**2 * (dRdth * d2Rdrdth + d2Zdrdth * dZdth))
dgpsi2dr = 2.*(gpsi**2 * (dRdrho / Rr(2, :) - djacdrho / jacrho) &
+ (Rr(2, :) / jacrho)**2 * (dRdth * d2Rdrdth + d2Zdrdth * dZdth) * dpsidrho**2)
end subroutine get_dgr2dr
subroutine get_graddotgrad(dpsidrho, grho)
implicit none
real, intent(in) :: dpsidrho
real, dimension(-nz:), intent(in) :: grho
! grad theta . grad theta
gradthet2 = (Rr(2, :) / jacrho)**2 * (dRdrho**2 + dZdrho**2)
! grad rho . grad theta
gradrho_gradthet = -(Rr(2, :) / jacrho)**2 * (dRdrho * dRdth + dZdrho * dZdth)
! grad alpha . grad theta
gradalph_gradthet = -(varthet * dqdr + local%qinp * dvarthdr) * gradrho_gradthet &
- bi * jacrho / (dpsidrho * Rr(2, :)**2) * gradthet2
! grad rho . grad alpha
gradrho_gradalph = -(varthet * dqdr + local%qinp * dvarthdr) * grho**2 &
- bi * jacrho / (dpsidrho * Rr(2, :)**2) * gradrho_gradthet
! grad alpha . grad alpha
gradalph2 = (1./Rr(2, :)**2) + ((varthet * dqdr + local%qinp * dvarthdr) * grho)**2 &
+ 2.*bi * jacrho * (varthet * dqdr + local%qinp * dvarthdr) * gradrho_gradthet / (dpsidrho * Rr(2, :)**2) &
+ (bi * jacrho / (dpsidrho * Rr(2, :)**2))**2 * gradthet2
end subroutine get_graddotgrad
subroutine get_gds(gds2, gds21, gds22, gds23, gds24)
implicit none
real, dimension(-nz:), intent(out) :: gds2, gds21, gds22, gds23, gds24
! |grad alpha|^2 * (dpsiN/drho)^2 (dpsiN/drho factor accounts for ky normalization)
gds2 = gradalph2 * dpsidrho_psi0**2
! (grad q . grad alpha) * (dpsiN/drho)^2
gds21 = gradrho_gradalph * dqdr * dpsidrho_psi0**2
! |grad q|^2 * (dpsiN/drho)^2
gds22 = (grho * dpsidrho_psi0 * dqdr)**2
! (grad rho . grad theta * |grad alpha|^2 - grad alpha . grad theta * grad rho . grad alpha) * (dpsiN/drho)^2 / B^2
gds23 = (gradrho_gradthet * gradalph2 - gradalph_gradthet * gradrho_gradalph) * (dpsidrho_psi0 / bmag)**2
! (grad rho . grad theta * grad rho . grad alpha - grad alpha . grad theta * |grad rho|^2) * (dpsiN/drho)^2 / B^2 * q/rho
gds24 = (gradrho_gradthet * gradrho_gradalph - gradalph_gradthet * grho**2) &
* (dpsidrho_psi0 / bmag)**2 * (local%qinp_psi0 / local%rhoc_psi0)
! note that kperp2 = (n0/a)^2*(drho/dpsiN)^2*(gds2 + 2*theta0*gds21 + theta0^2*gds22)
! theta0 = kx/(ky*shat)
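! illustrative sketch (not executed here): with hypothetical caller-side
! quantities n0a2 = (n0/a)^2*(drho/dpsiN)^2 and theta0 = kx/(ky*shat),
! kperp2 would be assembled from these factors as
! kperp2 = n0a2*(gds2 + 2.*theta0*gds21 + theta0**2*gds22)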
end subroutine get_gds
subroutine get_dBdrho(bmag, dIdrho)
implicit none
real, dimension(-nz:), intent(in) :: bmag
real, intent(in) :: dIdrho
! dB/drho
dBdrho = (bi * dIdrho + 0.5 * dgpsi2dr) / (bmag * Rr(2, :)**2) &
- bmag * dRdrho / Rr(2, :)
end subroutine get_dBdrho
subroutine get_varthet(dpsidrho)
implicit none
real, intent(in) :: dpsidrho
call theta_integrate_indef(jacrho / Rr(2, :)**2, varthet)
varthet = bi * varthet / (dpsidrho * local%qinp)
end subroutine get_varthet
subroutine get_dvarthdr(dpsidrho, dIdrho)
implicit none
real, intent(in) :: dpsidrho, dIdrho
real, dimension(-nz:nz) :: dum
dum = bi * jacrho * (dIdrho / bi - dqdr / local%qinp + djacdrho / jacrho &
- 2.*dRdrho / Rr(2, :)) / Rr(2, :)**2
call theta_integrate_indef(dum, dvarthdr)
dvarthdr = dvarthdr / (dpsidrho * local%qinp)
end subroutine get_dvarthdr
subroutine get_d2Idr2_d2jacdr2(grho, dIdrho)
use constants, only: pi
implicit none
real, dimension(-nz:), intent(in) :: grho
real, intent(in) :: dIdrho
real :: denom, num1, num2, num3, num4
real, dimension(-nz:nz) :: tmp, tmp2
! denom is the denominator in the expression for d^2 I / dr^2
tmp = jacrho / Rr(2, :)**2 * (1.0 + (bi / gpsi)**2)
call theta_integrate(tmp(-nz2pi:nz2pi), denom)
denom = denom / bi
d2jacdr2 = dIdrho * bi * jacrho / gpsi**2 &
* (dIdrho / bi + djacrdrho / jacrho - dgpsi2dr / gpsi**2 &
- 2.*dRdrho / Rr(2, :))
tmp = -d2jacdr2 / Rr(2, :)**2 - dIdrho * jacrho / (bi * Rr(2, :)**2) &
* (djacrdrho / jacrho - dIdrho / bi - 2.*dRdrho / Rr(2, :))
call theta_integrate(tmp(-nz2pi:nz2pi), num1)
! tmp = -jacrho/(dpsidrho*Rr(2,:)**2)*(djacdrho/jacrho - 2.*dRdrho/Rr(2,:))
! call theta_integrate (tmp(-nz2pi:nz2pi), num2)
! d2jacdr2 = d2jacdr2 - tmp*Rr(2,:)**2*local%d2psidr2
! num2 = local%d2psidr2 * (2*pi*local%qinp/bi*(dqdr/local%qinp - dIdrho/bi) + num2)
tmp = (d2Rdr2 * dRdth + dRdrho * d2Rdrdth + d2Zdr2 * dZdth + dZdrho * d2Zdrdth) / jacrho &
- djacrdrho * (dRdrho * dRdth + dZdrho * dZdth) / jacrho**2
call get_dthet(tmp, tmp2)
tmp = (tmp2 - 2./jacrho * (-djacrdrho / jacrho * (dRdth * d2Rdrdth + dZdth * d2Zdrdth) &
+ d2Rdrdth**2 + dRdth * d2Rdr2dth + d2Zdrdth**2 + dZdth * d2Zdr2dth)) / grho**2 &
- dgr2dr * (drzdth - 2./jacrho * (dRdth * d2Rdrdth + dZdth * d2Zdrdth)) / grho**4
call theta_integrate(tmp(-nz2pi:nz2pi), num2)
d2jacdr2 = d2jacdr2 - tmp * Rr(2, :)**2
tmp = jacrho * (local%betadbprim + local%betaprim * (djacrdrho / jacrho - dgpsi2dr / gpsi**2)) / gpsi**2
call theta_integrate(tmp(-nz2pi:nz2pi), num3)
!FLAG - next negative sign?
d2jacdr2 = d2jacdr2 - tmp * Rr(2, :)**2
tmp = jacrho / Rr(2, :)**2 * (2.*d2Rdr2 / Rr(2, :) - 2.*(dRdrho / Rr(2, :))**2 &
+ local%d2qdr2 / local%qinp - (dqdr / local%qinp)**2 + (2 * dRdrho / Rr(2, :) + dqdr / local%qinp) &
* (djacrdrho / jacrho - 2.*dRdrho / Rr(2, :)))
call theta_integrate(tmp(-nz2pi:nz2pi), num4)
d2Idr2 = (num1 + num2 + num3 + num4) / denom
! d2jacdr2 = d2jacdr2 + bi*jacrho/(gpsi*Rr(2,:))**2*d2Idr2 + 2.*djacdrho*dRdrho/Rr(2,:)**3
d2jacdr2 = d2jacdr2 + bi * jacrho / gpsi**2 * d2Idr2 + 2.*djacdrho * dRdrho / Rr(2, :)
end subroutine get_d2Idr2_d2jacdr2
subroutine get_d2varthdr2(dpsidrho, dIdrho)
implicit none
real, intent(in) :: dpsidrho, dIdrho
real, dimension(-nz:nz) :: dum
dum = bi * jacrho / (local%qinp * dpsidrho * Rr(2, :)**2) * ((dIdrho / bi - dqdr / local%qinp &
! dum = bi*jacrho/(local%qinp*Rr(2,:)**2)*( (dIdrho/bi - dqdr/local%qinp &
+ djacdrho / jacrho - 2.*dRdrho / Rr(2, :))**2 &
+ d2Idr2 / bi - (dIdrho / bi)**2 - local%d2qdr2 / local%qinp &
+ (dqdr / local%qinp)**2 + d2jacdr2 / jacrho - (djacdrho / jacrho)**2 &
- djacdrho * local%d2psidr2 / (dpsidrho * jacrho) &
- 2.*d2Rdr2 / Rr(2, :) + 2.*(dRdrho / Rr(2, :))**2)
call theta_integrate_indef(dum, d2varthdr2)
end subroutine get_d2varthdr2
subroutine get_d2Bdr2(bmag, dIdrho)
implicit none
real, dimension(-nz:), intent(in) :: bmag
real, intent(in) :: dIdrho
! d2gpsidr2 = 2.*( dgr2dr*(dRdrho/Rr(2,:) - djacdrho/jacrho) &
! + grho**2*(d2Rdr2/Rr(2,:) - (dRdrho/Rr(2,:))**2 - d2jacdr2/jacrho &
! + djacdrho*djacrdrho/jacrho**2) + (Rr(2,:)/jacrho)**2 &
! * (dRdth**2 + dRdth*d2Rdr2dth + dZdth**2 + dZdth*d2Zdr2dth &
! + 2.*(dRdrho/Rr(2,:) - djacrdrho/jacrho)*(dRdth*d2Rdrdth+dZdth*d2Zdrdth)) )
d2gpsidr2 = 2.*(dRdrho / Rr(2, :) - djacdrho / jacrho) * dgpsi2dr &
+ 2.*gpsi**2 * (d2Rdr2 / Rr(2, :) - (dRdrho / Rr(2, :))**2 - d2jacdr2 / jacrho + djacdrho * djacrdrho / jacrho**2) &
+ 2.*(Rr(2, :) * gpsi / jacrho)**2 * (d2Rdrdth**2 + dRdth * d2Rdr2dth + d2Zdrdth**2 + dZdth * d2Zdr2dth &
+ 2.*(dRdth * d2Rdrdth + dZdth * d2Zdrdth) * (dRdrho / Rr(2, :) - djacdrho / jacrho))
! d2gpsidr2 = 2.*(dpsidrho*Rr(2,:)/jacrho)**2 &
! * (2.*(dRdrho/Rr(2,:)-djacdrho/jacrho) &
! * ((dRdrho/Rr(2,:)-djacdrho/jacrho)*(dRdth**2+dZdth**2) &
! + 2.*(dRdth*d2Rdrdth+dZdth*d2Zdrdth)) &
! + (dRdth**2+dZdth**2)*(d2rdr2/Rr(2,:) - (dRdrho/Rr(2,:))**2 &
! - d2jacdr2/jacrho + (djacdrho/jacrho)**2) &
! + d2Rdrdth**2 + dRdth*d2Rdr2dth + d2Zdrdth**2 + dZdth*d2Zdr2dth) &
! + 4.*dpsidrho*local%d2psidr2*dgr2dr &
! + 2.*grho**2*(local%d2psidr2**2 + dpsidrho*local%d3psidr3)
! get d/drho (dB/drho)
d2Bdr2 = -dBdrho * dRdrho / Rr(2, :) + bmag * (dRdrho / Rr(2, :))**2 &
- bmag * d2Rdr2 / Rr(2, :) + 0.5 * (2.*(dIdrho**2 + bi * d2Idr2) &
+ d2gpsidr2) / (bmag * Rr(2, :)**2) &
- (dBdrho + bmag * dRdrho / Rr(2, :)) * (2.*dRdrho / Rr(2, :) + dBdrho / bmag)
end subroutine get_d2Bdr2
subroutine get_dcrossdr(dpsidrho, dIdrho, grho)
implicit none
real, intent(in) :: dpsidrho, dIdrho
real, dimension(-nz:), intent(in) :: grho
! dgr2 = d/drho (|grad rho|^2)
! dgr2 = 2.*(Rr(2,:)/jacrho)**2*((dRdrho/Rr(2,:)-djacdrho/jacrho)*(dRdth**2+dZdth**2) &
! + dRdth*d2Rdrdth + dZdth*d2Zdrdth)
! dgrgt = d/drho (grad rho . grad theta)
! dgrgt = -(Rr(2,:)/jacrho)**2*(2.*(dRdrho/Rr(2,:)-djacdrho/jacrho)*(dRdrho*dRdth+dZdrho*dZdth) &
! + d2Rdr2*dRdth+dRdrho*d2Rdrdth+d2Zdr2*dZdth+dZdrho*d2Zdrdth)
dgrgt = 2.*gradrho_gradthet * (dRdrho / Rr(2, :) - djacrdrho / jacrho) &
- (Rr(2, :) / jacrho)**2 * (d2Rdr2 * dRdth + dRdrho * d2Rdrdth + d2Zdr2 * dZdth + dZdrho * d2Zdrdth)
! dgt2 = d/drho (|grad theta|^2)
dgt2 = 2.*(Rr(2, :) / jacrho)**2 * ((dRdrho / Rr(2, :) - djacrdrho / jacrho) * (dRdrho**2 + dZdrho**2) &
+ dRdrho * d2Rdr2 + dZdrho * d2Zdr2)
! this is d/drho (|grad alph|^2)
! will later multiply it by 0.5*dpsidrho**2
dga2 = -2 * dRdrho / Rr(2, :)**3 + dgr2dr * (varthet * dqdr + local%qinp * dvarthdr)**2 &
+ (2.0 * grho**2 * (varthet * dqdr + local%qinp * dvarthdr) &
+ 2.*bi * jacrho * gradrho_gradthet / (dpsidrho * Rr(2, :)**2)) &
* (local%d2qdr2 * varthet + 2.*dqdr * dvarthdr + local%qinp * d2varthdr2) &
+ 2.*(varthet * dqdr + local%qinp * dvarthdr) * bi * jacrho / (dpsidrho * Rr(2, :)**2) &
* (dgrgt + gradrho_gradthet * (dIdrho / bi + djacdrho / jacrho - 2.*dRdrho / Rr(2, :))) &
+ (bi * jacrho / (dpsidrho * Rr(2, :)**2))**2 * (dgt2 + 2.*gradthet2 * (dIdrho / bi + djacdrho / jacrho &
- 2.*dRdrho / Rr(2, :)))
! dgagr = d/drho (grad alpha . grad rho)
dgagr = -grho**2 * (2.*dvarthdr * dqdr + varthet * local%d2qdr2 + local%qinp * d2varthdr2) &
- dgr2dr * (varthet * dqdr + local%qinp * dvarthdr) - bi * jacrho / (dpsidrho * Rr(2, :)**2) &
* (dgrgt + gradrho_gradthet * (dIdrho / bi + djacdrho / jacrho - 2.*dRdrho / Rr(2, :)))
! dgagt = d/drho (grad alpha . grad theta)
dgagt = -gradrho_gradthet * (2.*dvarthdr * dqdr + varthet * local%d2qdr2 + local%qinp * d2varthdr2) &
- dgrgt * (varthet * dqdr + local%qinp * dvarthdr) - bi * jacrho / (dpsidrho * Rr(2, :)**2) &
* (dgt2 + gradthet2 * (dIdrho / bi + djacdrho / jacrho - 2.*dRdrho / Rr(2, :)))
! dcrossdr = d/drho [(grad alpha x B) . grad theta)]
dcrossdr = dpsidrho * (dgagr * gradalph_gradthet + gradrho_gradalph * dgagt &
- dga2 * gradrho_gradthet - gradalph2 * dgrgt) + local%d2psidr2 * cross / dpsidrho
! this is (dpsi/drho)^2*d|grad alpha|^2/dr
dgds2dr = dga2 * dpsidrho_psi0**2
! this is (dpsi/drho)^2*d(grad alpha . grad q)/dr
! note that there will be multiplication by 2 in dist_fn.fpp
dgds21dr = (dgagr * dqdr + local%d2qdr2 * gradrho_gradalph) * dpsidrho_psi0**2
! this is (dpsi/drho)^2*d(|grad q|^2)/dr
dgds22dr = (dqdr**2 * dgr2dr + 2.*grho**2 * dqdr * local%d2qdr2) * dpsidrho_psi0**2
! note that dkperp2/dr = (n0/a)^2*(drho/dpsiN)^2*(dgds2dr + 2*theta0*dgds21dr + theta0^2*dgds22dr)
end subroutine get_dcrossdr
subroutine theta_integrate(integrand, integral)
implicit none
real, dimension(-nz2pi:), intent(in) :: integrand
real, intent(out) :: integral
! use trapezoidal rule to integrate in theta
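! i.e. integral ~ sum_i 0.5*delthet(i)*(integrand(i) + integrand(i+1))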
integral = 0.5 * sum(delthet(-nz2pi:nz2pi - 1) * (integrand(-nz2pi:nz2pi - 1) + integrand(-nz2pi + 1:nz2pi)))
end subroutine theta_integrate
! get indefinite integral of integrand
subroutine theta_integrate_indef(integrand, integral)
implicit none
real, dimension(-nz:), intent(in) :: integrand
real, dimension(-nz:), intent(out) :: integral
integer :: i
! use trapezoidal rule to integrate in theta
integral(0) = 0.0
do i = 1, nz
integral(i) = integral(i - 1) + 0.5 * delthet(i - 1) * (integrand(i - 1) + integrand(i))
end do
do i = -1, -nz, -1
integral(i) = integral(i + 1) - 0.5 * delthet(i) * (integrand(i + 1) + integrand(i))
end do
end subroutine theta_integrate_indef
function Rpos(r, theta, j)
use constants, only: pi
integer, intent(in) :: j
real, intent(in) :: r, theta
real :: Rpos
real :: g, gp, dr
integer :: i
dr = r - local%rhoc
! For Y Xiao:
! g = local%delp/local%rhoc + local%d * sin(theta)**2
! Rpos = local%rmaj*(1.+r*(cos(theta)-g)-g*dr)
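! the shaping below corresponds to the Miller-type parameterisation
! R(r,theta) = Rmaj(r) + r*cos(theta + tri(r)*sin(theta)):
! g = cos(theta + tri*sin(theta)), gp = dg/drho (via triprim), and Rpos is
! this R expanded about r0 = rhoc to first order in dr = r - r0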
g = cos(theta + local%tri * sin(theta))
gp = -sin(theta + local%tri * sin(theta)) &
* local%triprim * sin(theta)
! allow for strange specification of R_psi
if (j == nz + 1) then
i = -nz
else
i = j
end if
! second line here is (1/2)*(r-r0)**2*d2R/dr2|_r0
! note that d2R=0 unless read_profile_variation = T in input file
Rpos = local%rmaj + local%shift * dr + g * local%rhoc + (g + local%rhoc * gp) * dr &
+ 0.5 * (r - rhoc0)**2 * d2R(i)
end function Rpos
function Zpos(r, theta, j)
integer, intent(in) :: j
real, intent(in) :: r, theta
real :: Zpos, dr
integer :: i
! allow for strange specification of Z_psi
if (j == nz + 1) then
i = -nz
else
i = j
end if
dr = r - local%rhoc
! note that d2Z=0 unless read_profile_variation=T in input file
Zpos = local%kappa * sin(theta) * local%rhoc + (local%rhoc * local%kapprim + local%kappa) * sin(theta) * dr &
+ 0.5 * (r - rhoc0)**2 * d2Z(i)
end function Zpos
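! wrap an angle into [-pi, pi], snapping values within theta_tol of the
! boundary onto +/-pi to guard against floating-point round-off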
function mod2pi(theta)
real, intent(in) :: theta
real :: pi, th, mod2pi
real, parameter :: theta_tol = 1.e-6
logical :: out
pi = 2.*acos(0.)
if (theta <= pi .and. theta >= -pi) then
mod2pi = theta
return
end if
if (theta - theta_tol <= pi .and. theta >= -pi) then
mod2pi = pi
return
end if
if (theta <= pi .and. theta + theta_tol >= -pi) then
mod2pi = -pi
return
end if
th = theta
out = .true.
do while (out)
if (th > pi) th = th - 2.*pi
if (th < -pi) th = th + 2.*pi
if (th <= pi .and. th >= -pi) out = .false.
end do
mod2pi = th
end function mod2pi
end module millerlocal
|
{"hexsha": "bea47de47fae31f393715608faa1a5e5baa040fc", "size": 53935, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "geo/millerlocal.f90", "max_stars_repo_name": "AntonioG-Jerez/stella", "max_stars_repo_head_hexsha": "b9d0257ea3639218b7546116c1235ebd4a8e0752", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-12-15T08:23:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T15:14:42.000Z", "max_issues_repo_path": "geo/millerlocal.f90", "max_issues_repo_name": "AntonioG-Jerez/stella", "max_issues_repo_head_hexsha": "b9d0257ea3639218b7546116c1235ebd4a8e0752", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2021-07-05T16:41:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T15:58:05.000Z", "max_forks_repo_path": "geo/millerlocal.f90", "max_forks_repo_name": "AntonioG-Jerez/stella", "max_forks_repo_head_hexsha": "b9d0257ea3639218b7546116c1235ebd4a8e0752", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-07-05T15:35:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T09:23:42.000Z", "avg_line_length": 39.0267727931, "max_line_length": 145, "alphanum_fraction": 0.5813664596, "num_tokens": 19631}
|
#include <fstream>
#include <iostream>
#include <string>
#include <sstream> // for stringstream, used below but previously missing
#include <utility>
#include <vector>
#include <map>
#include <algorithm>
#include <stdint.h>
#include <boost/foreach.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
using namespace boost::property_tree;
using namespace std;
int main(int nargc, char** args) {
const string source = "/home/ethan/DataSets/REID/PRW/Layout/Layout_prw_val.txt";
std::ifstream infile(source.c_str());
vector<string> lines;
std::string str_line;
while (std::getline(infile, str_line)) {
lines.push_back(str_line);
}
    // guard against an empty layout file before parsing the first line
    if (lines.empty()) {
        cerr << "failed to read any lines from " << source << endl;
        return 1;
    }
    stringstream ss;
ss.clear();
ss.str(lines[0]);
string path;
int xys[4] = {0};
int id;
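    // each layout line is assumed to look like:
    //   <image path> <person id> <x> <y> <width> <height>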
ss >> path >> id;
    for (int i = 0; i < 4; i++) {
        ss >> xys[i];
    }
    cout << xys[0] << " " << xys[1] << " " << xys[2] << " " << xys[3];
return 0;
}
|
{"hexsha": "814468f21591b04ad2ee72cd1e6e82b7f9c360ae", "size": 841, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "remodet_repository_wdh_part/tools/test_loadxml.cpp", "max_stars_repo_name": "UrwLee/Remo_experience", "max_stars_repo_head_hexsha": "a59d5b9d6d009524672e415c77d056bc9dd88c72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "remodet_repository_wdh_part/tools/test_loadxml.cpp", "max_issues_repo_name": "UrwLee/Remo_experience", "max_issues_repo_head_hexsha": "a59d5b9d6d009524672e415c77d056bc9dd88c72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "remodet_repository_wdh_part/tools/test_loadxml.cpp", "max_forks_repo_name": "UrwLee/Remo_experience", "max_forks_repo_head_hexsha": "a59d5b9d6d009524672e415c77d056bc9dd88c72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0285714286, "max_line_length": 82, "alphanum_fraction": 0.6634958383, "num_tokens": 245}
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import pylab as pl
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import preprocessing, metrics, tree
from io import StringIO
import pydotplus
#read dataset
dataset = pd.read_csv("drug200.csv")
#feature set as numpy array
x = dataset[['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']].values
#convert categorical data to numerical data
le_sex = preprocessing.LabelEncoder()
le_sex.fit(['F','M'])
x[:,1] = le_sex.transform(x[:,1])
le_BP = preprocessing.LabelEncoder()
le_BP.fit([ 'LOW', 'NORMAL', 'HIGH'])
x[:,2] = le_BP.transform(x[:,2])
le_Chol = preprocessing.LabelEncoder()
le_Chol.fit([ 'NORMAL', 'HIGH'])
x[:,3] = le_Chol.transform(x[:,3])
#targets as numpy array
y = dataset["Drug"]
#split data into train data and test data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=3)
#create the entropy decision tree
drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4)
#train the decision tree
drugTree.fit(x_train,y_train)
predicted_data = drugTree.predict(x_test)
#evaluate the decision tree
print("DecisionTrees's Accuracy: ", metrics.accuracy_score(y_test, predicted_data))
#visualize the decision tree
# dot_data = StringIO()
# filename = "drugtree.png"
# featureNames = dataset.columns[0:5]
# out = tree.export_graphviz(drugTree,feature_names=featureNames, out_file=dot_data, class_names= np.unique(y_train), filled=True, special_characters=True,rotate=False)
# graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# graph.write_png(filename)
# img = mpimg.imread(filename)
# plt.figure(figsize=(100, 200))
# plt.imshow(img,interpolation='nearest')
|
{"hexsha": "2afcf783632f51465c9e2f2290e153e614378cd7", "size": 1816, "ext": "py", "lang": "Python", "max_stars_repo_path": "Decision Tree Classification/app.py", "max_stars_repo_name": "fiend361/IBM-Machine-Learning-with-Python-Course", "max_stars_repo_head_hexsha": "e0d5e078eb79837f957a6d0c332639259992c384", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Decision Tree Classification/app.py", "max_issues_repo_name": "fiend361/IBM-Machine-Learning-with-Python-Course", "max_issues_repo_head_hexsha": "e0d5e078eb79837f957a6d0c332639259992c384", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Decision Tree Classification/app.py", "max_forks_repo_name": "fiend361/IBM-Machine-Learning-with-Python-Course", "max_forks_repo_head_hexsha": "e0d5e078eb79837f957a6d0c332639259992c384", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3188405797, "max_line_length": 171, "alphanum_fraction": 0.7533039648, "include": true, "reason": "import numpy", "num_tokens": 457}
|
from abc import ABCMeta, abstractmethod
from random import choice, random, uniform
from numpy import argmax, argmin
# Harmony Search class
# Taken from Solid library
# https://100.github.io/Solid/_modules/Solid/HarmonySearch.html
# Adapted for Python 3
# Class was not imported, as it contains python2 style prints
class HarmonySearch:
"""
Conducts harmony search
"""
__metaclass__ = ABCMeta
cur_steps = None
hms = None
hmcr = None
par = None
fw = None
memory = None
scores = None
best = None
max_steps = None
max_score = None
def __init__(self, hms, hmcr, par, fw, max_steps, max_score=None):
"""
:param hms: harmony memory size
:param hmcr: harmony memory considering rate
:param par: pitch adjustment rate
:param fw: fret width
:param max_steps: maximum number of steps to run algorithm for
:param max_score: objective function value to stop algorithm once reached
"""
if isinstance(hms, int) and hms > 0:
self.hms = hms
else:
raise TypeError('Harmony memory size must be a positive integer')
if isinstance(hmcr, float) and 0 <= hmcr <= 1:
self.hmcr = hmcr
else:
raise TypeError('Harmony memory considering rate must be a float between 0 and 1')
if isinstance(par, float) and 0 <= par <= 1:
self.par = par
else:
raise TypeError('Pitch adjustment rate must be a float between 0 and 1')
if isinstance(fw, (int, float)):
self.fw = float(fw)
else:
raise TypeError('Fret width must be a numeric type')
if isinstance(max_steps, int) and max_steps > 0:
self.max_steps = max_steps
else:
raise TypeError('Max steps must be a positive integer')
if max_score is not None:
if isinstance(max_score, (int, float)):
self.max_score = max_score
else:
raise TypeError('Max score must be a numeric type')
def __str__(self):
return ('HARMONY SEARCH: \n' +
'CURRENT STEPS: %d \n' +
'BEST SCORE: %f \n' +
'BEST MEMBER: %s \n\n') % \
(self.cur_steps, self._score(self.best), str(self.best))
def __repr__(self):
return self.__str__()
def _clear(self):
"""
Resets the variables that are altered on a per-run basis of the algorithm
:return: None
"""
self.cur_steps = 0
self.memory = list([self._random_harmony() for _ in range(self.hms)])
self.scores = None
@abstractmethod
def _random_harmony(self):
"""
Generates a random harmony, represented as a list of floats
:return: list of harmonies
"""
pass
@abstractmethod
def _score(self, harmony):
"""
Returns score of a harmony
:param harmony: a harmony
:return: score of harmony
"""
pass
def _score_all(self):
"""
Finds score of all current harmonies in memory
:return: None
"""
self.scores = [self._score(x) for x in self.memory]
def _worst_score(self):
"""
Returns index of worst harmony in memory
:return: index of worst harmony in memory
"""
return argmin(self.scores)
def _best_score(self):
"""
Returns index of best harmony in memory
:return: index of best harmony in memory
"""
return argmax(self.scores)
def run(self, verbose=True):
"""
Conducts harmony search
:param verbose: indicates whether or not to print progress regularly
:return: best state and objective function value of best state
"""
self._clear()
self._score_all()
for i in range(self.max_steps):
self.cur_steps += 1
if verbose and ((i + 1) % 100 == 0):
print(self)
self._score_all()
selected = [0.] * len(self.memory[0])
            for j in range(len(selected)):
                if self.hmcr >= random():
                    selected_component = choice(self.memory)[j]
                    if self.par >= random():
                        selected_component += uniform(-1, 1) * self.fw
                else:
                    selected_component = self._random_harmony()[j]
                selected[j] = selected_component
if self._score(selected) > self._score(self.memory[self._worst_score()]):
self.memory[self._worst_score()] = selected
self.scores[self._worst_score()] = self._score(selected)
self.best = self.memory[self._best_score()]
if self.max_score is not None and self._score(self.best) > self.max_score:
if verbose: print("TERMINATING - REACHED MAXIMUM SCORE")
return self.best, self._score(self.best)
if verbose: print("TERMINATING - REACHED MAXIMUM STEPS")
return self.best, self._score(self.best)
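
# Illustrative usage sketch (not part of the original library code): a minimal
# concrete subclass showing how the two abstract methods plug in. The search
# bounds and objective below are hypothetical, chosen only for demonstration.
class SphereHarmonySearch(HarmonySearch):
    def _random_harmony(self):
        # a harmony is a list of floats; here, two components drawn from [-5, 5]
        return [uniform(-5, 5) for _ in range(2)]

    def _score(self, harmony):
        # higher scores are better; this objective peaks at 0 at the origin
        return -(harmony[0] ** 2 + harmony[1] ** 2)


if __name__ == '__main__':
    hs = SphereHarmonySearch(hms=30, hmcr=0.9, par=0.3, fw=0.1, max_steps=500)
    best, best_score = hs.run(verbose=False)
    print(best, best_score)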
|
{"hexsha": "d7018461f5f1785c363cb9871a45171a5bba565a", "size": 5156, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/optimization/HarmonySearch.py", "max_stars_repo_name": "lyonva/Nue", "max_stars_repo_head_hexsha": "90680de00b0c76f6bfdbed71b785671e7c3a3f54", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/optimization/HarmonySearch.py", "max_issues_repo_name": "lyonva/Nue", "max_issues_repo_head_hexsha": "90680de00b0c76f6bfdbed71b785671e7c3a3f54", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/optimization/HarmonySearch.py", "max_forks_repo_name": "lyonva/Nue", "max_forks_repo_head_hexsha": "90680de00b0c76f6bfdbed71b785671e7c3a3f54", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6319018405, "max_line_length": 94, "alphanum_fraction": 0.5731186967, "include": true, "reason": "from numpy", "num_tokens": 1182}
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List
import numpy as np
from rl_coach.core_types import ActionType
from rl_coach.exploration_policies.exploration_policy import ExplorationPolicy, ExplorationParameters
from rl_coach.spaces import ActionSpace, DiscreteActionSpace, BoxActionSpace
class GreedyParameters(ExplorationParameters):
@property
def path(self):
return 'rl_coach.exploration_policies.greedy:Greedy'
class Greedy(ExplorationPolicy):
"""
The Greedy exploration policy is intended for both discrete and continuous action spaces.
For discrete action spaces, it always selects the action with the maximum value, as given by the agent.
For continuous action spaces, it always return the exact action, as it was given by the agent.
"""
def __init__(self, action_space: ActionSpace):
"""
:param action_space: the action space used by the environment
"""
super().__init__(action_space)
def get_action(self, action_values: List[ActionType]) -> ActionType:
if type(self.action_space) == DiscreteActionSpace:
return np.argmax(action_values)
if type(self.action_space) == BoxActionSpace:
return action_values
def get_control_param(self):
return 0
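
# Illustrative usage sketch (assuming a standard rl_coach install, where
# DiscreteActionSpace takes the number of actions as its first argument):
if __name__ == '__main__':
    policy = Greedy(DiscreteActionSpace(4))
    print(policy.get_action([0.1, 0.7, 0.2, 0.0]))  # expected: 1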
|
{"hexsha": "8abe030999e5409f36ccff8a48b245e0c639aeab", "size": 1854, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl_coach/exploration_policies/greedy.py", "max_stars_repo_name": "jl45621/coach", "max_stars_repo_head_hexsha": "9a895a1ac73aff44b2e6eb8e4d01e8ec35ceb084", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-05T09:30:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-15T08:10:59.000Z", "max_issues_repo_path": "rl_coach/exploration_policies/greedy.py", "max_issues_repo_name": "jl45621/coach", "max_issues_repo_head_hexsha": "9a895a1ac73aff44b2e6eb8e4d01e8ec35ceb084", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl_coach/exploration_policies/greedy.py", "max_forks_repo_name": "jl45621/coach", "max_forks_repo_head_hexsha": "9a895a1ac73aff44b2e6eb8e4d01e8ec35ceb084", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-19T06:39:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-19T06:39:18.000Z", "avg_line_length": 35.6538461538, "max_line_length": 107, "alphanum_fraction": 0.740021575, "include": true, "reason": "import numpy", "num_tokens": 403}
|
%Program for creating CSV File
%Author : Athi Narayanan S
%M.E, Embedded Systems,
%K.S.R College of Engineering
%Erode, Tamil Nadu, India.
%http://sites.google.com/site/athisnarayanan/
%Program Description
%This program generates a CSV file containing the colors in the output image.
%The CSV format of each row is as follows:
%PaletteNumber,R,G,B
function writeCSV(out_map, OutCSVName)
s = size(out_map);
fid = fopen(OutCSVName,'w');
for i=1:s(1)
fprintf(fid,'%d,',i); %Palette No.
for j=1:s(2)
fprintf(fid,'%d,',double(out_map(i,j)));
end
fprintf(fid,'\n');
end
fclose(fid);
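%Example usage (illustrative; file name and palette size are arbitrary):
% img = imread('input.jpg');
% [~, out_map] = rgb2ind(img, 16); %quantize to a 16-color palette in [0,1]
% writeCSV(uint8(255*out_map), 'palette.csv');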
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/31687-color-quantization/K-Means-Color-Reduction/writeCSV.m"}
|
\documentclass{uofsthesis-cs}
% Documentation for the uofsthesis-cs class is given in uofsthesis-cs.dvi
%
% It is recommended that you read the CGSR thesis preparation
% guidelines before proceeding.
% They can be found at http://www.usask.ca/cgsr/thesis/index.htm
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% FRONTMATTER - In this section, specify information to be used to
% typeset the thesis frontmatter.
\usepackage{graphicx}
\usepackage{cite}
\usepackage{url}
\usepackage{todonotes}
\usepackage{textcomp}
\usepackage{caption} % provides \captionof, used for the appendix figures
\usepackage{hyperref}
\pdfminorversion=5
\pdfcompresslevel=9
\pdfobjcompresslevel=2
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% THESIS TITLE
% Specify the title. Set the capitalization how you want it.
\title{SynVisio: A Multiscale Tool to Explore Genomic Conservation}
% AUTHOR'S NAME
% Your name goes here.
\author{Venkat Kiran Bandi}
% DEGREE SOUGHT.
% Use \MSc or \PhD here
\degree{\MSc}
% THESIS DEFENCE DATE
% Should be month/year, e.g. July 2004
\defencedate{May/2020}
% NAME OF ACADEMIC UNIT
%
% The following two commands allow you to specify the academic unit you belong to.
% This will appear on the title page as
% ``<academic unit> of <department>''.
% So if you are in the division of biomedical engineering you would need to do:
% \department{Biomedical Engineering}
% \academicunit{Division}
%
% The default is ``Department of Computer Science'' if these commands
% are not given.
%
% If you are in a discipline other than Computer Science, uncomment the following line and
% specify your discipline/department. Default is 'Computer Science'.
% \department{If not Computer Science, put the name of your department here}
% If you are not in a department, but say, a division, uncomment the following line.
% \academicunit{Put the type of academic unit you belong to here, e.g. Division, College}
% PERMISSION TO USE ADDRESS
%
% If you are not in Computer Science you will want to change the
% address on the Permission to Use page. This is done using the
% \ptuaddress{}. Example:
%
% \ptuaddress{Head of the Department of Computer Science\\
% 176 Thorvaldson Building\\
% 110 Science Place\\
% University of Saskatchewan\\
% Saskatoon, Saskatchewan\\
% Canada\\
% S7N 5C9
% }
% ABSTRACT
\abstract{
Comparative analysis of genomes is an important area in biological research that can shed light on an organism's internal functions and evolutionary history. It involves comparing two or more genomes to identify similar regions that can indicate shared ancestry and, in turn, conservation of genetic information. Due to rapid advancements in sequencing systems, high-resolution genome data is readily available for a wide range of species, and comparative analysis of this data can offer crucial evolutionary insights that can be applied in plant breeding and medical research. Visualizing the location, size, and orientation of conserved regions can assist researchers, as comparative analysis is a tedious process that requires extensive manual interpretation and human judgement. However, visualization tools for the analysis of conserved regions have not kept pace with the increasing availability of information and are not designed to support the diverse use cases of biological researchers. To address this, we gathered feedback from experts in the field, and developed improvements for these tools through novel interaction techniques and visual representations. We then developed SynVisio, a web-based tool for exploring conserved regions at multiple resolutions (genome, chromosome, or gene), with several visual representations and interactive features, to meet the diverse needs of genome researchers. SynVisio supports multi-resolution analysis and interactive filtering as researchers move deeper into the genome. It also supports revisitation of specific interface configurations, and enables loosely-coupled collaboration over the genomic data. An evaluation of the system with five researchers from three expert groups, coupled with a longitudinal study of web traffic to the system, provides evidence of the success of our system's novel features for interactive exploration of genomic conservation.
}
% THESIS ACKNOWLEDGEMENTS -- This can be free-form.
\acknowledgements{
I would like to express my heartfelt gratitude to my supervisor Carl Gutwin, for constantly offering me support both academically and personally, and guiding me towards a field of research that was fantastic to explore and learn.
I am particularly grateful to Gwen Lancaster for her support when I joined the program and am also grateful to my lab mates and all the staff members of the Department of Computer Science who have helped me along the way. Also, I would like to thank the members of my thesis committee, Ian McQuillan and Debajyoti Mondal, whose comments and suggestions have greatly improved this manuscript.
Finally, I would like to thank Canada for offering me a chance at a fresh start and I acknowledge the support of my family and friends for helping me along this journey in starting a new life.
}
% THESIS DEDICATION -- Also free-form. If you don't want a dedication, comment out the following
% line.
\dedication{
\begin{center}
\textit{This thesis is dedicated to my Mom and Dad for their unconditional love and support.}
\end{center}
}
% LIST OF ABBREVIATIONS - Sample
% If you don't want a list of abbreviations, comment the following 4 lines.
\loa{
\abbrev{BLAST}{Basic Local Alignment Search Tool}
\abbrev{CNV}{Copy Number Variation}
\abbrev{CSS}{Cascading Style Sheet}
\abbrev{DNA}{Deoxyribonucleic Acid}
\abbrev{DOM}{Document Object Model}
\abbrev{FASTA}{Fast All}
\abbrev{GFF}{General Feature Format}
\abbrev{HTML}{Hypertext Markup Language}
\abbrev{mRNA}{Messenger RNA}
\abbrev{RNA}{Ribonucleic Acid}
\abbrev{SNP}{Single Nucleotide Polymorphism}
\abbrev{SVG}{Scalable Vector Graphics}
\abbrev{XSS}{Cross Site Scripting}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% END OF FRONTMATTER SECTION
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
% Typeset the title page
\maketitle
% Typeset the frontmatter.
\frontmatter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% FIRST CHAPTER OF THESIS BEGINS HERE
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\input{root/chapter_1_introduction.tex}
\input{root/chapter_2_related_work.tex}
\input{root/chapter_3_requirement_analysis.tex}
\input{root/chapter_4_visual_design.tex}
\input{root/chapter_5_synvisio.tex}
\input{root/chapter_6_evaluation.tex}
\input{root/chapter_7_discussion.tex}
\input{root/chapter_8_conclusion.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SUBSEQUENT CHAPTERS (or \input's) GO HERE
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The Bibliograpy should go here. BEFORE appendices!
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Typeset the Bibliography. The bibliography style used is "plain".
% Optionally, you can specify the bibliography style to use:
% \uofsbibliography[stylename]{yourbibfile}
\uofsbibliography{reference.bib}
% If you are not using bibtex, comment the line above and uncomment
% the line below.
%Follow the line below with a thebibliography environment and bibitems.
% Note: use of bibtex is usually the preferred method.
%\uofsbibliographynobibtex
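% An illustrative sketch of that environment (commented out; entry is hypothetical):
% \begin{thebibliography}{9}
% \bibitem{sample} A.~Author, \emph{A Sample Reference}, Publisher, 2020.
% \end{thebibliography}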
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% APPENDICES
%
% Any chapters appearing after the \appendix command get numbered with
% capital letters starting with appendix 'A'.
% New chapters from here on will be called 'Appendix A', 'Appendix B'
% as opposed to 'Chapter 1', 'Chapter 2', etc.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Activate thesis appendix mode.
\uofsappendix
% Put appendix chapters in the appendices environment so that they appear correctly
% in the table of contents. You can use \input's here as well.
\begin{appendices}
\chapter{Exploring Conservation in Wheat}
To demonstrate SynVisio, let's walk through the process of exploring conservation in wheat. The data has been preloaded and can be accessed at \url{https://synvisio.usask.ca/#/Dashboard/ta_cs}.
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_1.PNG}
\captionof{figure}{Select analysis mode}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_2.PNG}
\captionof{figure}{Select default dashboard or an individual plot type}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_3.PNG}
\captionof{figure}{Select track type for supplementary datasets}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_4.PNG}
\captionof{figure}{Select source and target chromosomes, which in this case belong to two sub genomes of wheat (A and B donors)}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_5.PNG}
\captionof{figure}{Composite analysis dashboard showing conservation between two sub genomes of wheat (A and B donors)}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_6.PNG}
\captionof{figure}{Toggle track visibility}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_7.PNG}
\captionof{figure}{Filter conserved regions by gene count}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_8.PNG}
\captionof{figure}{Select multi genome analysis and tree view}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_9.PNG}
\captionof{figure}{Select chromosomes in each of the sub genomes of wheat}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_10.PNG}
\captionof{figure}{Tree view for multi genome analysis showing conservation between the three sub genomes of wheat (A, B, and D donors)}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_11.PNG}
\captionof{figure}{Select multi genome analysis and hive view, then turn on normalized scales and chromosome labels}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_12.PNG}
\captionof{figure}{Hive view showing conservation between the three sub genomes of wheat (A, B, and D donors)}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\textwidth]{images/appendix/step_13.PNG}
\captionof{figure}{Highlight conserved regions emerging from each sub genome by clicking on the corresponding marker for that genome}
\end{figure}
\end{appendices}
\end{document}
|
{"hexsha": "32e4d9a48f2e86268bcb3d16341b2238034bc23b", "size": 11009, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "main.tex", "max_stars_repo_name": "kiranbandi/synvisio-thesis", "max_stars_repo_head_hexsha": "99a85c6c081c9c628a655eb9a9d841059f6f3e3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.tex", "max_issues_repo_name": "kiranbandi/synvisio-thesis", "max_issues_repo_head_hexsha": "99a85c6c081c9c628a655eb9a9d841059f6f3e3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.tex", "max_forks_repo_name": "kiranbandi/synvisio-thesis", "max_forks_repo_head_hexsha": "99a85c6c081c9c628a655eb9a9d841059f6f3e3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.00390625, "max_line_length": 1919, "alphanum_fraction": 0.7171405214, "num_tokens": 2521}
|
import numpy as np
import matplotlib.pyplot as plt
import sys
def extract_significant_data(input_file):
raw_table = np.genfromtxt(input_file, dtype=None, delimiter="\t",
encoding="UTF-8", usecols=(0, 3, 5))
polished_table = []
# print(raw_table)
for row in raw_table:
# print(type(row[1]))
        if ((row[0] > 1.0) and (row[1] == "P")) or row[2] == "unclassified":
polished_table.append([row[2].strip(), row[0]])
return polished_table
def plot(data, plot_name, plot_title, reads_num):
width = 0.35
bins = [row[0] for row in data]
freq = [row[1] for row in data]
plot = plt.bar(bins, freq, width)
xlocs, _ = plt.xticks()
for rect in plot:
height = rect.get_height()
plt.text(rect.get_x()+rect.get_width()/2., height + 0.1,
f"{height:.2f}%", ha='center', va='bottom')
plt.text(0.17, 85, f"N. of reads: {reads_num}", horizontalalignment='center',
verticalalignment='center')
plt.xlabel("Phylum", fontsize=14)
plt.ylabel("Frequency (%)", fontsize=14)
plt.title(plot_title, fontsize=18)
plt.yticks(np.arange(0, 100, 10), fontsize=12)
plt.xticks(rotation=35, fontsize=10)
    plt.tight_layout()
plt.subplots_adjust(bottom=0.22)
plt.savefig(plot_name, dpi=300)
plt.clf()
def main():
"""Usage: This scripts requires 3 arguments:
1. Path to Kraken2's report file;
2. Path to output (plot);
3. Title of the plot;
4. Reads num;"""
input_file = sys.argv[1]
plot_name = sys.argv[2]
plot_title = sys.argv[3]
reads_num = sys.argv[4]
print("Processing file:", input_file)
data = extract_significant_data(input_file)
print(data)
plot(data, plot_name, plot_title, reads_num)
if __name__ == "__main__":
    main()
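# Example invocation (file names below are hypothetical):
#   python plot_single_sample_kraken_data.py kraken_report.tsv phyla.png "Sample A" 1234567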
|
{"hexsha": "93d42295cb9c9629e222b69ecfed88e1e6de7a61", "size": 1815, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_processing/kraken/plot_single_sample_kraken_data.py", "max_stars_repo_name": "robymetallo/lferriphilum", "max_stars_repo_head_hexsha": "ab72a4e11bfd9b8947d1f2b2bb5fe1b852d313eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_processing/kraken/plot_single_sample_kraken_data.py", "max_issues_repo_name": "robymetallo/lferriphilum", "max_issues_repo_head_hexsha": "ab72a4e11bfd9b8947d1f2b2bb5fe1b852d313eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_processing/kraken/plot_single_sample_kraken_data.py", "max_forks_repo_name": "robymetallo/lferriphilum", "max_forks_repo_head_hexsha": "ab72a4e11bfd9b8947d1f2b2bb5fe1b852d313eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8095238095, "max_line_length": 81, "alphanum_fraction": 0.6132231405, "include": true, "reason": "import numpy", "num_tokens": 506}
|
""" A very simple FCFF NN intended to be used for comparing tensorflow to other
libraries. """
import tensorflow as tf
import numpy as np
class FeedforwardNetwork(object):
""" A simple, fully-connected feedforward neural network. """
def __init__(self, layers, outputs):
"""
Args:
layers: A list of ints denoting the number of inputs for each layer. It
is assumed that the outputs of one layer will be the same size as the
inputs of the next one.
outputs: The number of outputs of the network. """
self.__build_model(layers, outputs)
def __initialize_weights(self, layers, outputs):
""" Initializes tensors containing the weights for each layer.
Args:
layers: A list denoting the number of inputs of each layer.
outputs: The number of outputs of the network. """
self.__weights = []
# This is in case we have a single hidden layer.
fan_out = layers[0]
for i in range(0, len(layers) - 1):
fan_in = layers[i]
fan_out = layers[i + 1]
# Initialize weights randomly.
weights = tf.Variable(tf.random_normal(shape=[fan_in, fan_out], stddev=1))
self.__weights.append(weights)
# Include outputs also.
self.__weights.append(tf.Variable(tf.random_normal([fan_out, outputs])))
def __add_layers(self, first_inputs):
""" Adds as many hidden layers to our model as there are elements in
__weights.
Args:
first_inputs: The tensor to use as inputs to the first hidden layer. """
# Outputs from the previous layer that get used as inputs for the next
# layer.
next_inputs = first_inputs
for i in range(0, len(self.__weights)):
weights = self.__weights[i]
num_outputs = weights.get_shape()[1]
bias = tf.Variable(tf.constant(0.1, shape=[num_outputs]))
sums = tf.matmul(next_inputs, weights) + bias
if i < len(self.__weights) - 1:
next_inputs = tf.nn.relu(sums)
else:
# For the last layer, we don't use an activation function.
next_inputs = sums
self._layer_stack = next_inputs
def __build_model(self, layers, outputs):
""" Actually constructs the graph for this model.
Args:
layers: A list denoting the number of inputs of each layer.
outputs: The number of outputs of the network. """
# Initialize all the weights first.
self.__initialize_weights(layers, outputs)
# Inputs and outputs.
num_inputs = layers[0]
self._inputs = tf.placeholder("float", [None, num_inputs])
self._expected_outputs = tf.placeholder("float", [None, outputs])
# Build actual layer model.
self.__add_layers(self._inputs)
# Cost function.
    cost = tf.reduce_mean( \
        tf.nn.softmax_cross_entropy_with_logits(logits=self._layer_stack,
                                                labels=self._expected_outputs))
# SGD optimizer.
self._optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
# Does an actual prediction.
self._prediction_operation = tf.argmax(self._layer_stack, 1)
def _extend_with_feedforward(self, inputs, layers, outputs):
""" Meant to be used by subclasses as a simple way to extend the graph of a
feedforward network. You pass in the inputs you want to use to create the
feedforward network, and it constructs the network around that.
Args:
inputs: The inputs to use for the feedforward network.
layers: A list denoting the number of inputs of each layer.
outputs: The number of outputs of the network. """
self.__initialize_weights(layers, outputs)
self.__add_layers(inputs)
def expected_outputs(self):
""" Set the value of the outputs we expect for this cycle. This should be
used to get one of the key values for the feed_dict argument of
Session.run().
Returns:
The key value for feed_dict for the expected outputs. """
return self._expected_outputs
def inputs(self):
""" Set the value of the outputs we expect for this cycle. This should be
used to get the other key value for the feed_dict argument of Session.run().
Returns:
The key value for feed_dict for the network inputs. """
return self._inputs
def predict(self):
""" Runs an actual prediction step for the network. It is intended that
the result here get passed as the target of Session.run().
Returns:
The prediction operation. """
return self._prediction_operation
def train(self):
""" Runs an SGD training step for the network. It is intended that the
result here get passed as the target of Session.run().
Returns:
The training operation. """
return self._optimizer
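# Minimal usage sketch (assumes TensorFlow 1.x graph mode; the layer sizes and
# batch tensors below are illustrative, not part of the original file):
#   net = FeedforwardNetwork(layers=[784, 128, 64], outputs=10)
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     sess.run(net.train(), feed_dict={net.inputs(): x_batch,
#                                      net.expected_outputs(): y_batch})
#     predictions = sess.run(net.predict(), feed_dict={net.inputs(): x_batch})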
|
{"hexsha": "31172cf09f43f6af000d5045b016f6e720d20257", "size": 4670, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/simple_feedforward.py", "max_stars_repo_name": "djpetti/rpinets", "max_stars_repo_head_hexsha": "8b6ebc969f3c75a0d0f5b414ed7faa7b65754892", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow/simple_feedforward.py", "max_issues_repo_name": "djpetti/rpinets", "max_issues_repo_head_hexsha": "8b6ebc969f3c75a0d0f5b414ed7faa7b65754892", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/simple_feedforward.py", "max_forks_repo_name": "djpetti/rpinets", "max_forks_repo_head_hexsha": "8b6ebc969f3c75a0d0f5b414ed7faa7b65754892", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7716535433, "max_line_length": 80, "alphanum_fraction": 0.6845824411, "include": true, "reason": "import numpy", "num_tokens": 1054}
|
__precompile__(true)
module KernelDensityEstimate
using Gadfly, Colors, Cairo, Fontconfig
import Base: promote_rule, *, rand
export
kde!,
getPoints,
getBW,
root,
Npts,
Ndim,
getWeights,
marginal,
sample,
rand,
resample,
evaluateDualTree,
BallTree,
BallTreeDensity,
getKDERange,
getKDEMax,
getKDEMean,
# approximate intersection volume
intersIntgAppxIS,
# product operation with Multiscale Gibbs sampling
prodAppxMSGibbsS,
# Gadfly plotting functions
plotKDE,
stackMarginals,
vstackedPlots,
drawHorDens,
toggleYTicks,
# add * operator for kde product approximate
*
include("BallTree01.jl")
include("BallTreeDensity01.jl")
include("DualTree01.jl")
include("KDE01.jl")
include("KDEPlotting01.jl")
include("MSGibbs01.jl")
"""
A Julia package for Kernel Density Estimation and approximations of their products
"""
end
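# Minimal usage sketch (based on the exports above; the data is illustrative):
#   p = kde!(randn(2, 100))           # fit a KDE to 100 two-dimensional points
#   q = kde!(randn(2, 100) .+ 1.0)
#   pq = p * q                        # approximate product via multiscale Gibbs sampling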
|
{"hexsha": "65fe673d28aa72a5019507c4a4106d33f580e2b0", "size": 942, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/KernelDensityEstimate.jl", "max_stars_repo_name": "tkelman/KernelDensityEstimate.jl", "max_stars_repo_head_hexsha": "c44bdd9dcae8aa07a24f6f885c2ec8787a1dae10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/KernelDensityEstimate.jl", "max_issues_repo_name": "tkelman/KernelDensityEstimate.jl", "max_issues_repo_head_hexsha": "c44bdd9dcae8aa07a24f6f885c2ec8787a1dae10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/KernelDensityEstimate.jl", "max_forks_repo_name": "tkelman/KernelDensityEstimate.jl", "max_forks_repo_head_hexsha": "c44bdd9dcae8aa07a24f6f885c2ec8787a1dae10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.2413793103, "max_line_length": 82, "alphanum_fraction": 0.6942675159, "num_tokens": 259}
|
# Copyright 2019 Prashant Singh, Fredrik Wrede and Andreas Hellander
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test suite for stochmet (model exploration)
"""
import numpy as np
from sciope.utilities.summarystats import auto_tsfresh
from sciope.utilities.priors import uniform_prior
from sciope.stochmet import stochmet
from gillespy2.solvers.numpy import NumPySSASolver
from sklearn.svm import SVC
from dask.distributed import Client
import gillespy2
import pytest
class ToggleSwitch(gillespy2.Model):
""" Gardner et al. Nature (1999)
'Construction of a genetic toggle switch in Escherichia coli'
"""
def __init__(self, parameter_values=None):
# Initialize the model.
gillespy2.Model.__init__(self, name="toggle_switch")
# Parameters
alpha1 = gillespy2.Parameter(name='alpha1', expression=1)
alpha2 = gillespy2.Parameter(name='alpha2', expression=1)
beta = gillespy2.Parameter(name='beta', expression="2.0")
gamma = gillespy2.Parameter(name='gamma', expression="2.0")
mu = gillespy2.Parameter(name='mu', expression=1.0)
self.add_parameter([alpha1, alpha2, beta, gamma, mu])
# Species
U = gillespy2.Species(name='U', initial_value=10)
V = gillespy2.Species(name='V', initial_value=10)
self.add_species([U, V])
# Reactions
cu = gillespy2.Reaction(name="r1", reactants={}, products={U: 1},
propensity_function="alpha1/(1+pow(V,beta))")
cv = gillespy2.Reaction(name="r2", reactants={}, products={V: 1},
propensity_function="alpha2/(1+pow(U,gamma))")
du = gillespy2.Reaction(name="r3", reactants={U: 1}, products={},
rate=mu)
dv = gillespy2.Reaction(name="r4", reactants={V: 1}, products={},
rate=mu)
self.add_reaction([cu, cv, du, dv])
self.timespan(np.linspace(0, 50, 101))
toggle_model = ToggleSwitch()
# Define simulator function
def set_model_parameters(params, model):
""" params - array, needs to have the same order as
model.listOfParameters """
for e, (pname, p) in enumerate(model.listOfParameters.items()):
model.get_parameter(pname).set_expression(params[e])
return model
# Here we use gillespy2 numpy solver, so performance will
# be quite slow for this model
def simulator(params, model):
model_update = set_model_parameters(params, model)
    num_trajectories = 1  # TODO: how to handle ensembles
res = model_update.run(solver=NumPySSASolver, show_labels=False,
number_of_trajectories=num_trajectories)
tot_res = np.asarray([x.T for x in res]) # reshape to (N, S, T)
tot_res = tot_res[:, 1:, :] # should not contain timepoints
return tot_res
def simulator2(x):
return simulator(x, model=toggle_model)
# Set up the prior
default_param = np.array(list(toggle_model.listOfParameters.items()))[:, 1]
bound = []
for exp in default_param:
bound.append(float(exp.expression))
true_params = np.array(bound)
dmin = true_params * 0.5
dmax = true_params * 2.0
uni_prior = uniform_prior.UniformPrior(dmin, dmax)
default_fc_params = {'mean': None,
'variance': None,
'skewness': None,
'agg_autocorrelation':
[{'f_agg': 'mean', 'maxlag': 5},
{'f_agg': 'median', 'maxlag': 5},
{'f_agg': 'var', 'maxlag': 5}]}
summaries = auto_tsfresh.SummariesTSFRESH(features=default_fc_params)
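# With these settings each species contributes 6 summary statistics (mean,
# variance, skewness and 3 aggregated autocorrelations), so the 2 species of
# the toggle switch yield the 12 features asserted in the shape checks below.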
def test_stochmet_toggleswitch_10points():
# multi-processing mode
met = stochmet.StochMET(sim=simulator2, sampler=uni_prior, summarystats=summaries)
met.compute(n_points=10, chunk_size=2)
np.testing.assert_equal(met.data.s.shape, (10, 1, 12))
np.testing.assert_equal(met.data.ts.shape, (10, 1, 2, 101))
np.testing.assert_equal(met.data.x.shape, (10, 5))
np.testing.assert_equal(met.data.user_labels.shape, (10,))
# cluster-mode
c = Client()
met.compute(n_points=10, chunk_size=2)
np.testing.assert_equal(met.data.s.shape, (20, 1, 12))
np.testing.assert_equal(met.data.ts.shape, (20, 1, 2, 101))
np.testing.assert_equal(met.data.x.shape, (20, 5))
np.testing.assert_equal(met.data.user_labels.shape, (20,))
c.close()
def test_stochmet_toggleswitch_100points():
# multi-processing mode
met = stochmet.StochMET(sim=simulator2, sampler=uni_prior, summarystats=summaries)
met.compute(n_points=100, chunk_size=2)
np.testing.assert_equal(met.data.s.shape, (100, 1, 12))
np.testing.assert_equal(met.data.ts.shape, (100, 1, 2, 101))
np.testing.assert_equal(met.data.x.shape, (100, 5))
np.testing.assert_equal(met.data.user_labels.shape, (100,))
# cluster-mode
c = Client()
met.compute(n_points=100, chunk_size=2)
np.testing.assert_equal(met.data.s.shape, (200, 1, 12))
np.testing.assert_equal(met.data.ts.shape, (200, 1, 2, 101))
np.testing.assert_equal(met.data.x.shape, (200, 5))
np.testing.assert_equal(met.data.user_labels.shape, (200,))
c.close()
def test_stochmet_with_prediction():
uni_prior = uniform_prior.UniformPrior(dmin, true_params * 0.6)
met = stochmet.StochMET(sim=simulator2, sampler=uni_prior, summarystats=summaries)
met.compute(n_points=50, chunk_size=2)
x_0 = met.data.s.reshape((50, 12))
y_0 = np.zeros(50)
uni_prior = uniform_prior.UniformPrior(true_params * 1.5, dmax)
met = stochmet.StochMET(sim=simulator2, sampler=uni_prior, summarystats=summaries)
met.compute(n_points=50, chunk_size=2)
x_1 = met.data.s.reshape((50, 12))
y_1 = np.ones(50)
X = np.vstack((x_0, x_1))
y = np.hstack((y_0, y_1))
clf = SVC()
clf.fit(X, y)
def predictor(x):
return clf.predict(x)
# multi-processing mode
uni_prior = uniform_prior.UniformPrior(dmin, dmax)
met = stochmet.StochMET(sim=simulator2, sampler=uni_prior, summarystats=summaries)
met.compute(n_points=10, chunk_size=2, predictor=predictor)
np.testing.assert_equal(met.data.s.shape, (10, 1, 12))
np.testing.assert_equal(met.data.ts.shape, (10, 1, 2, 101))
np.testing.assert_equal(met.data.x.shape, (10, 5))
np.testing.assert_equal(met.data.user_labels.shape, (10,))
np.testing.assert_equal(met.data.y.shape, (10, 1))
# cluster-mode
c = Client()
met.compute(n_points=10, chunk_size=2, predictor=predictor)
np.testing.assert_equal(met.data.s.shape, (20, 1, 12))
np.testing.assert_equal(met.data.ts.shape, (20, 1, 2, 101))
np.testing.assert_equal(met.data.x.shape, (20, 5))
np.testing.assert_equal(met.data.user_labels.shape, (20,))
np.testing.assert_equal(met.data.y.shape, (20, 1))
c.close()
|
{"hexsha": "5d6af6cd0d94a697378f227de5e8ce0495c2a21b", "size": 7364, "ext": "py", "lang": "Python", "max_stars_repo_path": "sciope/tests/test_stochmet.py", "max_stars_repo_name": "sciope/sciope", "max_stars_repo_head_hexsha": "4da87ef9a1e1a5561286ce3eaffdb51183bf5c94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-05-21T18:56:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-02T20:09:43.000Z", "max_issues_repo_path": "sciope/tests/test_stochmet.py", "max_issues_repo_name": "vishalbelsare/sciope", "max_issues_repo_head_hexsha": "c3dfd7e5690bcc745d48989556fb29a9a64e0b11", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-10-16T08:11:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T09:35:46.000Z", "max_forks_repo_path": "sciope/tests/test_stochmet.py", "max_forks_repo_name": "sciope/sciope", "max_forks_repo_head_hexsha": "4da87ef9a1e1a5561286ce3eaffdb51183bf5c94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-05-23T09:09:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-02T20:09:45.000Z", "avg_line_length": 34.5727699531, "max_line_length": 86, "alphanum_fraction": 0.6687941336, "include": true, "reason": "import numpy", "num_tokens": 2009}
|
"""Correlation inferencer."""
import logging
import numpy as np
import sys
import scipy.sparse as ss
from scipy.special import betainc
from ..collections.graph import Graph
from .network_inferencer import NetworkInferencer
from ..utils.stats import CORRECTIONS_SIGNIFICANCE
logger = logging.getLogger(__name__.split('.')[-1])
correlation_preprocess = {
'pearson': lambda x: x.values.T,
'spearman': lambda x: x.rank().values.T
}
class Correlation(NetworkInferencer):
"""
Correlation inferencer.
Attributes:
method (str): correlation method.
correction (str): correction method.
confidence_threshold (float): confidence threshold.
"""
method = None
def __init__(
self,
method=method,
correction=None,
confidence_threshold=0.05,
**kwargs
):
"""
Initialize correlation inferencer.
Args:
method (str, optional): correlation method. Defaults to method.
correction (str, optional): correction method. Defaults to None.
confidence_threshold (float, optional): confidence threshold.
Defaults to 0.05.
"""
self.method = method
self.correction = correction
self.confidence_threshold = confidence_threshold
super().__init__(**kwargs)
def _infer_network(self, data):
"""
Infer the network.
Args:
data (pd.DataFrame): data to be used for the inference.
"""
logger.debug('inferring with {} correlation'.format(self.method))
entities = data.columns
# compute correlations
pre_processed = correlation_preprocess[self.method](data)
rho = np.corrcoef(pre_processed)
n = rho.shape[0]
logger.debug('computed correlation')
# compute corrections mask
if self.correction in CORRECTIONS_SIGNIFICANCE:
triu_indices = np.triu_indices(n, 1)
rhof = rho[triu_indices]
dof = pre_processed.shape[1] - 2
ts = rhof * rhof * (
dof / (1 - rhof * rhof + sys.float_info.epsilon)
)
pf = betainc(0.5 * dof, 0.5, dof / (dof + ts))
significants = CORRECTIONS_SIGNIFICANCE[
self.correction](pf, self.confidence_threshold)
mask = ss.lil_matrix(rho.shape, dtype=np.int8)
mask[triu_indices] = significants
            mask += (mask.T + ss.eye(n, dtype=np.int8))
rho = mask.multiply(rho)
self.graph = Graph(adjacency=rho, labels=entities.values)
logger.debug('inferred with {} correlation'.format(self.method))
def __str__(self):
"""
Get the name of the inferencer.
Returns:
str: name of the inferencer.
"""
return self.method
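# Hypothetical usage sketch (the DataFrame below is an illustrative assumption,
# not part of this module):
#   import pandas as pd
#   df = pd.DataFrame(np.random.rand(100, 5), columns=list("ABCDE"))
#   inferencer = Correlation(method="pearson")
#   inferencer._infer_network(df)   # the result is stored in inferencer.graph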
|
{"hexsha": "436dc4420ae48bf62ab9896620ce81d4fba8e7c8", "size": 2853, "ext": "py", "lang": "Python", "max_stars_repo_path": "cosifer/inferencers/correlation.py", "max_stars_repo_name": "C-nit/cosifer", "max_stars_repo_head_hexsha": "550b3ee1055bf1ceb8883ee8736c8d538ceb6ee4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-01-17T17:29:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T09:53:50.000Z", "max_issues_repo_path": "cosifer/inferencers/correlation.py", "max_issues_repo_name": "C-nit/cosifer", "max_issues_repo_head_hexsha": "550b3ee1055bf1ceb8883ee8736c8d538ceb6ee4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-19T14:28:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-14T18:20:46.000Z", "max_forks_repo_path": "cosifer/inferencers/correlation.py", "max_forks_repo_name": "C-nit/cosifer", "max_forks_repo_head_hexsha": "550b3ee1055bf1ceb8883ee8736c8d538ceb6ee4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-11-02T15:42:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-24T12:37:34.000Z", "avg_line_length": 31.0108695652, "max_line_length": 76, "alphanum_fraction": 0.6084822993, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 628}
|
const RectilinearPointLoad{dim, T, N, M} = Union{PointLoadCantilever{dim, T, N, M}, HalfMBB{dim, T, N, M}, LBeam{T, N, M}}
@params struct ElementMatrix{T, TM <: AbstractMatrix{T}} <: AbstractMatrix{T}
matrix::TM
mask
meandiag::T
end
ElementMatrix(matrix, mask) = ElementMatrix(matrix, mask, sumdiag(matrix)/size(matrix, 1))
const StaticMatrices{m, T} = Union{StaticMatrix{m, m, T}, Symmetric{T, <:StaticMatrix{m, m, T}}}
@generated function sumdiag(K::StaticMatrices{m,T}) where {m,T}
return reduce((ex1,ex2) -> :($ex1 + $ex2), [:(K[$j,$j]) for j in 1:m])
end
Base.size(m::ElementMatrix) = size(m.matrix)
Base.getindex(m::ElementMatrix, i...) = m.matrix[i...]
rawmatrix(m::ElementMatrix) = m.matrix
rawmatrix(m::Symmetric{T, <:ElementMatrix{T}}) where {T} = Symmetric(m.data.matrix)
@generated function bcmatrix(m::ElementMatrix{T, TM}) where {dim, T, TM <: StaticMatrix{dim, dim, T}}
expr = Expr(:tuple)
for j in 1:dim, i in 1:dim
push!(expr.args, :(ifelse(m.mask[$i] && m.mask[$j], m.matrix[$i,$j], zero(T))))
end
return :($(Expr(:meta, :inline)); $TM($expr))
end
@generated function bcmatrix(m::Symmetric{T, <:ElementMatrix{T, TM}}) where {dim, T, TM <: StaticMatrix{dim, dim, T}}
expr = Expr(:tuple)
for j in 1:dim, i in 1:dim
push!(expr.args, :(ifelse(m.data.mask[$i] && m.data.mask[$j], m.data.matrix[$i,$j], zero(T))))
end
return :($(Expr(:meta, :inline)); Symmetric($TM($expr)))
end
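# `bcmatrix` zeroes every entry whose row or column corresponds to a masked
# (boundary-constrained) local dof, leaving the remaining entries of the
# element matrix unchanged.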
@params struct ElementFEAInfo{dim, T}
Kes::AbstractVector{<:AbstractMatrix{T}}
fes::AbstractVector{<:AbstractVector{T}}
fixedload::AbstractVector{T}
cellvolumes::AbstractVector{T}
cellvalues::CellValues{dim, T}
facevalues::FaceValues{<:Any, T}
metadata::Metadata
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
cells
end
function ElementFEAInfo(sp, quad_order=2, ::Type{Val{mat_type}}=Val{:Static}) where {mat_type}
Kes, weights, dloads, cellvalues, facevalues = make_Kes_and_fes(sp, quad_order, Val{mat_type})
element_Kes = make_element_Kes(Kes, sp.ch.prescribed_dofs, sp.metadata.dof_cells)
fixedload = Vector(make_cload(sp))
assemble_f!(fixedload, sp, dloads)
cellvolumes = get_cell_volumes(sp, cellvalues)
cells = sp.ch.dh.grid.cells
ElementFEAInfo(element_Kes, weights, fixedload, cellvolumes, cellvalues, facevalues, sp.metadata, sp.black, sp.white, sp.varind, cells)
end
function make_element_Kes(Kes::AbstractVector{TMorSymm}, bc_dofs, dof_cells) where {N, T, TM <: StaticMatrix{N, N, T}, TMorSymm <: Union{TM, Symmetric{T, TM}}}
fill_matrix = zero(TM)
fill_mask = ones(SVector{N, Bool})
if TMorSymm <: Symmetric
element_Kes = fill(Symmetric(ElementMatrix(fill_matrix, fill_mask)), length(Kes))
else
element_Kes = fill(ElementMatrix(fill_matrix, fill_mask), length(Kes))
end
for i in bc_dofs
d_cells = dof_cells[i]
for c in d_cells
(cellid, localdof) = c
            if TMorSymm <: Symmetric
                Ke = element_Kes[cellid].data
                new_Ke = @set Ke.mask[localdof] = false
                element_Kes[cellid] = Symmetric(new_Ke)
            else
                Ke = element_Kes[cellid]
                element_Kes[cellid] = @set Ke.mask[localdof] = false
            end
end
end
for e in 1:length(element_Kes)
if eltype(element_Kes) <: Symmetric
Ke = element_Kes[e].data
matrix = Kes[e].data
Ke = @set Ke.matrix = matrix
element_Kes[e] = Symmetric(@set Ke.meandiag = sumdiag(Ke.matrix))
else
Ke = element_Kes[e]
matrix = Kes[e]
Ke = @set Ke.matrix = matrix
element_Kes[e] = @set Ke.meandiag = sumdiag(Ke.matrix)
end
end
element_Kes
end
function make_element_Kes(Kes::AbstractVector{TM}, bc_dofs, dof_cells) where {T, TM <: AbstractMatrix{T}}
    N = size(Kes[1], 1)
    fill_matrix = zeros(T, N, N)
fill_mask = ones(Bool, N)
if TM <: Symmetric
element_Kes = [Symmetric(deepcopy(ElementMatrix(fill_matrix, fill_mask))) for i in 1:length(Kes)]
else
element_Kes = [deepcopy(ElementMatrix(fill_matrix, fill_mask)) for i in 1:length(Kes)]
end
for i in bc_dofs
d_cells = dof_cells[i]
for c in d_cells
(cellid, localdof) = c
if TM <: Symmetric
Ke = element_Kes[cellid].data
else
Ke = element_Kes[cellid]
end
Ke.mask[localdof] = false
end
end
element_Kes
end
function get_cell_volumes(sp::StiffnessTopOptProblem{dim, T}, cellvalues) where {dim, T}
dh = sp.ch.dh
cellvolumes = zeros(T, getncells(dh.grid))
for (i, cell) in enumerate(CellIterator(dh))
reinit!(cellvalues, cell)
cellvolumes[i] = sum(JuAFEM.getdetJdV(cellvalues, q_point) for q_point in 1:JuAFEM.getnquadpoints(cellvalues))
end
return cellvolumes
end
mutable struct GlobalFEAInfo{T, TK<:AbstractMatrix{T}, Tf<:AbstractVector{T}}
K::TK
f::Tf
end
GlobalFEAInfo(::Type{T}) where T = GlobalFEAInfo{T}()
GlobalFEAInfo() = GlobalFEAInfo{Float64}()
GlobalFEAInfo{T}() where T = GlobalFEAInfo{T, SparseMatrixCSC{T, Int}, Vector{T}}(sparse(zeros(T, 0, 0)), zeros(T, 0))
GlobalFEAInfo(sp::StiffnessTopOptProblem) = GlobalFEAInfo(make_empty_K(sp), make_empty_f(sp))
make_empty_K(sp::StiffnessTopOptProblem) = Symmetric(create_sparsity_pattern(sp.ch.dh))
make_empty_f(sp::StiffnessTopOptProblem{dim, T}) where {dim, T} = zeros(T, ndofs(sp.ch.dh))
function make_Kes_and_fes(problem, quad_order=2)
make_Kes_and_fes(problem, quad_order, Val{:Static})
end
function make_Kes_and_fes(problem, ::Type{Val{mat_type}}) where mat_type
make_Kes_and_fes(problem, 2, Val{mat_type})
end
function make_Kes_and_fes(problem, quad_order, ::Type{Val{mat_type}}) where {mat_type}
T = floattype(problem)
dim = getdim(problem)
geom_order = getgeomorder(problem)
dh = getdh(problem)
E = getE(problem)
ν = getν(problem)
ρ = getdensity(problem)
refshape = JuAFEM.getrefshape(dh.field_interpolations[1])
λ = E*ν / ((1 + ν) * (1 - 2*ν))
μ = E / (2*(1 + ν))
δ(i,j) = i == j ? T(1) : T(0)
g(i,j,k,l) = λ*δ(i,j)*δ(k,l) + μ*(δ(i,k)*δ(j,l) + δ(i,l)*δ(j,k))
C = SymmetricTensor{4, dim}(g)
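# Isotropic linear elasticity: C_ijkl = λ*δ_ij*δ_kl + μ*(δ_ik*δ_jl + δ_il*δ_jk),
# with the Lamé parameters λ = E*ν/((1+ν)*(1-2ν)) and μ = E/(2*(1+ν)) computed above.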
# Shape functions and quadrature rule
interpolation_space = Lagrange{dim, refshape, geom_order}()
quadrature_rule = QuadratureRule{dim, refshape}(quad_order)
cellvalues = CellScalarValues(quadrature_rule, interpolation_space)
facevalues = FaceScalarValues(QuadratureRule{dim-1, refshape}(quad_order), interpolation_space)
# Calculate element stiffness matrices
n_basefuncs = getnbasefunctions(cellvalues)
Kes, weights = _make_Kes_and_weights(dh, Val{mat_type}, Val{n_basefuncs}, Val{dim*n_basefuncs}, C, ρ, quadrature_rule, cellvalues)
dloads = _make_dloads(weights, problem, facevalues)
return Kes, weights, dloads, cellvalues, facevalues
end
const g = [0., 9.81, 0.] # N/kg or m/s^2
function _make_Kes_and_weights(dh::DofHandler{dim, N, T}, ::Type{Val{mat_type}}, ::Type{Val{n_basefuncs}}, ::Type{Val{ndofs_per_cell}}, C, ρ, quadrature_rule, cellvalues) where {dim, N, T, mat_type, n_basefuncs, ndofs_per_cell}
# Calculate element stiffness matrices
Kesize = ndofs_per_cell
nel = getncells(dh.grid)
body_force = ρ .* g # Force per unit volume
if !(T === BigFloat)
if mat_type === :Static || mat_type === :SMatrix
MatrixType = SMatrix{Kesize, Kesize, T, Kesize^2}
VectorType = SVector{Kesize, T}
elseif mat_type === :MMatrix
MatrixType = MMatrix{Kesize, Kesize, T, Kesize^2}
VectorType = MVector{Kesize, T}
else
MatrixType = Matrix{T}
VectorType = Vector{T}
end
else
if mat_type === :Static || mat_type === :SMatrix || mat_type === :MMatrix
MatrixType = SizedMatrix{Kesize, Kesize, T, Kesize^2}
VectorType = SizedVector{Kesize, T}
else
MatrixType = Matrix{T}
VectorType = Vector{T}
end
end
if MatrixType <: StaticArray
Kes = Symmetric{T, MatrixType}[]
sizehint!(Kes, nel)
weights = [zeros(VectorType) for i in 1:nel]
Ke_e = zeros(T, dim, dim)
fe = zeros(T, Kesize)
Ke_0 = Matrix{T}(undef, Kesize, Kesize)
celliterator = CellIterator(dh)
for (k, cell) in enumerate(celliterator)
Ke_0 .= 0
reinit!(cellvalues, cell)
fe = weights[k]
for q_point in 1:getnquadpoints(cellvalues)
dΩ = getdetJdV(cellvalues, q_point)
for b in 1:n_basefuncs
∇ϕb = shape_gradient(cellvalues, q_point, b)
ϕb = shape_value(cellvalues, q_point, b)
for d2 in 1:dim
fe = @set fe[(b-1)*dim + d2] += ϕb * body_force[d2] * dΩ
for a in 1:n_basefuncs
∇ϕa = shape_gradient(cellvalues, q_point, a)
Ke_e .= dotdot(∇ϕa, C, ∇ϕb) * dΩ
for d1 in 1:dim
#if dim*(b-1) + d2 >= dim*(a-1) + d1
Ke_0[dim*(a-1) + d1, dim*(b-1) + d2] += Ke_e[d1,d2]
#end
end
end
end
end
end
weights[k] = fe
if MatrixType <: SizedMatrix # Work around because full constructor errors
push!(Kes, Symmetric(SizedMatrix{Kesize,Kesize,T}(Ke_0)))
else
push!(Kes, Symmetric(MatrixType(Ke_0)))
end
end
else
Kes = let Kesize=Kesize, nel=nel
[Symmetric(zeros(T, Kesize, Kesize), :U) for i = 1:nel]
end
weights = let Kesize=Kesize, nel=nel
[zeros(T, Kesize) for i = 1:nel]
end
Ke_e = zeros(T, dim, dim)
celliterator = CellIterator(dh)
for (k, cell) in enumerate(celliterator)
reinit!(cellvalues, cell)
fe = weights[k]
for q_point in 1:getnquadpoints(cellvalues)
dΩ = getdetJdV(cellvalues, q_point)
for b in 1:n_basefuncs
∇ϕb = shape_gradient(cellvalues, q_point, b)
ϕb = shape_value(cellvalues, q_point, b)
for d2 in 1:dim
fe[(b-1)*dim + d2] += ϕb * body_force[d2] * dΩ
for a in 1:n_basefuncs
∇ϕa = shape_gradient(cellvalues, q_point, a)
Ke_e .= dotdot(∇ϕa, C, ∇ϕb) * dΩ
for d1 in 1:dim
#if dim*(b-1) + d2 >= dim*(a-1) + d1
Kes[k].data[dim*(a-1) + d1, dim*(b-1) + d2] += Ke_e[d1,d2]
#end
end
end
end
end
end
end
end
return Kes, weights
end
function _make_dloads(fes, problem, facevalues)
dim = getdim(problem)
N = nnodespercell(problem)
T = floattype(problem)
dloads = deepcopy(fes)
for i in 1:length(dloads)
if eltype(dloads) <: SArray
dloads[i] = zero(eltype(dloads))
else
dloads[i] .= 0
end
end
pressuredict = getpressuredict(problem)
dh = getdh(problem)
grid = dh.grid
boundary_matrix = grid.boundary_matrix
cell_coords = zeros(JuAFEM.Vec{dim, T}, N)
n_basefuncs = getnbasefunctions(facevalues)
for k in keys(pressuredict)
t = -pressuredict[k] # traction = negative the pressure
faceset = getfacesets(problem)[k]
for (cellid, faceid) in faceset
            boundary_matrix[faceid, cellid] || error("Face $((cellid, faceid)) is not on the boundary.")
fe = dloads[cellid]
getcoordinates!(cell_coords, grid, cellid)
reinit!(facevalues, cell_coords, faceid)
for q_point in 1:getnquadpoints(facevalues)
dΓ = getdetJdV(facevalues, q_point) # Face area
normal = getnormal(facevalues, q_point) # Nomral vector at quad point
for i in 1:n_basefuncs
ϕ = shape_value(facevalues, q_point, i) # Shape function value
for d = 1:dim
if fe isa SArray
fe = @set fe[(i-1)*dim + d] += ϕ * t * normal[d] * dΓ
else
fe[(i-1)*dim + d] += ϕ * t * normal[d] * dΓ
end
end
end
end
dloads[cellid] = fe
end
end
return dloads
end
function make_cload(problem)
T = floattype(problem)
dim = getdim(problem)
cloads = getcloaddict(problem)
dh = getdh(problem)
metadata = getmetadata(problem)
node_dofs = metadata.node_dofs
inds = Int[]
vals = T[]
for nodeidx in keys(cloads)
for (dofidx, force) in enumerate(cloads[nodeidx])
if force != 0
dof = node_dofs[(nodeidx-1)*dim+dofidx]
push!(inds, dof)
push!(vals, force)
end
end
end
return sparsevec(inds, vals, ndofs(dh))
end
|
{"hexsha": "6591303f6d92d3325a1ce97c46bd4284db604908", "size": 13553, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TopOptProblems/matrices_and_vectors.jl", "max_stars_repo_name": "AlexanderBakerChris/TopOpt.jl", "max_stars_repo_head_hexsha": "eb9e8ee08f570a40d00eccef5c954353bffbad52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TopOptProblems/matrices_and_vectors.jl", "max_issues_repo_name": "AlexanderBakerChris/TopOpt.jl", "max_issues_repo_head_hexsha": "eb9e8ee08f570a40d00eccef5c954353bffbad52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TopOptProblems/matrices_and_vectors.jl", "max_forks_repo_name": "AlexanderBakerChris/TopOpt.jl", "max_forks_repo_head_hexsha": "eb9e8ee08f570a40d00eccef5c954353bffbad52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-09T17:25:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T17:25:38.000Z", "avg_line_length": 37.8575418994, "max_line_length": 227, "alphanum_fraction": 0.5782483583, "num_tokens": 3924}
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import logging
import numpy as np
import nevergrad.common.typing as tp
from nevergrad.parametrization import parameter as p
# run with LOGLEVEL=DEBUG for more debug information
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
class CrowdingDistance:
"""This class implements the calculation of crowding distance for NSGA-II."""
def accumulate_distance_per_objective(self, front: tp.List[p.Parameter], i: int):
if isinstance(front[0].losses, np.ndarray) and front[0].losses.shape != ():
is_multiobj: bool = len(front[0].losses) > 1 # isinstance(front[0].loss, np.ndarray)
else:
is_multiobj = False
assert (not is_multiobj and (i == 0)) or is_multiobj
# Sort the population by objective i
if is_multiobj:
front = sorted(front, key=lambda x: x.losses[i])
objective_minn = front[0].losses[i]
objective_maxn = front[-1].losses[i]
assert objective_minn <= objective_maxn
# Set the crowding distance
front[0]._meta["crowding_distance"] = float("inf")
front[-1]._meta["crowding_distance"] = float("inf")
# All other intermediate solutions are assigned a distance value equal
# to the absolute normalized difference in the function values of two
# adjacent solutions.
for j in range(1, len(front) - 1):
distance = front[j + 1].losses[i] - front[j - 1].losses[i]
# Check if minimum and maximum are the same (in which case do nothing)
if objective_maxn - objective_minn == 0:
pass # undefined
else:
distance = distance / float(objective_maxn - objective_minn)
logger.debug("front[j]: %s distance: %s", front[j].uid, distance)
# The overall crowding-distance value is calculated as the sum of
# individual distance values corresponding to each objective.
front[j]._meta["crowding_distance"] += distance
else:
front = sorted(front, key=lambda x: x.loss) # type: ignore
objective_minn = front[0].loss
objective_maxn = front[-1].loss
assert objective_minn <= objective_maxn
# Set the crowding distance
front[0]._meta["crowding_distance"] = float("inf")
front[-1]._meta["crowding_distance"] = float("inf")
# All other intermediate solutions are assigned a distance value equal
# to the absolute normalized difference in the function values of two
# adjacent solutions.
for j in range(1, len(front) - 1):
                if front[j + 1].loss is None or front[j - 1].loss is None:
                    distance = 0
                else:
                    distance = front[j + 1].loss - front[j - 1].loss
# Check if minimum and maximum are the same (in which case do nothing)
if objective_maxn - objective_minn == 0:
pass # undefined
else:
distance = distance / float(objective_maxn - objective_minn)
# The overall crowding-distance value is calculated as the sum of
# individual distance values corresponding to each objective.
front[j]._meta["crowding_distance"] += distance
def compute_distance(self, front: tp.List[p.Parameter]):
"""This function assigns the crowding distance to the solutions.
:param front: The list of solutions.
"""
size = len(front)
if size == 0:
return
# The boundary solutions (solutions with smallest and largest function values)
# are set to an infinite (maximum) distance value
if size == 1:
front[0]._meta["crowding_distance"] = float("inf")
return
if size == 2:
front[0]._meta["crowding_distance"] = float("inf")
front[1]._meta["crowding_distance"] = float("inf")
return
for f in front:
f._meta["crowding_distance"] = 0.0
if isinstance(front[0].losses, np.ndarray) and front[0].losses.shape != ():
number_of_objectives = len(front[0].losses)
else:
number_of_objectives = 1
for i in range(number_of_objectives):
self.accumulate_distance_per_objective(front, i)
def sort(self, candidates: tp.List[p.Parameter], in_place: bool = True) -> tp.List[p.Parameter]:
if in_place:
candidates.sort(
key=lambda elem: elem._meta["crowding_distance"], reverse=True
) # Larger -> Less crowded
return sorted(candidates, key=lambda elem: elem._meta["crowding_distance"], reverse=True)
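# Worked example (the losses are illustrative): for a front with losses
# [[1, 4], [2, 3], [3, 1]], the boundary points of each objective get an
# infinite crowding distance, while the middle point [2, 3] accumulates
# (3 - 1)/(3 - 1) + (4 - 1)/(4 - 1) = 2.0.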
class FastNonDominatedRanking:
"""Non-dominated ranking of NSGA-II proposed by Deb et al., see [Deb2002]"""
def compare(self, candidate1: p.Parameter, candidate2: p.Parameter) -> int:
"""Compare the domainance relation of two candidates.
:param candidate1: Candidate.
:param candidate2: Candidate.
"""
one_wins = np.sum(candidate1.losses < candidate2.losses)
two_wins = np.sum(candidate2.losses < candidate1.losses)
if one_wins > two_wins:
return -1
if two_wins > one_wins:
return 1
return 0
# pylint: disable=too-many-locals
def compute_ranking(
        self, candidates: tp.List[p.Parameter], k: tp.Optional[int] = None
) -> tp.List[tp.List[p.Parameter]]:
"""Compute ranking of candidates.
:param candidates: List of candidates.
:param k: Number of individuals.
"""
n_cand: int = len(candidates)
# dominated_by_cnt[i]: number of candidates dominating ith candidate
dominated_by_cnt: tp.List[int] = [0] * n_cand # [0 for _ in range(len(candidates))]
# candidates_dominated[i]: List of candidates dominated by ith candidate
candidates_dominated: tp.List[tp.List[int]] = [[] for _ in range(n_cand)]
# front[i] contains the list of solutions belonging to front i
front: tp.List[tp.List[int]] = [[] for _ in range(n_cand + 1)]
uid2candidate = {c.uid: c for c in candidates}
uids = [c.uid for c in candidates]
for c1 in range(n_cand - 1):
uid1 = uids[c1]
for c2 in range(c1 + 1, n_cand):
uid2 = uids[c2]
dominance_test_result = self.compare(uid2candidate[uid1], uid2candidate[uid2])
# self.number_of_comparisons += 1
if dominance_test_result == -1:
# c1 wins
candidates_dominated[c1].append(c2)
dominated_by_cnt[c2] += 1
elif dominance_test_result == 1:
# c2 wins
candidates_dominated[c2].append(c1)
dominated_by_cnt[c1] += 1
# Reset rank
for cand in candidates:
cand._meta["non_dominated_rank"] = float("inf")
        # Formation of front[0], i.e. candidates that are not dominated by any other candidate
front[0] = [c1 for c1 in range(n_cand) if dominated_by_cnt[c1] == 0]
last_fronts = 0
while len(front[last_fronts]) != 0:
last_fronts += 1
# Number of candidates in a frontier <= Number of candidates that dominate at least 1 candidate
assert len(front[last_fronts - 1]) <= len(candidates_dominated)
for c1 in front[last_fronts - 1]:
for c2 in candidates_dominated[c1]:
dominated_by_cnt[c2] -= 1
if dominated_by_cnt[c2] == 0:
front[last_fronts].append(c2)
# Convert index to uid
# Trim to frontiers that contain the k candidates of interest
ranked_sublists = []
count = 0
for front_i in range(last_fronts):
count += len(front[front_i])
for cand_i in front[front_i]:
uid2candidate[uids[cand_i]]._meta["non_dominated_rank"] = front_i
ranked_sublists.append([uid2candidate[uids[i]] for i in front[front_i]])
if (k is not None) and (count >= k):
break
return ranked_sublists
def rank(
population: tp.List[p.Parameter], n_selected: tp.Optional[int] = None
) -> tp.Dict[str, tp.Tuple[int, int, float]]:
"""implements the multi-objective ranking function of NSGA-II."""
frontier_ranker = FastNonDominatedRanking()
density_estimator = CrowdingDistance()
selected_pop: tp.Dict[str, tp.Tuple[int, int, float]] = {}
frontiers = frontier_ranker.compute_ranking(population)
count = 0
next_rank = 0
for front_i, p_frontier in enumerate(frontiers):
count += len(p_frontier)
if n_selected is None or count > n_selected:
density_estimator.compute_distance(p_frontier)
density_estimator.sort(p_frontier)
n_dist_calc = n_selected - len(selected_pop) if n_selected is not None else len(p_frontier)
for c_i in range(0, n_dist_calc):
selected_pop[p_frontier[c_i].uid] = (
next_rank,
front_i,
p_frontier[c_i]._meta["crowding_distance"],
)
next_rank += 1
if n_selected is not None:
break
if n_selected is not None:
for candidate in p_frontier:
selected_pop[candidate.uid] = (next_rank, front_i, float("inf"))
next_rank += 1
return selected_pop
|
{"hexsha": "0a6a76ff99417355a013d5f23dc21316984a26ad", "size": 10066, "ext": "py", "lang": "Python", "max_stars_repo_path": "nevergrad/optimization/multiobjective/nsga2.py", "max_stars_repo_name": "vishalbelsare/nevergrad", "max_stars_repo_head_hexsha": "f0fd681320609146e116322756cee9bf2388be9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nevergrad/optimization/multiobjective/nsga2.py", "max_issues_repo_name": "vishalbelsare/nevergrad", "max_issues_repo_head_hexsha": "f0fd681320609146e116322756cee9bf2388be9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nevergrad/optimization/multiobjective/nsga2.py", "max_forks_repo_name": "vishalbelsare/nevergrad", "max_forks_repo_head_hexsha": "f0fd681320609146e116322756cee9bf2388be9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T02:54:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T02:54:44.000Z", "avg_line_length": 42.8340425532, "max_line_length": 107, "alphanum_fraction": 0.5950725214, "include": true, "reason": "import numpy", "num_tokens": 2317}
|
import numpy as np
from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm
from pymoo.docs import parse_doc_string
from pymoo.model.survival import Survival
from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.operators.sampling.random_sampling import FloatRandomSampling
from pymoo.operators.selection.tournament_selection import TournamentSelection, compare
from pymoo.util.display import SingleObjectiveDisplay
from pymoo.util.termination.default import SingleObjectiveDefaultTermination
# =========================================================================================================
# Survival
# =========================================================================================================
class FitnessSurvival(Survival):
def __init__(self) -> None:
super().__init__(True)
def _do(self, problem, pop, n_survive, out=None, **kwargs):
F = pop.get("F")
if F.shape[1] != 1:
raise ValueError("FitnessSurvival can only used for single objective single!")
return pop[np.argsort(F[:, 0])[:n_survive]]
# =========================================================================================================
# Implementation
# =========================================================================================================
def comp_by_cv_and_fitness(pop, P, **kwargs):
S = np.full(P.shape[0], np.nan)
for i in range(P.shape[0]):
a, b = P[i, 0], P[i, 1]
# if at least one solution is infeasible
if pop[a].CV > 0.0 or pop[b].CV > 0.0:
S[i] = compare(a, pop[a].CV, b, pop[b].CV, method='smaller_is_better', return_random_if_equal=True)
        # both solutions are feasible - compare by objective value
        else:
            S[i] = compare(a, pop[a].F, b, pop[b].F, method='smaller_is_better', return_random_if_equal=True)
    return S[:, None].astype(int)
class GA(GeneticAlgorithm):
def __init__(self,
pop_size=100,
sampling=FloatRandomSampling(),
selection=TournamentSelection(func_comp=comp_by_cv_and_fitness),
crossover=SimulatedBinaryCrossover(prob=0.9, eta=3),
mutation=PolynomialMutation(prob=None, eta=5),
survival=FitnessSurvival(),
eliminate_duplicates=True,
n_offsprings=None,
display=SingleObjectiveDisplay(),
**kwargs):
"""
Parameters
----------
pop_size : {pop_size}
sampling : {sampling}
selection : {selection}
crossover : {crossover}
mutation : {mutation}
eliminate_duplicates : {eliminate_duplicates}
n_offsprings : {n_offsprings}
"""
super().__init__(pop_size=pop_size,
sampling=sampling,
selection=selection,
crossover=crossover,
mutation=mutation,
survival=survival,
eliminate_duplicates=eliminate_duplicates,
n_offsprings=n_offsprings,
display=display,
**kwargs)
self.default_termination = SingleObjectiveDefaultTermination()
parse_doc_string(GA.__init__)
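# Minimal usage sketch (the problem name and budget are illustrative; this
# relies on the standard pymoo entry points rather than anything in this file):
#   from pymoo.factory import get_problem
#   from pymoo.optimize import minimize
#   res = minimize(get_problem("sphere"), GA(pop_size=50), ("n_gen", 100), seed=1)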
|
{"hexsha": "c3760ce3b65e8b6234e97b55c10e267df65c09be", "size": 3459, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymoo/algorithms/so_genetic_algorithm.py", "max_stars_repo_name": "gabicavalcante/pymoo", "max_stars_repo_head_hexsha": "1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-01-06T01:10:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T15:39:43.000Z", "max_issues_repo_path": "pymoo/algorithms/so_genetic_algorithm.py", "max_issues_repo_name": "gabicavalcante/pymoo", "max_issues_repo_head_hexsha": "1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2022-01-03T19:36:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T03:57:58.000Z", "max_forks_repo_path": "pymoo/algorithms/so_genetic_algorithm.py", "max_forks_repo_name": "gabicavalcante/pymoo", "max_forks_repo_head_hexsha": "1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-22T08:01:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T08:53:58.000Z", "avg_line_length": 35.6597938144, "max_line_length": 111, "alphanum_fraction": 0.5455333912, "include": true, "reason": "import numpy", "num_tokens": 671}
|
import numpy as np;
from .problem import Problem;
from ..utils.random import RandomGeneratable, RandomGenerator;
class TranslateProblem:
def __init__(self, problem_cls, spread= [100,None]):
self._problem = problem_cls;
self._spread = spread;
def random(self, random_state, dimension,**kwargs):
return TranslatedProblem.random(random_state, dimension, self._problem, self._spread, **kwargs)
class TranslatedProblem(Problem, RandomGeneratable):
def __init__(self, problem, translation):
assert isinstance(problem, Problem);
super().__init__(problem.dimension);
self._problem = problem;
self._type = "Translated_"+problem.type;
self._params = dict(
translation = translation,
problem = problem
);
@staticmethod
def random(random_state, dimension, problem_cls, spread = [100,None], **kwargs):
problem = problem_cls.random(random_state, dimension, **kwargs);
if hasattr(spread,'__iter__'):
spread = spread[:dimension];
if spread[-1] is None:
spread = spread[:-1]+[0]*(dimension-len(spread)+1);
else:
            spread = [spread]*dimension;
assert len(spread) == dimension;
spread = np.array(spread);
translation = random_state.rand(dimension)*spread**2;
return TranslatedProblem(problem, np.sqrt(translation));
def fitness(self, xs):
super().fitness(xs);
xs_translated = xs - self._params['translation'];
return self._params['problem'].fitness(xs_translated);
@property
def optimum(self):
problem_opt = self._params['problem'].optimum;
return problem_opt + self._params['translation'];
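# Behaviour sketch, following the code above: for a translation vector t,
# fitness(x) == base_problem.fitness(x - t) and optimum == base_optimum + t.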
|
{"hexsha": "f89895d1e5892265b73e5b8a9b3848a6c6f79cbe", "size": 1756, "ext": "py", "lang": "Python", "max_stars_repo_path": "learnedevolution/problems/translated.py", "max_stars_repo_name": "realtwister/LearnedEvolution", "max_stars_repo_head_hexsha": "2ec49b50a49acae9693cfb05ac114dfbcc4aa337", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "learnedevolution/problems/translated.py", "max_issues_repo_name": "realtwister/LearnedEvolution", "max_issues_repo_head_hexsha": "2ec49b50a49acae9693cfb05ac114dfbcc4aa337", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learnedevolution/problems/translated.py", "max_forks_repo_name": "realtwister/LearnedEvolution", "max_forks_repo_head_hexsha": "2ec49b50a49acae9693cfb05ac114dfbcc4aa337", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.12, "max_line_length": 103, "alphanum_fraction": 0.6440774487, "include": true, "reason": "import numpy", "num_tokens": 372}
|
# ------------------------------------------------------------------------------
# @brief:
# ------------------------------------------------------------------------------
from .base_worker import base_worker
from mbbl.config import init_path
from mbbl.env.env_util import play_episode_with_env
from mbbl.util.common import logger
import numpy as np
class worker(base_worker):
def __init__(self, args, observation_size, action_size,
network_type, task_queue, result_queue, worker_id,
name_scope='planning_worker'):
# the base agent
super(worker, self).__init__(args, observation_size, action_size,
network_type, task_queue, result_queue,
worker_id, name_scope)
self._base_dir = init_path.get_base_dir()
self._previous_reward = -np.inf
# build the environments
self._build_env()
def _plan(self, planning_data):
raise NotImplementedError
def _play(self, planning_data):
if self.args.num_expert_episode_to_save > 0 and \
self._previous_reward > self._env_solved_reward and \
self._worker_id == 0:
start_save_episode = True
logger.info('Last episodic reward: %.4f' % self._previous_reward)
logger.info('Minimum reward of %.4f is needed to start saving'
% self._env_solved_reward)
logger.info('[SAVING] Worker %d will record its episode data'
% self._worker_id)
else:
start_save_episode = False
if self.args.num_expert_episode_to_save > 0 \
and self._worker_id == 0:
logger.info('Last episodic reward: %.4f' %
self._previous_reward)
logger.info('Minimum reward of %.4f is needed to start saving'
% self._env_solved_reward)
traj_episode = play_episode_with_env(
self._env, self._act,
{'use_random_action': planning_data['use_random_action'],
'record_flag': start_save_episode,
'num_episode': self.args.num_expert_episode_to_save,
'data_name': self.args.task + '_' + self.args.exp_id}
)
self._previous_reward = np.sum(traj_episode['rewards'])
return traj_episode
def _act(self, state,
control_info={'use_random_action': False}):
if 'use_random_action' in control_info and \
control_info['use_random_action']:
# use random policy
action = self._npr.uniform(-1, 1, [self._action_size])
return action, [-1], [-1]
else:
# call the policy network
return self._network['policy'][0].act({'start_state': state})
|
{"hexsha": "42786ee4051e5c6dee1a647f277e1e88fa636d64", "size": 2866, "ext": "py", "lang": "Python", "max_stars_repo_path": "mbbl_envs/mbbl/worker/mf_worker.py", "max_stars_repo_name": "hbutsuak95/iv_rl", "max_stars_repo_head_hexsha": "0f72a8f077a238237027ea96b7d1160c35ac9959", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2022-01-16T11:27:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T14:04:48.000Z", "max_issues_repo_path": "mbbl_envs/mbbl/worker/mf_worker.py", "max_issues_repo_name": "hbutsuak95/iv_rl", "max_issues_repo_head_hexsha": "0f72a8f077a238237027ea96b7d1160c35ac9959", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mbbl_envs/mbbl/worker/mf_worker.py", "max_forks_repo_name": "hbutsuak95/iv_rl", "max_forks_repo_head_hexsha": "0f72a8f077a238237027ea96b7d1160c35ac9959", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3661971831, "max_line_length": 80, "alphanum_fraction": 0.55931612, "include": true, "reason": "import numpy", "num_tokens": 575}
|
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from gensim.models.word2vec import Word2Vec
from csv import reader
TRAIN_FILE = "SampleSetConditions.csv"
TEST_FILE = "TestSet.csv"
print("loading samples...",end="")
X, y = [], []
with open(TRAIN_FILE,"r",encoding="utf-8") as infile:
csv_reader = reader(infile)
next(csv_reader,None) #ignore header
for row in csv_reader:
X.append(row[4].split())
y.append(row[1])
print("...done")
print ("total examples %s" % len(y))
print("vectorising...",end="")
# gensim < 4.0 API assumed here; in gensim 4.x use vector_size=,
# model.wv.index_to_key and model.wv.vectors instead
model = Word2Vec(X, size=100, window=5, min_count=5, workers=2)
w2v = {w: vec for w, vec in zip(model.wv.index2word, model.wv.vectors)}
print("...done")
class MeanEmbeddingVectorizer(BaseEstimator, TransformerMixin):
    # inheriting from BaseEstimator/TransformerMixin lets sklearn clone this
    # transformer inside cross_val_score
    def __init__(self, word2vec):
        self.word2vec = word2vec
        if len(word2vec) > 0:
            # use the constructor argument, not the global w2v
            self.dim = len(word2vec[next(iter(word2vec))])
        else:
            self.dim = 0
    def fit(self, X, y=None):
        return self
def transform(self, X):
return np.array([
np.mean([self.word2vec[w] for w in words if w in self.word2vec]
or [np.zeros(self.dim)], axis=0)
for words in X
])
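# A minimal sketch (hypothetical vectors) of what transform() computes:
# with word2vec = {"a": [1.0, 0.0], "b": [0.0, 1.0]},
# transform([["a", "b", "unseen"]]) yields [[0.5, 0.5]] -- the element-wise
# mean of the known word vectors; a document with no known words maps to
# the zero vector of length self.dim.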
svm_w2v = Pipeline([
("word2vec vectorizer", MeanEmbeddingVectorizer(w2v)),
("SVM", SVC())])
print("fitting...",end="")
svm_w2v.fit(X,y)
print("...done")
#======TESTING======
unsorted_scores = (cross_val_score(svm_w2v, X, y, cv=5).mean())
print("cross validation check accuracy:",round(unsorted_scores*100,2),"%\n")
#read from testSet.csv
tests, correct_cat = [], []
with open(TEST_FILE,"r",encoding="utf-8") as infile:
csv_reader = reader(infile)
for row in csv_reader:
tests.append(row[4].split())
correct_cat.append(row[1])
predictions = svm_w2v.predict(tests)
correct = 0
for x in range(len(predictions)):
if predictions[x] == correct_cat[x]:
print("\t-Test",x,"correctly categorised as",predictions[x])
correct+=1
else:
print("\t-Test",x,"FAILED. prediction:",predictions[x],"should be",correct_cat[x])
print("correct percentage:",round((correct/len(predictions))*100,2),"%")
|
{"hexsha": "a695d5d94a966bf01c54070c6619f637afc8a574", "size": 2236, "ext": "py", "lang": "Python", "max_stars_repo_path": "script.py", "max_stars_repo_name": "christocs/MiningTenemantsSVM", "max_stars_repo_head_hexsha": "b7f239b5a2f9d1a4b326b477922998dd4aa416e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script.py", "max_issues_repo_name": "christocs/MiningTenemantsSVM", "max_issues_repo_head_hexsha": "b7f239b5a2f9d1a4b326b477922998dd4aa416e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script.py", "max_forks_repo_name": "christocs/MiningTenemantsSVM", "max_forks_repo_head_hexsha": "b7f239b5a2f9d1a4b326b477922998dd4aa416e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-27T05:27:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-27T05:27:07.000Z", "avg_line_length": 26.9397590361, "max_line_length": 90, "alphanum_fraction": 0.6381932021, "include": true, "reason": "import numpy", "num_tokens": 600}
|
# Copyright 2018 Cognibit Solutions LLP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Generates classification report for the trained XGBoost models
"""
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix as cm
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report as report
def preprocessing(results, truth):
# preprocessing
results.loc[truth['before']==truth['after'],'truth']='RemainSelf'
results.loc[truth['before']!=truth['after'],'truth']='ToBeNormalized'
truth['class']=''
truth.loc[truth['before']!=truth['after'],'class']='ToBeNormalized'
truth.loc[truth['before']==truth['after'],'class']='RemainSelf'
return results, truth
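# For example: a row with before="2017" and after="twenty seventeen" is
# labelled ToBeNormalized, while before="the", after="the" stays RemainSelf.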
def f1_scores(results, truth):
print(report(truth['class'].tolist(), results['class'].tolist()))
def confusion_matrix(results, truth, lang):
matrix = cm(truth['class'].tolist(), results['class'].tolist())
plot_confusion_matrix(matrix, classes=['ToBeNormalized', 'RemainSelf'],
title='XGBoost Confusion Matrix [{}]'.format(lang))
def pr_curve(results, truth, lang):
truth.loc[truth['class']=='ToBeNormalized', 'class'] = 1
truth.loc[truth['class']=='RemainSelf', 'class'] = 0
results.loc[results['class']=='ToBeNormalized', 'class'] = 1
results.loc[results['class']=='RemainSelf', 'class'] = 0
average_precision = average_precision_score(truth['class'].tolist(), results['class'].tolist())
precision, recall, threshold = precision_recall_curve(truth['class'].tolist(), results['class'].tolist())
plt.step(recall, precision, color='b', alpha=0.2, where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall Curve: AP={0:0.2f} [{1}]'.format(average_precision, lang))
plt.show()
def plot_confusion_matrix(cm, classes,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
|
{"hexsha": "3941703025fe2beb80c23167be7d3cc82ac99a96", "size": 3317, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/classification_report.py", "max_stars_repo_name": "cognibit/Text-Normalization-Demo", "max_stars_repo_head_hexsha": "36355f4a2c5187948fe786b7318259151f9a9db6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2018-06-04T05:19:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T23:15:13.000Z", "max_issues_repo_path": "src/classification_report.py", "max_issues_repo_name": "cognibit/Text-Normalization-Demo", "max_issues_repo_head_hexsha": "36355f4a2c5187948fe786b7318259151f9a9db6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-07-02T14:44:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-03T14:54:24.000Z", "max_forks_repo_path": "src/classification_report.py", "max_forks_repo_name": "cognibit/Text-Normalization-Demo", "max_forks_repo_head_hexsha": "36355f4a2c5187948fe786b7318259151f9a9db6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-06-12T14:22:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T01:18:12.000Z", "avg_line_length": 38.1264367816, "max_line_length": 106, "alphanum_fraction": 0.6804341272, "include": true, "reason": "import numpy", "num_tokens": 791}
|
# Imports
import pyperf as perf
# import icclim
import numpy as np
x = np.array(np.random.rand(1000))
P = np.linspace(0.01, 0.99, 50)
def bench_argsort():
# np.quantile(x, P)
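    # x.argsort().argsort() ranks each element; dividing by len(x) turns
    # ranks into empirical quantiles in [0, 1), e.g. [0.3, 0.1, 0.2] ->
    # [2/3, 0, 1/3]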
x.argsort().argsort() / len(x)
def bench_quantile():
np.quantile(x, P)
# x.argsort.argsort()
runner = perf.Runner()
runner.bench_func('Quantile - argsort', bench_argsort)
runner.bench_func('Quantile - func', bench_quantile)
|
{"hexsha": "046d0c0aa5f5ad346722d7895d0af1c85a1c7469", "size": 422, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/quantile_bench.py", "max_stars_repo_name": "Ouranosinc/xclim-benchmark", "max_stars_repo_head_hexsha": "72fbb5db0cd29df6f263c536529fe815ac3cca48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/quantile_bench.py", "max_issues_repo_name": "Ouranosinc/xclim-benchmark", "max_issues_repo_head_hexsha": "72fbb5db0cd29df6f263c536529fe815ac3cca48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-01-16T22:15:56.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-05T14:42:41.000Z", "max_forks_repo_path": "scripts/quantile_bench.py", "max_forks_repo_name": "Ouranosinc/xclim-benchmark", "max_forks_repo_head_hexsha": "72fbb5db0cd29df6f263c536529fe815ac3cca48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.347826087, "max_line_length": 54, "alphanum_fraction": 0.682464455, "include": true, "reason": "import numpy", "num_tokens": 127}
|
theory SINVAR_NoRefl_impl
imports SINVAR_NoRefl "../TopoS_Interface_impl"
begin
code_identifier code_module SINVAR_NoRefl_impl => (Scala) SINVAR_NoRefl
subsubsection \<open>SecurityInvariant NoRefl List Implementation\<close>
fun sinvar :: "'v list_graph \<Rightarrow> ('v \<Rightarrow> node_config) \<Rightarrow> bool" where
"sinvar G nP = (\<forall> (s,r) \<in> set (edgesL G). s = r \<longrightarrow> nP s = Refl)"
definition NoRefl_offending_list:: "'v list_graph \<Rightarrow> ('v \<Rightarrow> node_config) \<Rightarrow> ('v \<times> 'v) list list" where
"NoRefl_offending_list G nP = (if sinvar G nP then
[]
else
[ [e \<leftarrow> edgesL G. case e of (e1,e2) \<Rightarrow> e1 = e2 \<and> nP e1 = NoRefl] ])"
definition "NetModel_node_props P = (\<lambda> i. (case (node_properties P) i of Some property \<Rightarrow> property | None \<Rightarrow> SINVAR_NoRefl.default_node_properties))"
definition "NoRefl_eval G P = (wf_list_graph G \<and>
sinvar G (SecurityInvariant.node_props SINVAR_NoRefl.default_node_properties P))"
interpretation NoRefl_impl:TopoS_List_Impl
where default_node_properties=SINVAR_NoRefl.default_node_properties
and sinvar_spec=SINVAR_NoRefl.sinvar
and sinvar_impl=sinvar
and receiver_violation=SINVAR_NoRefl.receiver_violation
and offending_flows_impl=NoRefl_offending_list
and node_props_impl=NetModel_node_props
and eval_impl=NoRefl_eval
apply(unfold TopoS_List_Impl_def)
apply(rule conjI)
apply(simp add: TopoS_NoRefl list_graph_to_graph_def)
apply(rule conjI)
apply(simp add: list_graph_to_graph_def NoRefl_offending_set NoRefl_offending_set_def NoRefl_offending_list_def)
apply(rule conjI)
apply(simp only: NetModel_node_props_def)
apply(metis NoRefl.node_props.simps NoRefl.node_props_eq_node_props_formaldef)
apply(simp only: NoRefl_eval_def)
apply(simp add: TopoS_eval_impl_proofrule[OF TopoS_NoRefl])
apply(simp add: list_graph_to_graph_def)
done
subsubsection \<open>NoRefl packing\<close>
definition SINVAR_LIB_NoRefl :: "('v::vertex, node_config) TopoS_packed" where
"SINVAR_LIB_NoRefl \<equiv>
\<lparr> nm_name = ''NoRefl'',
nm_receiver_violation = SINVAR_NoRefl.receiver_violation,
nm_default = SINVAR_NoRefl.default_node_properties,
nm_sinvar = sinvar,
nm_offending_flows = NoRefl_offending_list,
nm_node_props = NetModel_node_props,
nm_eval = NoRefl_eval
\<rparr>"
interpretation SINVAR_LIB_NoRefl_interpretation: TopoS_modelLibrary SINVAR_LIB_NoRefl
SINVAR_NoRefl.sinvar
apply(unfold TopoS_modelLibrary_def SINVAR_LIB_NoRefl_def)
apply(rule conjI)
apply(simp)
apply(simp)
by(unfold_locales)
text \<open>Examples\<close>
definition example_net :: "nat list_graph" where
"example_net \<equiv> \<lparr> nodesL = [1::nat,2,3],
edgesL = [(1,2),(2,2),(2,1),(1,3)] \<rparr>"
lemma "wf_list_graph example_net" by eval
definition example_conf where
"example_conf \<equiv> ((\<lambda>e. SINVAR_NoRefl.default_node_properties)(2:= Refl))"
lemma "sinvar example_net example_conf" by eval
lemma "NoRefl_offending_list example_net (\<lambda>e. SINVAR_NoRefl.default_node_properties) = [[(2, 2)]]" by eval
hide_const (open) NetModel_node_props
hide_const (open) sinvar
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_NoRefl_impl.thy"}
|
from pathlib import Path
from shutil import copyfile
import pandas as pd
import numpy as np
import unicodedata
from haversine import haversine
import time
import ast
import re
from sklearn.metrics import average_precision_score
import statistics
"""
Evaluate ranking for MAP
"""
def find_closest_distance(altname, gscoords):
"""
This method returns the distance (in kilometers) between the
candidate location and the gold standard coordinates. In the
case that a candidate name in the gazetteer can refer to more
than one entity, we select the entity closest to the gold
standard coordinates.
"""
tCoords = [list(k) for k in altname.values]
distance = 100000 # we instantiate "distance" with an impossibly large distance
for candCoord in tCoords:
candDistance = haversine(candCoord, gscoords)
if candDistance <= distance:
distance = candDistance
return distance
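# A minimal sketch (hypothetical coordinates) of the behaviour above: with
# two gazetteer entries for the same altname, the nearer one determines the
# returned distance, e.g.
#   find_closest_distance(pd.DataFrame([[51.50, -0.12], [40.71, -74.01]],
#                                      columns=["lat", "lon"]),
#                         (51.50, -0.10))   # ~1.4 km: the first entry wins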
def mapeval_candidates(cand_distance, gazetteer, coords, km, maxCands, metrics, lowercase):
if type(cand_distance) == list:
cand_distance = cand_distance[0]
candidates_fd = sorted(cand_distance.items(), key=lambda kv: kv[1])[:maxCands]
highest = 0.0
try:
highest = candidates_fd[-1][1]
except IndexError:
highest = 0.0
candidates = []
for c in candidates_fd:
candidates.append(c[0])
closest_candidates = []
for cand in candidates:
if lowercase:
candcoords = gazetteer[gazetteer["altname"] == unicodedata.normalize('NFKD', str(cand.lower()))][["lat", "lon"]]
else:
candcoords = gazetteer[gazetteer["altname"] == unicodedata.normalize('NFKD', str(cand))][["lat", "lon"]]
closest_candidates.append(find_closest_distance(candcoords, coords))
y_truearray = []
y_scorearray = []
for i in range(len(closest_candidates)):
if closest_candidates[i] <= km:
y_truearray.append(1)
else:
y_truearray.append(0)
if metrics == "faiss":
if highest == 0.0:
y_scorearray.append(0.0)
else:
y_scorearray.append(1.0 - cand_distance[candidates[i]]/highest)
else:
y_scorearray.append(1.0 - cand_distance[candidates[i]])
return y_truearray, y_scorearray
def evaluate_ranking(gazetteer_name, candrank_dataset, deezymatch_model):
maxCands = 20 # Candidates cutoff for MAP
# if not Path("mapped_results/DeezyMapEval_" + candrank_dataset + "_" + gazetteer_name + "_" + deezymatch_model + ".txt", "w").is_file() and not Path("mapped_results/LevDamMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt").is_file() and not Path("mapped_results/ExactMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt").is_file():
# Load gazetteer (for DeezyMatch)
gazetteer = pd.read_pickle("../datasets/gazetteers/" + gazetteer_name + ".pkl")
gazetteer = gazetteer[gazetteer['lat'].notna()]
gazetteer = gazetteer[gazetteer['lon'].notna()]
gazetteer["altname"] = gazetteer["altname"].str.normalize("NFKD")
# Load gazetteer and lower-case it (for LevDam)
gazetteer_lc = pd.read_pickle("../datasets/gazetteers/" + gazetteer_name + ".pkl")
gazetteer_lc = gazetteer_lc[gazetteer_lc['lat'].notna()]
gazetteer_lc = gazetteer_lc[gazetteer_lc['lon'].notna()]
gazetteer_lc["altname"] = gazetteer_lc["altname"].str.lower().str.normalize("NFKD")
# Load gold standard dataset
datasetdf = pd.read_pickle("../datasets/candidate_ranking_datasets/" + candrank_dataset + ".pkl")
datasetdf = datasetdf[(datasetdf['lat'].notnull()) & (datasetdf['lon'].notnull())]
datasetdf["toponym"] = datasetdf["toponym"].str.normalize("NFKD")
# Load DeezyMatch results
deezyresultsdf = pd.read_pickle("ranker_results/" + candrank_dataset + "_" + gazetteer_name + "_" + deezymatch_model + ".pkl")
deezyresultsdf["toponym"] = deezyresultsdf["query"].str.normalize("NFKD")
# List of unique toponyms
toponyms = list(datasetdf["toponym"].unique())
# Gold standard dictionary: {toponym: (lat, lon)}
gold_standard = dict()
for i, row in datasetdf.iterrows():
toponym = unicodedata.normalize('NFKD', str(row["toponym"]))
coords = (row["lat"], row["lon"])
if candrank_dataset == "wotr_test":
coords = (float(row["lat"]), float(row["lon"]))
if toponym in gold_standard:
if not coords in gold_standard[toponym]:
gold_standard[toponym].append(coords)
else:
gold_standard[toponym] = [coords]
# Load LevDam results
levdamresults = pd.read_pickle("levdam_results/" + candrank_dataset + "_" + gazetteer_name + ".pkl")
levdamresults["toponym"] = levdamresults["toponym"].str.normalize("NFKD")
mapDeezy = dict()
mapLevdam = dict()
mapExact = dict()
# Store mapped ranking
with open("mapped_results/DeezyMapEval_" + candrank_dataset + "_" + gazetteer_name + "_" + deezymatch_model + ".txt", "w") as fw1, open("mapped_results/LevDamMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt", "w") as fw2, open("mapped_results/ExactMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt", "w") as fw3:
for toponym in gold_standard:
print(toponym)
toponym = unicodedata.normalize('NFKD', toponym)
gscoords = gold_standard[toponym]
gscoords = [coords for coords in gscoords if type(coords[0]) == float and type(coords[1]) == float]
for coords in gscoords:
            # Deezy: find candidates (re.escape guards against regex
            # metacharacters in toponyms)
            dzcands = deezyresultsdf[deezyresultsdf["toponym"].str.lower().str.contains("^" + re.escape(toponym.lower()) + "$")]
            # LevDam: find candidates
            ldcands = levdamresults[levdamresults["toponym"].str.lower().str.contains("^" + re.escape(toponym.lower()) + "$")]
# Exact: find candidates
exact_results = mapeval_candidates({toponym.lower(): 0.0}, gazetteer_lc, coords, 10, maxCands, "", True)
if not dzcands.empty and not ldcands.empty:
deezymap = mapeval_candidates(dzcands.iloc[0]["faiss_distance"], gazetteer, coords, 10, maxCands, "faiss", False)
fw1.write(toponym + "\t" + str(coords[0]) + "\t" + str(coords[1]) + "\t" + str(deezymap[0]) + "\t" + str(deezymap[1]) + "\n")
levdammap = mapeval_candidates(ldcands.iloc[0]["fuzzyCandidatesLevDam"], gazetteer_lc, coords, 10, maxCands, "levdam", True)
fw2.write(toponym + "\t" + str(coords[0]) + "\t" + str(coords[1]) + "\t" + str(levdammap[0]) + "\t" + str(levdammap[1]) + "\n")
fw3.write(toponym + "\t" + str(coords[0]) + "\t" + str(coords[1]) + "\t" + str(exact_results[0]) + "\t" + str(exact_results[1]) + "\n")
"""
Compute the MAP score
"""
def clip_candidates(a, b, numCandidates):
"""
Clip retrieved candidates to numCandidates or, in case
one method retrieived a smaller list than numCandidates,
clip retrieved candidates to the same number as the smaller
list.
"""
a = a[:numCandidates]
b = b[:numCandidates]
if len(b) <= len(a):
a = a[:len(b)]
return a
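# For instance (hypothetical lists): clip_candidates([1, 0, 1, 0], [1, 0], 3)
# first clips both arguments to 3 elements, then shortens a to len(b) == 2,
# returning [1, 0].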
def map_score(gazetteer_name, candrank_dataset, deezymatch_model, numCandidates):
dfdeezy = pd.read_csv("mapped_results/DeezyMapEval_" + candrank_dataset + "_" + gazetteer_name + "_" + deezymatch_model + ".txt", sep="\t", index_col=False, header=None, usecols = [0, 1, 2, 3, 4], names = ["toponym", "lat", "lon", "dm_label", "dm_score"], na_filter=True)
dflevdam = pd.read_csv("mapped_results/LevDamMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt", sep="\t", index_col=False, header=None, usecols = [0, 1, 2, 3, 4], names = ["toponym", "lat", "lon", "ld_label", "ld_score"], na_filter=True)
dfexact = pd.read_csv("mapped_results/ExactMapEval_" + candrank_dataset + "_" + gazetteer_name + ".txt", sep="\t", index_col=False, header=None, usecols = [0, 1, 2, 3, 4], names = ["toponym", "lat", "lon", "exact_label", "exact_score"], na_filter=True)
dfboth = pd.merge(dfdeezy, dflevdam, on=['toponym', 'lat', 'lon'])
if numCandidates == 1:
dfboth = pd.merge(dfboth, dfexact, on=['toponym', 'lat', 'lon'])
# Remove rows for which we don't have results:
dfboth = dfboth[dfboth.astype(str)['dm_score'] != '[]']
dfboth = dfboth[dfboth.astype(str)['ld_score'] != '[]']
# Convert scores and labels to array
dfboth['dm_label'] = dfboth['dm_label'].apply(lambda x: ast.literal_eval(x))
dfboth['dm_score'] = dfboth['dm_score'].apply(lambda x: ast.literal_eval(x))
dfboth['ld_label'] = dfboth['ld_label'].apply(lambda x: ast.literal_eval(x))
dfboth['ld_score'] = dfboth['ld_score'].apply(lambda x: ast.literal_eval(x))
# Calculate average precision score:
dfboth["dm_ap"] = dfboth.apply(lambda x: average_precision_score(x["dm_label"], x["dm_score"]), axis=1)
dfboth["ld_ap"] = dfboth.apply(lambda x: average_precision_score(x["ld_label"], x["ld_score"]), axis=1)
    # Drop rows where neither DM nor LD has a correct match among the first
    # twenty candidates:
    dfboth = dfboth.dropna(subset=["dm_ap", "ld_ap"], how='all')
# Clip number of candidates:
dfboth['dm_label'] = dfboth.apply(lambda x: clip_candidates(x['dm_label'], x['ld_label'], numCandidates), axis=1)
dfboth['dm_score'] = dfboth.apply(lambda x: clip_candidates(x['dm_score'], x['ld_score'], numCandidates), axis=1)
dfboth['ld_label'] = dfboth.apply(lambda x: clip_candidates(x['ld_label'], x['dm_label'], numCandidates), axis=1)
dfboth['ld_score'] = dfboth.apply(lambda x: clip_candidates(x['ld_score'], x['dm_score'], numCandidates), axis=1)
# Calculate average precision score:
dfboth["dm_ap"] = dfboth.apply(lambda x: average_precision_score(x["dm_label"], x["dm_score"]), axis=1)
dfboth["ld_ap"] = dfboth.apply(lambda x: average_precision_score(x["ld_label"], x["ld_score"]), axis=1)
# Otherwise, convert nan to zeros:
dfboth["ld_ap"] = dfboth["ld_ap"].fillna(0)
dfboth["dm_ap"] = dfboth["dm_ap"].fillna(0)
if numCandidates == 1:
dfboth['exact_label'] = dfboth['exact_label'].apply(lambda x: ast.literal_eval(x))
dfboth['exact_score'] = dfboth['exact_score'].apply(lambda x: ast.literal_eval(x))
dfboth["exact_ap"] = dfboth.apply(lambda x: average_precision_score(x["exact_label"], x["exact_score"]), axis=1)
dfboth["exact_ap"] = dfboth["exact_ap"].fillna(0)
print("EXACT P@1", round(dfboth.exact_ap.mean(),2))
print("DM P@1", round(dfboth.dm_ap.mean(),2))
print("LD P@1", round(dfboth.ld_ap.mean(),2))
else:
print("DM MAP", round(dfboth.dm_ap.mean(),2))
print("LD MAP", round(dfboth.ld_ap.mean(),2))
print()
|
{"hexsha": "a513cfd6f1e3fcb42c94f81ec475b123d68ccc2f", "size": 10931, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/evaluation_functions.py", "max_stars_repo_name": "Living-with-machines/LwM_SIGSPATIAL2020_ToponymMatching", "max_stars_repo_head_hexsha": "41fd23288b49cbd96d2ddd4ec763606e70e430a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-15T18:31:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T18:31:11.000Z", "max_issues_repo_path": "experiments/evaluation_functions.py", "max_issues_repo_name": "Living-with-machines/LwM_SIGSPATIAL2020_ToponymMatching", "max_issues_repo_head_hexsha": "41fd23288b49cbd96d2ddd4ec763606e70e430a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-02T14:00:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-03T15:32:56.000Z", "max_forks_repo_path": "experiments/evaluation_functions.py", "max_forks_repo_name": "Living-with-machines/LwM_SIGSPATIAL2020_ToponymMatching", "max_forks_repo_head_hexsha": "41fd23288b49cbd96d2ddd4ec763606e70e430a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.0179372197, "max_line_length": 354, "alphanum_fraction": 0.6444973013, "include": true, "reason": "import numpy", "num_tokens": 3016}
|
import argparse
import numpy as np
from gym_duckietown.envs import DuckietownEnv
import torch
import os
import sys
import cv2
import math
sys.path.append(os.path.join(os.path.dirname(__file__), "./gym-duckietown/learning/"))
sys.path.append(os.path.join(os.path.dirname(__file__), "./gym-duckietown/learning/reinforcement/pytorch"))
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.utils import get_vec_normalize
import ss_detector
import duckietown_model
from particle_filter import ParticleFilter
# CONSTANTS
SEEDS = {
"map1": [2, 3, 5, 9, 12],
"map2": [1, 2, 3, 5, 7, 8, 13, 16],
"map3": [1, 2, 4, 8, 9, 10, 15, 21],
"map4": [1, 2, 3, 4, 5, 7, 9, 10, 16, 18],
"map5": [1, 2, 4, 5, 7, 8, 9, 10, 16, 23]
}
HARD_SEEDS = {
"map1": [],
"map2": [7],
"map3": [],
"map4": [7],
"map5": [8]
}
SS_TRACK_THRES = 2.0
NUM_PARTICLES = 100
NUM_RANDOM_PARTICLES = 10
CLAMP_SPEED_DIST = 0.3 # allow some error
# declare the arguments
parser = argparse.ArgumentParser()
# Do not change this
parser.add_argument('--max_steps', type=int, default=1500, help='max_steps')
# You should set them to different map name and seed accordingly
parser.add_argument('--map-name', default='map5')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--load-dir', default='./model/')
parser.add_argument('--no-render', action="store_true", default=False)
args = parser.parse_args()
# please remove this line for your own policy
try:
actor_critic, obs_rms = torch.load(os.path.join(args.load_dir, "duckietown_" + args.map_name + "_s" + str(args.seed) + ".pt"), map_location="cpu")
print("load seed-specific model")
except Exception as e:
print(e)
actor_critic, obs_rms = torch.load(os.path.join(args.load_dir, "duckietown_" + args.map_name + ".pt"), map_location="cpu")
print("load default model")
recurrent_hidden_states = torch.zeros(1, actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
rl_env = make_vec_envs(
args.map_name,
[args.seed],
1,
None,
None,
device="cpu",
allow_early_resets=False)
rl_obs = rl_env.reset()
env = DuckietownEnv(
map_name = args.map_name,
domain_rand = False,
draw_bbox = False,
max_steps = args.max_steps,
seed = args.seed
)
obs = env.reset()
if not args.no_render:
env.render()
pf = None
total_reward = 0
rl_total_reward = 0
step = 0
gt_pos_ss = None
tmp_dist_ss_pf = 0
actions = []
while step < args.max_steps:
with torch.no_grad():
value, rl_action, _, recurrent_hidden_states = actor_critic.act(rl_obs, recurrent_hidden_states, masks, deterministic=True)
# if step <= 10:
# rl_action[0][0] = 0
# rl_action[0][1] = -1
# rl_action[0][0] = max(min(rl_action[0][0], 0.7), 0)
# rl_action[0][1] = max(min(rl_action[0][1], 0.875), -0.875)
rl_action[0][0] = max(min(rl_action[0][0], 0.8), 0)
rl_action[0][1] = max(min(rl_action[0][1], 1), -1)
obs = cv2.cvtColor(obs, cv2.COLOR_RGB2BGR)
# print(obs.shape)
# cv2.imshow("obs_np", obs)
# cv2.waitKey(1)
pos_ss_r = ss_detector.detect_stopsign(obs)
if pos_ss_r is not None:
pos_ss_r = duckietown_model.correct_ss_obs(pos_ss_r)
dist_to_ss = math.sqrt(pos_ss_r[0] ** 2 + pos_ss_r[1] ** 2)
if dist_to_ss < SS_TRACK_THRES:
if pf is None:
print("-----------Stop sign detected, start tracking!!!")
initial_particles_x = np.random.normal(pos_ss_r[0], duckietown_model.STD_X, NUM_PARTICLES).reshape(-1, 1)
initial_particles_y = np.random.normal(pos_ss_r[1], duckietown_model.STD_Y, NUM_PARTICLES).reshape(-1, 1)
initial_particles = np.concatenate((initial_particles_x, initial_particles_y), axis=1)
print(initial_particles.shape)
pf = ParticleFilter(initial_particles, duckietown_model.transit_state, duckietown_model.measurement_prob)
else:
print("----------Stop sign detected, pf update!!!")
ss_pos = pf.get_estimate()
print("predict: {}, meas: {}".format(ss_pos, pos_ss_r), end=" ")
pf.update(pos_ss_r)
tmp_dist_ss_pf = math.sqrt(ss_pos[0] ** 2 + ss_pos[1] ** 2)
if pf is not None:
ss_pos = pf.get_estimate()
dist_to_ss = math.sqrt(ss_pos[0] ** 2 + ss_pos[1] ** 2)
gt_pos_ss = ss_detector.detect_stopsign_gt(env, ss_pos)
if gt_pos_ss is not None:
gt_dist_to_ss = math.sqrt(gt_pos_ss[0] ** 2 + gt_pos_ss[1] ** 2)
print("pf estimate: {}, gt: {}".format(ss_pos, gt_pos_ss))
print("pf estimate: {}, gt: {}".format(dist_to_ss, gt_dist_to_ss))
# to prevent particle collapse
random_particles_x = np.random.normal(ss_pos[0], 0.5, NUM_RANDOM_PARTICLES).reshape(-1, 1)
random_particles_y = np.random.normal(ss_pos[1], 0.5, NUM_RANDOM_PARTICLES).reshape(-1, 1)
        random_particles = np.concatenate((random_particles_x, random_particles_y), axis=1)
pf.add_random_samples(random_particles)
if dist_to_ss <= CLAMP_SPEED_DIST + tmp_dist_ss_pf * 0.6:
print("----------Close to stop sign, clamp speed to 0.15m/s!!!")
rl_action[0][0] = max(min(rl_action[0][0], 0.08), 0)
rl_action[0][1] = max(min(rl_action[0][1], 0.11), -0.08)
# rl_action[0][0] = max(min(rl_action[0][0], 0.1), 0)
# rl_action[0][1] = max(min(rl_action[0][1], 0.125), -0.125)
if dist_to_ss >= SS_TRACK_THRES:
print("-----------Too far from stop sign, stop tracking!!!")
pf = None
rl_obs, rl_reward, done, info = rl_env.step(rl_action)
masks.fill_(0.0 if done[0] else 1.0)
action = rl_action[0].numpy()
# action[0] = max(min(action[0], 0.8), 0)
# action[1] = max(min(action[1], 1), -1)
print(action)
actions.append(action)
obs, reward, done, info = env.step(action)
rl_total_reward += rl_reward[0][0].item()
total_reward += reward
step += 1
if done:
print("Done!!")
break
if reward <= -100:
print("Didnt stop!!")
break
# print('Steps = %s, Timestep Reward=%.3f, rl_total_reward=%.3f, Total Reward=%.3f' % (step, reward, rl_total_reward, total_reward))
if pf is not None:
pf.predict(action)
if not args.no_render:
env.render()
# step += 1
print("step_cnt", step)
print("Total Reward", total_reward)
# dump the controls using numpy
if args.map_name == "map1" or step >= 1500:
actions = np.array(actions)
np.savetxt('./control_files/{}_seed{}.txt'.format(args.map_name, args.seed), actions, delimiter=',')
print("control file saved!!")
|
{"hexsha": "93e30175e24963af9ee34dca42b94662342c77f6", "size": 6768, "ext": "py", "lang": "Python", "max_stars_repo_path": "our_policy.py", "max_stars_repo_name": "lyf44/CS4278-5478-Project-Materials", "max_stars_repo_head_hexsha": "685419c65847e72450e99586e9e0f3794369b4a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "our_policy.py", "max_issues_repo_name": "lyf44/CS4278-5478-Project-Materials", "max_issues_repo_head_hexsha": "685419c65847e72450e99586e9e0f3794369b4a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "our_policy.py", "max_forks_repo_name": "lyf44/CS4278-5478-Project-Materials", "max_forks_repo_head_hexsha": "685419c65847e72450e99586e9e0f3794369b4a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.671641791, "max_line_length": 150, "alphanum_fraction": 0.632535461, "include": true, "reason": "import numpy", "num_tokens": 2016}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 5 23:00:34 2017
@author: shenda
"""
from collections import Counter
import numpy as np
import pandas as pd
import MyEval
import ReadData
import dill
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from BasicCLF import MyXGB
from BasicCLF import MyLR
from OptF import OptF
import sklearn
import xgboost
from ReadData import shrink_set_to_seq
def gen_model():
with open('../data/features_all_v1.6.pkl', 'rb') as my_input:
all_pid = dill.load(my_input)
all_feature = dill.load(my_input)
all_label = dill.load(my_input)
all_pid = np.array(all_pid)
all_feature = np.array(all_feature)
all_label = np.array(all_label)
clf = MyXGB()
clf.fit(all_feature, all_label)
pred = clf.predict(all_feature)
print(MyEval.F1Score3(pred, all_label))
with open('../model/v1.6_xgb.pkl', 'wb') as fout:
dill.dump(clf, fout)
#if __name__ == "__main__":
def TestXGB(fout, original_pid, original_label, all_pid, all_feature, all_label):
# wrong_stat = []
## k-fold cross validation
original_pid = np.array(original_pid)
original_label = np.array(original_label)
all_feature = np.array(all_feature)
all_label = np.array(all_label)
F1_list_set = []
F1_list_seq = []
kf = StratifiedKFold(n_splits=5, shuffle=True)
i_fold = 1
for original_train_index, original_test_index in kf.split(original_label, original_label):
original_train_pid = set(original_pid[original_train_index])
original_test_pid = set(original_pid[original_test_index])
train_index = []
test_index = []
for ii in range(len(all_pid)):
ii_pid = all_pid[ii].split('_')[0]
if ii_pid in original_train_pid:
train_index.append(ii)
elif ii_pid in original_test_pid:
test_index.append(ii)
else:
print('wrong')
train_index = np.array(train_index)
test_index = np.array(test_index)
train_data = all_feature[train_index]
train_label = all_label[train_index]
train_pid = np.array(all_pid)[train_index]
test_data = all_feature[test_index]
test_label = all_label[test_index]
test_pid = np.array(all_pid)[test_index]
clf = MyXGB()
clf.fit(train_data, train_label)
pred = clf.predict(test_data)
pred_train = clf.predict(train_data)
_, pred_train_seq = shrink_set_to_seq(train_pid, pred_train)
_, train_label_seq = shrink_set_to_seq(train_pid, train_label)
print('pred_train')
MyEval.F1Score3(pred_train, train_label)
print('pred_train_seq')
MyEval.F1Score3(pred_train_seq, train_label_seq)
_, pred_seq = shrink_set_to_seq(test_pid, pred)
_, test_label_seq = shrink_set_to_seq(test_pid, test_label)
print('\n pred')
F1_list_set.append(MyEval.F1Score3(pred, test_label))
print('pred_seq')
f1_pred = MyEval.F1Score3(pred_seq, test_label_seq)
F1_list_seq.append(f1_pred)
print('=====================================')
# wrong_stat.extend(MyEval.WrongStat(i_fold, pred, test_label, test_pid))
fout.write('{0}, {1} \n'.format(i_fold, f1_pred))
i_fold += 1
# with open('../tmp_model/v1.9_xgb_z_'+str(f1_pred)+'.pkl', 'wb') as fout:
# dill.dump(f1_pred, fout)
# break
avg_f1 = np.mean(F1_list_seq)
print('\n\nAvg F1: ', avg_f1)
# wrong_stat = pd.DataFrame(wrong_stat, columns=['i_fold', 'pid', 'gt', 'pred'])
# wrong_stat.to_csv('../../result/wrong_stat.csv')
    fout.write('avg, {0} \n'.format(avg_f1))
# print(clf.bst.get_fscore())
# xgboost.plot_importance(clf.bst)
# xgboost.plot_tree(clf.bst)
# xgboost.to_graphviz(clf.bst)
# with open('../tmp_model/v1.9_xgb_x_'+str(avg_f1)+'.pkl', 'wb') as fout:
# dill.dump(avg_f1, fout)
if __name__ == "__main__":
    with open('../data/features_all_v1.6.pkl', 'rb') as my_input:
        original_pid = dill.load(my_input)
        original_feature = dill.load(my_input)
        original_label = dill.load(my_input)
    del original_feature
    with open('../data/features_all_v1.9.pkl', 'rb') as my_input:
        all_pid = dill.load(my_input)
        all_feature = dill.load(my_input)
        all_label = dill.load(my_input)
    all_feature = np.nan_to_num(all_feature)
    print('read all data done')
fout = open('../tmp_model/stat.csv', 'w')
for i in range(100):
TestXGB(fout, original_pid, original_label, all_pid, all_feature, all_label)
# gen_model()
fout.close()
|
{"hexsha": "0f34f0b0963ca26e207e03f39bf32182889db138", "size": 4835, "ext": "py", "lang": "Python", "max_stars_repo_path": "references/encase/code/TestXGB_cut.py", "max_stars_repo_name": "wenh06/cinc2020", "max_stars_repo_head_hexsha": "b3757f54df86c8470e8f22f3399b4aecd64dd5d1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-10-31T07:02:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T08:11:35.000Z", "max_issues_repo_path": "references/encase/code/TestXGB_cut.py", "max_issues_repo_name": "DeepPSP/cinc2020", "max_issues_repo_head_hexsha": "38105ed9dac6554e2dd51b94e5553fb8ba22dbe6", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "references/encase/code/TestXGB_cut.py", "max_forks_repo_name": "DeepPSP/cinc2020", "max_forks_repo_head_hexsha": "38105ed9dac6554e2dd51b94e5553fb8ba22dbe6", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-25T14:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-25T14:54:31.000Z", "avg_line_length": 31.6013071895, "max_line_length": 94, "alphanum_fraction": 0.639089969, "include": true, "reason": "import numpy", "num_tokens": 1249}
|
# Licensed with the 3-clause BSD license. See LICENSE for details.
from typing import List
import pytest
import numpy as np
from astropy.table import Table
from astropy.time import Time
import astropy.units as u
from astropy.tests.helper import remote_data
from ..ephemeris import (get_ephemeris_generator, set_ephemeris_generator,
EphemerisGenerator, Horizons)
from ..target import MovingTarget
from ..model import Ephemeris
@remote_data
def test_get_set_ephemeris_generator():
set_ephemeris_generator('jpl')
g: EphemerisGenerator = get_ephemeris_generator()
assert g == Horizons
@remote_data
def test_set_ephemeris_generator_error():
with pytest.raises(ValueError):
set_ephemeris_generator('invalid')
@remote_data
@pytest.fixture()
def encke() -> List[Ephemeris]:
target: MovingTarget = MovingTarget('2P')
g: Horizons = Horizons
start: Time = Time('2020-06-01')
stop: Time = Time('2020-07-01')
step: u.Quantity = 10 * u.day
return g.target_over_date_range(
'500@', target, start, stop, step=step, cache=True)
@remote_data
class TestHorizons:
def test_lt(self, encke):
assert not (encke[0] < encke[0])
assert encke[0] < encke[1]
assert not (encke[1] < encke[0])
def test_le(self, encke):
assert encke[0] <= encke[0]
assert encke[0] <= encke[1]
assert not (encke[1] <= encke[0])
def test_eq(self, encke):
assert encke[0] == encke[0]
assert not (encke[0] == encke[1])
assert not (encke[1] == encke[0])
def test_ne(self, encke):
assert not (encke[0] != encke[0])
assert encke[0] != encke[1]
assert encke[1] != encke[0]
def test_gt(self, encke):
assert not (encke[0] > encke[0])
assert not (encke[0] > encke[1])
assert encke[1] > encke[0]
def test_ge(self, encke):
assert encke[0] >= encke[0]
assert not (encke[0] >= encke[1])
assert encke[1] >= encke[0]
def test_at_dates(self):
target: MovingTarget = MovingTarget('2P')
# get these dates in reverse time order to be sure they are returned
# in reverse order (Horizons requires ephemerides in order).
dates: Time = Time(('2020-07-01', '2020-06-01'))
g: Horizons = Horizons
eph: List[Ephemeris] = g.target_at_dates(
'500@', target, dates, cache=True)
assert np.allclose([e.ra for e in eph], [120.97483, 65.4021],
rtol=1e-3)
assert np.allclose([e.dec for e in eph], [17.72957, 26.36761],
rtol=1e-3)
assert np.allclose([e.mjd for e in eph], dates.mjd)
def test_at_dates_single(self):
target: MovingTarget = MovingTarget('2P')
date: Time = Time('1980-06-01')
g: Horizons = Horizons
eph: List[Ephemeris] = g.target_at_dates(
'500@', target, date, cache=True)
assert np.allclose([e.ra for e in eph], [18.94464], rtol=1e-3)
assert np.allclose([e.dec for e in eph], [13.55878], rtol=1e-3)
assert np.allclose([e.mjd for e in eph], date.mjd)
def test_cometary_asteroid(self):
target: MovingTarget = MovingTarget('174P')
date: Time = Time('1980-06-01')
g: Horizons = Horizons
eph: List[Ephemeris] = g.target_at_dates(
'500@', target, date, cache=True)
assert len(eph) > 0
def test_over_date_range(self, encke):
assert np.allclose([e.ra for e in encke],
[65.4021, 81.36382, 100.97247, 120.97483],
rtol=1e-3)
assert np.allclose([e.dec for e in encke],
[26.36761, 26.87021, 24.42504, 17.72957],
rtol=1e-3)
assert np.allclose([e.mjd for e in encke],
[59001.0, 59011.0, 59021.0, 59031.0])
@pytest.mark.parametrize(
"start,stop",
(
('2018-08-13', '2018-08-17'),
('2018-11-03', '2018-11-07')
)
)
def test_over_date_range_adaptable(self, start, stop):
"""Wirtanen in 2018, closest approach 0.08 au.
This test will work only with approaching objects.
"""
target: MovingTarget = MovingTarget('46P')
g: Horizons = Horizons
start: Time = Time(start)
stop: Time = Time(stop)
eph: List[Ephemeris] = g.target_over_date_range('500@', target, start, stop,
cache=True)
d: np.ndarray = np.diff([e.mjd for e in eph])
delta: np.ndarray = np.array([e.delta for e in eph][1:])
limit: float
step: float
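        # expected adaptive cadence encoded by the (limit, step) tiers below:
        # delta > 1 au -> 1-day steps; 0.25-1 au -> 4-hour; <= 0.25 au -> 1-hour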
for limit, step in zip([1, 0.25, 0], [1, 4 / 24, 1 / 24]):
i: np.ndarray = delta > limit
if i.sum() > 1:
assert np.isclose(d[i][1:].mean(), step)
if i.sum() > 0:
delta = delta[~i]
d = d[~i]
|
{"hexsha": "581ba08e84e488b3aa25b0c1c13add67c75a45a0", "size": 5019, "ext": "py", "lang": "Python", "max_stars_repo_path": "sbsearch/test/test_ephemeris.py", "max_stars_repo_name": "mkelley/sbsearch", "max_stars_repo_head_hexsha": "7569e473cf0b8f5df3cfc7332ae4287a780d28d2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sbsearch/test/test_ephemeris.py", "max_issues_repo_name": "mkelley/sbsearch", "max_issues_repo_head_hexsha": "7569e473cf0b8f5df3cfc7332ae4287a780d28d2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sbsearch/test/test_ephemeris.py", "max_forks_repo_name": "mkelley/sbsearch", "max_forks_repo_head_hexsha": "7569e473cf0b8f5df3cfc7332ae4287a780d28d2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.46, "max_line_length": 84, "alphanum_fraction": 0.5714285714, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 1480}
|
import OpenGL
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy
import random
from math_utils import *
def drawOrigin():
glBegin(GL_LINES)
glColor(1,0,0)
glVertex3f(0,0,0)
glVertex3f(0,1000,0)
glColor(0,1,0)
glVertex3f(0,0,0)
glVertex3f(1000,0,0)
glColor(0,0,1)
glVertex3f(0,0,0)
glVertex3f(0,0,1000)
glEnd()
def drawVessel(v):
# change color we render with
glColor(0.3, 0.3, 0.35)
# here we go
glPushMatrix()
# put us in correct position
glTranslatef(v.get_pos()[0], v.get_pos()[1], v.get_pos()[2])
# actually render model now, with triangles
    for mesh in v.model.mesh_list:
        for face in mesh.faces:
            # one glBegin/glEnd pair per face so separate faces are not
            # fused into a single polygon
            glBegin(GL_POLYGON)
            for vertex_i in face:
                vertex_i = v.model.vertices[vertex_i]
                vertex_i = numpy.matmul(numpy.array(vertex_i), v.get_orient())
                glVertex3f(vertex_i[0], vertex_i[1], vertex_i[2])
            glEnd()
# now get out
glPopMatrix()
def drawTerrain(t, current_ship, render_dist):
glPushMatrix()
glTranslatef(t.get_center()[0], t.get_center()[1], t.get_center()[2])
glColor(0.8, 0.8, 0.8)
    # filled terrain is drawn as one triangle strip per grid row (below)
x_lines_num = t.x_lines_num
z_lines_num = t.z_lines_num
x_spacing = t.x_spacing
z_spacing = t.z_spacing
rect_render_dist = 250*render_dist + current_ship.get_pos()[1] * render_dist
rel_x = current_ship.get_pos()[0] - t.center[0]
rel_z = current_ship.get_pos()[2] - t.center[2]
render_min_x = rel_x - rect_render_dist
render_max_x = rel_x + rect_render_dist
    x_min_index = max(int(x_lines_num/2 + render_min_x/x_spacing), 0)
    # clamp the upper bounds with min() so the culling window cannot run
    # past the grid
    x_max_index = min(int(x_lines_num/2 + render_max_x/x_spacing), x_lines_num-1)
    render_min_z = rel_z - rect_render_dist
    render_max_z = rel_z + rect_render_dist
    z_min_index = max(int(z_lines_num/2 + render_min_z/z_spacing), 0)
    z_max_index = min(int(z_lines_num/2 + render_max_z/z_spacing), z_lines_num-1)
#print(x_min_index, x_max_index)
#print(z_min_index, z_max_index)
indices = []
ai = 0
for a in range(z_min_index, z_max_index):
indices.append([])
for b in range(x_min_index, x_max_index):
indices[ai].append(a * x_lines_num + b)
ai += 1
    for z in indices:
        # one triangle strip per grid row; vertex pairs from adjacent rows
        # give the usual terrain-fill ordering
        glBegin(GL_TRIANGLE_STRIP)
        for x in z:
            try:
                glVertex3f(t.vertices[x][0], t.vertices[x][1], t.vertices[x][2])
                glVertex3f(t.vertices[x+x_lines_num][0], t.vertices[x+x_lines_num][1], t.vertices[x+x_lines_num][2])
            except IndexError:
                pass
        glEnd()
## for a in range(t.z_lines_num):
## for b in range(t.x_lines_num):
## # why the hell is drawing lines so bloody expensive??
## # anyway, don't draw those that are too far away
## if (abs(current_ship.get_pos()[0] - t.vertices[a*t.x_lines_num+b][0]) < 250*render_dist + current_ship.get_pos()[1] * render_dist and
## abs(current_ship.get_pos()[2] - t.vertices[a*t.x_lines_num+b][2]) < 250*render_dist + current_ship.get_pos()[1] * render_dist):
## if not b+1 == t.x_lines_num:
## glVertex3f(t.vertices[a*t.x_lines_num+b][0], t.vertices[a*t.x_lines_num+b][1], t.vertices[a*t.x_lines_num+b][2])
## #glVertex3f(t.vertices[a*t.x_lines_num+b+1][0], t.vertices[a*t.x_lines_num+b+1][1], t.vertices[a*t.x_lines_num+b+1][2])
##
## if not a-1 < 0:
## glVertex3f(t.vertices[(a-1)*t.x_lines_num+b][0], t.vertices[(a-1)*t.x_lines_num+b][1], t.vertices[(a-1)*t.x_lines_num+b][2])
## glVertex3f(t.vertices[a*t.x_lines_num+b][0], t.vertices[a*t.x_lines_num+b][1], t.vertices[a*t.x_lines_num+b][2])
glColor(0.0, 0.5, 0.0)
for z in indices:
glBegin(GL_LINE_STRIP)
for x in z:
            try:
                glVertex3f(t.vertices[x][0], t.vertices[x][1], t.vertices[x][2])
                glVertex3f(t.vertices[x+x_lines_num+1][0], t.vertices[x+x_lines_num+1][1], t.vertices[x+x_lines_num+1][2])
            except IndexError:
                pass
        # close this row's line strip; each glBegin needs a matching glEnd
        glEnd()
## for a in range(t.z_lines_num):
## glBegin(GL_LINE_STRIP)
## for b in range(t.x_lines_num):
## # why the hell is drawing lines so bloody expensive??
## # anyway, don't draw those that are too far away
## if (abs(current_ship.get_pos()[0] - t.vertices[a*t.x_lines_num+b][0]) < 100 * render_dist + current_ship.get_pos()[1] * 0.625 * render_dist and
## abs(current_ship.get_pos()[2] - t.vertices[a*t.x_lines_num+b][2]) < 100 * render_dist + current_ship.get_pos()[1] * 0.625 * render_dist):
## if not b+1 == t.x_lines_num:
## glVertex3f(t.vertices[a*t.x_lines_num+b][0], t.vertices[a*t.x_lines_num+b][1], t.vertices[a*t.x_lines_num+b][2])
## #glVertex3f(t.vertices[a*t.x_lines_num+b+1][0], t.vertices[a*t.x_lines_num+b+1][1], t.vertices[a*t.x_lines_num+b+1][2])
##
## if not a-1 < 0:
## glVertex3f(t.vertices[(a-1)*t.x_lines_num+b][0], t.vertices[(a-1)*t.x_lines_num+b][1], t.vertices[(a-1)*t.x_lines_num+b][2])
## glVertex3f(t.vertices[a*t.x_lines_num+b][0], t.vertices[a*t.x_lines_num+b][1], t.vertices[a*t.x_lines_num+b][2])
glPopMatrix()
def drawLine2D(x1, y1, x2, y2, color, camera):
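    # Draws a HUD line in "screen space": the 2D (x, y) endpoints are
    # re-expressed in the camera's local basis and pushed 10 units in front
    # of the camera, so the line stays fixed relative to the view.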
glPushMatrix()
glTranslate(-camera.get_pos()[0],
-camera.get_pos()[1],
-camera.get_pos()[2])
glColor(color[0], color[1], color[2])
glBegin(GL_LINES)
glVertex3f((x1) * camera.get_orient()[0][0] + (y1) * camera.get_orient()[1][0] + (-10) * camera.get_orient()[2][0],
(x1) * camera.get_orient()[0][1] + (y1) * camera.get_orient()[1][1] + (-10) * camera.get_orient()[2][1],
(x1) * camera.get_orient()[0][2] + (y1) * camera.get_orient()[1][2] + (-10) * camera.get_orient()[2][2])
glVertex3f((x2) * camera.get_orient()[0][0] + (y2) * camera.get_orient()[1][0] + (-10) * camera.get_orient()[2][0],
(x2) * camera.get_orient()[0][1] + (y2) * camera.get_orient()[1][1] + (-10) * camera.get_orient()[2][1],
(x2) * camera.get_orient()[0][2] + (y2) * camera.get_orient()[1][2] + (-10) * camera.get_orient()[2][2])
glEnd()
glPopMatrix()
def drawRectangle2D(x1, y1, x2, y2, color, camera):
drawLine2D(x1, y1, x2, y1, color, camera)
drawLine2D(x1, y1, x1, y2, color, camera)
drawLine2D(x2, y1, x2, y2, color, camera)
drawLine2D(x1, y2, x2, y2, color, camera)
def drawInterface(camera, ship, autopilot):
## # artificial horizon
## glPushMatrix()
## glTranslate(-camera.get_pos()[0],
## -camera.get_pos()[1],
## -camera.get_pos()[2])
##
## glColor(0.9, 0.9, 0.9)
##
## glBegin(GL_LINES)
##
## glVertex3f(5 * camera.get_orient()[0][0] + 0 * camera.get_orient()[1][0] + (-10) * camera.get_orient()[2][0],
## 0,
## 5 * camera.get_orient()[0][2] + 0 * camera.get_orient()[1][2] + (-10) * camera.get_orient()[2][2])
##
## glVertex3f(3 * camera.get_orient()[0][0] + 0 * camera.get_orient()[1][0] + (-10) * camera.get_orient()[2][0],
## 0,
## 3 * camera.get_orient()[0][2] + 0 * camera.get_orient()[1][2] + (-10) * camera.get_orient()[2][2])
## glEnd()
## glPopMatrix()
# thrust setting
percent_thrust = ship.get_percent_thrust()
thrust_line_y = percent_thrust/100 * 3 -5
drawRectangle2D(5,-2,6,-5,[1,0,1], camera)
drawLine2D(5,thrust_line_y,6,thrust_line_y,[1,0,1], camera)
# angular velocity display
drawRectangle2D(6, 4, 8, 6, [1,0,0], camera)
# centerlines
drawLine2D(6, 5, 8, 5, [0.5,0,0], camera)
drawLine2D(7, 4, 7, 6, [0.5,0,0], camera)
drawLine2D(7, 6, 7, 6.5, [0.5,0,0], camera)
# horiz line (moves in y direction)
drawLine2D(6, max(min(ship.get_ang_vel()[0]/8 + 5, 5.9), 4.1),
8, max(min(ship.get_ang_vel()[0]/8 + 5, 5.9), 4.1),
[1,0,0], camera)
# vert line (moves in x direction)
drawLine2D(max(min(-ship.get_ang_vel()[1]/8 + 7, 7.9), 6.1), 4,
max(min(-ship.get_ang_vel()[1]/8 + 7, 7.9), 6.1), 6,
[1,0,0], camera)
# roll
drawRectangle2D(6, 6, 8, 6.5, [1,0,0], camera)
drawLine2D(max(min(-ship.get_ang_vel()[2]/8 + 7,7.9),6.1), 6,
max(min(-ship.get_ang_vel()[2]/8 + 7,7.9),6.1), 6.5, [1,0,0], camera)
# linear velocity display
drawRectangle2D(3, 4, 5, 6, [0,1,1], camera)
# centerlines
drawLine2D(3, 5, 5, 5, [0,0.5,0.5], camera)
drawLine2D(3, 5, 5, 5, [0,0.5,0.5], camera)
horizontal_vel_x = ship.get_vel()[0] * ship.get_orient()[0][0] + ship.get_vel()[2] * ship.get_orient()[0][2]
horizontal_vel_z = ship.get_vel()[2] * ship.get_orient()[2][2] + ship.get_vel()[0] * ship.get_orient()[2][0]
# local horizontal z
drawLine2D(3,max(min(-horizontal_vel_z/25 + 5, 5.9), 4.1),
5,max(min(-horizontal_vel_z/25 + 5, 5.9), 4.1),
[0,1,1], camera)
# local horizontal x
drawLine2D(max(min(horizontal_vel_x/25 + 4, 4.9), 3.1), 4,
max(min(horizontal_vel_x/25 + 4, 4.9), 3.1), 6,
[0,1,1], camera)
# descent speed
drawRectangle2D(2.5, 4, 3, 6, [0,1,1], camera)
# centerline
drawLine2D(2.5, 5.5, 3, 5.5, [0,0.5,0.5], camera)
# descent rate
drawLine2D(2.5, max(min(ship.get_vel()[1]/10 + 5.5, 5.9), 4.1),
3, max(min(ship.get_vel()[1]/10 + 5.5, 5.9), 4.1),
[0,1,1], camera)
# AP light
if autopilot:
drawRectangle2D(5, -1.9, 5.4, -1.5, [0,0.8,0], camera)
if ship.get_prop_mass() < 500:
drawRectangle2D(5.6, -1.9, 6, -1.5, [1,0,0], camera)
|
{"hexsha": "1ab518a5c81a417a3b41fe367b304e866a25a77a", "size": 10009, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphics.py", "max_stars_repo_name": "arda-guler/miniLanding3D", "max_stars_repo_head_hexsha": "83c4884378e0f57853a1fb09b1fdbe36e15bacd7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graphics.py", "max_issues_repo_name": "arda-guler/miniLanding3D", "max_issues_repo_head_hexsha": "83c4884378e0f57853a1fb09b1fdbe36e15bacd7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphics.py", "max_forks_repo_name": "arda-guler/miniLanding3D", "max_forks_repo_head_hexsha": "83c4884378e0f57853a1fb09b1fdbe36e15bacd7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7945736434, "max_line_length": 157, "alphanum_fraction": 0.569087821, "include": true, "reason": "import numpy", "num_tokens": 3293}
|
[STATEMENT]
lemma real_binomial_eq_mult_binomial_Suc:
assumes "k \<le> n"
shows "real(n choose k) = (n + 1 - k) / (n + 1) * (Suc n choose k)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real (n choose k) = (real n + 1 - real k) / (real n + 1) * real (Suc n choose k)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
k \<le> n
goal (1 subgoal):
1. real (n choose k) = (real n + 1 - real k) / (real n + 1) * real (Suc n choose k)
[PROOF STEP]
by (simp add: of_nat_binomial_eq_mult_binomial_Suc [of k n] add.commute of_nat_diff)
|
{"llama_tokens": 249, "file": null, "length": 2}
|
"""
Program to get average editing frequency from CrispEsso.
"""
import matplotlib
import pandas as pd
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
import string
import glob
import numpy as np
def revcomp(seq):
    try: ## python2
        tab = string.maketrans(b"ACTG", b"TGAC")
    except AttributeError: ## python3: string.maketrans was removed
        tab = bytes.maketrans(b"ACTG", b"TGAC")
    return seq.translate(tab)[::-1]
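# e.g. revcomp("ACTG") == "CAGT": complement first ("TGAC"), then reverse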
def get_ref_alt(input_gRNA,table_header_list):
table_header=[x.split(".")[0] for x in table_header_list]
table_header = "".join(table_header)
if input_gRNA == table_header:
return "A","G"
elif revcomp(input_gRNA) == table_header:
return "T","C"
else:
print ("something is wrong, Exist!")
exit()
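# In other words: if the input gRNA matches the table header directly we
# quantify A->G edits; if its reverse complement matches, the table is on
# the opposite strand and we quantify T->C instead.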
def parse_df(x):
freq_table = "/".join(x.split("/")[:-1])+"/Quantification_window_nucleotide_percentage_table.txt"
gRNA = x.split("_")[-1].replace(".txt","")
df = pd.read_csv(freq_table,sep="\t",index_col=0)
df[df < 0.01] = 0
my_freq = []
flag_type = []
ref,alt = get_ref_alt(gRNA,df.columns.tolist())
for c in df.columns:
base = c.split(".")[0]
freq = 0
if base == ref:
freq = df.at[alt,c]
my_freq.append(freq)
if ref == "T":
return my_freq[::-1]
return my_freq
inputs = pd.read_csv("input.list",header=None)
inputs = inputs[0].tolist()
df = pd.DataFrame([parse_df(i) for i in inputs])
df.index = inputs
# print (df.head())
df = df.replace(0, np.NaN)
df.to_csv("average_model.csv")
avg = pd.DataFrame(df.mean())
avg[1] = avg.index.tolist()
print (avg)
plt.figure(figsize=(5,2))
sns.barplot(x=1,y=0,data=avg)
# plt.xticks(rotation=90)
plt.ylabel("Editing frequency")
plt.xlabel('A position')
plt.rcParams.update({'font.size': 10})
plt.savefig("editing_frequecy_barplot.pdf", bbox_inches='tight')
|
{"hexsha": "c9651f42d8f1d1f4b02e455062a477334640bb22", "size": 1848, "ext": "py", "lang": "Python", "max_stars_repo_path": "per_A_base_score/editing_frequency/average_model/get_average_frequency.py", "max_stars_repo_name": "YichaoOU/ABE_NonCoding_functional_score", "max_stars_repo_head_hexsha": "cef8dbbd74e5f9359feb6cf709fccaac1127cd80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-03T06:23:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T06:23:00.000Z", "max_issues_repo_path": "per_A_base_score/editing_frequency/average_model/get_average_frequency.py", "max_issues_repo_name": "geng-lee/ABE_NonCoding_functional_score", "max_issues_repo_head_hexsha": "87db4000d7ee030e3ed813774e03f4d902ced587", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "per_A_base_score/editing_frequency/average_model/get_average_frequency.py", "max_forks_repo_name": "geng-lee/ABE_NonCoding_functional_score", "max_forks_repo_head_hexsha": "87db4000d7ee030e3ed813774e03f4d902ced587", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-17T03:00:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-03T06:22:57.000Z", "avg_line_length": 21.2413793103, "max_line_length": 99, "alphanum_fraction": 0.6461038961, "include": true, "reason": "import numpy", "num_tokens": 506}
|
From MetaCoq.Lob.Template.QuoteGround Require Export config utils Ast AstUtils Environment Primitive
LiftSubst UnivSubst EnvironmentTyping Reflect ReflectAst TermEquality WfAst.
From MetaCoq.Template Require Import Ast Typing.
#[export] Instance quote_isSort {T} : ground_quotable (isSort T) := ltac:(cbv [isSort]; exact _).
#[export] Instance quote_isArity {T} : ground_quotable (isArity T) := ltac:(induction T; exact _).
#[export] Instance quote_instantiate_params_subst_spec {params pars s pty s' pty'} : ground_quotable (@instantiate_params_subst_spec params pars s pty s' pty').
Proof.
revert params pars s pty s' pty'; induction params as [|a params]; intros; [ | destruct a as [? [] ?], pty ]; destruct pars.
all: try solve [ intro H; exfalso; inversion H ].
{ intro pf.
assert (s' = s) by now inversion pf.
assert (pty' = pty) by now inversion pf.
subst.
revert pf.
adjust_ground_quotable_by_econstructor_inversion (). }
adjust_ground_quotable_by_econstructor_inversion ().
adjust_ground_quotable_by_econstructor_inversion ().
adjust_ground_quotable_by_econstructor_inversion ().
Defined.
#[export] Instance quote_red1 {Σ Γ x y} : ground_quotable (@red1 Σ Γ x y).
Proof.
revert Γ x y; cbv [ground_quotable].
fix quote_red1 4; change (forall Γ x y, ground_quotable (@red1 Σ Γ x y)) in quote_red1.
intros Γ x y.
destruct 1; exact _.
Defined.
#[export] Instance quote_red {Σ Γ x y} : ground_quotable (@red Σ Γ x y) := ltac:(induction 1; exact _).
#[export] Instance quote_eq_term_nocast {cf Σ ϕ t u} : ground_quotable (@eq_term_nocast cf Σ ϕ t u) := _.
#[export] Instance quote_leq_term_nocast {cf Σ ϕ t u} : ground_quotable (@leq_term_nocast cf Σ ϕ t u) := _.
#[export] Instance quote_cumul_gen {cf Σ Γ pb t u} : ground_quotable (@cumul_gen cf Σ Γ pb t u) := ltac:(induction 1; exact _).
#[export] Instance quote_eq_opt_term {cf Σ ϕ t u} : ground_quotable (@eq_opt_term cf Σ ϕ t u) := _.
#[export] Instance quote_eq_decl {cf Σ ϕ d d'} : ground_quotable (@eq_decl cf Σ ϕ d d') := _.
#[export] Instance quote_eq_context {cf Σ ϕ d d'} : ground_quotable (@eq_context cf Σ ϕ d d') := _.
Module QuotationOfTemplateEnvTyping <: QuotationOfEnvTyping TemplateTerm Env TemplateTermUtils TemplateEnvTyping.
#[export] Instance qAll_local_env : inductive_quotation_of All_local_env := _.
#[export] Instance qAll_local_env_over_gen : inductive_quotation_of All_local_env_over_gen := _.
#[export] Instance qctx_inst : inductive_quotation_of ctx_inst := _.
Module Instances.
#[export] Existing Instances
qAll_local_env
qAll_local_env_over_gen
qctx_inst
.
End Instances.
End QuotationOfTemplateEnvTyping.
Export QuotationOfTemplateEnvTyping.Instances.
Module QuoteTemplateEnvTyping := QuoteEnvTyping TemplateTerm Env TemplateTermUtils TemplateEnvTyping QuoteTemplateTerm QuotationOfEnv QuotationOfTemplateEnvTyping.
Export QuoteTemplateEnvTyping.Instances.
Module QuotationOfTemplateConversion <: QuotationOfConversion TemplateTerm Env TemplateTermUtils TemplateEnvTyping TemplateConversion.
#[export] Instance qAll_decls_alpha_pb : inductive_quotation_of All_decls_alpha_pb := _.
Module Instances.
#[export] Existing Instances
qAll_decls_alpha_pb
.
End Instances.
End QuotationOfTemplateConversion.
Export QuotationOfTemplateConversion.Instances.
Module QuoteTemplateConversion := QuoteConversion TemplateTerm Env TemplateTermUtils TemplateEnvTyping TemplateConversion QuoteTemplateTerm QuotationOfEnv QuotationOfTemplateConversion.
Export QuoteTemplateConversion.Instances.
Module QuotationOfTemplateGlobalMaps <: QuotationOfGlobalMaps TemplateTerm Env TemplateTermUtils TemplateEnvTyping TemplateConversion TemplateLookup TemplateGlobalMaps.
#[export] Instance qpositive_cstr_arg : inductive_quotation_of positive_cstr_arg := _.
#[export] Instance qpositive_cstr : inductive_quotation_of positive_cstr := _.
#[export] Instance qon_constructor : inductive_quotation_of (@on_constructor) := _.
#[export] Instance qon_proj : inductive_quotation_of on_proj := _.
#[export] Instance qon_projections : inductive_quotation_of on_projections := _.
#[export] Instance qon_ind_body : inductive_quotation_of (@on_ind_body) := _.
#[export] Instance qon_inductive : inductive_quotation_of (@on_inductive) := _.
#[export] Instance qon_global_decls_data : inductive_quotation_of (@on_global_decls_data) := _.
#[export] Instance qon_global_decls : inductive_quotation_of (@on_global_decls) := _.
Module Instances.
#[export] Existing Instances
qpositive_cstr_arg
qpositive_cstr
qon_constructor
qon_proj
qon_projections
qon_ind_body
qon_inductive
qon_global_decls_data
qon_global_decls
.
End Instances.
End QuotationOfTemplateGlobalMaps.
Export QuotationOfTemplateGlobalMaps.Instances.
Module QuoteTemplateGlobalMaps := QuoteGlobalMaps TemplateTerm Env TemplateTermUtils TemplateEnvTyping TemplateConversion TemplateLookup TemplateGlobalMaps QuoteTemplateTerm QuotationOfEnv QuotationOfTemplateEnvTyping QuotationOfTemplateConversion QuotationOfTemplateGlobalMaps.
Export QuoteTemplateGlobalMaps.Instances.
Module QuoteTemplateConversionPar <: QuoteConversionPar TemplateTerm Env TemplateTermUtils TemplateEnvTyping TemplateConversionPar.
#[export] Instance qcumul_gen : quotation_of (@TemplateConversionPar.cumul_gen) := _.
#[export] Instance quote_cumul_gen {cf Σ Γ pb t t'} : ground_quotable (@TemplateConversionPar.cumul_gen cf Σ Γ pb t t') := _.
Module Instances.
#[export] Existing Instances
qcumul_gen
quote_cumul_gen
.
End Instances.
End QuoteTemplateConversionPar.
Export QuoteTemplateConversionPar.Instances.
Section quote_typing.
Context {cf : config.checker_flags} {Σ : global_env_ext}.
#[local] Hint Extern 1 => progress cbv zeta : typeclass_instances.
Fixpoint quote_typing' {Γ t T} (pf : @typing cf Σ Γ t T) : quotation_of pf
with quote_typing_spine' {Γ t s T} (pf : @typing_spine cf Σ Γ t s T) : quotation_of pf.
Proof.
all: change (forall Γ t T, ground_quotable (@typing cf Σ Γ t T)) in quote_typing'.
all: change (forall Γ t s T, ground_quotable (@typing_spine cf Σ Γ t s T)) in quote_typing_spine'.
all: destruct pf.
Time all: [ > time exact _ .. ].
Defined.
End quote_typing.
#[export] Instance quote_typing {cf Σ Γ t T} : ground_quotable (@typing cf Σ Γ t T) := quote_typing'.
#[export] Instance quote_typing_spine {cf Σ Γ t s T} : ground_quotable (@typing_spine cf Σ Γ t s T) := quote_typing_spine'.
#[export] Instance quote_has_nparams {npars ty} : ground_quotable (@has_nparams npars ty) := _.
#[export] Instance quote_infer_sorting {cf Σ Γ T} : ground_quotable (@infer_sorting cf Σ Γ T) := _.
Module QuoteTemplateTyping <: QuoteTyping TemplateTerm Env TemplateTermUtils TemplateEnvTyping
TemplateConversion TemplateConversionPar TemplateTyping.
#[export] Instance qtyping : quotation_of (@TemplateTyping.typing) := _.
#[export] Instance quote_typing {cf Σ Γ t T} : ground_quotable (@TemplateTyping.typing cf Σ Γ t T) := _.
Module Instances.
#[export] Existing Instances
qtyping
quote_typing
.
End Instances.
End QuoteTemplateTyping.
Export QuoteTemplateTyping.Instances.
Module QuoteTemplateDeclarationTyping
:= QuoteDeclarationTyping
TemplateTerm
Env
TemplateTermUtils
TemplateEnvTyping
TemplateConversion
TemplateConversionPar
TemplateTyping
TemplateLookup
TemplateGlobalMaps
TemplateDeclarationTyping
QuoteTemplateTerm QuotationOfEnv QuotationOfTemplateEnvTyping QuotationOfTemplateConversion QuotationOfTemplateGlobalMaps QuoteTemplateTyping.
Export QuoteTemplateDeclarationTyping.Instances.
#[export] Instance quote_wf {cf Σ} : ground_quotable (@wf cf Σ) := _.
#[export] Instance quote_wf_ext {cf Σ} : ground_quotable (@wf_ext cf Σ) := _.
#[export] Instance quote_Forall_typing_spine {cf Σ Γ P T t U tls} {qP : quotation_of P} {quoteP : forall x y, ground_quotable (P x y)} : ground_quotable (@Forall_typing_spine cf Σ Γ P T t U tls) := ltac:(induction 1; exact _).
|
{"author": "JasonGross", "repo": "metacoq-lob", "sha": "acfc938eb79cac82c3c7d306f6d7010a4ad6492e", "save_path": "github-repos/coq/JasonGross-metacoq-lob", "path": "github-repos/coq/JasonGross-metacoq-lob/metacoq-lob-acfc938eb79cac82c3c7d306f6d7010a4ad6492e/theories/Template/QuoteGround/Typing.v"}
|
import os
from pathlib import Path
from tqdm import tqdm
import numpy as np
import cv2
from PIL import Image
import torch
import torchvision as tv

from mycv.paths import IMAGENET_DIR
from mycv.utils.general import ANSI
from mycv.datasets.imcls import imcls_evaluate, get_input_normalization, get_tv_interpolation


def get_classes():
    fpath = IMAGENET_DIR / 'annotations/classes.txt'
    with open(fpath, 'r') as f:
        wnids = f.read().strip().split('\n')
    assert len(wnids) == 1000
    wnid_to_idx = {s: i for i, s in enumerate(wnids)}
    return wnids, wnid_to_idx
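
# Usage sketch (illustrative): map a WordNet id to its class index.
# wnids, wnid_to_idx = get_classes()
# assert wnid_to_idx[wnids[0]] == 0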
class _TransformAndSave(torch.utils.data.Dataset):
    def __init__(self, source, target, transform):
        self.source = Path(source)
        self.target = Path(target)
        self.transform = transform
        img_names = []
        for cname in os.listdir(self.source):
            (self.target / cname).mkdir(parents=True, exist_ok=False)
            cdir = self.source / cname
            img_names.extend([cname + '/' + imname for imname in os.listdir(cdir)])
        self.img_names = img_names

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, index):
        imname = self.img_names[index]
        imname: str
        impath = self.source / imname
        img = Image.open(impath).convert('RGB')
        img = self.transform(img)
        img: Image.Image
        assert imname.endswith('.JPEG'), f'image should be JPEG: {imname}'
        svname = imname.replace('.JPEG', '.png')
        svpath = self.target / svname
        assert svpath.parent.is_dir(), f'Cannot find {svpath.parent}, which should exist.'
        # save by PIL
        # img.save(svpath)
        # save by cv2 (convert RGB -> BGR before imwrite)
        im = np.array(img)[:, :, ::-1].copy()
        cv2.imwrite(str(svpath), im)
        return 0
def _cache_valset(
    src_dir, tgt_dir,
    img_size=224, crop_ratio=0.875, interp='bilinear',
    batch_size=1, workers=0
):
    interp = get_tv_interpolation(interp)
    transform = tv.transforms.Compose([
        tv.transforms.Resize(round(img_size / crop_ratio), interpolation=interp),
        tv.transforms.CenterCrop(img_size)
    ])
    saver = _TransformAndSave(source=src_dir, target=tgt_dir, transform=transform)
    saveloader = torch.utils.data.DataLoader(
        saver, batch_size=batch_size, shuffle=False, num_workers=workers,
        pin_memory=True, drop_last=False
    )
    print(f'\ninterp={interp}, img_size={img_size},',
          f'crop_ratio(resize)={crop_ratio}({round(img_size/crop_ratio)}),',
          f'batch_size={batch_size}, workers={workers}')
    print(f'Caching images in {tgt_dir}...')
    for _ in tqdm(saveloader):
        pass
    print(ANSI.sccstr(f'Successfully created {tgt_dir}.\n'))
# def get_cached_loader(split: str, input_norm: str, batch_size, workers):
# _original = IMAGENET_DIR / 'val'
# root_dir = IMAGENET_DIR / 'cache' / split
# if not (root_dir).is_dir():
# if split == 'val224l':
# _cache_valset(_original, root_dir, 'bilinear', 224, 0.875, batch_size, workers)
# elif split == 'val224c':
# _cache_valset(_original, root_dir, 'bicubic', 224, 0.875, batch_size, workers)
# elif split == 'val224c0.9':
# _cache_valset(_original, root_dir, 'bicubic', 224, 0.9, batch_size, workers)
# else:
# raise ValueError(f'Unknown split: {split}')
# assert root_dir.is_dir(), f'{root_dir} does not exist.'
# transform = [tv.transforms.ToTensor()]
# if input_norm:
# norm = get_input_normalization(input_norm)
# transform.append(norm)
# transform = tv.transforms.Compose(transform)
# testset = tv.datasets.ImageFolder(root=root_dir, transform=transform)
# testloader = torch.utils.data.DataLoader(
# testset, batch_size=batch_size, shuffle=False, num_workers=workers,
# pin_memory=True, drop_last=False
# )
# return testloader
def get_valloader(split='val',
                  img_size=224, crop_ratio=0.875, interp='bilinear', input_norm='imagenet',
                  cache=False, batch_size=1, workers=0):
    _original = IMAGENET_DIR / split
    if cache:
        root_dir = IMAGENET_DIR / 'cache' / f'{split}_{img_size}_{crop_ratio}_{interp}'
        if not root_dir.is_dir():
            _cache_valset(_original, root_dir, img_size, crop_ratio, interp, batch_size, workers)
        transform = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            get_input_normalization(input_norm)
        ])
    else:
        root_dir = _original
        transform = tv.transforms.Compose([
            tv.transforms.Resize(round(img_size / crop_ratio), interpolation=get_tv_interpolation(interp)),
            tv.transforms.CenterCrop(img_size),
            tv.transforms.ToTensor(),
            get_input_normalization(input_norm)
        ])
    dataset = tv.datasets.ImageFolder(root=root_dir, transform=transform)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, num_workers=workers,
        pin_memory=True, drop_last=False
    )
    return dataloader
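
# Usage sketch (values illustrative): build the non-cached val loader and iterate.
# loader = get_valloader('val', img_size=224, crop_ratio=0.875, interp='bilinear',
#                        input_norm='imagenet', cache=False, batch_size=64, workers=4)
# for images, labels in loader:
#     pass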
def imagenet_val(
    model: torch.nn.Module,
    img_size=224, crop_ratio=0.875, interp='bilinear', input_norm='imagenet',
    cache=False, batch_size=1, workers=0
):
    """ ImageNet-1k val set evaluation

    Args:
        model (torch.nn.Module): pytorch model
        img_size (int): testing image size.
        crop_ratio (float): resize image to img_size/crop_ratio, then center crop.
        interp (str): interpolation method: 'bilinear', 'bicubic'.
        input_norm (str): 'imagenet', 'inception', None
        cache (bool): if True, cache the resized & cropped images to imagenet/cache.
        batch_size (int): testing batch size
        workers (int): testing cpu workers

    Returns:
        dict, example: {'top1': 0.81378, 'top5': 0.9551}
    """
    val_split = 'val'  # or 'val_v2'
    valloader = get_valloader(val_split,
                              img_size, crop_ratio, interp, input_norm,
                              cache, batch_size, workers)
    results = imcls_evaluate(model, valloader)
    return results
# 'real' labels https://github.com/google-research/reassessed-imagenet
# if split == 'val':
# fpath = IMAGENET_DIR / 'annotations' / 'val_real.json'
# labels_real_all = json.load(open(fpath, 'r'))
# assert len(predictions) == len(labels_real_all)
# # compute accuracy for 'real' labels
# tps = [p.item() in t for p,t in zip(predictions,labels_real_all) if len(t) > 0]
# acc_real = sum(tps) / len(tps)
# else:
# acc_real = acc_top1
def imagenet_val_default224(model: torch.nn.Module, input_norm, batch_size=64, workers=4):
    """ Imagenet validation with 224x224 resolution, BILINEAR interpolation. """
    results = imagenet_val(model, input_norm=input_norm,
                           cache=True, batch_size=batch_size, workers=workers)
    return results
if __name__ == "__main__":
    from mycv.paths import MYCV_DIR
    # import matplotlib.pyplot as plt
    # dataset = ImageNetCls(split='train')
    # dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=0)
    # for imgs, labels in tqdm(dataloader):
    #     # continue
    #     for im, lbl in zip(imgs, labels):
    #         im = im * dataset.input_std + dataset.input_mean
    #         im = im.permute(1, 2, 0).numpy()
    #         plt.imshow(im); plt.show()
    #     imgs = imgs
    from mycv.models.cls.resnet import resnet50
    model = resnet50(num_classes=1000, pretrained=True)
    # model.load_state_dict(torch.load(MYCV_DIR / 'weights/resnet/res50_7587_nonorm.pt')['model'])
    # from timm.models.efficientnet import efficientnet_b0
    # model = efficientnet_b0(pretrained=True)
    # from torchvision.models.vgg import vgg11
    # model = vgg11(pretrained=True)
    # model.load_state_dict(torch.load(MYCV_DIR / 'weights/vgg_7165.pt')['model'])
    model = model.cuda()
    model.eval()
    # results = imagenet_val(model, split='val',
    #                        img_size=224, batch_size=64, workers=8, input_norm='imagenet')
    results = imagenet_val(model,
                           img_size=224, crop_ratio=0.875, interp='bilinear', input_norm='imagenet',
                           cache=True, batch_size=64, workers=8)
    # testloader = get_val_loader('val224l_jpeg28', 'imagenet', 64, 8)
    # results = imcls_evaluate(model, testloader)
    print(results)
|
{"hexsha": "c6c959d99b70cb24728fef6fab2f3fca740764e5", "size": 8448, "ext": "py", "lang": "Python", "max_stars_repo_path": "mycv/datasets/imagenet.py", "max_stars_repo_name": "duanzhiihao/mycv", "max_stars_repo_head_hexsha": "184b52f7a5c1b6f603122d4f4050952b65ba0ead", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mycv/datasets/imagenet.py", "max_issues_repo_name": "duanzhiihao/mycv", "max_issues_repo_head_hexsha": "184b52f7a5c1b6f603122d4f4050952b65ba0ead", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mycv/datasets/imagenet.py", "max_forks_repo_name": "duanzhiihao/mycv", "max_forks_repo_head_hexsha": "184b52f7a5c1b6f603122d4f4050952b65ba0ead", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5466666667, "max_line_length": 105, "alphanum_fraction": 0.6491477273, "include": true, "reason": "import numpy", "num_tokens": 2171}
|
# Hack. :)
import site; site.addsitedir("/usr/local/lib/python2.7/site-packages")

# Standard imports
import cv2
import numpy as np
# print(cv2.__version__)

# Read image as grayscale and invert it so dark blobs become bright
im = cv2.imread("test-images/still2.jpg", cv2.IMREAD_GRAYSCALE)
im = cv2.bitwise_not(im)

# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
params.maxThreshold = 200
params.minThreshold = 0
params.filterByArea = True
params.minArea = 150
params.filterByInertia = False
# params.minInertiaRatio = 0.5
#
params.filterByColor = False
params.minRepeatability = 0
params.filterByCircularity = False
# params.minCircularity = 0.85

# Set up the detector. OpenCV 2.x (the version this script targeted) exposes the
# constructor directly; OpenCV >= 3 renamed the factory to SimpleBlobDetector_create.
if int(cv2.__version__.split('.')[0]) < 3:
    detector = cv2.SimpleBlobDetector(params)
else:
    detector = cv2.SimpleBlobDetector_create(params)

# Detect blobs.
keypoints = detector.detect(im)

# Draw detected blobs as rich-keypoint circles (BGR color 255, 180, 0).
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(cv2.bitwise_not(im), keypoints, np.array([]), (255, 180, 0),
                                      cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Show keypoints
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
|
{"hexsha": "d18ddab76e208621ca79c38c5b1338421cf1433e", "size": 1150, "ext": "py", "lang": "Python", "max_stars_repo_path": "Trial1.py", "max_stars_repo_name": "aliceyoung9/bubblewrap", "max_stars_repo_head_hexsha": "46066605f114f07bd78efec0740ff2b6c5b83189", "max_stars_repo_licenses": ["WTFPL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Trial1.py", "max_issues_repo_name": "aliceyoung9/bubblewrap", "max_issues_repo_head_hexsha": "46066605f114f07bd78efec0740ff2b6c5b83189", "max_issues_repo_licenses": ["WTFPL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Trial1.py", "max_forks_repo_name": "aliceyoung9/bubblewrap", "max_forks_repo_head_hexsha": "46066605f114f07bd78efec0740ff2b6c5b83189", "max_forks_repo_licenses": ["WTFPL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5555555556, "max_line_length": 107, "alphanum_fraction": 0.7539130435, "include": true, "reason": "import numpy", "num_tokens": 308}
|
module LPA
using Graphs
export nsdlpa, nsdlpa1, nmi, voi, modularity, avedegree, similarity, triangle, quadrangle, triquadrsim
include("labelpropagation.jl")
include("modularity.jl")
include("nmi.jl")
include("voi.jl")
include("utils.jl")
include("rankedge.jl")
end # module
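# Usage sketch (hypothetical calls; the real signatures live in the included files,
# and `nsdlpa` is assumed to take a Graphs.jl graph and return one label per vertex):
# using Graphs, LPA
# g = smallgraph(:karate)
# labels = nsdlpa(g)
# q = modularity(g, labels)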
|
{"hexsha": "a5a38884d55d75b5b31cb54a793bd8e29dbd6138", "size": 278, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LPA.jl", "max_stars_repo_name": "afternone/LPA", "max_stars_repo_head_hexsha": "deefee07b004e26e1bd6a35419c43b5cf0a7b775", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LPA.jl", "max_issues_repo_name": "afternone/LPA", "max_issues_repo_head_hexsha": "deefee07b004e26e1bd6a35419c43b5cf0a7b775", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LPA.jl", "max_forks_repo_name": "afternone/LPA", "max_forks_repo_head_hexsha": "deefee07b004e26e1bd6a35419c43b5cf0a7b775", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.8571428571, "max_line_length": 102, "alphanum_fraction": 0.7589928058, "num_tokens": 88}
|
import numpy as np
import cv2

from ndu_gate_camera.api.video_source import VideoSource, log
from ndu_gate_camera.utility.ndu_utility import NDUUtility

# picamera is only available on a Raspberry Pi; try to install it on demand.
try:
    from picamera import PiCamera
    from picamera.array import PiRGBArray
except ImportError:
    print("picamera library not found - installing...")
    if NDUUtility.install_package("picamera[array]"):
        from picamera import PiCamera
        from picamera.array import PiRGBArray


class PiCameraVideoSource(VideoSource):
    def __init__(self, frame_width=640, frame_height=480, framerate=32, rotation=180, show_preview=False):
        super().__init__()
        self.__camera = None
        self.__frame_width = frame_width
        self.__frame_height = frame_height
        self.__framerate = framerate
        self.__rotation = rotation
        self.__show_preview = show_preview
        self._set_capture()

    def get_frames(self):
        log.info("start camera streaming..")
        frame_num = 0
        for frameFromCam in self.__camera.capture_continuous(self.__rawCapture, format="bgr", use_video_port=True):
            try:
                frame = np.copy(frameFromCam.array)
                frame_num += 1
                yield frame_num, frame
                # clear the stream buffer so it is ready for the next frame
                self.__rawCapture.truncate(0)
            except KeyboardInterrupt:
                self.__rawCapture.truncate(0)
                self.__camera.close()
                cv2.destroyAllWindows()
                log.info("exit from pi camera streaming")
                break
        log.info("finish pi camera streaming")

    def reset(self):
        pass

    def stop(self):
        pass

    def _set_capture(self):
        self.__camera = PiCamera()
        self.__camera.resolution = (self.__frame_width, self.__frame_height)
        self.__camera.framerate = self.__framerate
        self.__camera.rotation = self.__rotation
        self.__rawCapture = PiRGBArray(self.__camera, size=(self.__frame_width, self.__frame_height))
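
# Usage sketch (requires a Raspberry Pi camera; values illustrative):
# source = PiCameraVideoSource(frame_width=640, frame_height=480, framerate=32)
# for frame_num, frame in source.get_frames():
#     cv2.imshow("frame", frame)
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break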
|
{"hexsha": "7fe45144fe04dea28cf17307131124cf42f50096", "size": 2143, "ext": "py", "lang": "Python", "max_stars_repo_path": "ndu_gate_camera/camera/video_sources/pi_camera_video_source.py", "max_stars_repo_name": "netcadlabs/ndu-gate", "max_stars_repo_head_hexsha": "f479c293284fa6582d8682c98abf88e3da33b406", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-11T12:33:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T23:33:22.000Z", "max_issues_repo_path": "ndu_gate_camera/camera/video_sources/pi_camera_video_source.py", "max_issues_repo_name": "netcadlabs/ndu-gate", "max_issues_repo_head_hexsha": "f479c293284fa6582d8682c98abf88e3da33b406", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ndu_gate_camera/camera/video_sources/pi_camera_video_source.py", "max_forks_repo_name": "netcadlabs/ndu-gate", "max_forks_repo_head_hexsha": "f479c293284fa6582d8682c98abf88e3da33b406", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-01T10:50:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-07T10:56:09.000Z", "avg_line_length": 32.4696969697, "max_line_length": 115, "alphanum_fraction": 0.6425571629, "include": true, "reason": "import numpy", "num_tokens": 470}
|
import collections
import collections.abc

import torch
from torch.autograd import Variable
import numpy as np

NUMPY_RANDOM_STATE = np.random.RandomState()


def try_keys(input_dict, keys):
    # return the value of the first key that is present in input_dict
    for k in keys:
        try:
            return input_dict[k]
        except BaseException:
            pass
    return None
def try_next_on_generator(gen, iterable):
    try:
        return gen, next(gen)
    except StopIteration:
        # the generator is exhausted: restart it from the iterable
        gen = iter(iterable)
        return gen, next(gen)


def apply_func_to_dict(input, f):
    # apply f to every value of a mapping, or to the input itself otherwise
    # (collections.abc.Mapping: the collections.Mapping alias was removed in Python 3.10)
    if isinstance(input, collections.abc.Mapping):
        for k, v in input.items():
            input[k] = f(v)
        return input
    else:
        return f(input)


def wrap_variable(batch_data, device):
    def f(x):
        return Variable(x).to(device)
    return apply_func_to_dict(batch_data, f)
def get_hierarchy_label(batch_labels, hierarchy_level):
    def f(v):
        try:
            if v.ndim == 2:
                v = v[:, hierarchy_level]
            return v
        except BaseException:
            return v
    return apply_func_to_dict(batch_labels, f)


def numpy_to_torch(input):
    def f(v):
        try:
            return torch.from_numpy(v)
        except BaseException:
            return v
    return apply_func_to_dict(input, f)


def torch_to_numpy(input):
    def f(v):
        try:
            return v.cpu().numpy()
        except BaseException:
            return v
    return apply_func_to_dict(input, f)


def process_label(labels, hierarchy_level, label_map):
    labels = get_hierarchy_label(labels, hierarchy_level)
    labels = torch_to_numpy(labels)
    labels = label_map(labels, hierarchy_level)
    labels = numpy_to_torch(labels)
    return labels
def pass_data_to_model(model, data, device, **kwargs):
    if isinstance(data, collections.abc.Mapping):
        base_output = {}
        for k, v in data.items():
            base_output[k] = model(wrap_variable(v, device), k=k, **kwargs)
        return base_output
    else:
        return model(wrap_variable(data, device), **kwargs)


def set_requires_grad(model, requires_grad):
    for param in model.parameters():
        param.requires_grad = requires_grad


def copy_params_to_another_model(from_model, to_model):
    params1 = from_model.named_parameters()
    params2 = to_model.named_parameters()
    dict_params2 = dict(params2)
    for name1, param1 in params1:
        if name1 in dict_params2:
            dict_params2[name1].data.copy_(param1.data)
def safe_random_choice(input_data, size):
    """
    Randomly samples without replacement from a sequence. It is "safe" because
    if len(input_data) < size, it will randomly sample WITH replacement
    Args:
        input_data is a sequence, like a torch tensor, numpy array,
            python list, tuple etc
        size is the number of elements to randomly sample from input_data
    Returns:
        An array of size "size", randomly sampled from input_data
    """
    replace = len(input_data) < size
    return NUMPY_RANDOM_STATE.choice(input_data, size=size, replace=replace)


def longest_list(list_of_lists):
    return max(list_of_lists, key=len)


def slice_by_n(input_array, n):
    # split rows into n interleaved groups: output[i] holds rows i, i+n, i+2n, ...
    output = []
    for i in range(n):
        output.append(input_array[i::n])
    return output


def unslice_by_n(input_tensors):
    # inverse of slice_by_n for 2-D tensors: re-interleave the n groups row-wise
    n = len(input_tensors)
    rows, cols = input_tensors[0].size()
    output = torch.zeros((rows * n, cols)).to(input_tensors[0].device)
    for i in range(n):
        output[i::n] = input_tensors[i]
    return output
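
# Illustrative roundtrip (hypothetical tensor): with n=2, slice_by_n splits rows
# [0, 1, 2, 3] into rows [0, 2] and rows [1, 3]; unslice_by_n interleaves them back.
# x = torch.arange(8.).reshape(4, 2)
# parts = slice_by_n(x, 2)
# assert torch.equal(unslice_by_n(parts), x)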
def set_layers_to_eval(layer_name):
    def set_to_eval(m):
        classname = m.__class__.__name__
        if classname.find(layer_name) != -1:
            m.eval()
    return set_to_eval


def get_train_dataloader(dataset, batch_size, sampler, num_workers, collate_fn):
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=int(batch_size),
        sampler=sampler,
        drop_last=True,
        num_workers=num_workers,
        collate_fn=collate_fn,
        shuffle=sampler is None,
        pin_memory=True
    )


def get_eval_dataloader(dataset, batch_size, num_workers, collate_fn):
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=int(batch_size),
        drop_last=False,
        num_workers=num_workers,
        collate_fn=collate_fn,
        shuffle=False,
        pin_memory=True
    )


def try_torch_operation(torch_op, input_val):
    return torch_op(input_val) if torch.is_tensor(input_val) else input_val
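
# Example (illustrative): freeze all BatchNorm layers during fine-tuning.
# model.apply(set_layers_to_eval("BatchNorm"))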
|
{"hexsha": "8f7f90ec90abf16de6479718c78a1a319be501f4", "size": 4679, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch_metric_learning/utils/common_functions.py", "max_stars_repo_name": "jacobdanovitch/pytorch_metric_learning", "max_stars_repo_head_hexsha": "dbcf2d49fffe92f7dc1221b939e182c214633520", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-18T01:51:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-18T01:51:04.000Z", "max_issues_repo_path": "pytorch_metric_learning/utils/common_functions.py", "max_issues_repo_name": "xianhuaxizi/pytorch_metric_learning", "max_issues_repo_head_hexsha": "dbcf2d49fffe92f7dc1221b939e182c214633520", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch_metric_learning/utils/common_functions.py", "max_forks_repo_name": "xianhuaxizi/pytorch_metric_learning", "max_forks_repo_head_hexsha": "dbcf2d49fffe92f7dc1221b939e182c214633520", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-18T01:51:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T01:51:00.000Z", "avg_line_length": 26.7371428571, "max_line_length": 81, "alphanum_fraction": 0.6306903184, "include": true, "reason": "import numpy", "num_tokens": 1011}
|
#include <boost/test/unit_test.hpp>
#include <boost/concept_check.hpp>
#include "che/atom.h" // header to test
using namespace biosim;
BOOST_AUTO_TEST_SUITE(suite_atom)
BOOST_AUTO_TEST_CASE(atom_ctor) {
che::atom a;
BOOST_CHECK(a.get_identifier().empty());
BOOST_CHECK(a.get_position() == math::point());
std::string atom_id("xx");
a = che::atom(atom_id);
BOOST_CHECK(a.get_identifier() == atom_id);
BOOST_CHECK(a.get_position() == math::point());
math::point p({1, 2, 3});
a = che::atom(atom_id, p);
BOOST_CHECK(a.get_identifier() == atom_id);
BOOST_CHECK(a.get_position() == p);
}
BOOST_AUTO_TEST_CASE(atom_cmp_operator) {
che::atom a;
che::atom a2("x");
BOOST_CHECK(!(a < a));
BOOST_CHECK(a < a2);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "ff479e49c15d165d74e897b6adff5b1f0d3d4a5c", "size": 769, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/che/atom.cpp", "max_stars_repo_name": "shze/biosim", "max_stars_repo_head_hexsha": "e9e6d97de0ccf8067e1db15980eb600389fff6ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/che/atom.cpp", "max_issues_repo_name": "shze/biosim", "max_issues_repo_head_hexsha": "e9e6d97de0ccf8067e1db15980eb600389fff6ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/che/atom.cpp", "max_forks_repo_name": "shze/biosim", "max_forks_repo_head_hexsha": "e9e6d97de0ccf8067e1db15980eb600389fff6ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6176470588, "max_line_length": 49, "alphanum_fraction": 0.6866059818, "num_tokens": 210}
|
import numpy as np

from automon import SlackType, SyncType, AutomonCoordinator, RlvCoordinator


def _get_node(NodeClass, domain, d, node_idx, func_to_monitor, max_f_val=np.inf, min_f_val=-np.inf):
    if max_f_val != np.inf or min_f_val != -np.inf:
        node = NodeClass(idx=node_idx, d=d, domain=domain, func_to_monitor=func_to_monitor, max_f_val=max_f_val, min_f_val=min_f_val)
    else:
        node = NodeClass(idx=node_idx, d=d, domain=domain, func_to_monitor=func_to_monitor)
    return node


def _get_coordinator(CoordinatorClass, NodeClass, conf, func_to_monitor, max_f_val=np.inf, min_f_val=-np.inf):
    if CoordinatorClass is AutomonCoordinator:
        coordinator = AutomonCoordinator(conf["num_nodes"], func_to_monitor, slack_type=SlackType(conf["slack_type"]), sync_type=SyncType(conf["sync_type"]),
                                         error_bound=conf["error_bound"], neighborhood_size=conf["neighborhood_size"], d=conf["d"],
                                         max_f_val=max_f_val, min_f_val=min_f_val, domain=conf["domain"])
    elif CoordinatorClass is RlvCoordinator:
        coordinator = RlvCoordinator(conf["num_nodes"], func_to_monitor, slack_type=SlackType(conf["slack_type"]), sync_type=SyncType(conf["sync_type"]),
                                     error_bound=conf["error_bound"], d=conf["d"], max_f_val=max_f_val, min_f_val=min_f_val, domain=conf["domain"])
    else:
        coordinator = CoordinatorClass(NodeClass, conf["num_nodes"], func_to_monitor, slack_type=SlackType(conf["slack_type"]), sync_type=SyncType(conf["sync_type"]),
                                       error_bound=conf["error_bound"], d=conf["d"], domain=conf["domain"])
    return coordinator


def get_objects(NodeClass, CoordinatorClass, conf, func_to_monitor, max_f_val=np.inf, min_f_val=-np.inf):
    nodes = [_get_node(NodeClass, conf["domain"], conf["d"], node_idx, func_to_monitor, max_f_val, min_f_val) for node_idx in range(conf["num_nodes"])]
    coordinator = _get_coordinator(CoordinatorClass, NodeClass, conf, func_to_monitor, max_f_val, min_f_val)
    return coordinator, nodes
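
# Usage sketch (conf keys mirror those read above; values illustrative and the
# node class / monitored function are hypothetical placeholders):
# conf = {"num_nodes": 4, "d": 2, "error_bound": 0.1, "slack_type": 0,
#         "sync_type": 0, "neighborhood_size": 1.0, "domain": None}
# coordinator, nodes = get_objects(SomeNodeClass, AutomonCoordinator, conf, func_to_monitor)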
|
{"hexsha": "5a5dd17a833456d25a0e3789c4a52982a51eb0e5", "size": 2112, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_utils/object_factory.py", "max_stars_repo_name": "hsivan/automon", "max_stars_repo_head_hexsha": "222b17651533bdb2abce7de36a80156ab7b9cc21", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-25T17:50:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T17:50:32.000Z", "max_issues_repo_path": "test_utils/object_factory.py", "max_issues_repo_name": "hsivan/automon", "max_issues_repo_head_hexsha": "222b17651533bdb2abce7de36a80156ab7b9cc21", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_utils/object_factory.py", "max_forks_repo_name": "hsivan/automon", "max_forks_repo_head_hexsha": "222b17651533bdb2abce7de36a80156ab7b9cc21", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-12T08:12:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T08:12:37.000Z", "avg_line_length": 68.1290322581, "max_line_length": 166, "alphanum_fraction": 0.7045454545, "include": true, "reason": "import numpy", "num_tokens": 514}
|
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <boost/serialization/export.hpp>
#include "loop-analysis/nest-analysis.hpp"
#include "model/level.hpp"
#include "mapping/mapping.hpp"
#include "compound-config/compound-config.hpp"
namespace model
{
class ArithmeticUnits : public Level
{
public:
struct Specs : public LevelSpecs
{
static const std::uint64_t kDefaultWordBits = 16;
const std::string Type() const override { return "ArithmeticUnits"; }
Attribute<std::string> name;
Attribute<std::size_t> instances;
Attribute<std::size_t> mesh_x;
Attribute<std::size_t> mesh_y;
Attribute<std::uint64_t> word_bits;
Attribute<double> energy_per_op;
Attribute<double> area;
Attribute<std::string>& Name() { return name; }
const Attribute<std::string>& Name() const { return name; }
Attribute<std::size_t>& Instances() { return instances; }
const Attribute<std::size_t>& Instances() const { return instances; }
Attribute<std::size_t>& MeshX() { return mesh_x; }
const Attribute<std::size_t>& MeshX() const { return mesh_x; }
Attribute<std::size_t>& MeshY() { return mesh_y; }
const Attribute<std::size_t>& MeshY() const { return mesh_y; }
Attribute<std::uint64_t>& WordBits() { return word_bits; }
const Attribute<std::uint64_t>& WordBits() const { return word_bits; }
Attribute<double>& EnergyPerOp() { return energy_per_op; }
const Attribute<double>& EnergyPerOp() const { return energy_per_op; }
Attribute<double>& Area() { return area; }
const Attribute<double>& Area() const { return area; }
// Serialization
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int version = 0)
{
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(LevelSpecs);
if (version == 0)
{
ar& BOOST_SERIALIZATION_NVP(instances);
ar& BOOST_SERIALIZATION_NVP(mesh_x);
ar& BOOST_SERIALIZATION_NVP(mesh_y);
ar& BOOST_SERIALIZATION_NVP(word_bits);
}
}
};
// FIXME: need Spec, Stats, etc.
private:
Specs specs_;
double energy_ = 0;
double area_ = 0;
std::uint64_t cycles_ = 0;
std::size_t utilized_instances_ = 0;
std::uint64_t maccs_ = 0;
// Serialization
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int version = 0)
{
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Level);
if (version == 0)
{
ar& BOOST_SERIALIZATION_NVP(specs_);
ar& BOOST_SERIALIZATION_NVP(energy_);
ar& BOOST_SERIALIZATION_NVP(area_);
ar& BOOST_SERIALIZATION_NVP(cycles_);
ar& BOOST_SERIALIZATION_NVP(utilized_instances_);
ar& BOOST_SERIALIZATION_NVP(maccs_);
}
}
public:
ArithmeticUnits() { }
ArithmeticUnits(const Specs & specs);
~ArithmeticUnits() { }
// The hierarchical ParseSpecs functions are static and do not
// affect the internal specs_ data structure, which is set by
// the dynamic Spec() call later.
static Specs ParseSpecs(config::CompoundConfigNode setting, uint32_t nElements);
std::string Name() const override;
double Energy(problem::Shape::DataSpaceID pv = problem::GetShape()->NumDataSpaces) const override;
double Area() const override;
double AreaPerInstance() const override;
std::uint64_t Cycles() const override;
void Print(std::ostream& out) const override;
// --- Unsupported overrides ---
bool DistributedMulticastSupported() override { return false; }
bool PreEvaluationCheck(const problem::PerDataSpace<std::size_t> working_set_sizes,
const tiling::CompoundMask mask,
const bool break_on_failure) override
{
(void) working_set_sizes;
(void) mask;
(void) break_on_failure;
return true;
}
bool Evaluate(const tiling::CompoundTile& tile, const tiling::CompoundMask& mask,
const double inner_tile_area, const std::uint64_t compute_cycles,
const bool break_on_failure) override
{
(void) tile;
(void) mask;
(void) inner_tile_area;
(void) compute_cycles;
(void) break_on_failure;
return false;
}
std::uint64_t Accesses(problem::Shape::DataSpaceID pv = problem::GetShape()->NumDataSpaces) const override
{
(void) pv;
return 0;
}
double CapacityUtilization() const override { return 0; }
std::uint64_t UtilizedCapacity(problem::Shape::DataSpaceID pv = problem::GetShape()->NumDataSpaces) const override
{
(void) pv;
return 0;
}
std::uint64_t UtilizedInstances(problem::Shape::DataSpaceID pv = problem::GetShape()->NumDataSpaces) const override
{
(void) pv;
return 0;
}
std::uint64_t MaxFanout() const override { return 0; }
// --- Temporary hack interfaces, these will be removed ---
bool HackEvaluate(analysis::NestAnalysis* analysis,
const problem::Workload& workload)
{
assert(is_specced_);
bool success = true;
auto body_info = analysis->GetBodyInfo();
utilized_instances_ = body_info.replication_factor;
auto compute_cycles = body_info.accesses;
// maccs_ = analysis->GetMACs();
// utilized_instances_ = maccs_ / analysis->GetComputeCycles(); // Yuck!!! FIXME.
// assert(body_info.accesses == analysis->GetComputeCycles());
// assert(body_info.replication_factor == utilized_instances_);
if (utilized_instances_ <= specs_.Instances().Get())
{
cycles_ = compute_cycles;
maccs_ = utilized_instances_ * compute_cycles;
energy_ = maccs_ * specs_.EnergyPerOp().Get();
// Scale energy for sparsity.
for (unsigned d = 0; d < problem::GetShape()->NumDataSpaces; d++)
{
if (!problem::GetShape()->IsReadWriteDataSpace.at(d))
energy_ *= workload.GetDensity(d);
}
is_evaluated_ = true;
}
else
{
success = false;
}
return success;
}
std::uint64_t MACCs() const
{
assert(is_evaluated_);
return maccs_;
}
double IdealCycles() const
{
// FIXME: why would this be different from Cycles()?
assert(is_evaluated_);
return double(maccs_) / specs_.Instances().Get();
}
};
} // namespace model
|
{"hexsha": "e61edba00e3cb50664b7e0127c25dc7e90563c38", "size": 7821, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/model/arithmetic.hpp", "max_stars_repo_name": "rbshi/timeloop", "max_stars_repo_head_hexsha": "434d15e85e7ec95a2b87f83fbf0f14885dc7fa75", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/model/arithmetic.hpp", "max_issues_repo_name": "rbshi/timeloop", "max_issues_repo_head_hexsha": "434d15e85e7ec95a2b87f83fbf0f14885dc7fa75", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model/arithmetic.hpp", "max_forks_repo_name": "rbshi/timeloop", "max_forks_repo_head_hexsha": "434d15e85e7ec95a2b87f83fbf0f14885dc7fa75", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0, "max_line_length": 117, "alphanum_fraction": 0.6901930699, "num_tokens": 1892}
|
"""
extract_col_feats(df, cols)
find mean, std, minimum, maximum in df[!, col]
default value of columns are all numeric columns except date
"""
function extract_col_statvals(df::DataFrame, cols::Array{Symbol, 1})
syms = []
types = []
vals = []
for col in cols
μ, σ = mean_and_std(skipmissing(df[!, col]))
minval = minimum(skipmissing(df[!, col]))
maxval = maximum(skipmissing(df[!, col]))
push!(syms, String(col))
push!(types, "μ")
push!(vals, μ)
push!(syms, String(col))
push!(types, "σ")
push!(vals, σ)
push!(syms, String(col))
push!(types, "minimum")
push!(vals, minval)
push!(syms, String(col))
push!(types, "maximum")
push!(vals, maxval)
end
ndsparse((
symbol = syms,
types = types),
(value = vals,))
end
"""
zscore!(df, col, new_col)
Apply zscore (normalization) to dataframe `df`
No need to implement `zscore`, just implement `zscore!``.
"""
function zscore!(df::DataFrame, col::Symbol, new_col::Symbol)
to_be_normalized = df[!, col]
df[!, new_col] = zscore(to_be_normalized)
end
function zscore!(df::DataFrame, col::Symbol, new_col::Symbol, μ::Real, σ::Real)
to_be_normalized = df[!, col]
df[!, new_col] = zscore(to_be_normalized, μ, σ)
end
function zscore!(df::DataFrame, cols::Array{Symbol, 1}, new_cols::Array{Symbol, 1})
for (col, new_col) in zip(cols, new_cols)
zscore!(df, col, new_col)
end
end
function zscore!(df::DataFrame, cols::Array{Symbol, 1}, new_cols::Array{Symbol, 1}, μσs::Array{Real, 1})
for (col, new_col) in zip(cols, new_cols)
μ, σ = μσs[String(col), "μ"].value, μσs[String(col), "σ"].value
zscore!(df, col, new_col, μ, σ)
end
end
"""
unzscore(A, μ, σ)
unzscore in Array using given μ and σ
"""
unzscore(A, μ::Real, σ::Real) = A .* σ .+ μ
"""
unzscore!(df, zcol, ocol, μ, σ)
revert zscore normalization (single column, zcol -> ocol)
"""
function unzscore!(df::DataFrame, zcol::Symbol, ocol::Symbol, μ::Real, σ::Real)
df[!, ocol] = unzscore(df[!, zcol], μ, σ)
end
"""
unzscore!(df, zcols, ocols, μσs)
revert zscore normalization (single column, zcol -> ocol)
"""
function unzscore!(df::DataFrame, zcols::Array{Symbol, 1}, ocols::Array{Symbol, 1}, stattb::AbstractNDSparse)
for (zcol, ocol) in zip(zcols, ocols)
unzscore!(df, zcol, ocol,
stattb[String(ocol), "μ"].value, stattb[String(ocol), "σ"].value)
end
end
minmax_scaling(X::AbstractVector, a::F, b::F) where F<:AbstractFloat =
a .+ (b - a) .* (X .- minimum(X)) ./ (maximum(X) - minimum(X))
minmax_scaling!(df::DataFrame, ocol::Symbol, mcol::Symbol,
a::F, b::F) where F<:AbstractFloat =
df[!, mcol] = minmax_scaling(df[!, ocol], a, b)
"""
minmax_scaling!(df, ocols, mcols, a, b)
min-max normalization from df[!, ocols] to df[!, mcols], new range is (a, b)
https://en.wikipedia.org/wiki/Feature_scaling#Rescaling_(min-max_normalization)
Y = a + (X - min(X)) * (b - a) / (maximum(X) - minimum(X))
"""
function minmax_scaling!(df::DataFrame, ocols::Array{Symbol, 1}, mcols::Array{Symbol, 1},
a::F, b::F) where F<:AbstractFloat
for (ocol, mcol) in zip(ocols, mcols)
minmax_scaling!(df, ocol, mcol, a, b)
end
end
unminmax_scaling(Y::AbstractVector, minY::Real, maxY::Real, a::F, b::F) where F<:AbstractFloat =
(Y .- a) .* ((maxY - minY) / (b - a)) .+ minY
"""
unminmax_scaling!(df, mcol, ocol, minY, maxY, a, b)
revert min-max normalization (single column)
"""
function unminmax_scaling!(df::DataFrame,
mcol::Symbol, ocol::Symbol, minY::Real, maxY::Real, a::F, b::F) where F<:AbstractFloat
df[!, ocol] = unminmax_scaling(df[!, mcol], minY, maxY, a, b)
end
"""
unminmax_scaling!(df, mcols, ocols, minY, maxY, a, b)
revert min-max normalization (multiple column)
"""
function unminmax_scaling!(df::DataFrame, zcols::Array{Symbol, 1}, ocols::Array{Symbol, 1},
stattb::AbstractNDSparse, a::F, b::F) where F<:AbstractFloat
for (zcol, ocol) in zip(zcols, ocols)
unminmax_scaling!(df, zcol, ocol,
stattb[String(ocol), "minimum"].value, stattb[String(ocol), "maximum"].value, a, b)
end
end
"""
exclude_elem(cols, target_col)
Exclude element and return new splited array
"""
function exclude_elem(cols, target_col)
new_cols = copy(cols)
deleteat!(new_cols, new_cols .== target_col)
new_cols
end
"""
findrow(df, col, val)
Find fist row number in df[!, `col`] as `val` by brute-force
"""
function findrow(df::DataFrame, col::Symbol, val::Union{<:Real, DateTime, ZonedDateTime})
idx = 0
for row in eachrow(df)
idx += 1
if (row[col] == val)
return idx
end
end
idx = 0
idx
end
"""
WHO_PM10(val::Real)
return WHO PM10 level
1: Good
2: Normal
3: Bad
4: Very Bad
"""
function WHO_PM10(val::Real)
if 0 <= val < 31
return 1
elseif 31 <= val < 81
return 2
elseif 81 <= val < 151
return 3
else
return 4
end
end
"""
WHO_PM25(val::Real)
return WHO PM25 level
1: Good
2: Normal
3: Bad
4: Very Bad
"""
function WHO_PM25(val::Real)
if 0 <= val < 16
return 1
elseif 16 <= val < 36
return 2
elseif 36 <= val < 76
return 3
else
return 4
end
end
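# Illustrative values: WHO_PM10(50) == 2 ("Normal"), WHO_PM25(80) == 4 ("Very Bad").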
|
{"hexsha": "281d1c3c6caf7b6c27e889d97951695ecebcbdb0", "size": 5400, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils.jl", "max_stars_repo_name": "appleparan/Mise.jl", "max_stars_repo_head_hexsha": "2b4d3d6012d830ac37edf60e276fb47d8bc3a493", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils.jl", "max_issues_repo_name": "appleparan/Mise.jl", "max_issues_repo_head_hexsha": "2b4d3d6012d830ac37edf60e276fb47d8bc3a493", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.jl", "max_forks_repo_name": "appleparan/Mise.jl", "max_forks_repo_head_hexsha": "2b4d3d6012d830ac37edf60e276fb47d8bc3a493", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6575342466, "max_line_length": 109, "alphanum_fraction": 0.6074074074, "num_tokens": 1735}
|
classdef ConstantDetectionProbabilityX < DetectionModelX
% ConstantDetectionProbabilityX class
%
% Summary of ConstantDetectionProbabilityX
% This is a class implementation of a detection model with a (nominally)
% constant probability of detection over the state space.
%
% ConstantDetectionProbabilityX Properties:
% + DetectionProbability - The detection probability.
%
% ConstantDetectionProbabilityX Methods:
% + pdf - Function to evaluate the detection probability at given
% points in the state space.
%
% (+) denotes public properties/methods
%
% February 2018 Lyudmil Vladimirov, University of Liverpool
properties
DetectionProbability
end
methods (Access = protected)
function initialise_(this, config)
if(isfield(config,'DetectionProbability'))
this.DetectionProbability = config.DetectionProbability;
end
end
end
methods
function this = ConstantDetectionProbabilityX(varargin)
% ConstantDetectionProbabilityX Constructor method
%
% Parameters
% ----------
% DetectionProbability: scalar
% The detection probability over the search space.
%
% See also pdf.
% Call SuperClass method
this@DetectionModelX();
% First check to see if a structure was received
if(nargin==1)
if(isstruct(varargin{1}))
config = varargin{1};
this.initialise_(config);
return;
end
end
% Otherwise, fall back to input parser
parser = inputParser;
parser.KeepUnmatched = true;
parser.parse(varargin{:});
config = parser.Unmatched;
this.initialise_(config);
end
function int = pdf(this, varargin)
% pdf Evaluates the detection probability at given points xk of the state space
%
% Parameters
% ----------
% xk: matrix
% A (NumStateDims x Ns) matrix, whose columns correspond to Ns individual state vectors.
%
% Returns
% -------
% prob: matrix
% A (1 x Ns) matrix, where prob(1,j) = p(D_k | x_k^j)
Ns = 1;
if(nargin>1)
xk = varargin{1};
Ns = size(xk,2);
end
int = this.DetectionProbability*ones(1,Ns);
if(nargin>1)
% Hard-coded polygonal region (in the x-y plane) inside which the
% detection probability drops to 0.1, e.g. to model an occluded area.
x = [-3422.85261310741,-3191.58441229017,-2993.55608122595,-3228.08558459284, -3422.85261310741];
y = [1.472966334316646e+03,1.123217741935625e+03,1.243952901078573e+03,1.579667558371468e+03, 1.472966334316646e+03];
a = find(inpolygon(xk(1,:),xk(3,:),x,y));
if numel(a)>0
int(a) = 0.1;
end
end
end
end
end
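% Usage sketch (values illustrative):
% detModel = ConstantDetectionProbabilityX('DetectionProbability', 0.9);
% Pd = detModel.pdf(xk); % xk is a (NumStateDims x Ns) matrix of state vectors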
|
{"author": "sglvladi", "repo": "TrackingX", "sha": "f737445c070f0d7d470f52f8a2b5540d5bb682da", "save_path": "github-repos/MATLAB/sglvladi-TrackingX", "path": "github-repos/MATLAB/sglvladi-TrackingX/TrackingX-f737445c070f0d7d470f52f8a2b5540d5bb682da/Models/Detection/ConstantDetectionProbabilityX/ConstantDetectionProbabilityX.m"}
|
using LinearAlgebra
using DynamicPolynomials
using SwitchOnSafety
using Combinatorics
using SparseArrays
using JuMP, Ipopt, MosekTools,NLopt
using SpecialFunctions
include("../src/RandomTrajectories.jl")
include("../src/AlgebraicLift.jl")
include("../src/ScenarioOpti.jl")
include("../src/ProbabilisticCertificates.jl")
include("../src/WhiteBoxAnalysis.jl")
dim = 2; numMode = 2; dimIn = 1
numScen_budget = 25000
#A = [[0.7218345749085846 0.013504049625046477; -0.763247200146052 0.823150250105976], [0.726724028000425 0.8385770769048322; -0.7232303846748045 0.19537267020502425]]
#B = [0.11672890767112509; 0.4312206258057798]
#A = [[-0.3734630321404846 -0.6513237917108774; 0.611847898506932 -0.14691027837992898], [0.3747919292302151 -0.08207253587307806; -0.8177042535328924 0.9426235103657543]]
#B = [-0.9774997862889991; -0.8641681288852348]
A = [[-1.6856 -0.1665; 0.7785 -1.6321], [-0.2915 -3.2824; 3.9761 -0.02274]]
B = [0.1975; 0.8640]
#White-box stabilization: 2.558311047916961 || K: [-1.8331184879886193 1.2753159154459983]
#JSR closed: 2.558246300039718
#N: 25000 || JSR closed 1: 2.6002448624504044 || K1: [-2.025691319888477 0.5406776018397965]
#N: 25000 || JSR closed 2: 2.1188168833776038 || K2: [-2.2516722982746136 0.9063335766321441]
#N: 25000 || JSR closed Prob1: 2.6035935786083138
#N: 25000 || JSR closed Prob2: 2.498819909826915
#********************************************************************************
#Any[2.695232520900475, 2.674074081741287, 2.729600649051829, 2.6667216693128517, 2.706072184752529, 2.702619595713564, 2.648464804760466, 2.6653129335199033, 2.665245140619883, 2.665281621475283, 2.658163946585573, 2.6708404866766746, 2.6723342532394714, 2.6384082203178854, 2.635717827065524, 2.633730855076457, 2.630347424450738, 2.632837466095163, 2.6026732357136044, 2.6026621569217685, 2.6023646183131173, 2.602361893532313, 2.6023594999299027, 2.6023573853992077, 2.6035935786083138]
#Any[3.8251690468073387, 3.20893549519724, 2.966930942539263, 2.8477772878809633, 2.772279061072514, 2.718173807725743, 2.6805996701615182, 2.651451591441884, 2.625969877375425, 2.6084164508827676, 2.5919475079323266, 2.5786916289788278, 2.566714341883191, 2.556709600267829, 2.5500015175118, 2.5422608841199703, 2.5354319232123, 2.526895233952895, 2.5257927399510214, 2.519090722975895, 2.5147916192546593, 2.511811356397815, 2.5079508952793628, 2.5020114643530764, 2.498819909826915]
##A = [[-2.9396 -3.6753; 1.8871 2.6726], [-4.1092 3.0318; 0.7372 -3.7320]]
##B = [0.2243; -3.6134]
##A = [[0.9223 0.2282; -1.2757 4.5807], [-2.5570 0.5024; 2.8555 0.7018]]
##B = [-3.0504; 2.6078]
##Any[9.0680584310352, 8.236969563479372, 5.040720605928883, 4.558580902486594, 4.490287179292396, 4.441211080373888, 4.423207321301255, 4.334792271008403, 4.389600160256621, 4.406849394359625, 4.402103593362474, 4.379457798753759, 4.380624734417577, 4.376779171976696, 4.389533867709163, 4.331405619534877, 4.366935413577316, 4.352774882451329, 4.3491741522480085, 4.353790613087748, 4.3731367194570545, 4.380324972077929, 4.379704065619696, 4.374382756817267, 4.367557105006393]
##Any[6.772360404335801, 5.34298685942547, 4.736107060832709, 4.425814269741812, 4.206162165147484, 4.058056181109807, 3.939541382590546, 3.847818018886943, 3.77855495773821, 3.718865231397386, 3.668302859622709, 3.6302297229240477, 3.5965757873267212, 3.560187164132347, 3.5348402709886293, 3.509900422563054, 3.488134646503217, 3.4694308495380604, 3.4506290076220116, 3.435153546014039, 3.420363339606061, 3.40697151166377, 3.3934353363000973, 3.382253461141298, 3.3727843666038697]
##White-box stabilization: 3.01351540984877 || K: [-0.28312813988790014 -0.29647902807944704]
##JSR closed: 3.013495908047195
##N: 25000 || JSR closed 1: 4.361339641605917 || K1: [-0.651004527631084 0.02797491475362021]
##N: 25000 || JSR closed 2: 2.9945408400720335 || K2: [-0.18150119321526154 -0.2400504391716816]
##N: 25000 || JSR closed Prob1: 4.367557105006393
##N: 25000 || JSR closed Prob2: 3.3727843666038697
##A = [[3.7951 0.4777; -3.2639 -3.6179], [4.1073 -0.9736; 1.8045 -2.1278]]
##B = [-2.7183; 2.3688]
##A = [[-2.4863 4.9076; 1.5580 -3.2878], [-0.2529 -1.0603; 2.3881 4.9242]]
##B = [1.1435; 1.9476]
##Any[8.370532899066617, 6.187095911542206, 5.84145466486238, 5.6928578727285455, 5.6422106988990315, 5.483080200501635, 5.514760283215316, 5.562694716182327, 5.571580746135338, 5.49819182839881, 5.507378578860731, 5.538420395466981, 5.529008550464572, 5.618853834095716, 5.560347716814375, 5.617553906114067, 5.616301274366272, 5.61287696775652, 5.612263567996752, 5.606516820923405, 5.511843891885627]
##Any[10.747090196272392, 7.702281702464826, 6.812757741029949, 6.2596051681031035, 5.960238479547801, 5.724785018557283, 5.551537061471664, 5.410718628045469, 5.30133764066168, 5.210556792796057, 5.1348637522562575, 5.071234315820118, 5.013204771234054, 4.962438766185439, 4.921199273807425, 4.882110960189069, 4.855767673983964, 4.82386981845116, 4.795728016171695, 4.769418292300936, 4.747311345089707]
##White-box stabilization: 3.9656700839325714 || K: [-0.8683244194245358 -0.5485599765184244]
##JSR closed: 3.965608491407373
##N: 21000 || JSR closed 1: 5.506434766615985 || K1: [-0.8594476289711871 -1.2410237558786164]
##N: 21000 || JSR closed 2: 4.067970658151304 || K2: [-0.8855679702001946 -0.3466089882223222]
##N: 21000 || JSR closed Prob1: 5.511843891885627
##N: 21000 || JSR closed Prob2: 4.747311345089707
##A = [[1.2847752917460404 -4.294823846678941; 0.3680548348155881 0.014732983067902161], [1.1712220013664698 -0.8818060555034961; 1.6530187731608859 -2.3732736764432127]]
##B = [0.32980020816855316; 2.992543632417817]
##A = [[-0.7758 -0.9779; 0.9748 0.7367], [0.7864 -0.2455; 0.9516 0.5357]]
##B = [-0.6358; -0.04498]
##A = [[-0.8350343770388022 -0.006107458495185014; 1.6179435194682048 0.08247284108420949], [-0.5991942074864163 -1.7792515399993318; 1.1279527670550538 0.9404192977577894]]
##B = [0.9917434505323639; 1.3179954593019438]
##A = [[-0.835 -0.006; 1.617 0.0825], [-0.599 -1.779; 1.128 0.9404]]
##B = [0.992; 1.318]
##A= [[2.196 3.187; 1.758 2.183], [2.4684 4.844; 1.383 4.565]]
##B= [-2.212; -1.980]
##A= [[2.1962930715063305 3.187441172268997; 1.7577099273813346 2.183228120757372], [2.4683584311233293 4.84422595233246; 1.3828879364062585 4.565095890995014]]
##B= [-2.212099386961055; -1.9795927901090082]
##A=[[2.671281119684634 -4.876263859500199; -4.392395002400294 2.7156007276877574], [4.645925349681839 2.206066227650134; -4.267978590442674 1.7848087937926742]]
##B= [3.743666731025746; -1.4650217059382165]
##A = [[-0.7757566511432756 -0.9778654656915116; 0.97480024345306 0.7366948958837525], [0.7863913424227396 -0.2454901059378174; 0.9515607611942283 0.535698625247981]]
##B = [-0.6357718059899597; -0.0449794151233891]
##A = [[0.3992966486009579 -0.8825975300456066; 0.9466203607162913 0.8220671916996221], [-0.05865322607237777 -0.32416336552150593; -0.5565696843656855 -0.8985422238439993]]
##B = [-0.5919429619683738; 0.18179898964335361]
##A = [[0.8388521952671368 -0.14857670536251177; 0.8633807216781597 0.490362061793125], [0.6013596101878895 0.6697432384693109; -0.12770751192259544 0.9150610500819121]]
##B = [0.9467520554779116; 0.20399193884377942]
B = reshape(B,dim,dimIn)
jsrbound = white_box_jsr(A)
gaTrue,K = white_box_stabilization_quad(A,B)
jsrboundclosed = white_box_jsr([Ai+B*K for Ai in A])
(state0_budget,state_budget) = generate_trajectories(1,A,numScen_budget)
K0 = zeros(dimIn,dim)
jrs_boundorder1 = []
jrs_boundorder2 = []
#=
for N in 200:100:1000
K1,jsr_bound1 = probabilistc_stability_certificate(state0_budget[:,1:N],state_budget[:,1:N];B=B,numMode=numMode,d=1,batchsize=N,K0=K0,beta=0.01,tol=1e-3)
K2,jsr_bound2 = probabilistc_stability_certificate(state0_budget[:,1:N],state_budget[:,1:N];B=B,numMode=numMode,d=2,batchsize=N,K0=K0,beta=0.01,tol=1e-3)
append!(jrs_boundorder1,jsr_bound1)
append!(jrs_boundorder2,jsr_bound2)
Aclose1 = [Ai+B*K1 for Ai in A]
jsrboundclose1 = white_box_jsr(Aclose1)
Aclose2 = [Ai+B*K2 for Ai in A]
jsrboundclose2 = white_box_jsr(Aclose2)
println(repeat('*', 80))
println("White-box stabilization: $gaTrue")
println("JSR closed: $jsrboundclosed")
println("N: $N|| JSR closed 1: $jsrboundclose1")
println("N: $N|| JSR closed 2: $jsrboundclose2")
println("N: $N|| JSR closed Prob1: $jsr_bound1")
println("N: $N|| JSR closed Prob2: $jsr_bound2")
println(repeat('*', 80))
println(jrs_boundorder1)
println(jrs_boundorder2)
end
=#
for N in 1000:1000:numScen_budget
K1,jsr_bound1 = probabilistc_stability_certificate(state0_budget[:,1:N],state_budget[:,1:N];B=B,numMode=numMode,d=1,batchsize=1000,K0=K0,beta=0.01,tol=1e-4)
K2,jsr_bound2 = probabilistc_stability_certificate(state0_budget[:,1:N],state_budget[:,1:N];B=B,numMode=numMode,d=2,batchsize=1000,K0=K0,beta=0.01,tol=1e-4)
append!(jrs_boundorder1,jsr_bound1)
append!(jrs_boundorder2,jsr_bound2)
Aclose1 = [Ai+B*K1 for Ai in A]
jsrboundclose1 = white_box_jsr(Aclose1)
Aclose2 = [Ai+B*K2 for Ai in A]
jsrboundclose2 = white_box_jsr(Aclose2)
println(repeat('*', 80))
println("White-box stabilization: $gaTrue || K: $K")
println("JSR closed: $jsrboundclosed")
println("N: $N || JSR closed 1: $jsrboundclose1 || K1: $K1")
println("N: $N || JSR closed 2: $jsrboundclose2 || K2: $K2")
println("N: $N || JSR closed Prob1: $jsr_bound1")
println("N: $N || JSR closed Prob2: $jsr_bound2")
println(repeat('*', 80))
println(jrs_boundorder1)
println(jrs_boundorder2)
end
#=
using Plots
gr(size = (450, 400))
fn = plot(1000:1000:25000, Any[jrs_boundorder1,jrs_boundorder2,fill(jsrboundclosed,25)],xticks = (5000:5000:25000, string.(5000:5000:25000)), xtickfontsize=8,ytickfontsize=12,label = ["Quadratic stabilization" "SOS stabilization (d=2)" ""],line = [:solid :solid :dashdot], lw = 2)
xlabel!("N")
ylabel!("Upper bound of the closed-loop JSR")
savefig(fn,"fn.png")
jrs_boundorder1 = [8.595098769674822, 5.840679902801846, 5.811875529890767, 5.6722257356414945, 5.67299484581915, 5.271039662245451, 5.60428293458043, 5.600297212959155, 5.6312980507395745, 5.625358913593358, 5.607825800405484, 5.603517531302264]
jrs_boundorder2 = [11.121139350134298, 7.678201860264276, 6.78494588724533, 6.281737427184408, 5.948202603570425, 5.717780994064778, 5.547373903923299, 5.407482275161193, 5.2937570819690025, 5.210422326909742, 5.133632949484108, 5.06780663383821]
=#
|
{"hexsha": "9b3f2be00a65f03894dbd0114489b490b3c3c01a", "size": 10701, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/QuadvsSOSexample.jl", "max_stars_repo_name": "zhemingwang/DataDrivenSwitchControl", "max_stars_repo_head_hexsha": "2bb43ad448d77d52a8c1633a225549c3160f2eb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/QuadvsSOSexample.jl", "max_issues_repo_name": "zhemingwang/DataDrivenSwitchControl", "max_issues_repo_head_hexsha": "2bb43ad448d77d52a8c1633a225549c3160f2eb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/QuadvsSOSexample.jl", "max_forks_repo_name": "zhemingwang/DataDrivenSwitchControl", "max_forks_repo_head_hexsha": "2bb43ad448d77d52a8c1633a225549c3160f2eb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.619047619, "max_line_length": 491, "alphanum_fraction": 0.7335763013, "num_tokens": 4594}
|
"""The WaveBlocks Project
Plot the evolution of the relations between the parameters P and Q of a
homogeneous or inhomogeneous Hagedorn wavepacket during the time
propagation.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
import sys
from numpy import conj, abs
from matplotlib.pyplot import *
from WaveBlocks import IOManager
import GraphicsDefaults as GD
def read_data_homogeneous(iom, blockid=0):
"""
:param iom: An ``IOManager`` instance providing the simulation data.
"""
parameters = iom.load_parameters()
timegrid = iom.load_wavepacket_timegrid(blockid=blockid)
time = timegrid * parameters["dt"]
Pi = iom.load_wavepacket_parameters(blockid=blockid)
Phist = [ Pi[:,0] ]
Qhist = [ Pi[:,1] ]
return (time, Phist, Qhist)
def read_data_inhomogeneous(iom, blockid=0):
"""
:param iom: An ``IOManager`` instance providing the simulation data.
"""
parameters = iom.load_parameters()
timegrid = iom.load_inhomogwavepacket_timegrid(blockid=blockid)
time = timegrid * parameters["dt"]
Pi = iom.load_inhomogwavepacket_parameters(blockid=blockid)
    Phist = [ Pi[i][:,0] for i in range(parameters["ncomponents"]) ]
    Qhist = [ Pi[i][:,1] for i in range(parameters["ncomponents"]) ]
return (time, Phist, Qhist)
def plot_parameters(blockid, timegrid, Phist, Qhist):
# Plot the time evolution of the parameters P, Q, S, p and q
fig = figure(figsize=(12,12))
ax = fig.gca()
for ptem, qtem in zip(Phist, Qhist):
ax.plot(timegrid, abs(conj(qtem)*ptem - conj(ptem)*qtem - 2.0j))
ax.grid(True)
ax.ticklabel_format(style="sci", scilimits=(0,0), axis="y")
ax.set_xlabel(r"Time $t$")
ax.set_ylabel(r"$| \overline{Q} P - \overline{P} Q - 2i |$")
ax.set_title(r"Compatibility condition $\overline{Q} P - \overline{P} Q = 2i$")
fig.savefig("conjQP-conjPQ_block"+str(blockid)+GD.output_format)
close(fig)
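# ---------------------------------------------------------------------------
# Quick sanity check (an editorial addition, not part of the original script):
# for the standard initial Hagedorn parameters Q = 1 and P = 1j we have
#     conj(Q)*P - conj(P)*Q = 1j - (-1j) = 2j,
# so the quantity plotted above should stay close to zero whenever the
# propagation preserves the compatibility condition.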
if __name__ == "__main__":
iom = IOManager()
# Read file with simulation data
try:
iom.open_file(filename=sys.argv[1])
except IndexError:
iom.open_file()
# Iterate over all blocks
for blockid in iom.get_block_ids():
print("Plotting PQ relation of data block '"+str(blockid)+"'")
# See if we have an inhomogeneous wavepacket in the current data block
if iom.has_inhomogwavepacket(blockid=blockid):
data = read_data_inhomogeneous(iom, blockid=blockid)
plot_parameters(blockid, *data)
# If not, we test for a homogeneous wavepacket next
elif iom.has_wavepacket(blockid=blockid):
data = read_data_homogeneous(iom, blockid=blockid)
plot_parameters(blockid, *data)
# There is no wavepacket in the current block
else:
print("Warning: No wavepacket found in block '"+str(blockid)+"'!")
iom.finalize()
|
{"hexsha": "51b56ab06a3c4ea38a073f2ed8990e556628d499", "size": 2944, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plotters_simple/PlotPQRelation.py", "max_stars_repo_name": "WaveBlocks/WaveBlocks", "max_stars_repo_head_hexsha": "2af3730dcf27e54006ec602e696b4d4df25459d8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/plotters_simple/PlotPQRelation.py", "max_issues_repo_name": "WaveBlocks/WaveBlocks", "max_issues_repo_head_hexsha": "2af3730dcf27e54006ec602e696b4d4df25459d8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plotters_simple/PlotPQRelation.py", "max_forks_repo_name": "WaveBlocks/WaveBlocks", "max_forks_repo_head_hexsha": "2af3730dcf27e54006ec602e696b4d4df25459d8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6666666667, "max_line_length": 83, "alphanum_fraction": 0.6708559783, "include": true, "reason": "from numpy", "num_tokens": 800}
|
import time
from typing import Callable, Union
import numpy as np
from .utils import time_fn
from .stopping_reason import StoppingReason
class Settings:
def __init__(self,
n_max_iterations=50,
damping_constant=0.0,
loss_stop_threshold=0.0,
grad_norm_stop_threshold=0.0,
step_norm_stop_threshold=0.0,
verbose=True):
self.n_max_iterations = n_max_iterations
self.damping_constant = damping_constant
self.loss_stop_threshold = loss_stop_threshold
self.grad_norm_stop_threshold = grad_norm_stop_threshold
self.step_norm_stop_threshold = step_norm_stop_threshold
self.verbose = verbose
class OptimizationState:
def __init__(self):
self.iter_ind = None
self.variables_val = None
self.residuals_val = None
self.jacobian_val = None
self.gradient_val = None
self.gradient_norm = None
self.loss_val = None
self.hessian_val = None
self.step_val = None
self.step_norm = None
self.stopping_reason = StoppingReason.NotStopped
def gauss_newton(
residuals_func: Callable,
jacobian_func: Callable,
x0: Union[np.ndarray],
settings: Settings = None,
update_functor: Callable = None):
start_time_optimization = time.time()
if settings is None:
settings = Settings()
    if not isinstance(x0, np.ndarray):
        x0 = np.array(x0, dtype=np.float32)
    # np.float (an alias of float64) was removed in recent NumPy releases
    assert x0.dtype in [np.float32, np.float64]
state = OptimizationState()
state.variables_val = x0.copy()
n_variables = len(state.variables_val)
eye = np.eye(n_variables)
for iter_ind in range(settings.n_max_iterations):
state.iter_ind = iter_ind
state.residuals_val, elapsed_residuals = time_fn(residuals_func, state.variables_val)
state.jacobian_val, elapsed_jacobian = time_fn(jacobian_func, state.variables_val)
assert state.residuals_val.ndim == 1
n_residuals = len(state.residuals_val)
assert state.jacobian_val.ndim == 2
assert state.jacobian_val.shape == (n_residuals, n_variables)
state.gradient_val = state.jacobian_val.T @ state.residuals_val
state.gradient_norm = np.linalg.norm(state.gradient_val)
state.loss_val = 0.5 * state.residuals_val.T @ state.residuals_val
state.hessian_val = state.jacobian_val.T @ state.jacobian_val
state.hessian_val += settings.damping_constant * eye
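        # Damped normal equations: solve (J^T J + c*I) dx = -J^T r, i.e. a
        # Gauss-Newton step with optional Levenberg-style damping c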
state.step_val = -np.linalg.solve(state.hessian_val, state.gradient_val)
state.step_norm = np.linalg.norm(state.step_val)
elapsed_upd = 0
if update_functor is not None:
functor_result, elapsed_upd = time_fn(update_functor, state.variables_val, state)
if functor_result is False:
state.stopping_reason = StoppingReason.ByCallback
break
if settings.verbose:
print(
f"{iter_ind + 1}/{settings.n_max_iterations}. "
f"f(x) = {state.loss_val}, "
f"|∇f(x)| = {state.gradient_norm} "
f"|Δx| = {state.step_norm} "
f"t(f) = {elapsed_residuals}, "
f"t(∇f) = {elapsed_jacobian} "
f"t(upd) = {elapsed_upd} "
)
if state.loss_val <= settings.loss_stop_threshold:
state.stopping_reason = StoppingReason.ByLossValue
break
if state.gradient_norm <= settings.grad_norm_stop_threshold:
state.stopping_reason = StoppingReason.ByGradNorm
break
if state.step_norm <= settings.step_norm_stop_threshold:
state.stopping_reason = StoppingReason.ByStepNorm
break
state.variables_val += state.step_val
# end of main loop
if state.stopping_reason == StoppingReason.NotStopped:
state.stopping_reason = StoppingReason.ByMaxIterations
if settings.verbose:
print(f"Optimization elapsed: {time.time() - start_time_optimization}")
return state.variables_val, state
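# ---------------------------------------------------------------------------
# Minimal usage sketch (an editorial addition, not part of the original
# module; assumes the package-relative imports above resolve, e.g. when run
# as `python -m pygauss_newton.gauss_newton`). Fits y = a*exp(b*t) to samples
# of a known exponential, so the optimizer should recover (a, b) = (2, -1.5).
if __name__ == "__main__":
    t = np.linspace(0.0, 1.0, 20)
    y_obs = 2.0 * np.exp(-1.5 * t)

    def residuals(x):
        a, b = x
        return a * np.exp(b * t) - y_obs

    def jacobian(x):
        a, b = x
        e = np.exp(b * t)
        # Columns: d(residual)/da and d(residual)/db
        return np.stack([e, a * t * e], axis=1)

    x_opt, final_state = gauss_newton(residuals, jacobian, np.array([1.0, 0.0]))
    print("estimated (a, b):", x_opt)
    print("stopping reason:", final_state.stopping_reason)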
|
{"hexsha": "b898162c53987c297a15ddd28e30703408c68cc0", "size": 4168, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygauss_newton/gauss_newton.py", "max_stars_repo_name": "Daiver/pygauss_newton", "max_stars_repo_head_hexsha": "63dd741c6edaeb2891842dc4d0b714412b30a04c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pygauss_newton/gauss_newton.py", "max_issues_repo_name": "Daiver/pygauss_newton", "max_issues_repo_head_hexsha": "63dd741c6edaeb2891842dc4d0b714412b30a04c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pygauss_newton/gauss_newton.py", "max_forks_repo_name": "Daiver/pygauss_newton", "max_forks_repo_head_hexsha": "63dd741c6edaeb2891842dc4d0b714412b30a04c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6239316239, "max_line_length": 93, "alphanum_fraction": 0.6473128599, "include": true, "reason": "import numpy", "num_tokens": 929}
|
"""
Tests sklearn Imputers: MissingIndicator and SimpleImputer
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.impute import MissingIndicator, SimpleImputer
try:
from sklearn.preprocessing import Imputer
except ImportError:
    # Imputer was deprecated in sklearn >= 0.22
Imputer = None
from hummingbird.ml._utils import onnx_runtime_installed, tvm_installed
import hummingbird.ml
class TestSklearnSimpleImputer(unittest.TestCase):
def _test_simple_imputer(self, model, data, backend):
model.fit(data)
hb_model = hummingbird.ml.convert(model, backend, data)
self.assertIsNotNone(hb_model)
np.testing.assert_allclose(
model.transform(data), hb_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_simple_imputer_float_inputs(self):
model = SimpleImputer(strategy="mean", fill_value="nan")
data = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
for backend in ["torch", "torch.jit"]:
self._test_simple_imputer(model, data, backend)
def test_simple_imputer_no_nan_inputs(self):
model = SimpleImputer(missing_values=0, strategy="most_frequent")
data = np.array([[1, 2], [1, 3], [7, 6]], dtype=np.float32)
for backend in ["torch", "torch.jit"]:
self._test_simple_imputer(model, data, backend)
def test_simple_imputer_nan_to_0(self):
model = SimpleImputer(strategy="constant", fill_value=0)
data = np.array([[1, 2], [1, 3], [7, 6]], dtype=np.float32)
for backend in ["torch", "torch.jit"]:
self._test_simple_imputer(model, data, backend)
# TVM tests
@unittest.skipIf(not (tvm_installed()), reason="TVM test requires TVM")
def test_simple_imputer_float_inputs_tvm(self):
model = SimpleImputer(strategy="mean", fill_value="nan")
data = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
self._test_simple_imputer(model, data, "tvm")
@unittest.skipIf(not (tvm_installed()), reason="TVM test requires TVM")
def test_simple_imputer_no_nan_inputs_tvm(self):
model = SimpleImputer(missing_values=0, strategy="most_frequent")
data = np.array([[1, 2], [1, 3], [7, 6]], dtype=np.float32)
self._test_simple_imputer(model, data, "tvm")
@unittest.skipIf(not (tvm_installed()), reason="TVM test requires TVM")
def test_simple_imputer_nan_to_0_tvm(self):
model = SimpleImputer(strategy="constant", fill_value=0)
data = np.array([[1, 2], [1, 3], [7, 6]], dtype=np.float32)
self._test_simple_imputer(model, data, "tvm")
class TestSklearnImputer(unittest.TestCase):
def _test_imputer(self, model, data):
data_tensor = torch.from_numpy(data)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertIsNotNone(torch_model)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data_tensor), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(Imputer is None, reason="Imputer was deprecated in scikit-learn >= 0.22")
def test_imputer_float_inputs(self):
model = Imputer(strategy="mean")
data = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
self._test_imputer(model, data)
@unittest.skipIf(Imputer is None, reason="Imputer was deprecated in scikit-learn >= 0.22")
def test_imputer_no_nan_inputs(self):
model = Imputer(missing_values=0, strategy="most_frequent")
data = np.array([[1, 2], [1, 3], [7, 6]], dtype=np.float32)
self._test_imputer(model, data)
class TestSklearnMissingIndicator(unittest.TestCase):
def _test_sklearn_missing_indic(self, model, data, backend):
data_tensor = torch.from_numpy(data)
hb_model = hummingbird.ml.convert(model, backend, data)
self.assertIsNotNone(hb_model)
np.testing.assert_allclose(
model.transform(data), hb_model.transform(data_tensor), rtol=1e-06, atol=1e-06,
)
def test_missing_indicator_float_inputs(self):
for features in ["all", "missing-only"]:
model = MissingIndicator(features=features)
data = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
model.fit(data)
for backend in ["torch", "torch.jit"]:
self._test_sklearn_missing_indic(model, data, backend)
def test_missing_indicator_float_inputs_isnan_false(self):
for features in ["all", "missing-only"]:
model = MissingIndicator(features=features, missing_values=0)
data = np.array([[1, 2], [0, 3], [7, 6]], dtype=np.float32)
model.fit(data)
for backend in ["torch", "torch.jit"]:
self._test_sklearn_missing_indic(model, data, backend)
# TVM tests
@unittest.skipIf(not (tvm_installed()), reason="TVM test requires TVM")
def test_missing_indicator_float_inputs_tvm(self):
for features in ["all", "missing-only"]:
model = MissingIndicator(features=features)
data = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
model.fit(data)
self._test_sklearn_missing_indic(model, data, "tvm")
@unittest.skipIf(not (tvm_installed()), reason="TVM test requires TVM")
def test_missing_indicator_float_inputs_isnan_false_tvm(self):
for features in ["all", "missing-only"]:
model = MissingIndicator(features=features, missing_values=0)
data = np.array([[1, 2], [0, 3], [7, 6]], dtype=np.float32)
model.fit(data)
self._test_sklearn_missing_indic(model, data, "tvm")
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "d4e049e4c8eb759332ccca716aa561151db7386a", "size": 5777, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_sklearn_imputer_converter.py", "max_stars_repo_name": "vumichien/hummingbird", "max_stars_repo_head_hexsha": "8981e11ce2536167c329a5d9d20e81125a792fe4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2772, "max_stars_repo_stars_event_min_datetime": "2020-05-04T21:03:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:00:03.000Z", "max_issues_repo_path": "tests/test_sklearn_imputer_converter.py", "max_issues_repo_name": "vumichien/hummingbird", "max_issues_repo_head_hexsha": "8981e11ce2536167c329a5d9d20e81125a792fe4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 486, "max_issues_repo_issues_event_min_datetime": "2020-05-05T00:45:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T01:02:31.000Z", "max_forks_repo_path": "tests/test_sklearn_imputer_converter.py", "max_forks_repo_name": "vumichien/hummingbird", "max_forks_repo_head_hexsha": "8981e11ce2536167c329a5d9d20e81125a792fe4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 232, "max_forks_repo_forks_event_min_datetime": "2019-11-02T22:06:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:36:17.000Z", "avg_line_length": 37.0320512821, "max_line_length": 94, "alphanum_fraction": 0.6527609486, "include": true, "reason": "import numpy", "num_tokens": 1483}
|
using Test
using Unitful: m, s, cm
using UnitfulRecipes: recipe!, UnitFormatter
import RecipesBase
Attributes = Dict{Symbol, Any}
@testset "One Array" begin
attr = Attributes()
ys_val = [1, 2.3]
ys = ys_val * m
ys_ret = recipe!(attr, ys)
@test ys_ret ≈ ys_val
@test attr[:yformatter] == UnitFormatter(m)
attr = Attributes(:yunit => cm)
ys_ret = recipe!(attr, ys)
@test ys_ret ≈ ys_val * 100
@test !haskey(attr, :yunit)
attr = Attributes(:ylims => (100cm, 2m))
ys_ret = recipe!(attr, ys)
@test ys_ret ≈ ys_val
@test attr[:ylims] == (1, 2)
end
@testset "Multi Array" begin
attr = Attributes()
xs_val = randn(3)
ys_val = randn(3)
xu = s
yu = m/s
xs = xs_val * xu
ys = ys_val * yu
xs_ret, ys_ret = recipe!(attr, xs_val, ys)
@test xs_ret ≈ xs_val
@test ys_ret ≈ ys_val
@test !haskey(attr, :xformatter)
@test haskey(attr, :yformatter)
xs_ret, ys_ret = recipe!(attr, xs, ys)
@test xs_ret ≈ xs_val
@test ys_ret ≈ ys_val
@test haskey(attr, :xformatter)
@test haskey(attr, :yformatter)
zs_val = randn(3)
xs_ret, ys_ret, zs_ret = recipe!(attr, xs, ys, zs_val)
@test xs_ret ≈ xs_val
@test ys_ret ≈ ys_val
@test zs_ret ≈ zs_val
@test haskey(attr, :xformatter)
@test haskey(attr, :yformatter)
@test !haskey(attr, :zformatter)
end
@testset "format" begin
@test UnitFormatter(cm)(170.0) == "170.0cm"
@test_broken UnitFormatter(cm)(170.000000000001) == "170.0cm"
end
|
{"hexsha": "05d8ada9198fae44160d9ac24bd99ec25f8fdb8c", "size": 1537, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/UnitfulRecipes.jl-42071c24-d89e-48dd-8a24-8a12d9b8861f", "max_stars_repo_head_hexsha": "d7d1c530c2b5a60a7b0acad23c25445324d45dbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/UnitfulRecipes.jl-42071c24-d89e-48dd-8a24-8a12d9b8861f", "max_issues_repo_head_hexsha": "d7d1c530c2b5a60a7b0acad23c25445324d45dbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/UnitfulRecipes.jl-42071c24-d89e-48dd-8a24-8a12d9b8861f", "max_forks_repo_head_hexsha": "d7d1c530c2b5a60a7b0acad23c25445324d45dbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6166666667, "max_line_length": 65, "alphanum_fraction": 0.6174365647, "num_tokens": 525}
|
import torch
import scipy
from torch_geometric.utils import add_self_loops
from torch_scatter import scatter_add
############################# Our model
def get_directed_adj(edge_index, num_nodes, dtype, edge_weight=None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1),), dtype=dtype,
device=edge_index.device)
else:
edge_weight = torch.FloatTensor(edge_weight).to(edge_index.device)
fill_value = 1
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
# out degree
out_deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
# in degree
in_deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
# avg_out_deg = sum(out_deg) / num_nodes
# avg_in_deg = sum(in_deg) / num_nodes
# out_deg = out_deg + avg_out_deg
# in_deg = in_deg + avg_in_deg
out_deg_inv_sqrt = out_deg.pow(-0.5)
in_deg_inv_sqrt = in_deg.pow(-0.5)
# deg_inv = out_deg.pow(-1)
# deg_inv[deg_inv == float('inf')] = 0
out_deg_inv_sqrt[out_deg_inv_sqrt == float('inf')] = 0
in_deg_inv_sqrt[in_deg_inv_sqrt == float('inf')] = 0
# print("deg :", deg.shape)
# print('edge_weight:\n', edge_weight.shape)
# print('deg[row]: \n', deg[row])
# print('deg[col]: \n', deg[col])
# zz
return edge_index, out_deg_inv_sqrt[row] * edge_weight * in_deg_inv_sqrt[col]
'''
The following code is adapted from DiGCN.
'''
def get_undirected_adj(edge_index, num_nodes, dtype):
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
fill_value = 1
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def get_pr_directed_adj(alpha, edge_index, num_nodes, dtype, edge_weight = None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
else:
edge_weight = torch.FloatTensor(edge_weight).to(edge_index.device)
fill_value = 1
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv = deg.pow(-1)
deg_inv[deg_inv == float('inf')] = 0
p = deg_inv[row] * edge_weight
p_dense = torch.sparse.FloatTensor(edge_index, p, torch.Size([num_nodes,num_nodes])).to_dense()
# pagerank p
p_pr = (1.0-alpha) * p_dense + alpha / num_nodes * torch.ones((num_nodes,num_nodes), dtype=dtype, device=p.device)
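    # The stationary distribution pi of the PageRank chain is the left
    # eigenvector of p_pr for eigenvalue 1 (its Perron vector), computed below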
eig_value, left_vector = scipy.linalg.eig(p_pr.numpy(),left=True,right=False)
eig_value = torch.from_numpy(eig_value.real)
left_vector = torch.from_numpy(left_vector.real)
val, ind = eig_value.sort(descending=True)
# assert val[0] == 1.0
pi = left_vector[:,ind[0]] # choose the largest eig vector
pi = pi/pi.sum() # norm pi
# Note that by scaling the vectors, even the sign can change. That's why positive and negative elements might get flipped.
assert len(pi[pi<0]) == 0
pi_inv_sqrt = pi.pow(-0.5)
pi_inv_sqrt[pi_inv_sqrt == float('inf')] = 0
pi_inv_sqrt = pi_inv_sqrt.diag()
pi_sqrt = pi.pow(0.5)
pi_sqrt[pi_sqrt == float('inf')] = 0
pi_sqrt = pi_sqrt.diag()
# L_pr
L = (torch.mm(torch.mm(pi_sqrt, p_pr), pi_inv_sqrt) + torch.mm(torch.mm(pi_inv_sqrt, p_pr.t()), pi_sqrt)) / 2.0
# make nan to 0
L[torch.isnan(L)] = 0
# # let little possbility connection to 0, make L sparse
# L[ L < (1/num_nodes)] = 0
# L[ L < 5e-4] = 0
# transfer dense L to sparse
L_indices = torch.nonzero(L, as_tuple=False).t()
L_values = L[L_indices[0], L_indices[1]]
edge_index = L_indices
edge_weight = L_values
###########
L_indices_2 = torch.cat([L_indices[1], L_indices[0]], dim=0).reshape(2, -1)
L_values_2 = L[L_indices[1], L_indices[0]]
edge_index_2 = L_indices_2
###########
# row normalization
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, edge_index_2, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col], deg_inv_sqrt[col] * edge_weight * deg_inv_sqrt[row]
def get_appr_directed_adj(alpha, edge_index, num_nodes, dtype, edge_weight=None):
    if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
fill_value = 1
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv = deg.pow(-1)
deg_inv[deg_inv == float('inf')] = 0
p = deg_inv[row] * edge_weight
# personalized pagerank p
p_dense = torch.sparse.FloatTensor(edge_index, p, torch.Size([num_nodes,num_nodes])).to_dense()
p_v = torch.zeros(torch.Size([num_nodes+1,num_nodes+1]))
p_v[0:num_nodes,0:num_nodes] = (1-alpha) * p_dense
p_v[num_nodes,0:num_nodes] = 1.0 / num_nodes
p_v[0:num_nodes,num_nodes] = alpha
p_v[num_nodes,num_nodes] = 0.0
p_ppr = p_v
eig_value, left_vector = scipy.linalg.eig(p_ppr.numpy(),left=True,right=False)
eig_value = torch.from_numpy(eig_value.real)
left_vector = torch.from_numpy(left_vector.real)
val, ind = eig_value.sort(descending=True)
pi = left_vector[:,ind[0]] # choose the largest eig vector
pi = pi[0:num_nodes]
p_ppr = p_dense
pi = pi/pi.sum() # norm pi
# Note that by scaling the vectors, even the sign can change. That's why positive and negative elements might get flipped.
assert len(pi[pi<0]) == 0
pi_inv_sqrt = pi.pow(-0.5)
pi_inv_sqrt[pi_inv_sqrt == float('inf')] = 0
pi_inv_sqrt = pi_inv_sqrt.diag()
pi_sqrt = pi.pow(0.5)
pi_sqrt[pi_sqrt == float('inf')] = 0
pi_sqrt = pi_sqrt.diag()
# L_appr
L = (torch.mm(torch.mm(pi_sqrt, p_ppr), pi_inv_sqrt) + torch.mm(torch.mm(pi_inv_sqrt, p_ppr.t()), pi_sqrt)) / 2.0
# make nan to 0
L[torch.isnan(L)] = 0
# transfer dense L to sparse
L_indices = torch.nonzero(L,as_tuple=False).t()
L_values = L[L_indices[0], L_indices[1]]
edge_index = L_indices
edge_weight = L_values
# row normalization
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def get_second_directed_adj(edge_index, num_nodes, dtype):
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
fill_value = 1
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv = deg.pow(-1)
deg_inv[deg_inv == float('inf')] = 0
p = deg_inv[row] * edge_weight
p_dense = torch.sparse.FloatTensor(edge_index, p, torch.Size([num_nodes,num_nodes])).to_dense()
L_in = torch.mm(p_dense.t(), p_dense)
L_out = torch.mm(p_dense, p_dense.t())
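    # Keep only entries supported by both proximity matrices (their
    # intersection); note that the assignments below alias L_in/L_out
    # rather than copying them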
L_in_hat = L_in
L_out_hat = L_out
L_in_hat[L_out == 0] = 0
L_out_hat[L_in == 0] = 0
# L^{(2)}
L = (L_in_hat + L_out_hat) / 2.0
L[torch.isnan(L)] = 0
L_indices = torch.nonzero(L,as_tuple=False).t()
L_values = L[L_indices[0], L_indices[1]]
edge_index = L_indices
edge_weight = L_values
# row normalization
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
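############################# Minimal usage sketch
# An editorial addition for illustration (not part of the original file):
# normalizes a 3-node directed cycle 0 -> 1 -> 2 -> 0; self-loops are added
# internally by get_directed_adj.
if __name__ == "__main__":
    edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]], dtype=torch.long)
    index, weight = get_directed_adj(edge_index, num_nodes=3, dtype=torch.float32)
    print(index)   # 6 edges: the 3 original ones plus 3 self-loops
    print(weight)  # entries of D_out^{-1/2} (A + I) D_in^{-1/2}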
|
{"hexsha": "585f823bfdb591a7a40e49a20de9e52e5e175584", "size": 8357, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/get_adj.py", "max_stars_repo_name": "dfuttu1/AGNN", "max_stars_repo_head_hexsha": "579a28388ba3e28d3382ef71c4ab089bedb4705a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-10T00:00:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T00:00:53.000Z", "max_issues_repo_path": "code/get_adj.py", "max_issues_repo_name": "dfuttu1/AGNN", "max_issues_repo_head_hexsha": "579a28388ba3e28d3382ef71c4ab089bedb4705a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/get_adj.py", "max_forks_repo_name": "dfuttu1/AGNN", "max_forks_repo_head_hexsha": "579a28388ba3e28d3382ef71c4ab089bedb4705a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1774891775, "max_line_length": 141, "alphanum_fraction": 0.6547804236, "include": true, "reason": "import scipy", "num_tokens": 2394}
|
""" Object to process a single raw image"""
import inspect
import numpy as np
from pypeit import msgs
from pypeit.core import procimg
from pypeit.core import flat
from pypeit.images import pypeitimage
from pypeit.par import pypeitpar
from IPython import embed
class ProcessRawImage(pypeitimage.PypeItImage):
"""
Class to process a raw image
Args:
filename (:obj:`str` or None):
Filename
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
Spectrograph used to take the data.
det (:obj:`int`, optional):
The 1-indexed detector number to process.
par (:class:`pypeit.par.pypeitpar.ProcessImagesPar`):
Parameters that dictate the processing of the images. See
:class:`pypeit.par.pypeitpar.ProcessImagesPar` for the
defaults.
Attributes:
steps (dict):
Dict describing the steps performed on the image
_bpm (np.ndarray):
Holds the bad pixel mask once loaded
_rawdatasec_img (np.ndarray):
Holds the rawdatasec_img once loaded
hdu (fits.HDUList):
HDUList of the file
"""
def __init__(self, filename, spectrograph, det, par):
# Init me
pypeitimage.PypeItImage.__init__(self, spectrograph, det)
# Required parameters
if not isinstance(par, pypeitpar.ProcessImagesPar):
msgs.error("Bad par input")
self.par = par # ProcessImagesPar
self.filename = filename
# Attributes
self._reset_internals()
self._bpm = None
self._rawdatasec_img = None
self.hdu = None
# Load
self.load_rawframe()
# All possible processing steps
# Note these have to match the method names below
self.steps = dict(subtract_bias=False,
subtract_overscan=False,
subtract_dark=False,
trim=False,
apply_gain=False,
orient=False,
flatten=False,
)
@property
def amps(self):
"""
Return a list of the amplifier indices, 1-indexed
Returns:
list
"""
return np.unique(self.rawdatasec_img[self.rawdatasec_img > 0]).tolist()
@property
def bpm(self):
"""
Generate and return the bad pixel mask for this image
Warning: BPM masks are for processed (e.g. trimmed, rotated) images only!
Returns:
np.ndarray: Bad pixel mask with a bad pixel = 1
"""
if self._bpm is None:
self._bpm = self.spectrograph.bpm(shape=self.image.shape,
filename=self.filename,
det=self.det)
return self._bpm
@property
def rawdatasec_img(self):
"""
Generate and return the datasec image in the Raw reference frame
Returns:
np.ndarray
"""
if self._rawdatasec_img is None:
self._rawdatasec_img = self.spectrograph.get_rawdatasec_img(self.filename, self.det)
return self._rawdatasec_img
@property
def oscansec_img(self):
"""
Generate and return the oscansec image
Returns:
np.ndarray
"""
oimg = self.spectrograph.get_oscansec_img(self.filename, self.det)
return oimg
def _reset_steps(self):
"""
        Reset all the processing steps to False.
        Should consider setting the Image to None too.
"""
for key in self.steps.keys():
self.steps[key] = False
def _reset_internals(self):
"""
Init or free up memory by resetting the Attributes to None
"""
self.rawvarframe = None
self.crmask = None
self.mask = None
self.rn2img = None
def apply_gain(self, force=False):
"""
Apply the Gain values to self.image
Args:
force (bool, optional):
Returns:
np.ndarray: copy of self.image
"""
step = inspect.stack()[0][3]
        # Check if the gain was already applied
if self.steps[step] and (not force):
msgs.warn("Gain was already applied. Returning")
return self.image.copy()
gain = np.atleast_1d(self.spectrograph.detector[self.det - 1]['gain']).tolist()
# Apply
self.image *= procimg.gain_frame(self.rawdatasec_img, gain, trim=self.steps['trim'])
self.steps[step] = True
# Return
return self.image.copy()
def process(self, process_steps, pixel_flat=None, illum_flat=None,
bias=None, bpm=None, debug=False):
"""
Process the image
Note: The processing steps are currently 'frozen' as is.
We may choose to allow optional ordering of the steps
Args:
process_steps (list):
List of processing steps
pixel_flat (np.ndarray, optional):
Pixel flat image
illum_flat (np.ndarray, optional):
Illumination flat
bias (np.ndarray, optional):
Bias image
bpm (np.ndarray, optional):
Bad pixel mask image
"""
# Standard order
# -- May need to allow for other order some day..
if 'subtract_overscan' in process_steps:
self.subtract_overscan()
if 'trim' in process_steps:
self.trim()
if 'subtract_bias' in process_steps: # Bias frame, if it exists, is trimmed
self.subtract_bias(bias)
if 'apply_gain' in process_steps:
self.apply_gain()
if 'orient' in process_steps:
self.orient()
# Flat field
if 'flatten' in process_steps:
self.flatten(pixel_flat, illum_flat=illum_flat, bpm=bpm)
# Return copy of the image
return self.image.copy()
def flatten(self, pixel_flat, illum_flat=None, bpm=None, force=False):
"""
Flat field the image
Wrapper to flat.flatfield
Args:
pixel_flat (np.ndarray):
Pixel flat image
illum_flat (np.ndarray, optional):
Illumination flat image
bpm (np.ndarray, optional):
                Bad pixel mask image; if provided, overrides the internal one
force (bool, optional):
Force the processing even if the image was already processed
"""
step = inspect.stack()[0][3]
        # Check if already flat fielded
if self.steps[step] and (not force):
msgs.warn("Image was already flat fielded. Returning the current image")
return self.image.copy()
# BPM
if bpm is None:
bpm = self.bpm
# Do it
self.image = flat.flatfield(self.image, pixel_flat, bpm, illum_flat=illum_flat)
self.steps[step] = True
def load_rawframe(self):
"""
Load a raw image from disk using the Spectrograph method load_raw_frame()
Also loads up the binning, exposure time, and header of the Primary image
And the HDUList in self.hdu
        The file read is ``self.filename``, set at instantiation.
        """
# Load
self.image, self.hdu, \
= self.spectrograph.load_raw_frame(self.filename, det=self.det)
self.head0 = self.hdu[0].header
# Shape
self.orig_shape = self.image.shape
# Exposure time
self.exptime = self.spectrograph.get_meta_value(self.filename, 'exptime')
# Binning
self.binning = self.spectrograph.get_meta_value(self.filename, 'binning')
if self.spectrograph.detector[self.det-1]['specaxis'] == 1:
self.binning_raw = (',').join(self.binning.split(',')[::-1])
else:
self.binning_raw = self.binning
def orient(self, force=False):
"""
Orient the image in the PypeIt format with spectra running blue (down)
to red (up).
Args:
force (bool, optional):
Force the processing even if the image was already processed
"""
step = inspect.stack()[0][3]
# Orient the image to have blue/red run bottom to top
# Check if already oriented
if self.steps[step] and not force:
msgs.warn("Image was already oriented. Returning current image")
return self.image.copy()
# Orient me
self.image = self.spectrograph.orient_image(self.image, self.det)
self.steps[step] = True
def subtract_bias(self, bias_image, force=False):
"""
Perform bias subtraction
Args:
bias_image (np.ndarray):
Bias image
force (bool, optional):
Force the processing even if the image was already processed
"""
step = inspect.stack()[0][3]
        # Check if already bias subtracted
if self.steps[step] and (not force):
msgs.warn("Image was already bias subtracted. Returning the current image")
return self.image.copy()
# Do it
self.image -= bias_image
self.steps[step] = True
def subtract_overscan(self, force=False):
"""
Analyze and subtract the overscan from the image
Args:
force (bool, optional):
Force the processing even if the image was already processed
"""
step = inspect.stack()[0][3]
        # Check if the overscan was already subtracted
        if self.steps[step] and (not force):
            msgs.warn("Overscan was already subtracted. Returning the current image")
            return self.image.copy()
temp = procimg.subtract_overscan(self.image, self.rawdatasec_img, self.oscansec_img,
method=self.par['overscan'],
params=self.par['overscan_par'])
# Fill
self.steps[step] = True
self.image = temp
def trim(self, force=False):
"""
Trim the image to include only the science data
Args:
force (bool, optional):
Force the processing even if the image was already processed
"""
step = inspect.stack()[0][3]
# Check input image matches the original
if self.orig_shape is not None:
if self.image.shape != self.orig_shape:
msgs.warn("Image shape does not match original. Returning current image")
return self.image.copy()
# Check if already trimmed
if self.steps[step] and (not force):
msgs.warn("Image was already trimmed. Returning current image")
return self.image
# Do it
trim_image = procimg.trim_frame(self.image, self.rawdatasec_img < 1)
# Overwrite
self.image = trim_image
self.steps[step] = True
def __repr__(self):
return ('<{:s}: file={}, steps={}>'.format(
self.__class__.__name__, self.filename, self.steps))
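# -----------------------------------------------------------------------------
# Usage sketch (an editorial addition, not part of the original module; the
# filename below is a hypothetical placeholder and the exact par lookup is an
# assumption about the PypeIt parameter tree):
#
#   from pypeit.spectrographs.util import load_spectrograph
#   spec = load_spectrograph('shane_kast_blue')
#   par = spec.default_pypeit_par()['calibrations']['biasframe']['process']
#   rawImage = ProcessRawImage('raw_frame.fits', spec, det=1, par=par)
#   image = rawImage.process(['subtract_overscan', 'trim', 'apply_gain',
#                             'orient'])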
|
{"hexsha": "ec2c731d8c13c2458cd2bbc3c02aecc1c4970fa1", "size": 11177, "ext": "py", "lang": "Python", "max_stars_repo_path": "pypeit/images/processrawimage.py", "max_stars_repo_name": "seib2/PypeIt", "max_stars_repo_head_hexsha": "18ce33aa8aa12b8ee51303ad87a723ec81e0e6f8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-12-18T21:56:23.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-18T21:56:23.000Z", "max_issues_repo_path": "pypeit/images/processrawimage.py", "max_issues_repo_name": "feigewang/PypeIt", "max_issues_repo_head_hexsha": "4c68b38cb907345a480d7afee58200a05ecd4556", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pypeit/images/processrawimage.py", "max_forks_repo_name": "feigewang/PypeIt", "max_forks_repo_head_hexsha": "4c68b38cb907345a480d7afee58200a05ecd4556", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.117816092, "max_line_length": 96, "alphanum_fraction": 0.5700098416, "include": true, "reason": "import numpy", "num_tokens": 2426}
|
from coopihc.base.StateElement import StateElement
from coopihc.base.utils import (
StateNotContainedError,
StateNotContainedWarning,
)
from coopihc.base.elements import integer_set, box_space
import numpy
import pytest
import json
import copy
from tabulate import tabulate
def test_array_init_integer():
x = StateElement(2, integer_set(3))
assert hasattr(x, "space")
assert x.shape == ()
assert x == 2
def test_array_init_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
assert hasattr(x, "space")
assert x.shape == (2, 2)
assert (x == numpy.zeros((2, 2))).all()
def test_array_init():
test_array_init_integer()
test_array_init_numeric()
def test_array_init_error_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(4, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="error")
def test_array_init_error_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
with pytest.raises(StateNotContainedError):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
numpy.array([[0, 0], [-2, 0]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
def test_array_init_error():
test_array_init_error_integer()
test_array_init_error_numeric()
def test_array_init_warning_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(4, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="warning")
def test_array_init_warning_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="warning"
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
numpy.array([[0, 0], [-2, 0]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
def test_array_init_warning():
test_array_init_warning_integer()
test_array_init_warning_numeric()
def test_array_init_clip_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([2])
x = StateElement(4, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([2])
x = StateElement(-3, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([0])
def test_array_init_clip_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="clip"
)
assert (x == numpy.zeros((2, 2))).all()
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
assert (x == numpy.ones((2, 2))).all()
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
assert (x == -1.0 * numpy.ones((2, 2))).all()
def test_array_init_clip():
test_array_init_clip_integer()
test_array_init_clip_numeric()
def test_array_init_dtype_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="warning")
assert x.dtype == numpy.int64
x = StateElement(2, integer_set(3, dtype=numpy.int16), out_of_bounds_mode="warning")
assert x.dtype == numpy.int16
def test_array_init_dtype_numeric():
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.float64
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2), dtype=numpy.float32)),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.float32
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2), dtype=numpy.int8)),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.int8
def test_array_init_dtype():
test_array_init_dtype_integer()
test_array_init_dtype_numeric()
# def test__array_ufunc__discrete():
# # Simple arithmetic
# global discr_space
# x = StateElement(2, discr_space, out_of_bounds_mode="error")
# assert x + numpy.array(1) == 3
# assert x + 1 == 3
# assert x - 1 == 1
# assert 3 - x == 1
# assert x - numpy.array(1) == 1
# assert numpy.array(3) - x == 1
# assert 1 + x == 3
# x += 1
# y = x - 1
# assert y.out_of_bounds_mode == "error"
# with pytest.raises(StateNotContainedError):
# 1 - x
# with pytest.raises(StateNotContainedError):
# x + 2
# with pytest.raises(StateNotContainedError):
# x += 5
# def test__array_ufunc__continuous():
# # some matrix operations
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (x + numpy.ones((2, 2)) == numpy.ones((2, 2))).all()
# assert (x + 1 == numpy.ones((2, 2))).all()
# assert (1 + x == numpy.ones((2, 2))).all()
# assert (x - 1 == -numpy.ones((2, 2))).all()
# assert (1 - x == numpy.ones((2, 2))).all()
# assert ((1 + x) * 0.5 == 0.5 * numpy.ones((2, 2))).all()
# assert (0.5 * (1 + x) @ numpy.ones((2, 2)) == numpy.ones((2, 2))).all()
# def test__array_ufunc__multidiscrete():
# global multidiscr_space
# x = StateElement([1, 1, 8], multidiscr_space, out_of_bounds_mode="error")
# assert (x + numpy.array([[1], [1], [-3]]) == numpy.array([[2], [2], [5]])).all()
# with pytest.raises(StateNotContainedError):
# x + numpy.array([[1], [1], [1]])
# def test__array_ufunc__comparisons():
# global discr_space
# x = StateElement(2, discr_space, out_of_bounds_mode="error")
# assert x > 1 == True
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (x < 0).all() == False
# global multidiscr_space
# x = StateElement(
# numpy.array([[1], [1], [1]]), multidiscr_space, out_of_bounds_mode="error"
# )
# assert (x >= numpy.array([[1], [0], [1]])).all() == True
# assert (x >= numpy.array([[1], [5], [1]])).all() == False
# comp = x >= numpy.array([[1], [5], [1]])
# assert (comp == numpy.array([[True], [False], [True]])).all()
# def test__array_ufunc__trigonometry():
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (numpy.cos(x) == numpy.ones((2, 2))).all()
# def test__array_ufunc__floating():
# global cont_space
# x = StateElement(
# numpy.array([[0.2, 0.3], [1, 0.95]]), cont_space, out_of_bounds_mode="error"
# )
# assert numpy.isfinite(x).all() == True
# def test__array_ufunc__out_of_bounds_mode():
# x = StateElement(
# numpy.array([[0.2, 0.3], [1, 0.95]]), cont_space, out_of_bounds_mode="error"
# )
# y = StateElement(
# numpy.array([[-0.2, -0.3], [-1, -0.95]]),
# cont_space,
# out_of_bounds_mode="warning",
# )
# z = StateElement(
# numpy.array([[0.0, 0.0], [0.0, 0.0]]),
# cont_space,
# out_of_bounds_mode="silent",
# )
# u = x + y
# assert u.out_of_bounds_mode == "error"
# u = y + x
# assert u.out_of_bounds_mode == "error"
# u = z + x
# assert u.out_of_bounds_mode == "error"
# u = y + z
# assert u.out_of_bounds_mode == "warning"
# u = z + 0
# assert u.out_of_bounds_mode == "silent"
# def test__array_ufunc__():
# test__array_ufunc__discrete()
# test__array_ufunc__continuous()
# test__array_ufunc__multidiscrete()
# test__array_ufunc__comparisons()
# test__array_ufunc__trigonometry()
# test__array_ufunc__floating()
# test__array_ufunc__out_of_bounds_mode()
# def test_amax_nothandled():
# StateElement.HANDLED_FUNCTIONS = {}
# cont_space = autospace(
# [[-1, -1], [-1, -1]], [[1, 1], [1, 1]], dtype=numpy.float64
# ) # Here the
# x = StateElement(
# numpy.array([[0, 0.1], [-0.5, 0.8]], dtype=numpy.float64),
# cont_space,
# out_of_bounds_mode="warning",
# )
# # Without handled function
# with pytest.warns(NumpyFunctionNotHandledWarning):
# y = numpy.max(x)
# assert isinstance(y, numpy.ndarray)
# assert not isinstance(y, StateElement)
# assert y == 0.8
# assert not hasattr(y, "space")
# assert not hasattr(y, "out_of_bounds_mode")
# def test_amax_implements_decorator():
# cont_space = autospace([[-1, -1], [-1, -2]], [[1, 1], [1, 3]], dtype=numpy.float64)
# x = StateElement(
# numpy.array([[0, 0.1], [-0.5, 0.8]], dtype=numpy.float64),
# cont_space,
# out_of_bounds_mode="warning",
# )
# @StateElement.implements(numpy.amax)
# def amax(arr, **keywordargs):
# space, out_of_bounds_mode, kwargs = (
# arr.space,
# arr.out_of_bounds_mode,
# arr.kwargs,
# )
# obj = arr.view(numpy.ndarray)
# argmax = numpy.argmax(obj, **keywordargs)
# index = numpy.unravel_index(argmax, arr.space.shape)
# obj = numpy.amax(obj, **keywordargs)
# obj = numpy.asarray(obj).view(StateElement)
# if arr.space.space_type == "continuous":
# obj.space = autospace(
# numpy.atleast_2d(arr.space.low[index[0], index[1]]),
# numpy.atleast_2d(arr.space.high[index[0], index[1]]),
# )
# else:
# raise NotImplementedError
# obj.out_of_bounds_mode = arr.out_of_bounds_mode
# obj.kwargs = arr.kwargs
# return obj
# y = numpy.amax(x)
# assert isinstance(y, StateElement)
# assert StateElement.HANDLED_FUNCTIONS.get(numpy.amax) is not None
# assert x.HANDLED_FUNCTIONS.get(numpy.amax) is not None
# assert y.shape == ()
# assert y == 0.8
# assert y.space.space_type == "continuous"
# assert y.space.shape == (1, 1)
# assert y.space.low == numpy.array([[-2]])
# assert y.space.high == numpy.array([[3]])
# def test_array_function_simple():
# test_amax_nothandled()
# test_amax_implements_decorator()
# def test__array_function__():
# test_array_function_simple()
def test_equals_integer():
int_space = integer_set(3)
other_int_space = integer_set(4)
x = StateElement(numpy.array(1), int_space)
y = StateElement(numpy.array(1), other_int_space)
assert x.equals(y)
assert not x.equals(y, mode="hard")
z = StateElement(numpy.array(2), int_space)
assert not x.equals(z)
def test_equals_numeric():
numeric_space = box_space(numpy.ones((2, 2)))
other_numeric_space = box_space(
low=numpy.array([[-1, -1], [-1, -2]]), high=numpy.array([[1, 2], [1, 1]])
)
x = StateElement(numpy.zeros((2, 2)), numeric_space)
y = StateElement(numpy.zeros((2, 2)), other_numeric_space)
assert (x.equals(y)).all()
assert not (x.equals(y, mode="hard")).all()
z = StateElement(numpy.eye(2), numeric_space)
assert not (x.equals(z)).all()
def test_equals():
test_equals_integer()
test_equals_numeric()
def test__iter__integer():
x = StateElement([2], integer_set(3))
with pytest.raises(TypeError):
next(iter(x))
def test__iter__numeric():
x = StateElement(
numpy.array([[0.2, 0.3], [0.4, 0.5]]), box_space(numpy.ones((2, 2)))
)
for i, _x in enumerate(x):
if i == 0:
assert (
_x == StateElement(numpy.array([0.2, 0.3]), box_space(numpy.ones((2,))))
).all()
if i == 1:
assert (
_x == StateElement(numpy.array([0.4, 0.5]), box_space(numpy.ones((2,))))
).all()
for j, _xx in enumerate(_x):
print(i, j)
if i == 0 and j == 0:
assert _xx == StateElement(
numpy.array(0.2), box_space(numpy.float64(1))
)
elif i == 0 and j == 1:
assert _xx == StateElement(
numpy.array(0.3), box_space(numpy.float64(1))
)
elif i == 1 and j == 0:
assert _xx == StateElement(
numpy.array(0.4), box_space(numpy.float64(1))
)
elif i == 1 and j == 1:
assert _xx == StateElement(
numpy.array(0.5), box_space(numpy.float64(1))
)
def test__iter__():
test__iter__integer()
test__iter__numeric()
def test__repr__integer():
x = StateElement(2, integer_set(3))
assert x.__repr__() == "StateElement(array(2), CatSet([0 1 2]), 'warning')"
def test__repr__numeric():
x = StateElement(numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))))
x.__repr__()
def test__repr__():
test__repr__integer()
test__repr__numeric()
def test_serialize_integer():
x = StateElement(numpy.array([2]), integer_set(3))
assert x.serialize() == {
"values": 2,
"space": {
"space": "CatSet",
"seed": None,
"array": [0, 1, 2],
"dtype": "dtype[int64]",
},
}
def test_serialize_numeric():
x = StateElement(numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))))
assert x.serialize() == {
"values": [[0.0, 0.0], [0.0, 0.0]],
"space": {
"space": "Numeric",
"seed": None,
"low,high": [[[-1.0, -1.0], [-1.0, -1.0]], [[1.0, 1.0], [1.0, 1.0]]],
"shape": (2, 2),
"dtype": "dtype[float64]",
},
}
def test_serialize():
test_serialize_integer()
test_serialize_numeric()
def test__getitem__integer():
x = StateElement(1, integer_set(3))
assert x[..., {"space": True}] == x
assert x[..., {"space": True}] is x
assert x[...] == x
def test__getitem__numeric():
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]), box_space(numpy.ones((2, 2)))
)
assert x[0, 0] == 0.0
assert x[0, 0, {"space": True}] == StateElement(0.0, box_space(numpy.float64(1)))
assert x[0, 1, {"space": True}] == StateElement(0.1, box_space(numpy.float64(1)))
assert x[1, 0, {"space": True}] == StateElement(0.2, box_space(numpy.float64(1)))
assert x[1, 1, {"space": True}] == StateElement(0.3, box_space(numpy.float64(1)))
assert (x[:, 1] == numpy.array([0.1, 0.3])).all()
assert (
x[:, 1, {"space": True}]
== StateElement(numpy.array([0.1, 0.3]), box_space(numpy.ones((2,))))
).all()
x = StateElement(numpy.array(0), box_space(low=-1, high=1))
from coopihc import State
s = State()
s["x"] = x
fd = {"x": ...}
a = s.filter(mode="stateelement", filterdict=fd)
def test__getitem__():
test__getitem__integer()
test__getitem__numeric()
def test__setitem__integer():
x = StateElement(1, integer_set(3))
x[...] = 2
assert x == StateElement(2, integer_set(3))
with pytest.warns(StateNotContainedWarning):
x[...] = 4
def test__setitem__numeric():
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]), box_space(numpy.ones((2, 2)))
)
x[0, 0] = 0.5
x[0, 1] = 0.6
x[1, 0] = 0.7
x[1, 1] = 0.8
assert (
x
== StateElement(
numpy.array([[0.5, 0.6], [0.7, 0.8]]), box_space(numpy.ones((2, 2)))
)
).all()
with pytest.warns(StateNotContainedWarning):
x[0, 0] = 1.3
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
x[:, 0] = numpy.array([0.9, 0.9])
x[0, :] = numpy.array([1.2, 0.2])
x[1, 1] = 0.5
assert (
x
== StateElement(
numpy.array([[1, 0.2], [0.9, 0.5]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
).all()
def test__setitem__():
test__setitem__integer()
test__setitem__numeric()
def test_reset_integer():
x = StateElement(numpy.array([2]), integer_set(3), out_of_bounds_mode="error")
xset = {}
for i in range(1000):
x.reset()
_x = x.squeeze().tolist()
xset.update({str(_x): _x})
assert sorted(xset.values()) == [0, 1, 2]
# forced reset:
x.reset(value=0)
assert x == StateElement(0, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x.reset(value=5)
x.out_of_bounds_mode = "clip"
x.reset(value=5)
assert x == StateElement(
numpy.array([2]), integer_set(3), out_of_bounds_mode="clip"
)
def test_reset_numeric():
x = StateElement(numpy.ones((2, 2)), box_space(numpy.ones((2, 2))))
for i in range(1000):
x.reset()
x.reset(0.59 * numpy.ones((2, 2)))
assert (
x == StateElement(0.59 * numpy.ones((2, 2)), box_space(numpy.ones((2, 2))))
).all()
def test_reset():
test_reset_integer()
test_reset_numeric()
def test_tabulate_integer():
x = StateElement(1, integer_set(3))
x._tabulate()
tabulate(x._tabulate()[0])
def test_tabulate_numeric():
x = StateElement(numpy.zeros((3, 3)), box_space(numpy.ones((3, 3))))
x._tabulate()
tabulate(x._tabulate()[0])
def test_tabulate():
test_tabulate_integer()
test_tabulate_numeric()
def test_cast_discrete_to_cont():
discr_box_space = box_space(low=numpy.int8(1), high=numpy.int8(3))
cont_box_space = box_space(low=numpy.float64(-1.5), high=numpy.float64(1.5))
x = StateElement(1, discr_box_space)
ret_stateElem = x.cast(cont_box_space, mode="edges")
assert ret_stateElem == StateElement(-1.5, cont_box_space)
ret_stateElem = x.cast(cont_box_space, mode="center")
assert ret_stateElem == StateElement(-1, cont_box_space)
x = StateElement(2, discr_box_space)
ret_stateElem = x.cast(cont_box_space, mode="edges")
assert ret_stateElem == StateElement(0, cont_box_space)
ret_stateElem = x.cast(cont_box_space, mode="center")
assert ret_stateElem == StateElement(0, cont_box_space)
x = StateElement(3, discr_box_space)
ret_stateElem = x.cast(cont_box_space, mode="edges")
assert ret_stateElem == StateElement(1.5, cont_box_space)
ret_stateElem = x.cast(cont_box_space, mode="center")
assert ret_stateElem == StateElement(1, cont_box_space)
def test_cast_cont_to_discrete():
cont_box_space = box_space(low=numpy.float64(-1.5), high=numpy.float64(1.5))
discr_box_space = box_space(low=numpy.int8(1), high=numpy.int8(3))
x = StateElement(0, cont_box_space)
ret_stateElem = x.cast(discr_box_space, mode="center")
assert ret_stateElem == StateElement(2, discr_box_space)
ret_stateElem = x.cast(discr_box_space, mode="edges")
assert ret_stateElem == StateElement(2, discr_box_space)
center = []
edges = []
for i in numpy.linspace(-1.5, 1.5, 100):
x = StateElement(i, cont_box_space)
ret_stateElem = x.cast(discr_box_space, mode="center")
if i < -0.75:
assert ret_stateElem == StateElement(1, discr_box_space)
if i > -0.75 and i < 0.75:
assert ret_stateElem == StateElement(2, discr_box_space)
if i > 0.75:
assert ret_stateElem == StateElement(3, discr_box_space)
center.append(ret_stateElem.tolist())
ret_stateElem = x.cast(discr_box_space, mode="edges")
if i < -0.5:
assert ret_stateElem == StateElement(1, discr_box_space)
if i > -0.5 and i < 0.5:
assert ret_stateElem == StateElement(2, discr_box_space)
if i > 0.5:
assert ret_stateElem == StateElement(3, discr_box_space)
edges.append(ret_stateElem.tolist())
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.plot(
# numpy.linspace(-1.5, 1.5, 100), numpy.array(center) - 0.05, "+", label="center"
# )
# ax.plot(
# numpy.linspace(-1.5, 1.5, 100), numpy.array(edges) + 0.05, "o", label="edges"
# )
# ax.legend()
# plt.show()
def test_cast_cont_to_cont():
cont_space = box_space(numpy.full((2, 2), 1), dtype=numpy.float32)
other_cont_space = box_space(
low=numpy.full((2, 2), 0), high=numpy.full((2, 2), 4), dtype=numpy.float32
)
for i in numpy.linspace(-1, 1, 100):
x = StateElement(numpy.full((2, 2), i), cont_space)
ret_stateElement = x.cast(other_cont_space)
assert (ret_stateElement == (x + 1) * 2).all()
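# The continuous-to-continuous cast exercised above is an affine rescale from
# the source box onto the target box. A minimal standalone sketch of the same
# mapping (the helper name is illustrative, not part of coopihc):
def _affine_rescale(x, lo_in, hi_in, lo_out, hi_out):
    return lo_out + (hi_out - lo_out) * (x - lo_in) / (hi_in - lo_in)
# e.g. _affine_rescale(0.0, -1.0, 1.0, 0.0, 4.0) == 2.0, matching (x + 1) * 2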
def test_cast_discr_to_discr():
discr_box_space = box_space(low=numpy.int8(1), high=numpy.int8(4))
other_discr_box_space = box_space(low=numpy.int8(11), high=numpy.int8(14))
for i in [1, 2, 3, 4]:
x = StateElement(i, discr_box_space)
ret_stateElement = x.cast(other_discr_box_space)
assert ret_stateElement == x + 10
def test_cast():
test_cast_discrete_to_cont()
test_cast_cont_to_discrete()
test_cast_cont_to_cont()
test_cast_discr_to_discr()
if __name__ == "__main__":
test_array_init()
test_array_init_error()
test_array_init_warning()
test_array_init_clip()
test_array_init_dtype()
# test__array_ufunc__() # kept here just in case
# test__array_function__() # kept here just in case
test_equals()
test__iter__()
test__repr__()
test_serialize()
test__setitem__()
test__getitem__()
test_reset()
test_tabulate()
test_cast()
|
{"hexsha": "56d7927077bc5c01fd5071b79c41451ae9231fc8", "size": 22653, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/components/base/stateelement/test_statelement.py", "max_stars_repo_name": "jgori-ouistiti/CoopIHC", "max_stars_repo_head_hexsha": "0fe24c618a430517c1394625275faff3ce344f7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/components/base/stateelement/test_statelement.py", "max_issues_repo_name": "jgori-ouistiti/CoopIHC", "max_issues_repo_head_hexsha": "0fe24c618a430517c1394625275faff3ce344f7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2021-11-23T13:49:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T12:28:18.000Z", "max_forks_repo_path": "test/components/base/stateelement/test_statelement.py", "max_forks_repo_name": "jgori-ouistiti/CoopIHC", "max_forks_repo_head_hexsha": "0fe24c618a430517c1394625275faff3ce344f7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T11:10:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T11:10:24.000Z", "avg_line_length": 30.6121621622, "max_line_length": 89, "alphanum_fraction": 0.5982430583, "include": true, "reason": "import numpy", "num_tokens": 6547}
|
import matplotlib.pyplot as plt
from numpy import matlib
from scipy.sparse.linalg import svds
import numpy as np
from scipy.sparse import csc_matrix, lil_matrix
import multiprocessing as mp
m, n = 3, 10
l = int(1.0 * m * n * m * n)
dist = np.random.normal
dist_par = (0.0, 1.0 / np.sqrt(m * n))
w_size = (m * n, m * n)
w = dist(*dist_par, size=w_size)
w = np.vstack([wi - wi.mean(0) for wi in np.split(w, n)])
w = np.hstack(
[wj - wj.mean(1)[:, np.newaxis] for wj in np.split(w, n, axis=1)])
mm = m * np.arange(n)
states = np.empty((l, n), dtype=int)
states[0] = np.random.randint(m, size=n)
s = lil_matrix((l, m * n))
s[0, states[0] + mm] = 1
for t in range(1, l):
h = s[t - 1] * w
p = np.exp(h)
p = np.array([pi.cumsum() for pi in np.split(p, n, axis=1)])
p /= p[:, -1, np.newaxis]
u = np.random.uniform(size=(n, 1))
states[t] = (p < u).sum(1)
s[t, states[t] + mm] = 1
# s = s.tocsc()
s = s.toarray()
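# At this point `s` is a dense (l, m*n) one-hot encoding of the sampled
# trajectory: row t contains exactly one 1 in each of the n blocks of m
# columns, marking the state of each Potts spin at time t.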
# import numpy as np
# np.random.seed(1)
# # parameters:
# n = 100
# g = 1.0
# fL = 1.0
# g = float(g)
# fL = float(fL)
# L0 = int(fL * n**2) + 1
# L = L0 - 1
# ##=============================================================================
# # generate sequences:
# W0_all = np.random.normal(0.0, g / np.sqrt(n), size=(n, n))
# s = np.ones((L0, n))
# for t in range(L):
# for i in range(n):
# H = np.sum(W0_all[i, :] * s[t, :])
# if (np.exp(-H) / (np.exp(H) + np.exp(-H))) > np.random.rand():
# s[t + 1, i] = -1.
##=============================================================================
## covariance:
ds = s - s.mean(0)
C = np.empty((n * m, n * m))
for i in range(n * m):
for j in range(n * m):
C[i, j] = np.mean(ds[:, i] * ds[:, j])
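# Equivalent vectorized form of the double loop above: C = ds.T.dot(ds) / len(ds)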
C_inv = np.linalg.inv(C) # inverse matrix
s2 = s[1:].copy()
##=============================================================================
nloop = 1000
## predict W:
# The names below come from the commented-out binary-model section above; for
# the active multi-state generator the true couplings are `w`, and g = 1.0 is
# the coupling scale already folded into dist_par.
W0_all = w
g = 1.0
W_all = np.empty((n*m, n*m))
for i0 in range(n):
i1, i2 = m * i0, m * (i0 + 1)
W0 = W0_all[i1:i2, :]
s2i = s2[:, i1:i2]
H = s2i.copy() # initial value
cost = np.zeros(nloop + 1)
W1 = np.empty((nloop, n))
Hs = np.empty(n*m)
iloop = 1
stop_iloop = 0
while iloop < nloop and stop_iloop == 0:
for i in range(n*m):
Hs[i] = np.mean((H[0:L] - np.mean(H)) * ds[0:L, i])
W = np.dot(Hs[0:n], C_inv[0:n, 0:n])
H[0:L] = np.dot(s[0:L, 0:n], W[0:n])
cost[iloop] = np.mean((s2i - np.tanh(H))**2)
MSE = np.mean((W0[:] - W[:])**2)
if cost[iloop] > cost[iloop - 1] and iloop > 1:
stop_iloop = 1
H[:] = s2i[:] * H[:] / np.tanh(H[:])
W1[iloop, :] = W[:]
#print(i0,iloop,MSE,cost[iloop])
iloop += 1
niter = iloop - 2
print('i0:', i0, 'niter:', niter)
W = W1[niter, :]
W_all[i0, :] = W[0:n]
##=============================================================================
MSE = np.mean((W0_all - W_all)**2)
slope = np.sum(W0_all * W_all) / np.sum(W0_all**2)
print(float(L) / (n**2), MSE, slope)
# The output file handles were never opened in the original script; the file
# names here are assumptions.
MSE_out = open('MSE.dat', 'w')
W_out = open('W.dat', 'w')
MSE_out.write("%f %f %f %f \n" % (g, float(L) / (n**2), MSE, slope))
MSE_out.close()
for i in range(n):
for j in range(n):
W_out.write("%i %i %f %f \n" % (i + 1, j + 1, float(W0_all[i, j]),
float(W_all[i, j])))
W_out.close()
|
{"hexsha": "5f0f0eb95d202642e5a2da5a0b987c9194047181", "size": 3322, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/scripts/2main.py", "max_stars_repo_name": "joepatmckenna/fem", "max_stars_repo_head_hexsha": "18b3e3cb0b83f7a4eb464c84f09f00673ea2fcb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/scripts/2main.py", "max_issues_repo_name": "joepatmckenna/fem", "max_issues_repo_head_hexsha": "18b3e3cb0b83f7a4eb464c84f09f00673ea2fcb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/scripts/2main.py", "max_forks_repo_name": "joepatmckenna/fem", "max_forks_repo_head_hexsha": "18b3e3cb0b83f7a4eb464c84f09f00673ea2fcb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-27T15:30:00.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-27T15:30:00.000Z", "avg_line_length": 24.0724637681, "max_line_length": 81, "alphanum_fraction": 0.4536423841, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1180}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as op
import sys
import re
import logging
from astropy.table import Table, Column
from maize.apps.base import AttrDict, str2bool, eprint, sh, mkdir, which
from maize.formats.base import must_open
from maize.formats.pbs import PbsJob, create_job_chain
def check_cfg_mapping(c):
c.outdirs = c.outdir.split(",")
assert len(c.outdirs) == 2, "not 2 outdirs: %s" % c.outdir
for subdir in [c.dirw, c.temp_dir] + c.outdirs:
if not op.isdir(subdir):
mkdir(subdir)
for fn in [c.ilist, c.genome, c.gff]:
assert op.isfile(fn), "cannot read %s" % fn
for key in 'samtools parallel sambamba bcftools bedtools'.split():
fp = which(c[key])
assert fp is not None, "not executable: %s" % c[key]
c[key] = fp
c.paired = str2bool(c.paired)
if c.mapper == 'bwa':
c.bwa = which(c.bwa)
assert c.bwa is not None, "not executable: %s" % c.bwa
elif c.mapper == 'hisat2':
c.hisat2 = which(c.hisat2)
assert c.hisat2 is not None, "not executable: %s" % c.hisat2
elif c.mapper == 'bowtie2':
c.bowtie2 = which(c.bowtie2)
assert c.bowtie2 is not None, "not executable: %s" % c.bowtie2
else:
logging.error("unsupported mapper: %s" % c.mapper)
sys.exit(1)
njob = 3
c.pbs_walltimes = c.pbs_walltime.split(",")
c.pbs_ppns = c.pbs_ppn.split(",")
c.pbs_queues = c.pbs_queue.split(",")
assert njob == len(c.pbs_queues) == len(c.pbs_walltimes) == len(c.pbs_ppns), "not %d jobs: %s" % (njob, c.pbs_queue)
c.njob = njob
return c
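# A minimal sketch of the [mapping] section of config.ini consumed above
# (keys inferred from check_cfg_mapping; all paths and values are purely
# illustrative):
#
#   [mapping]
#   dirw = /scratch/project
#   temp_dir = /scratch/project/tmp
#   outdir = /scratch/project/21.bam,/scratch/project/22.stat
#   ilist = samples.tsv
#   genome = genome.fa
#   gff = genes.gff
#   mapper = bwa
#   paired = true
#   pbs_queue = small,small,small
#   pbs_ppn = 24,4,8
#   pbs_walltime = 12:00:00,2:00:00,6:00:00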
def mapping(cfg, args):
c = AttrDict(cfg['mapping'])
c = check_cfg_mapping(c)
if args.check:
mapping_check(c)
return 0
os.chdir(c.dirw)
jcmds = [[
"cd %s" % c.dirw,
], [
"cd %s" % c.dirw,
], [
"cd %s" % c.dirw,
]]
bcfgs = [
[dict(opt = 'bash')],
[dict(opt = 'parallel', thread = c.pbs_ppns[1])],
[dict(opt = 'bash'),
dict(opt = 'parallel', thread = c.pbs_ppns[2]),
],
]
assert c.njob == len(bcfgs) == len(jcmds), "not %d jobs" % c.njob
jobs = []
for i in range(c.njob):
prefix = "%s.%d" % (c.job_prefix, i+1)
jcfg = {
'queue': c.pbs_queues[i],
'ppn': c.pbs_ppns[i],
'walltime': c.pbs_walltimes[i],
'email': c.pbs_email,
}
job = PbsJob.from_cfg(jcfg = jcfg, jcmds = jcmds[i], bcfgs = bcfgs[i],
prefix = prefix, njob = len(bcfgs[i]),
bash = c.bash, parallel = c.parallel)
jobs.append(job)
t = Table.read(c.ilist, format = 'ascii.tab')
nrow = len(t)
for i in range(nrow):
sid = t['sid'][i]
pre1= "%s/%s" % (c.outdirs[0], sid)
fsam = "%s.sam" % pre1
input_str = ''
if c.paired:
f1p = t["TrimmedReadFile1Paired"][i]
f1u = t["TrimmedReadFile1Unpaired"][i]
f2p = t["TrimmedReadFile2Paired"][i]
f2u = t["TrimmedReadFile2Unpaired"][i]
if c.mapper == 'hisat2' or c.mapper == 'bowtie2':
input_str = "-1 %s -2 %s -U %s,%s" % (f1p, f2p, f1u, f2u)
elif c.mapper == 'bwa':
input_str = "%s %s" % (f1p, f2p)
else:
ft = t["TrimmedReadFile"][i]
if c.mapper == 'hisat2' or c.mapper == 'bowtie2':
input_str = "-U %s" % ft
elif c.mapper == 'bwa':
input_str = "%s" % ft
if c.mapper == 'bwa':
jobs[0].subjobs[0].add_cmd("%s mem -t %s %s %s \
-R '@RG\\tID:%s\\tSM:%s' -a > %s.sam" % \
(c.bwa, c.pbs_ppns[0], c.bwa_db, input_str, \
sid, sid, pre1))
elif c.mapper == 'hisat2':
jobs[0].subjobs[0].add_cmd("%s -p %s -x %s -q %s \
--no-spliced-alignment --rg-id %s --rg SM:%s -S %s.sam" % \
(c.hisat2, c.pbs_ppns[0], c.hisat_db, input_str, \
sid, sid, pre1))
elif c.mapper == 'bowtie2':
jobs[0].subjobs[0].add_cmd("%s -p %s -x %s -q %s \
--rg-id %s --rg SM:%s --sensitive -S %s.sam" % \
(c.bowtie2, c.pbs_ppns[0], c.bowtie_db, input_str, \
sid, sid, pre1))
fbam = "%s.bam" % pre1
jobs[1].subjobs[0].add_cmd("%s view -Sb %s.sam -o %s.raw.bam" % \
(c.samtools, pre1, pre1))
jobs[2].subjobs[0].add_cmd("%s sort -t %s -m 60GB %s.raw.bam -o %s.bam" % \
(c.sambamba, c.pbs_ppns[2], pre1, pre1))
#bcmds[2].append("%s index -t %s %s.bam" % (sambamba, pbs_ppns[2], pre1))
pre2 = "%s/%s" % (c.outdirs[1], sid)
jobs[2].subjobs[1].add_cmd("bam stat %s.bam --isize %s.ins.tsv > %s.tsv" % \
(pre1, pre2, pre2))
for job in jobs:
job.write()
fj = "%s.sh" % c.job_prefix
create_job_chain([job.fname for job in jobs], fj)
logging.debug("job chain with %s jobs was created: %s" % (c.njob, fj))
def mapping_check(c):
    t = Table.read(c.ilist, format = 'ascii.tab')
nrow = len(t)
newcols = ''
if c.paired:
newcols = '''BAM Pair Pair_Map Pair_Orphan Pair_Unmap
Pair_Map_Hq Unpair Unpair_Map Unpair_Map_Hq'''.split()
else:
newcols = '''BAM Total Mapped Mapped_Hq'''.split()
for newcol in newcols:
t.add_column(Column(name = newcol, length = nrow, dtype = object))
for i in range(nrow):
sid = t['sid'][i]
bam = "%s/%s.bam" % (c.outdirs[0], sid)
assert op.isfile(bam), "%s not exist" % bam
fs = "%s/%s.tsv" % (c.outdirs[1], sid)
assert op.isfile(fs), "%s not exist" % fs
if c.paired:
t['BAM'][i] = 0#bam
t['Pair'][i] = 0#pair
t['Pair_Map'][i] = 0#pair_map
t['Pair_Orphan'][i] = 0#pair_orphan
t['Pair_Unmap'][i] = 0#pair_unmap
t['Pair_Map_Hq'][i] = 0#pair_map_hq
t['Unpair'][i] = 0#unpair
t['Unpair_Map'][i] = 0#unpair_map
t['Unpair_Map_Hq'][i] = 0#unpair_map_hq
        else:
            t['BAM'][i] = 0#bam
            t['Total'][i] = 0#unpair
            t['Mapped'][i] = 0#unpair_map
            t['Mapped_Hq'][i] = 0#unpair_map_hq
    t.write(c.olist, format='ascii.tab', overwrite=True)  # c.olist assumed: the original `t.olist` was a bug
if __name__ == "__main__":
import argparse
import configparser
parser = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = 'Illumina DNA-Seq pipeline(s)'
)
parser.add_argument('--config', "--cfg", default = "config.ini", help = 'config file')
sp = parser.add_subparsers(title = 'available commands', dest = 'command')
sp1 = sp.add_parser("mapping",
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
help = 'mapping'
)
sp1.add_argument("--check", action = 'store_true', help = "run script in check mode")
sp1.set_defaults(func = mapping)
args = parser.parse_args()
assert op.isfile(args.config), "cannot read %s" % args.config
cfg = configparser.ConfigParser()
cfg._interpolation = configparser.ExtendedInterpolation()
cfg.read(args.config)
if args.command:
args.func(cfg, args)
else:
print('Error: need to specify a sub command\n')
parser.print_help()
|
{"hexsha": "4d0d63e284dc6afaf8bb5b791110dc92a4efc766", "size": 7532, "ext": "py", "lang": "Python", "max_stars_repo_path": "old/pipelines/dnaseq.py", "max_stars_repo_name": "orionzhou/biolib", "max_stars_repo_head_hexsha": "940fb66f1b2608d34a2d00ebdf41dc84c6381f42", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-22T20:35:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T10:01:50.000Z", "max_issues_repo_path": "old/pipelines/dnaseq.py", "max_issues_repo_name": "orionzhou/biolib", "max_issues_repo_head_hexsha": "940fb66f1b2608d34a2d00ebdf41dc84c6381f42", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "old/pipelines/dnaseq.py", "max_forks_repo_name": "orionzhou/biolib", "max_forks_repo_head_hexsha": "940fb66f1b2608d34a2d00ebdf41dc84c6381f42", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-19T03:10:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-19T03:10:14.000Z", "avg_line_length": 35.5283018868, "max_line_length": 120, "alphanum_fraction": 0.5296070101, "include": true, "reason": "from astropy", "num_tokens": 2361}
|
import uuid
import nibabel as nb
import numpy as np
import pytest
from ..nibabel import MergeROIs
@pytest.fixture
def create_roi(tmp_path):
files = []
def _create_roi(affine, img_data, roi_index):
img_data[tuple(roi_index)] = 1
nii = nb.Nifti1Image(img_data, affine)
filename = tmp_path / f"{str(uuid.uuid4())}.nii.gz"
files.append(filename)
nii.to_filename(filename)
return filename
yield _create_roi
for f in files:
f.unlink()
# create a slightly off affine
bad_affine = np.eye(4)
bad_affine[0, -1] = -1
@pytest.mark.parametrize(
"affine, data, roi_index, error, err_message",
[
(np.eye(4), np.zeros((2, 2, 2, 2), dtype=int), [1, 0], None, None),
(
np.eye(4),
np.zeros((2, 2, 3, 2), dtype=int),
[1, 0],
True,
"Mismatch in image shape",
),
(
bad_affine,
np.zeros((2, 2, 2, 2), dtype=int),
[1, 0],
True,
"Mismatch in affine",
),
(
np.eye(4),
np.zeros((2, 2, 2, 2), dtype=int),
[0, 0, 0],
True,
"Overlapping ROIs",
),
],
)
def test_merge_rois(tmpdir, create_roi, affine, data, roi_index, error, err_message):
tmpdir.chdir()
roi0 = create_roi(np.eye(4), np.zeros((2, 2, 2, 2), dtype=int), [0, 0])
roi1 = create_roi(np.eye(4), np.zeros((2, 2, 2, 2), dtype=int), [0, 1])
test_roi = create_roi(affine, data, roi_index)
merge = MergeROIs(in_files=[roi0, roi1, test_roi])
if error is None:
merge.run()
return
# otherwise check expected exceptions
with pytest.raises(AssertionError) as err:
merge.run()
assert err_message in str(err.value)
|
{"hexsha": "4b40d38420e7fc6d934cdb8a1bfe784b4b11749f", "size": 1824, "ext": "py", "lang": "Python", "max_stars_repo_path": "nibabies/interfaces/tests/test_nibabel.py", "max_stars_repo_name": "nipreps/nibabies", "max_stars_repo_head_hexsha": "55143215eaebca4924cefdc74d8008a347efea16", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-05-15T15:01:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T15:06:33.000Z", "max_issues_repo_path": "nibabies/interfaces/tests/test_nibabel.py", "max_issues_repo_name": "mgxd/nibabies", "max_issues_repo_head_hexsha": "4ae099af626b770142c9f2ced97c1436d17cae07", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 139, "max_issues_repo_issues_event_min_datetime": "2020-06-25T18:03:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:41:55.000Z", "max_forks_repo_path": "nibabies/interfaces/tests/test_nibabel.py", "max_forks_repo_name": "mgxd/nibabies", "max_forks_repo_head_hexsha": "4ae099af626b770142c9f2ced97c1436d17cae07", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-25T18:27:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T16:13:02.000Z", "avg_line_length": 24.6486486486, "max_line_length": 85, "alphanum_fraction": 0.5449561404, "include": true, "reason": "import numpy", "num_tokens": 531}
|
"""Train the model"""
import argparse
import os
import tensorflow as tf
from model.input_fn import train_input_fn
from model.input_fn import test_input_fn
from model.model_fn import TripletLoss
from model.utils import Params
import random
from tqdm import tqdm
from numpy import savez_compressed
import model.multi_modal_dataset as multi_modal_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='experiments/batch_hard',
help="Experiment directory containing params.json")
parser.add_argument('--data_dir', default='/Users/d22admin/USCGDrive/BeyondAssignment/small_dataset_useful/',
help="Directory containing the dataset")
parser.add_argument('--embed_dir', default='/Users/d22admin/USCGDrive/BeyondAssignment/Deliverables/Embeddings/')
parser.add_argument("--bert_model", default="bert-base-cased")
parser.add_argument("--result_dir", default="/Users/d22admin/USCGDrive/BeyondAssignment/Deliverables/Results/")
ANCHORS = [0, 1, 2] # 0 for "tweet", 1 for "image", 2 for "user"
if __name__ == '__main__':
tf.reset_default_graph()
#tf.logging.set_verbosity(tf.logging.INFO)
# Load the parameters from json file
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = Params(json_path)
# Get the datasets
#tf.logging.info("Getting the dataset...")
dataset_iter = train_input_fn(args.data_dir, params, args.embed_dir)
dataset_next = dataset_iter.get_next()
# Define the model
#tf.logging.info("Creating the model...")
model = TripletLoss(params)
num_train = 5031
num_train_steps = int(num_train/params.batch_size) * params.num_epochs
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
""" Training Module """
for i in tqdm(range(0, num_train_steps)):
sess.run(dataset_iter.initializer)
train = sess.run(dataset_next)
fd_train = {
model.is_training: True,
model.anchor_mode: random.choice(ANCHORS),
model.opt_mode: random.choice(ANCHORS),
model.images: train[0],
model.tweets: train[1],
model.user_ids: train[2],
model.team: train[3]
}
_, train_loss, train_acc, tweets_dense, images_dense = sess.run([model.train_op, model.total_loss,
model.class_acc, model.tweets_dense,
model.images_dense], fd_train)
print("iteration:", i, " train_loss:", train_loss, " train_acc:", train_acc)
tf.logging.info("Finished training!! Now saving model and embeddings")
model.save(os.path.join(args.model_dir, "sample_multi_modal_model"), sess)
tf.logging.info("Visualizing the embeddings on the TRAIN portion!")
dataset = multi_modal_dataset.train(args.data_dir, args.embed_dir)
dataset = dataset.batch(1684)
dataset = dataset.prefetch(1)
dataset = dataset.make_initializable_iterator()
        # Obtain the train labels
        dataset_next = dataset.get_next()
sess.run(dataset.initializer)
train = sess.run(dataset_next)
fd_train = {
model.is_training: False,
model.anchor_mode: random.choice(ANCHORS),
model.opt_mode: random.choice(ANCHORS),
model.images: train[0],
model.tweets: train[1],
model.user_ids: train[2],
model.team: train[3]
}
train_acc, tweets_dense, images_dense = sess.run([model.class_acc, model.tweets_dense, model.images_dense],
fd_train)
print("tweets_dense.shape:", tweets_dense.shape, " images_dense.shape:", images_dense.shape, "train acc:",
train_acc)
savez_compressed(os.path.join(args.result_dir, 'text_emb_sample_both_train.npz'), tweets_dense)
savez_compressed(os.path.join(args.result_dir, 'img_emb_sample_both_train.npz'), images_dense)
tf.logging.info("Visualizing the embeddings on the TEST portion!")
dataset = multi_modal_dataset.test(args.data_dir, args.embed_dir)
dataset = dataset.batch(421)
dataset = dataset.prefetch(1)
dataset = dataset.make_initializable_iterator()
        # Obtain the test labels
        dataset_next = dataset.get_next()
sess.run(dataset.initializer)
test = sess.run(dataset_next)
fd_test = {
model.is_training: False,
model.anchor_mode: random.choice(ANCHORS),
model.opt_mode: random.choice(ANCHORS),
model.images: test[0],
model.tweets: test[1],
model.user_ids: test[2],
model.team: test[3]
}
test_acc, tweets_dense, images_dense = sess.run([model.class_acc, model.tweets_dense, model.images_dense], fd_test)
print("tweets_dense.shape:", tweets_dense.shape, " images_dense.shape:", images_dense.shape, "test acc:",
test_acc)
savez_compressed(os.path.join(args.result_dir, 'text_emb_sample_both_test.npz'), tweets_dense)
savez_compressed(os.path.join(args.result_dir, 'img_emb_sample_both_test.npz'), images_dense)
|
{"hexsha": "3b42a03acfa7cfb4cfa43a94235ae6735a77703e", "size": 5662, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "meryemmhamdi1/tensorflow-triplet-loss", "max_stars_repo_head_hexsha": "9c40ad45fddfb8d2b955faf2973385f6985f72aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-30T22:18:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-23T00:33:13.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "meryemmhamdi1/tensorflow-triplet-loss", "max_issues_repo_head_hexsha": "9c40ad45fddfb8d2b955faf2973385f6985f72aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "meryemmhamdi1/tensorflow-triplet-loss", "max_forks_repo_head_hexsha": "9c40ad45fddfb8d2b955faf2973385f6985f72aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-30T22:19:41.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-30T22:19:41.000Z", "avg_line_length": 38.0, "max_line_length": 123, "alphanum_fraction": 0.6448251501, "include": true, "reason": "from numpy", "num_tokens": 1176}
|
#
from typing import List
import itertools as it
import difflib
import numpy as np
def to_string_seq(tour: List[int]) -> str:
"""Convert tour to a string sequence."""
return ' '.join(str(e) for e in tour)
def plan_to_string_seq(plan: List[List[int]]) -> str:
"""Convert tour plan represented as list of lists to a string sequence."""
return ' '.join(str(t) for t in it.chain.from_iterable(plan))
def plan_to_string_text(plan: List[List]) -> str:
"""Convert tour plan represented as list of lists to a
multiline text string with one line per list."""
return "\n".join(' '.join(str(e) for e in t) for t in plan)
def get_similarity_scores(anchor: str,
candidates: List[str],
) -> List[float]:
"""Get similarity score for each candidate
compared to the anchor sequence."""
matcher = difflib.SequenceMatcher(isjunk=lambda x: x == " ", a=anchor)
scores = []
for c in candidates:
matcher.set_seq2(c)
scores.append(matcher.ratio())
return scores
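# Hand-checked example: difflib's ratio is 2*M/T, where M counts matched
# characters (spaces are treated as junk by the matcher above) and T is the
# combined length of both strings:
#   get_similarity_scores("1 2 3", ["1 2 3", "3 2 1"])  # -> [1.0, 0.6]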
def get_most_diverse_idx(anchor: str,
candidates: List[str],
k: int = 1) -> List[int]:
"""Get the idx of the 'k' candidates which are
the most diverse from the anchor sequence."""
return np.argsort(get_similarity_scores(anchor, candidates))[:k]
def rm_common_subsequences(plan_a: List[List],
plan_b: List[List],
k: int = 1,
tau: float = 0.5,
) -> List[List]:
"""Remove the 'k' subsequences from plan_b which have
the most common elements with subsequences in plan_a."""
scores = []
b_strings = [to_string_seq(t) for t in plan_b if len(t) > 0]
for tour_a in plan_a:
if len(tour_a) == 0:
continue
a = to_string_seq(tour_a)
scores.append(get_similarity_scores(a, b_strings))
scores = np.array(scores).reshape(-1)
idx = (-scores.reshape(-1)).argsort()[:k] # -scores to sort in descending order
# check threshold
rm_idx = idx[scores[idx] >= tau]
# always remove at least one subsequence
if len(rm_idx) == 0:
rm_idx = np.array([idx[0]])
rm_idx = rm_idx % len(b_strings)
return [t for i, t in enumerate(plan_b) if i not in rm_idx]
# ============= #
# ### TEST #### #
# ============= #
def create_plan(n, k):
assert (n-5 > k) and (k > 2)
range_n = np.arange(5, n)
cuts = np.random.choice(range_n[1:-1], k-2, replace=False)
cuts.sort()
cuts = [0] + cuts.tolist() + [n]
plan = np.array(range_n).copy()
np.random.shuffle(plan)
return [list(range(5))] + [plan[s:e].tolist() for s, e in zip(cuts[:-1], cuts[1:])]
def _test1():
np.random.seed(1)
N = 40
K = 7
C = 5
plan_a = create_plan(N, K)
plan_b = create_plan(N, K)
print(plan_a)
print(plan_b)
plan_a = plan_to_string_seq(plan_a)
plan_b = plan_to_string_seq(plan_b)
print(plan_a)
print(plan_b)
candidate_plans = [create_plan(N, K) for _ in range(C)]
print(candidate_plans)
md_idx = get_most_diverse_idx(plan_a, [plan_to_string_seq(p) for p in candidate_plans], 3)
print(f"most div: {md_idx}")
print(candidate_plans[md_idx[0]])
def _test2():
import numpy as np
np.random.seed(1)
N = 30
K = 6
plan_a = create_plan(N, K)
plan_b = create_plan(N, K)
print(plan_a)
print(plan_b)
plan_b_new = rm_common_subsequences(plan_a, plan_b, k=2)
print(plan_b_new)
|
{"hexsha": "fa52c36757218242abdfc44511cf416e5fb4662e", "size": 3580, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/utils/seq_match.py", "max_stars_repo_name": "jokofa/JAMPR_plus", "max_stars_repo_head_hexsha": "6500c7ef36e7aad5d00b6c7cf98266b8a1979955", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/utils/seq_match.py", "max_issues_repo_name": "jokofa/JAMPR_plus", "max_issues_repo_head_hexsha": "6500c7ef36e7aad5d00b6c7cf98266b8a1979955", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/utils/seq_match.py", "max_forks_repo_name": "jokofa/JAMPR_plus", "max_forks_repo_head_hexsha": "6500c7ef36e7aad5d00b6c7cf98266b8a1979955", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8709677419, "max_line_length": 94, "alphanum_fraction": 0.5974860335, "include": true, "reason": "import numpy", "num_tokens": 956}
|
[STATEMENT]
lemma perp_per_2:
assumes "A B Perp A C"
shows "Per B A C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Per B A C
[PROOF STEP]
by (simp add: Perp_perm assms perp_per_1)
|
{"llama_tokens": 88, "file": "IsaGeoCoq_Tarski_Neutral", "length": 1}
|
import os
import torch
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
import util
from config import Configuration
from dataset import Dataset
from models import ENAS, FPN #import ENAScontroller, ENAStrainer
def load_controller_and_trainer(args, logger, data):
if args.name == 'ENAS' :
controller = ENAS.ENAScontroller(args)
trainer = ENAS.ENAStrainer(args, logger, data, controller)
elif args.name == 'FPN' :
model = FPN.FPN(args)
trainer = FPN.FPNtrainer(args, logger, data, model)
controller = None
else :
raise NotImplementedError
return controller, trainer
def main():
args = Configuration()
util.set_paths(args)
util.set_seeds(args)
logger = util.set_logger(args)
dataset = Dataset(args)
controller, trainer = load_controller_and_trainer(args=args,
logger=logger,
data=dataset)
if args.fixed_train :
trainer.fixed_train()
else :
trainer.train()
if __name__ == '__main__' :
main()
|
{"hexsha": "ebc917971beeb7af4830457061447e85d974d7a9", "size": 990, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "imhgchoi/Architecture-Search", "max_stars_repo_head_hexsha": "38027d91fc8928b5c5a6ccde0013e571527a5293", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "imhgchoi/Architecture-Search", "max_issues_repo_head_hexsha": "38027d91fc8928b5c5a6ccde0013e571527a5293", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "imhgchoi/Architecture-Search", "max_forks_repo_head_hexsha": "38027d91fc8928b5c5a6ccde0013e571527a5293", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5217391304, "max_line_length": 64, "alphanum_fraction": 0.7282828283, "include": true, "reason": "import numpy", "num_tokens": 242}
|
[STATEMENT]
lemma ereal_MInf_plus[simp]: "-\<infinity> + x = (if x = \<infinity> then \<infinity> else -\<infinity>::ereal)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - \<infinity> + x = (if x = \<infinity> then \<infinity> else - \<infinity>)
[PROOF STEP]
by simp
|
{"llama_tokens": 117, "file": "Lower_Semicontinuous_Lower_Semicontinuous", "length": 1}
|
'''
Generating music
'''
import pickle
import numpy as np
from music21 import instrument,note, stream, chord
from keras.models import Sequential
from keras.layers import Activation, BatchNormalization, Dense, Dropout, LSTM
def generate():
with open('misc/notes','rb') as filepath:
notes = pickle.load(filepath)
pitchnames = sorted(set(item for item in notes))
n_vocab = len(set(notes))
    net_in, norm_in = sequences(notes, pitchnames, n_vocab)
    model = net(norm_in, n_vocab)
pred_op = generate_notes(model, net_in, pitchnames, n_vocab)
create_midi(pred_op)
def sequences(notes, pitchnames, n_vocab):
    # NOTE: the source file is truncated here; the body below is a hedged
    # reconstruction of the usual prepare-sequences step for this kind of
    # LSTM generator (sequence_length = 100 is an assumption).
    note_to_int = dict((p, i) for i, p in enumerate(pitchnames))
    sequence_length = 100
    net_in = []
    for i in range(len(notes) - sequence_length):
        seq_in = notes[i:i + sequence_length]
        net_in.append([note_to_int[p] for p in seq_in])
    norm_in = np.reshape(net_in, (len(net_in), sequence_length, 1)) / float(n_vocab)
    return net_in, norm_in
|
{"hexsha": "e45b16a1e2dda3412dbd04087d0502d76ce86bae", "size": 662, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate.py", "max_stars_repo_name": "7wikd/Jazz-LSTM", "max_stars_repo_head_hexsha": "9510ef1b4f427fde68d2afa8d72bcb7b4d96ad61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate.py", "max_issues_repo_name": "7wikd/Jazz-LSTM", "max_issues_repo_head_hexsha": "9510ef1b4f427fde68d2afa8d72bcb7b4d96ad61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate.py", "max_forks_repo_name": "7wikd/Jazz-LSTM", "max_forks_repo_head_hexsha": "9510ef1b4f427fde68d2afa8d72bcb7b4d96ad61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4615384615, "max_line_length": 77, "alphanum_fraction": 0.7039274924, "include": true, "reason": "import numpy", "num_tokens": 150}
|
import numpy as np
import igraph as ig
import matplotlib.pyplot as plt
from collections import defaultdict
error = 0.0001  # tolerance used as the threshold for considering two float values equal
class Point:
"""
Representação de um ponto com coordenadas x, y.
Alguns métodos foram implementados para tratar, por exemplo,
quando dois pontos são considerados iguais.
"""
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return np.abs(self.x - other.x) < error \
and np.abs(self.y - other.y) < error
def __hash__(self):
return hash(str(self))
def dist(self, other):
return (self.x - other.x) ** 2 + (self.y - other.y) ** 2
def val(self):
return self.x, self.y
def __iter__(self):
return iter([self.x, self.y])
class Segment:
"""
Representação de um segmento de reta, dados dois pontos P1 e P2 como entrada.
Os coeficientes que descrevem a reta logo são calculados.
b é o coeficiente angular
Método para verificar quando há interseção entre dois segmentos foi implementado também.
"""
def _b(self):
if self.p1.x == self.p2.x:
return 0
return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)
def _a(self):
return - self.b * self.p2.x + self.p2.y
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
# xb + a = y
self.b = self._b()
self.a = self._a()
def get_intersection(self, other):
        x = (other.a - self.a) / (self.b - other.b)  # the zero-denominator case was already ruled out by the caller
return x
    '''
    Checks whether two segments intersect.
    The lines through the segments are considered, the intersection point
    (if any) is computed, and finally it is checked whether that point
    belongs to both segments.
    '''
def intersects(self, other):
        # coincident endpoints are not counted as an intersection
if self.p1 == other.p1 or self.p1 == other.p2:
return False
        # coincident endpoints are not counted as an intersection
if self.p2 == other.p1 or self.p2 == other.p2:
return False
        # x-ranges completely disjoint (other lies entirely to the left)
if max(other.p1.x, other.p2.x) < min(self.p1.x, self.p2.x):
return False
        if np.abs(self.b - other.b) < error:  # parallel segments that do not coincide
return False
x = self.get_intersection(other)
return min(self.p1.x, self.p2.x) < x < max(self.p1.x, self.p2.x) \
and min(other.p1.x, other.p2.x) < x < max(other.p1.x, other.p2.x)
def plot(self):
plt.plot([self.p1.x, self.p2.x], [self.p1.y, self.p2.y])
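# Hand-checked sanity example for Segment.intersects: the two diagonals of the
# unit square cross at (0.5, 0.5), while horizontal parallel segments do not.
def _segment_intersection_demo():
    d1 = Segment(Point(0.0, 0.0), Point(1.0, 1.0))
    d2 = Segment(Point(0.0, 1.0), Point(1.0, 0.0))
    assert d1.intersects(d2)
    h1 = Segment(Point(0.0, 0.0), Point(1.0, 0.0))
    h2 = Segment(Point(0.0, 0.5), Point(1.0, 0.5))
    assert not h1.intersects(h2)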
def is_possible(g, x, y):
from shapely.geometry import LineString
P1 = g.vs[x]['coord']
P2 = g.vs[y]['coord']
segm1 = Segment(P1, P2)
line = LineString([P1.val(), P2.val()])
for e in g.es:
P3 = g.vs[e.source]['coord']
P4 = g.vs[e.target]['coord']
segm2 = Segment(P3, P4)
my_test = segm1.intersects(segm2)
other = LineString([P3.val(), P4.val()])
shapely_test = line.intersects(other)
if my_test != shapely_test:
if x == e.source or x == e.target or y == e.source or y == e.target:
pass
else:
plt.figure()
segm1.plot()
segm2.plot()
plt.show()
if my_test:
return False
return True
def get_nearest_point(points, x, invalid):
arg_min = -1
min_dist = 9999
Px = points[x]
for i, Pother in enumerate(points):
if x == i or i in invalid:
continue
else:
d = Px.dist(Pother)
if d < min_dist:
arg_min = i
min_dist = d
return arg_min
def get_points(n):
points = set()
while len(points) < n:
x = np.random.uniform(0, 1)
y = np.random.uniform(0, 1)
points.add(Point(x, y))
points = list(points)
return points
'''
Method for generating planar graph instances.
The argument n is the size of the graph given as output.
The algorithm follows the implementation suggestion of exercise 6.10
in Russell's book.
'''
def get_color_map_instance(n):
points = get_points(n)
g = ig.Graph()
g.add_vertices(n)
g.vs['coord'] = points
invalid_edges = defaultdict(lambda: set())
valid_vertices = np.arange(0, n)
while len(valid_vertices) > 0:
np.random.shuffle(valid_vertices)
x = valid_vertices[0]
y = get_nearest_point(points, x, invalid_edges[x])
if is_possible(g, x, y):
g.add_edge(x, y)
invalid_edges[x].add(y)
invalid_edges[y].add(x)
if len(invalid_edges[x]) >= n - 1:
valid_vertices = valid_vertices[valid_vertices != x]
if len(invalid_edges[y]) >= n - 1:
valid_vertices = valid_vertices[valid_vertices != y]
return g
if __name__ == '__main__':
    '''
    If this file is executed on its own, this small instance-generation
    test with visualization can be run.
    '''
for n in range(100, 251, 50):
print(n)
for j in range(10):
g = get_color_map_instance(n)
ig.plot(g, vertex_size=5, layout=g.vs['coord'])
|
{"hexsha": "76303996b5600869db689522c5d3412ad3fad62f", "size": 5702, "ext": "py", "lang": "Python", "max_stars_repo_path": "instance_generation.py", "max_stars_repo_name": "carolmb/ia_part1", "max_stars_repo_head_hexsha": "95b94e6570834cb60703b1d79180c21ab218b52d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "instance_generation.py", "max_issues_repo_name": "carolmb/ia_part1", "max_issues_repo_head_hexsha": "95b94e6570834cb60703b1d79180c21ab218b52d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "instance_generation.py", "max_forks_repo_name": "carolmb/ia_part1", "max_forks_repo_head_hexsha": "95b94e6570834cb60703b1d79180c21ab218b52d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9441624365, "max_line_length": 106, "alphanum_fraction": 0.5601543318, "include": true, "reason": "import numpy", "num_tokens": 1490}
|
Yolo Property Management is a residential property management company that manages over 300 units across Davis. Their most well-known communities include Aspen Village, Glacier Point and Saratoga West Apartments located in West Davis. Yolo Property Management has recently partnered with the University to create Graduate Student Housing in Davis at 8th and Wake. 8th and Wake is currently under construction at the former Castilian Hall site and will be open for graduate student living by August 1, 2014. Lease Agreements are currently being signed through their onsite mobile leasing office. For more information about Yolo Property Management, you can visit http://www.yolopropertymanagement.com/
For problems with or questions about your status as a renter, check out your renters rights.
Local properties managed by Yolo Property Management include
Aspen Village Apartments
Glacier Point Apartments
Saratoga West Apartments
8th and Wake
|
{"hexsha": "5c8a31d05827a66a5b67a85cb470475cf0703ecf", "size": 930, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Yolo_Property_Management.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Yolo_Property_Management.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Yolo_Property_Management.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 84.5454545455, "max_line_length": 701, "alphanum_fraction": 0.8225806452, "num_tokens": 187}
|
# Copyright 2017 Hugh Salimbeni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import pandas
import logging
from datetime import datetime
from scipy.io import loadmat
from urllib.request import urlopen
logging.getLogger().setLevel(logging.INFO)
import zipfile
from bayesian_benchmarks.paths import DATA_PATH, BASE_SEED
_ALL_REGRESSION_DATATSETS = {}
_ALL_CLASSIFICATION_DATATSETS = {}
def add_regression(C):
_ALL_REGRESSION_DATATSETS.update({C.name:C})
return C
def add_classficiation(C):
_ALL_CLASSIFICATION_DATATSETS.update({C.name:C})
return C
def normalize(X):
X_mean = np.average(X, 0)[None, :]
X_std = 1e-6 + np.std(X, 0)[None, :]
return (X - X_mean) / X_std, X_mean, X_std
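# normalize() standardizes each column to zero mean and unit standard
# deviation; the 1e-6 term guards against division by zero for constant
# features.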
class Dataset(object):
def __init__(self, split=0, prop=0.9):
if self.needs_download:
self.download()
X_raw, Y_raw = self.read_data()
X, Y = self.preprocess_data(X_raw, Y_raw)
ind = np.arange(self.N)
np.random.seed(BASE_SEED + split)
np.random.shuffle(ind)
n = int(self.N * prop)
self.X_train = X[ind[:n]]
self.Y_train = Y[ind[:n]]
self.X_test = X[ind[n:]]
self.Y_test = Y[ind[n:]]
@property
def datadir(self):
dir = os.path.join(DATA_PATH, self.name)
if not os.path.isdir(dir):
os.mkdir(dir)
return dir
@property
def datapath(self):
filename = self.url.split('/')[-1] # this is for the simple case with no zipped files
return os.path.join(self.datadir, filename)
@property
def needs_download(self):
return not os.path.isfile(self.datapath)
def download(self):
        logging.info('downloading {} data'.format(self.name))
is_zipped = np.any([z in self.url for z in ['.gz', '.zip', '.tar']])
if is_zipped:
filename = os.path.join(self.datadir, self.url.split('/')[-1])
else:
filename = self.datapath
with urlopen(self.url) as response, open(filename, 'wb') as out_file:
data = response.read()
out_file.write(data)
if is_zipped:
zip_ref = zipfile.ZipFile(filename, 'r')
zip_ref.extractall(self.datadir)
zip_ref.close()
# os.remove(filename)
        logging.info('finished downloading {} data'.format(self.name))
def read_data(self):
raise NotImplementedError
def preprocess_data(self, X, Y):
X, self.X_mean, self.X_std = normalize(X)
Y, self.Y_mean, self.Y_std = normalize(Y)
return X, Y
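# Minimal usage sketch for the concrete datasets defined below: instantiating
# a subclass downloads the raw file if needed, then produces a deterministic
# train/test split seeded by BASE_SEED + split.
def _example_split_shapes():
    data = Boston(split=0, prop=0.9)        # Boston is defined further down
    assert data.X_train.shape == (455, 13)  # int(0.9 * 506) rows
    assert data.X_test.shape == (51, 13)    # the remaining rows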
uci_base_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'
@add_regression
class Boston(Dataset):
N, D, name = 506, 13, 'boston'
url = uci_base_url + 'housing/housing.data'
def read_data(self):
data = pandas.read_fwf(self.datapath, header=None).values
return data[:, :-1], data[:, -1].reshape(-1, 1)
@add_regression
class Concrete(Dataset):
N, D, name = 1030, 8, 'concrete'
url = uci_base_url + 'concrete/compressive/Concrete_Data.xls'
def read_data(self):
data = pandas.read_excel(self.datapath).values
return data[:, :-1], data[:, -1].reshape(-1, 1)
@add_regression
class Energy(Dataset):
N, D, name = 768, 8, 'energy'
url = uci_base_url + '00242/ENB2012_data.xlsx'
def read_data(self):
# NB this is the first output (aka Energy1, as opposed to Energy2)
data = pandas.read_excel(self.datapath).values[:, :-1]
return data[:, :-1], data[:, -1].reshape(-1, 1)
@add_regression
class Kin8mn(Dataset):
N, D, name = 8192, 8, 'kin8nm'
url = 'http://mldata.org/repository/data/download/csv/uci-20070111-kin8nm'
def read_data(self):
data = pandas.read_csv(self.datapath, header=None).values
return data[:, :-1], data[:, -1].reshape(-1, 1)
@add_regression
class Naval(Dataset):
N, D, name = 11934, 14, 'naval'
url = uci_base_url + '00316/UCI%20CBM%20Dataset.zip'
@property
def datapath(self):
return os.path.join(self.datadir, 'UCI CBM Dataset/data.txt')
def read_data(self):
data = pandas.read_fwf(self.datapath, header=None).values
# NB this is the first output
X = data[:, :-2]
Y = data[:, -2].reshape(-1, 1)
# dims 8 and 11 have std=0:
X = np.delete(X, [8, 11], axis=1)
return X, Y
@add_regression
class Power(Dataset):
N, D, name = 9568, 4, 'power'
url = uci_base_url + '00294/CCPP.zip'
@property
def datapath(self):
return os.path.join(self.datadir, 'CCPP/Folds5x2_pp.xlsx')
def read_data(self):
data = pandas.read_excel(self.datapath).values
return data[:, :-1], data[:, -1].reshape(-1, 1)
@add_regression
class Protein(Dataset):
N, D, name = 45730, 9, 'protein'
url = uci_base_url + '00265/CASP.csv'
def read_data(self):
data = pandas.read_csv(self.datapath).values
return data[:, 1:], data[:, 0].reshape(-1, 1)
@add_regression
class WineRed(Dataset):
N, D, name = 1599, 11, 'winered'
url = uci_base_url + 'wine-quality/winequality-red.csv'
def read_data(self):
data = pandas.read_csv(self.datapath, delimiter=';').values
return data[:, :-1], data[:, -1].reshape(-1, 1)
@add_regression
class WineWhite(WineRed):
N, D, name = 4898, 11, 'winewhite'
url = uci_base_url + 'wine-quality/winequality-white.csv'
@add_regression
class Yacht(Dataset):
N, D, name = 308, 6, 'yacht'
url = uci_base_url + '/00243/yacht_hydrodynamics.data'
def read_data(self):
data = pandas.read_fwf(self.datapath, header=None).values[:-1, :]
return data[:, :-1], data[:, -1].reshape(-1, 1)
class Classification(Dataset):
def preprocess_data(self, X, Y):
X, self.X_mean, self.X_std = normalize(X)
return X, Y
@property
def needs_download(self):
        return not os.path.isfile(os.path.join(DATA_PATH, 'classification_data', 'iris', 'iris_R.dat'))
def download(self):
        logging.info('downloading classification data. WARNING: downloading a 195MB file')
filename = os.path.join(DATA_PATH, 'classification_data.tar.gz')
url = 'http://persoal.citius.usc.es/manuel.fernandez.delgado/papers/jmlr/data.tar.gz'
with urlopen(url) as response, open(filename, 'wb') as out_file:
data = response.read()
out_file.write(data)
import tarfile
tar = tarfile.open(filename)
tar.extractall(path=os.path.join(DATA_PATH, 'classification_data'))
tar.close()
        logging.info('finished downloading {} data'.format(self.name))
def read_data(self):
datapath = os.path.join(DATA_PATH, 'classification_data', self.name, self.name + '_R.dat')
if os.path.isfile(datapath):
data = np.array(pandas.read_csv(datapath, header=0, delimiter='\t').values).astype(float)
else:
data_path1 = os.path.join(DATA_PATH, 'classification_data', self.name, self.name + '_train_R.dat')
data1 = np.array(pandas.read_csv(data_path1, header=0, delimiter='\t').values).astype(float)
data_path2 = os.path.join(DATA_PATH, 'classification_data', self.name, self.name + '_test_R.dat')
data2 = np.array(pandas.read_csv(data_path2, header=0, delimiter='\t').values).astype(float)
data = np.concatenate([data1, data2], 0)
return data[:, :-1], data[:, -1].reshape(-1, 1)
rescale = lambda x, a, b: b[0] + (b[1] - b[0]) * x / (a[1] - a[0])
def convert_to_day_minute(d):
day_of_week = rescale(float(d.weekday()), [0, 6], [0, 2 * np.pi])
time_of_day = rescale(d.time().hour * 60 + d.time().minute, [0, 24 * 60], [0, 2 * np.pi])
return day_of_week, time_of_day
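# Example: a Wednesday (weekday() == 2) at 12:00 maps to
# day_of_week = 2*pi * 2/6 and time_of_day = pi. Note that rescale() above
# silently assumes a[0] == 0, which holds for every call site in this file.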
def process_time(pickup_datetime, dropoff_datetime):
d_pickup = datetime.strptime(pickup_datetime, "%Y-%m-%d %H:%M:%S")
d_dropoff = datetime.strptime(dropoff_datetime, "%Y-%m-%d %H:%M:%S")
duration = (d_dropoff - d_pickup).total_seconds()
pickup_day_of_week, pickup_time_of_day = convert_to_day_minute(d_pickup)
dropoff_day_of_week, dropoff_time_of_day = convert_to_day_minute(d_dropoff)
return [pickup_day_of_week, pickup_time_of_day,
dropoff_day_of_week, dropoff_time_of_day,
duration]
class NYTaxiBase(Dataset):
x_bounds = [-74.04, -73.75]
y_bounds = [40.62, 40.86]
too_close_radius = 0.00001
min_duration = 30
max_duration = 3 * 3600
name = 'nytaxi'
def _read_data(self):
data = pandas.read_csv(self.datapath)#, nrows=10000)
data = data.values
# print(data.dtypes.index)
# 'id', 0
# 'vendor_id', 1
# 'pickup_datetime', 2
# 'dropoff_datetime',3
# 'passenger_count', 4
# 'pickup_longitude', 5
# 'pickup_latitude',6
# 'dropoff_longitude', 7
# 'dropoff_latitude', 8
# 'store_and_fwd_flag',9
# 'trip_duration'10
pickup_loc = np.array((data[:, 5], data[:, 6])).T
dropoff_loc = np.array((data[:, 7], data[:, 8])).T
ind = np.ones(len(data)).astype(bool)
ind[data[:, 5] < self.x_bounds[0]] = False
ind[data[:, 5] > self.x_bounds[1]] = False
ind[data[:, 6] < self.y_bounds[0]] = False
ind[data[:, 6] > self.y_bounds[1]] = False
ind[data[:, 7] < self.x_bounds[0]] = False
ind[data[:, 7] > self.x_bounds[1]] = False
ind[data[:, 8] < self.y_bounds[0]] = False
ind[data[:, 8] > self.y_bounds[1]] = False
print('discarding {} out of bounds {} {}'.format(np.sum(np.invert(ind).astype(int)), self.x_bounds,
self.y_bounds))
early_stop = ((data[:, 5] - data[:, 7]) ** 2 + (data[:, 6] - data[:, 8]) ** 2 < self.too_close_radius)
ind[early_stop] = False
print('discarding {} trip less than {} gp dist'.format(np.sum(early_stop.astype(int)),
self.too_close_radius ** 0.5))
times = np.array([process_time(d_pickup, d_dropoff) for (d_pickup, d_dropoff) in data[:, 2:4]])
pickup_time = times[:, :2]
dropoff_time = times[:, 2:4]
duration = times[:, 4]
short_journeys = (duration < self.min_duration)
ind[short_journeys] = False
print('discarding {} less than {}s journeys'.format(np.sum(short_journeys.astype(int)), self.min_duration))
long_journeys = (duration > self.max_duration)
ind[long_journeys] = False
print(
'discarding {} more than {}h journeys'.format(np.sum(long_journeys.astype(int)), self.max_duration / 3600.))
pickup_loc = pickup_loc[ind, :]
dropoff_loc = dropoff_loc[ind, :]
pickup_time = pickup_time[ind, :]
dropoff_time = dropoff_time[ind, :]
duration = duration[ind]
print('{} total rejected journeys'.format(np.sum(np.invert(ind).astype(int))))
return pickup_loc, dropoff_loc, pickup_time, dropoff_time, duration
@property
def datapath(self):
filename = 'train.csv'
return os.path.join(self.datadir, filename)
def download(self):
raise NotImplementedError
@add_regression
class NYTaxiTimePrediction(NYTaxiBase):
N, D = 1420068, 8
# N, D = 9741, 6
def read_data(self):
path = os.path.join(DATA_PATH, 'taxitime_preprocessed.npz')
if os.path.isfile(path):
with open(path, 'rb') as file:
f = np.load(file)
X, Y = f['X'], f['Y']
else:
pickup_loc, dropoff_loc, pickup_datetime, dropoff_datetime, duration = self._read_data()
pickup_sc = np.array([np.sin(pickup_datetime[:, 0]),
np.cos(pickup_datetime[:, 0]),
np.sin(pickup_datetime[:, 1]),
np.cos(pickup_datetime[:, 1])]).T
X = np.concatenate([pickup_loc, dropoff_loc, pickup_sc], 1)
Y = duration.reshape(-1, 1)
X, Y = np.array(X).astype(float), np.array(Y).astype(float)
with open(path, 'wb') as file:
np.savez(file, X=X, Y=Y)
return X, Y
class NYTaxiLocationPrediction(NYTaxiBase):
N, D = 1420068, 6
def read_data(self):
path = os.path.join(DATA_PATH, 'taxiloc_preprocessed.npz')
if os.path.isfile(path):
with open(path, 'rb') as file:
f = np.load(file)
X, Y = f['X'], f['Y']
else:
pickup_loc, dropoff_loc, pickup_datetime, dropoff_datetime, duration = self._read_data()
pickup_sc = np.array([np.sin(pickup_datetime[:, 0]),
np.cos(pickup_datetime[:, 0]),
np.sin(pickup_datetime[:, 1]),
np.cos(pickup_datetime[:, 1])]).T
# X = np.concatenate([pickup_loc, pickup_sc, duration.reshape(-1, 1)], 1)
X = np.concatenate([pickup_loc, pickup_sc], 1)
Y = dropoff_loc
X, Y = np.array(X).astype(float), np.array(Y).astype(float)
with open(path, 'wb') as file:
np.savez(file, X=X, Y=Y)
return X, Y
def preprocess_data(self, X, Y):
return X, Y
# Andrew Wilson's datasets
#https://drive.google.com/open?id=0BxWe_IuTnMFcYXhxdUNwRHBKTlU
class WilsonDataset(Dataset):
@property
def datapath(self):
n = self.name[len('wilson_'):]
return '{}/uci/{}/{}.mat'.format(DATA_PATH, n, n)
def read_data(self):
data = loadmat(self.datapath)['data']
return data[:, :-1], data[:, -1, None]
@add_regression
class Wilson_3droad(WilsonDataset):
name, N, D = 'wilson_3droad', 434874, 3
@add_regression
class Wilson_challenger(WilsonDataset):
name, N, D = 'wilson_challenger', 23, 4
@add_regression
class Wilson_gas(WilsonDataset):
name, N, D = 'wilson_gas', 2565, 128
@add_regression
class Wilson_servo(WilsonDataset):
name, N, D = 'wilson_servo', 167, 4
@add_regression
class Wilson_tamielectric(WilsonDataset):
name, N, D = 'wilson_tamielectric', 45781, 3
@add_regression
class Wilson_airfoil(WilsonDataset):
name, N, D = 'wilson_airfoil', 1503, 5
@add_regression
class Wilson_concrete(WilsonDataset):
name, N, D = 'wilson_concrete', 1030, 8
@add_regression
class Wilson_machine(WilsonDataset):
name, N, D = 'wilson_machine', 209, 7
@add_regression
class Wilson_skillcraft(WilsonDataset):
name, N, D = 'wilson_skillcraft', 3338, 19
@add_regression
class Wilson_wine(WilsonDataset):
name, N, D = 'wilson_wine', 1599, 11
@add_regression
class Wilson_autompg(WilsonDataset):
name, N, D = 'wilson_autompg', 392, 7
@add_regression
class Wilson_concreteslump(WilsonDataset):
name, N, D = 'wilson_concreteslump', 103, 7
@add_regression
class Wilson_houseelectric(WilsonDataset):
name, N, D = 'wilson_houseelectric', 2049280, 11
@add_regression
class Wilson_parkinsons(WilsonDataset):
name, N, D = 'wilson_parkinsons', 5875, 20
@add_regression
class Wilson_slice(WilsonDataset):
name, N, D = 'wilson_slice', 53500, 385
@add_regression
class Wilson_yacht(WilsonDataset):
name, N, D = 'wilson_yacht', 308, 6
@add_regression
class Wilson_autos(WilsonDataset):
name, N, D = 'wilson_autos', 159, 25
@add_regression
class Wilson_elevators(WilsonDataset):
name, N, D = 'wilson_elevators', 16599, 18
@add_regression
class Wilson_housing(WilsonDataset):
name, N, D = 'wilson_housing', 506, 13
@add_regression
class Wilson_pendulum(WilsonDataset):
name, N, D = 'wilson_pendulum', 630, 9
@add_regression
class Wilson_sml(WilsonDataset):
name, N, D = 'wilson_sml', 4137, 26
@add_regression
class Wilson_bike(WilsonDataset):
name, N, D = 'wilson_bike', 17379, 17
@add_regression
class Wilson_energy(WilsonDataset):
name, N, D = 'wilson_energy', 768, 8
@add_regression
class Wilson_keggdirected(WilsonDataset):
name, N, D = 'wilson_keggdirected', 48827, 20
@add_regression
class Wilson_pol(WilsonDataset):
name, N, D = 'wilson_pol', 15000, 26
@add_regression
class Wilson_solar(WilsonDataset):
name, N, D = 'wilson_solar', 1066, 10
@add_regression
class Wilson_breastcancer(WilsonDataset):
name, N, D = 'wilson_breastcancer', 194, 33
@add_regression
class Wilson_fertility(WilsonDataset):
name, N, D = 'wilson_fertility', 100, 9
@add_regression
class Wilson_keggundirected(WilsonDataset):
name, N, D = 'wilson_keggundirected', 63608, 27
@add_regression
class Wilson_protein(WilsonDataset):
name, N, D = 'wilson_protein', 45730, 9
@add_regression
class Wilson_song(WilsonDataset):
name, N, D = 'wilson_song', 515345, 90
@add_regression
class Wilson_buzz(WilsonDataset):
name, N, D = 'wilson_buzz', 583250, 77
@add_regression
class Wilson_forest(WilsonDataset):
name, N, D = 'wilson_forest', 517, 12
@add_regression
class Wilson_kin40k(WilsonDataset):
name, N, D = 'wilson_kin40k', 40000, 8
@add_regression
class Wilson_pumadyn32nm(WilsonDataset):
name, N, D = 'wilson_pumadyn32nm', 8192, 32
@add_regression
class Wilson_stock(WilsonDataset):
name, N, D = 'wilson_stock', 536, 11
classification_datasets = [
['heart-va', 200, 13, 5],
['connect-4', 67557, 43, 2],
['wine', 178, 14, 3],
['tic-tac-toe', 958, 10, 2],
['fertility', 100, 10, 2],
['statlog-german-credit', 1000, 25, 2],
['car', 1728, 7, 4],
['libras', 360, 91, 15],
['spambase', 4601, 58, 2],
['pittsburg-bridges-MATERIAL', 106, 8, 3],
['hepatitis', 155, 20, 2],
['acute-inflammation', 120, 7, 2],
['pittsburg-bridges-TYPE', 105, 8, 6],
['arrhythmia', 452, 263, 13],
['musk-2', 6598, 167, 2],
['twonorm', 7400, 21, 2],
['nursery', 12960, 9, 5],
['breast-cancer-wisc-prog', 198, 34, 2],
['seeds', 210, 8, 3],
['lung-cancer', 32, 57, 3],
['waveform', 5000, 22, 3],
['audiology-std', 196, 60, 18],
['trains', 10, 30, 2],
['horse-colic', 368, 26, 2],
['miniboone', 130064, 51, 2],
['pittsburg-bridges-SPAN', 92, 8, 3],
['breast-cancer-wisc-diag', 569, 31, 2],
['statlog-heart', 270, 14, 2],
['blood', 748, 5, 2],
['primary-tumor', 330, 18, 15],
['cylinder-bands', 512, 36, 2],
['glass', 214, 10, 6],
['contrac', 1473, 10, 3],
['statlog-shuttle', 58000, 10, 7],
['zoo', 101, 17, 7],
['musk-1', 476, 167, 2],
['hill-valley', 1212, 101, 2],
['hayes-roth', 160, 4, 3],
['optical', 5620, 63, 10],
['credit-approval', 690, 16, 2],
['pendigits', 10992, 17, 10],
['pittsburg-bridges-REL-L', 103, 8, 3],
['dermatology', 366, 35, 6],
['soybean', 683, 36, 18],
['ionosphere', 351, 34, 2],
['planning', 182, 13, 2],
['energy-y1', 768, 9, 3],
['acute-nephritis', 120, 7, 2],
['pittsburg-bridges-T-OR-D', 102, 8, 2],
['letter', 20000, 17, 26],
['titanic', 2201, 4, 2],
['adult', 48842, 15, 2],
['lymphography', 148, 19, 4],
['statlog-australian-credit', 690, 15, 2],
['chess-krvk', 28056, 7, 18],
['bank', 4521, 17, 2],
['statlog-landsat', 6435, 37, 6],
['heart-hungarian', 294, 13, 2],
['flags', 194, 29, 8],
['mushroom', 8124, 22, 2],
['conn-bench-sonar-mines-rocks', 208, 61, 2],
['image-segmentation', 2310, 19, 7],
['congressional-voting', 435, 17, 2],
['annealing', 898, 32, 5],
['semeion', 1593, 257, 10],
['echocardiogram', 131, 11, 2],
['statlog-image', 2310, 19, 7],
['wine-quality-white', 4898, 12, 7],
['lenses', 24, 5, 3],
['plant-margin', 1600, 65, 100],
['post-operative', 90, 9, 3],
['thyroid', 7200, 22, 3],
['monks-2', 601, 7, 2],
['molec-biol-promoter', 106, 58, 2],
['chess-krvkp', 3196, 37, 2],
['balloons', 16, 5, 2],
['low-res-spect', 531, 101, 9],
['plant-texture', 1599, 65, 100],
['haberman-survival', 306, 4, 2],
['spect', 265, 23, 2],
['plant-shape', 1600, 65, 100],
['parkinsons', 195, 23, 2],
['oocytes_merluccius_nucleus_4d', 1022, 42, 2],
['conn-bench-vowel-deterding', 990, 12, 11],
['ilpd-indian-liver', 583, 10, 2],
['heart-cleveland', 303, 14, 5],
['synthetic-control', 600, 61, 6],
['vertebral-column-2clases', 310, 7, 2],
['teaching', 151, 6, 3],
['cardiotocography-10clases', 2126, 22, 10],
['heart-switzerland', 123, 13, 5],
['led-display', 1000, 8, 10],
['molec-biol-splice', 3190, 61, 3],
['wall-following', 5456, 25, 4],
['statlog-vehicle', 846, 19, 4],
['ringnorm', 7400, 21, 2],
['energy-y2', 768, 9, 3],
['oocytes_trisopterus_nucleus_2f', 912, 26, 2],
['yeast', 1484, 9, 10],
['oocytes_merluccius_states_2f', 1022, 26, 3],
['oocytes_trisopterus_states_5b', 912, 33, 3],
['breast-cancer-wisc', 699, 10, 2],
['steel-plates', 1941, 28, 7],
['mammographic', 961, 6, 2],
['monks-3', 554, 7, 2],
['balance-scale', 625, 5, 3],
['ecoli', 336, 8, 8],
['spectf', 267, 45, 2],
['monks-1', 556, 7, 2],
['page-blocks', 5473, 11, 5],
['magic', 19020, 11, 2],
['pima', 768, 9, 2],
['breast-tissue', 106, 10, 6],
['ozone', 2536, 73, 2],
['iris', 150, 5, 3],
['waveform-noise', 5000, 41, 3],
['cardiotocography-3clases', 2126, 22, 3],
['wine-quality-red', 1599, 12, 6],
['vertebral-column-3clases', 310, 7, 3],
['breast-cancer', 286, 10, 2],
['abalone', 4177, 9, 3],
]
for name, N, D, K in classification_datasets:
@add_classficiation
class C(Classification):
name, N, D, K = name, N, D, K
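# Note: each iteration of the loop above registers a fresh class via the
# decorator; the class-body assignment reads the loop variables from module
# scope, so every registered dataset gets its own (name, N, D, K).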
##########################
regression_datasets = list(_ALL_REGRESSION_DATATSETS.keys())
regression_datasets.sort()
classification_datasets = list(_ALL_CLASSIFICATION_DATATSETS.keys())
classification_datasets.sort()
def get_regression_data(name, *args, **kwargs):
return _ALL_REGRESSION_DATATSETS[name](*args, **kwargs)
def get_classification_data(name, *args, **kwargs):
return _ALL_CLASSIFICATION_DATATSETS[name](*args, **kwargs)
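# Hedged usage sketch (requires the corresponding dataset files to be
# available locally; the X_train/Y_train attribute names are assumptions):
#
#     data = get_regression_data('wilson_energy')
#     X_train, Y_train = data.X_train, data.Y_train
#     print(regression_datasets[:3])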
|
{"hexsha": "a16ea6150ea9619cadbcbfa4a67dae69369ed67f", "size": 22808, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/uci_exps/bayesian_benchmarks/data.py", "max_stars_repo_name": "b4thesunrise/drbayes", "max_stars_repo_head_hexsha": "9bc827aea2c7f084fb1ee77a4bd9f3c9726ecf8c", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2019-06-14T15:45:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T07:53:00.000Z", "max_issues_repo_path": "experiments/uci_exps/bayesian_benchmarks/data.py", "max_issues_repo_name": "b4thesunrise/drbayes", "max_issues_repo_head_hexsha": "9bc827aea2c7f084fb1ee77a4bd9f3c9726ecf8c", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-02-24T15:39:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T20:01:21.000Z", "max_forks_repo_path": "experiments/uci_exps/bayesian_benchmarks/data.py", "max_forks_repo_name": "b4thesunrise/drbayes", "max_forks_repo_head_hexsha": "9bc827aea2c7f084fb1ee77a4bd9f3c9726ecf8c", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-07-19T00:12:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T23:55:18.000Z", "avg_line_length": 29.5058214748, "max_line_length": 120, "alphanum_fraction": 0.6086460891, "include": true, "reason": "import numpy,from scipy", "num_tokens": 7215}
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE :
# AUTHOR :
# PROJECT :
#
# ----------------------------------------------------------------------------
# Docstring
"""GC Orbit Solution Script.
Convert the sky coordinates, distances, mean PM and line-of-sight velocities
of all clusters produced by runfit.py to Galactocentric cartesian coordinates,
sampling from uncertainty covariance matrix of all parameters.
Produces the file "posvel.txt" which contains bootstrapped samples (by default
100 for each cluster) of positions and velocities.
After this first step, compute the galactic orbit for each of these samples,
obtain peri/apocenter distances, orbital energy and actions, and store the
median and 68% confidence intervals on these quantities in a file
"result_orbits.txt".
This second step uses the best-fit potential from McMillan 2017, and employs
the Agama library ( https://github.com/GalacticDynamics-Oxford/Agama ) for
computing the orbits and actions.
For many clusters, the confidence intervals reported in "result_orbits.txt"
are small enough to realistically represent the uncertainties;
however, often the distribution of these parameters is significantly
correlated, elongated and does not resemble an ellipse at all,
hence these results may only serve as a rough guide.
DEPENDENCIES: numpy, astropy; optionally (for the 2nd step) agama.
RESOURCES: run time: ~30 CPU minutes (parallelized - wall-clock time is lower).
"""
__author__ = "Eugene Vasiliev"
__maintainer__ = "Nathaniel Starkman"
###############################################################################
# IMPORTS
# GENERAL
import os
import pathlib
import argparse
import warnings
from typing import Optional
import numpy as np
from astropy import units as u
from astropy import coordinates as coord
# CUSTOM
import agama
###############################################################################
# PARAMETERS
DATA = str(pathlib.Path(__file__).parent.absolute()) + "/data/"
###############################################################################
# Command Line
###############################################################################
def make_parser(inheritable=False):
"""Expose parser for ``main``.
Parameters
----------
inheritable: bool
whether the parser can be inherited from (default False).
        if True, sets ``add_help=False`` and ``conflict_handler='resolve'``
Returns
-------
parser: ArgumentParser
"""
parser = argparse.ArgumentParser(
description="Run Orbits",
        add_help=not inheritable,
        conflict_handler="resolve" if inheritable else "error",
)
return parser
# /def
# ------------------------------------------------------------------------
def main(
args: Optional[list] = None, opts: Optional[argparse.Namespace] = None
):
"""Script Function.
Parameters
----------
args : list, optional
an optional single argument that holds the sys.argv list,
except for the script name (e.g., argv[1:])
opts : Namespace, optional
pre-constructed results of parsed args
if not None, used ONLY if args is None
"""
if opts is not None and args is None:
pass
else:
if opts is not None:
warnings.warn("Not using `opts` because `args` are given")
parser = make_parser()
opts = parser.parse_args(args)
foutname = DATA + "result_orbits.txt"
if not os.path.isfile(foutname):
# STEP 1: create Monte Carlo realizations of position and velocity of each cluster,
# sampling from their measured uncertainties.
# this file should have been produced by run_fit.py
tab = np.loadtxt(DATA + "summary.txt", dtype=str)
names = tab[:, 0] # 0th column is the cluster name (string)
tab = tab[:, 1:].astype(float) # remaining columns are numbers
ra0 = tab[:, 0] # coordinates of cluster centers [deg]
dec0 = tab[:, 1]
dist0 = tab[:, 2] # distance [kpc]
vlos0 = tab[:, 3] # line-of-sight velocity [km/s]
vlose = tab[:, 4] # its error estimate
pmra0 = tab[:, 7] # mean proper motion [mas/yr]
pmdec0 = tab[:, 8]
pmrae = tab[:, 9] # its uncertainty
pmdece = tab[:, 10]
pmcorr = tab[
:, 11
] # correlation coefficient for errors in two PM components
vlose = np.maximum(
vlose, 2.0
) # assumed error of at least 2 km/s on line-of-sight velocity
diste = (
dist0 * 0.46 * 0.1
) # assumed error of 0.1 mag in distance modulus
# create bootstrap samples
np.random.seed(42) # ensure repeatability of random samples
nboot = 100 # number of bootstrap samples for each cluster
nclust = len(tab)
ra = np.repeat(ra0, nboot)
dec = np.repeat(dec0, nboot)
pmra = np.repeat(pmra0, nboot)
pmdec = np.repeat(pmdec0, nboot)
for i in range(nclust):
            # draw PM realizations from a correlated 2D Gaussian for each
            # cluster: with A, Z ~ N(0,1) independent,
            # B = pmcorr*A + sqrt(1 - pmcorr^2)*Z has unit variance
            # and corr(A, B) = pmcorr
A = np.random.normal(size=nboot)
B = (
np.random.normal(size=nboot) * (1 - pmcorr[i] ** 2) ** 0.5
+ A * pmcorr[i]
)
pmra[i * nboot : (i + 1) * nboot] += pmrae[i] * A
pmdec[i * nboot : (i + 1) * nboot] += pmdece[i] * B
vlos = np.repeat(vlos0, nboot) + np.hstack(
[np.random.normal(scale=e, size=nboot) for e in vlose]
)
dist = np.repeat(dist0, nboot) + np.hstack(
[np.random.normal(scale=e, size=nboot) for e in diste]
)
# convert coordinates from heliocentric (ra,dec,dist,PM,vlos) to Galactocentric (kpc and km/s)
        u.kms = u.km / u.s  # attach a km/s shorthand to the astropy units namespace
c_sky = coord.ICRS(
ra=ra * u.degree,
dec=dec * u.degree,
pm_ra_cosdec=pmra * u.mas / u.yr,
pm_dec=pmdec * u.mas / u.yr,
distance=dist * u.kpc,
radial_velocity=vlos * u.kms,
)
c_gal = c_sky.transform_to(
coord.Galactocentric(
galcen_distance=8.2 * u.kpc,
galcen_v_sun=coord.CartesianDifferential(
[10.0, 248.0, 7.0] * u.kms
),
)
)
pos = np.column_stack(
(c_gal.x / u.kpc, c_gal.y / u.kpc, c_gal.z / u.kpc)
)
vel = np.column_stack(
(c_gal.v_x / u.kms, c_gal.v_y / u.kms, c_gal.v_z / u.kms)
)
# add uncertainties from the solar position and velocity
pos[:, 0] += np.random.normal(
scale=0.1, size=nboot * nclust
) # uncertainty in solar distance from Galactic center
vel[:, 0] += np.random.normal(
scale=1.0, size=nboot * nclust
) # uncertainty in solar velocity
vel[:, 1] += np.random.normal(scale=3.0, size=nboot * nclust)
vel[:, 2] += np.random.normal(scale=1.0, size=nboot * nclust)
        # revert to the normal orientation of the coordinate system
        # (solar position at x = +8.2 kpc)
        pos[:, 0] *= -1
        vel[:, 0] *= -1  # same for velocity
posvel = np.column_stack((pos, vel)).value
np.savetxt(DATA + "posvel.txt", posvel, fmt="%.6g")
# STEP 2: compute the orbits, min/max galactocentric radii, and actions, for all Monte Carlo samples
print(
agama.setUnits(length=1, velocity=1, mass=1)
) # units: kpc, km/s, Msun; time unit ~ 1 Gyr
potential = agama.Potential(
DATA + "McMillan17.ini"
) # MW potential from McMillan(2017)
# compute orbits for each realization of initial conditions,
# integrated for 100 dynamical times or 20 Gyr (whichever is lower)
print(
"Computing orbits for %d realizations of cluster initial conditions"
% len(posvel)
)
inttime = np.minimum(20.0, potential.Tcirc(posvel) * 100)
orbits = agama.orbit(
ic=posvel, potential=potential, time=inttime, trajsize=1000
)[:, 1]
rmin = np.zeros(len(orbits))
rmax = np.zeros(len(orbits))
for i, o in enumerate(orbits):
r = np.sum(o[:, 0:3] ** 2, axis=1) ** 0.5
rmin[i] = np.min(r) if len(r) > 0 else np.nan
rmax[i] = np.max(r) if len(r) > 0 else np.nan
# replace nboot samples rmin/rmax with their median and 68% confidence intervals for each cluster
rmin = np.nanpercentile(
rmin.reshape(nclust, nboot), [16, 50, 84], axis=1
)
rmax = np.nanpercentile(
rmax.reshape(nclust, nboot), [16, 50, 84], axis=1
)
# compute actions for the same initial conditions
actfinder = agama.ActionFinder(potential)
actions = actfinder(posvel)
# again compute the median and 68% confidence intervals for each cluster
actions = np.nanpercentile(
actions.reshape(nclust, nboot, 3), [16, 50, 84], axis=1
)
# compute the same confidence intervals for the total energy
energy = potential.potential(posvel[:, 0:3]) + 0.5 * np.sum(
posvel[:, 3:6] ** 2, axis=1
)
energy = np.percentile(
energy.reshape(nclust, nboot), [16, 50, 84], axis=1
)
# write the orbit parameters, actions and energy - one line per cluster, with the median and uncertainties
fileout = open(foutname, "w")
fileout.write(
"# Name \t pericenter[kpc] \t apocenter[kpc] \t"
+ " Jr[kpc*km/s] \t Jz[kpc*km/s] \t Jphi[kpc*km/s] \t Energy[km^2/s^2] \n"
)
for i in range(nclust):
fileout.write(
("%-15s" + "\t%7.2f" * 6 + "\t%7.0f" * 12 + "\n")
% (
names[i],
rmin[0, i],
rmin[1, i],
rmin[2, i],
rmax[0, i],
rmax[1, i],
rmax[2, i],
actions[0, i, 0],
actions[1, i, 0],
actions[2, i, 0],
actions[0, i, 1],
actions[1, i, 1],
actions[2, i, 1],
actions[0, i, 2],
actions[1, i, 2],
actions[2, i, 2],
energy[0, i],
energy[1, i],
energy[2, i],
)
)
fileout.close()
# /if
# /def
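# Minimal invocation sketch (hypothetical; requires "summary.txt" and
# "McMillan17.ini" in the DATA directory and the agama package installed):
#
#     if __name__ == "__main__":
#         main(args=[])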
###############################################################################
# END
|
{"hexsha": "5ac7d7104956548987c97732b8b6831ed8aebdca", "size": 10825, "ext": "py", "lang": "Python", "max_stars_repo_path": "jas1101finalproject/scripts/get_globular_clusters/run_orbits.py", "max_stars_repo_name": "nstarman/jas1101_project", "max_stars_repo_head_hexsha": "f54620b715eb2f7dbe7bd39d4a1e21e50bc06541", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-20T18:27:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-20T18:27:20.000Z", "max_issues_repo_path": "jas1101finalproject/scripts/get_globular_clusters/run_orbits.py", "max_issues_repo_name": "nstarman/jas1101_project", "max_issues_repo_head_hexsha": "f54620b715eb2f7dbe7bd39d4a1e21e50bc06541", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-20T00:15:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-20T00:16:27.000Z", "max_forks_repo_path": "jas1101finalproject/scripts/get_globular_clusters/run_orbits.py", "max_forks_repo_name": "nstarman/jas1101_project", "max_forks_repo_head_hexsha": "f54620b715eb2f7dbe7bd39d4a1e21e50bc06541", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1461038961, "max_line_length": 116, "alphanum_fraction": 0.5320092379, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2747}
|
import numpy as np
import pytest
from ermaket.utils import Singleton
_created = 0
class Dummy(metaclass=Singleton):
def __init__(self):
global _created
_created += 1
self.info = np.random.random()
def test_create():
global _created
_created = 0
a = Dummy()
b = Dummy()
c = Dummy()
assert _created == 1
assert a.info == b.info == c.info
def test_reset():
global _created
Singleton.reset()
_created = 0
a = Dummy()
b = Dummy()
Singleton.reset()
c = Dummy()
assert _created == 2
assert a.info == b.info
assert a.info != c.info
@pytest.mark.usefixtures('block_singletons')
def test_block():
global _created
_created = 0
a = Dummy()
b = Dummy()
c = Dummy()
assert _created == 3
assert a.info != b.info != c.info
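# For reference, a minimal metaclass consistent with the tests above might
# look like the sketch below. This is an assumption for illustration only;
# the real ermaket.utils.Singleton (including how the block_singletons
# fixture disables caching) may be implemented differently.
class _SingletonSketch(type):
    _instances = {}
    _blocked = False  # the fixture presumably flips a switch like this

    def __call__(cls, *args, **kwargs):
        if _SingletonSketch._blocked:
            # blocked: behave like a normal class, always construct anew
            return super().__call__(*args, **kwargs)
        if cls not in _SingletonSketch._instances:
            _SingletonSketch._instances[cls] = super().__call__(*args, **kwargs)
        return _SingletonSketch._instances[cls]

    @classmethod
    def reset(mcs):
        # drop all cached instances so the next call constructs fresh ones
        mcs._instances.clear()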
|
{"hexsha": "b9eeb3a88fc6a0854872d822b27c34a4b6c45cd4", "size": 846, "ext": "py", "lang": "Python", "max_stars_repo_path": "ermaket/tests/test_singleton.py", "max_stars_repo_name": "SqrtMinusOne/ERMaket_Experiment", "max_stars_repo_head_hexsha": "c4a7b61651edd15a619d9b690e2aaeaab4de282d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ermaket/tests/test_singleton.py", "max_issues_repo_name": "SqrtMinusOne/ERMaket_Experiment", "max_issues_repo_head_hexsha": "c4a7b61651edd15a619d9b690e2aaeaab4de282d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ermaket/tests/test_singleton.py", "max_forks_repo_name": "SqrtMinusOne/ERMaket_Experiment", "max_forks_repo_head_hexsha": "c4a7b61651edd15a619d9b690e2aaeaab4de282d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.9622641509, "max_line_length": 44, "alphanum_fraction": 0.5933806147, "include": true, "reason": "import numpy", "num_tokens": 228}
|
[STATEMENT]
lemma I_def'_rl': "Der_1b \<D> \<Longrightarrow> \<forall>A p. (\<I> A p) \<longleftarrow> (\<exists>E. (\<I> E p) \<and> E \<^bold>\<preceq> A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Der_1b \<D> \<Longrightarrow> \<forall>A. contains (\<I> A) (\<lambda>p. nonEmpty (\<lambda>E. \<I> E p \<and> contains A E))
[PROOF STEP]
using MONO_def monI
[PROOF STATE]
proof (prove)
using this:
MONO ?\<phi> \<equiv> \<forall>A. contains (\<lambda>B. contains (?\<phi> B) (?\<phi> A)) (\<lambda>B. contains B A)
Der_1b \<D> \<Longrightarrow> MONO \<I>
goal (1 subgoal):
1. Der_1b \<D> \<Longrightarrow> \<forall>A. contains (\<I> A) (\<lambda>p. nonEmpty (\<lambda>E. \<I> E p \<and> contains A E))
[PROOF STEP]
by metis
|
{"llama_tokens": 297, "file": "Topological_Semantics_topo_derivative_algebra", "length": 2}
|
import os
from gtts import gTTS
import numpy as np
def to_speech(text):
tts = gTTS(text=text, lang = 'en')
return tts
def save(tts, filename):
tts.save(filename + '.mp3')
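# Example usage (hypothetical text and filename):
#     save(to_speech("hello world"), "hello_world")  # writes hello_world.mp3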
data = {'A':158, 'E':9307, 'M':1318, 'R':576, 'T':637, 'N':5707}
labels = ['A','E','M','R','T','N']
values = [158, 9307, 1318, 576, 637, 5707]
bounds = [np.sum(values[:i]) for i in range(0, 7)]  # cumulative class boundaries
print(bounds)
n = np.sum(values)
print(n)
print(values / np.sum(values, dtype=float))  # class frequencies
# Baseline check: draw labels at random in proportion to class frequency and
# compare against the ordered true labels to estimate chance-level accuracy.
p_labels = []
t_labels = []
for i in range(n):
    a = int(n * np.random.random())
    for j in range(6):
        if bounds[j] <= a < bounds[j + 1]:
            p_labels.append(labels[j])
        if bounds[j] <= i < bounds[j + 1]:
            t_labels.append(labels[j])
p_labels = np.array(p_labels)
t_labels = np.array(t_labels)
print(p_labels[p_labels == t_labels].shape[0] / float(t_labels.shape[0]))
|
{"hexsha": "994d9143b80f597bf70c1d9d9b23540a35923855", "size": 866, "ext": "py", "lang": "Python", "max_stars_repo_path": "to_speech.py", "max_stars_repo_name": "10DarkShadow01/Emotional-Intelligence-alpha-", "max_stars_repo_head_hexsha": "60542236f02c7955a786401e00a24b1a2853532d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "to_speech.py", "max_issues_repo_name": "10DarkShadow01/Emotional-Intelligence-alpha-", "max_issues_repo_head_hexsha": "60542236f02c7955a786401e00a24b1a2853532d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "to_speech.py", "max_forks_repo_name": "10DarkShadow01/Emotional-Intelligence-alpha-", "max_forks_repo_head_hexsha": "60542236f02c7955a786401e00a24b1a2853532d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.65, "max_line_length": 72, "alphanum_fraction": 0.6270207852, "include": true, "reason": "import numpy", "num_tokens": 285}
|
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import os
import numpy as np
import pandas as pd
def read_rxt_file(file_path: str) -> (np.ndarray, np.ndarray, np.ndarray, float):
if not os.path.exists(file_path):
raise FileNotFoundError(f"The file {file_path} does not exist.")
if not os.path.isfile(file_path):
raise FileNotFoundError(f"You need to supply the path to a file, not {file_path}")
    dataframe = pd.read_csv(file_path, sep=" ", skiprows=1, header=None)
with open(file_path, "r") as metadata_path:
metadata = metadata_path.readline().split("\t")
return (np.asarray(dataframe[0].values), np.asarray(dataframe[1].values),
np.asarray(dataframe[2].values), float(metadata[2]))
def read_reference_spectra(file_path: str) -> (np.ndarray, np.ndarray, float):
if not os.path.exists(file_path):
raise FileNotFoundError(f"The file {file_path} does not exist.")
if not os.path.isfile(file_path):
raise FileNotFoundError(f"You need to supply the path to a file, not {file_path}")
data = np.load(file_path)
return data["mua_mean"], data["mus_mean"], data["g"]
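# Hedged usage sketch (file paths are hypothetical; the three columns of the
# .rxt file are returned in order, plus the third metadata field as a float):
#     col0, col1, col2, meta_val = read_rxt_file("measurement.rxt")
#     mua_mean, mus_mean, g = read_reference_spectra("reference_spectra.npz")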
|
{"hexsha": "50bac01b0515507a676e5b3ac3d4cb506185bb02", "size": 1254, "ext": "py", "lang": "Python", "max_stars_repo_path": "simpa_tests/manual_tests/test_with_experimental_measurements/utils.py", "max_stars_repo_name": "IMSY-DKFZ/simpa", "max_stars_repo_head_hexsha": "b8bddcf43a4bff2564f0ec208dc511b82e49bfb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-14T15:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T02:34:25.000Z", "max_issues_repo_path": "simpa_tests/manual_tests/test_with_experimental_measurements/utils.py", "max_issues_repo_name": "jgroehl/simpa", "max_issues_repo_head_hexsha": "e56f0802e5a8555ee8bb139dd4f776025e7e9267", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-03-18T07:19:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:15:19.000Z", "max_forks_repo_path": "simpa_tests/manual_tests/test_with_experimental_measurements/utils.py", "max_forks_repo_name": "IMSY-DKFZ/simpa", "max_forks_repo_head_hexsha": "b8bddcf43a4bff2564f0ec208dc511b82e49bfb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0, "max_line_length": 90, "alphanum_fraction": 0.7025518341, "include": true, "reason": "import numpy", "num_tokens": 307}
|
#! /usr/bin/python
# -*- coding: utf8 -*-
import struct
import os
import StringIO
import tempfile
import numpy as np
#from scipy.signal import butter
from eegpy.formats.iobase import (MemoryMappedBinaryDataFile,
EEGfiltered)
fmtF32header = "= 21p 7p i i H i d d d 13p i H f f 931p"
# Explanation
# =: native byte order, standard size and alignment
# 21p: format name
# 7p: format version
# i: start byte of the channel information
# i: start byte of the data
# H: number of channels (unsigned short)
# i: number of data points
# d d d: sampling rate, start time, end time (times as doubles; the integral
#        part is the number of days since 12/30/1899, the fractional part is
#        the fraction of the day that has elapsed)
# 13p: reserved
# i: datatype
# H: samplesize
# f: minsample
# f: maxsample
# 931p: reserved2, to pad the header to 1024 bytes
fmtPosOfNumDatapoints = "= 21p 7p i i H "
# Marks the position of numDatapoints in the header, so that it can be
# rewritten afterwards.
fmtF32channelinfo = "= 256p 32p 224p"
# 256p: channel name
# 32p: unit of the measured quantity (e.g. volts)
# 224p: reserved, to pad the channel info to 512 bytes
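# Sanity check: struct.calcsize(fmtF32header) == 1024 and
# struct.calcsize(fmtF32channelinfo) == 512, matching the padded sizes above.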
from _f32_old import F32Reader, F32Writer, F32WriterAdvanced
class F32(MemoryMappedBinaryDataFile):
"""Access to F32-datafiles
This is the new class used for both read and write access.
It uses numpy.memmap for rapid read/write"""
header = {"formatName":"Bonn F32 Data Format",
"formatVer":"1.0.00",
"sbChinfo":1024, # Because header has 1024 bytes
"sbData":None, # Where the binary data start
"numChannels":0,
"numDatapoints":0,
"samplRate":1.0,
"tStart":0.0,
"tEnd":0.0,
"reservedString":"",
"datatype":4096,
"samplesize":17533,
"minsample":0.0,
"maxsample":0.0,
"reserved2":""}
reserved3 = ""
dataToWrite = None
headerWritten = False
_mm = None #memmap-object
def __init__(self,filename,mode="r+",cNames=None,shape=None, Fs = 1000.0):
assert mode in ["r","r+","w+"], "Unsupported mode"
self._fn = filename
try:
self.f = open(self._fn, mode)
except:
            raise IOError, "The given file could not be opened!"
if mode in ["r","r+"]:
s = self.f.read(struct.calcsize(fmtF32header))
(self.header["formatName"],self.header["formatVer"],self.header["sbChinfo"],self.header["sbData"],self.header["numChannels"],self.header["numDatapoints"],self.header["samplRate"],self.header["tStart"], self.header["tEnd"], self.header["reservedString"], self.header["datatype"], self.header["samplesize"], self.header["minsample"], self.header["maxsample"],self.header["reserved2"]) = struct.unpack(fmtF32header,s)
self.numChannels=self.header["numChannels"]
#print self.numChannels
if self.numChannels > 500 or self.numChannels < 1:
raise Exception, "The chosen file is either in an unrecognized format or corrupt!"
if self.header["sbData"] != (struct.calcsize(fmtF32header)+self.numChannels*struct.calcsize(fmtF32channelinfo)):
#print "Der Wert für das Anfangsbyte der Daten war falsch gesetzt. Korrigiere..."
self.header["sbData"] = (struct.calcsize(fmtF32header)+self.numChannels*struct.calcsize(fmtF32channelinfo))
self.fmtSample = "= %if" % self.numChannels
self._channel_names = []
self._channel_units = []
for i in range(0,self.numChannels):
s = self.f.read(struct.calcsize(fmtF32channelinfo))
(chName, chUnit, reserved3) = struct.unpack(fmtF32channelinfo,s)
self._channel_names.append(chName)
self._channel_units.append(chUnit)
self._shape = (self.header["numDatapoints"],self.header["numChannels"])
            #FIXED: closing self.f before opening memmap.
self.f.close()
shape_from_header = (self.header["numDatapoints"], self.header["numChannels"])
MemoryMappedBinaryDataFile.__init__(self, filename, mode, shape_from_header,
data_offset = self.header["sbData"],
dtype=np.float32, Fs = self.header["samplRate"],
channel_names=cNames)
elif mode=="w+":
assert shape != None, "Shape must be given."
assert len(shape) == 2, "Only 2d is possible"
if cNames != None:
assert len(cNames)>0, "List of channel names must be longer than 0!"
assert shape[1] == len(cNames)
self._channel_names = [str(x) for x in cNames]
self.header["sbData"] = int(self.header["sbChinfo"]+shape[1]*struct.calcsize(fmtF32channelinfo))
self.header["numChannels"] = shape[1]
self.header["numDatapoints"] = shape[0]
self.header["samplRate"] = float(Fs)
self.f.close()
MemoryMappedBinaryDataFile.__init__(self, filename, mode, shape,
data_offset = self.header["sbData"],
dtype=np.float32, Fs = Fs,
channel_names=cNames)
del self._mm
self._mm = None
self.f = open(self._fn, "r+")
self.writeHeader()
self.f.close()
self._reopen_memmap()
def __str__(self):
rv = StringIO.StringIO()
ks_header = self.header.keys()
ks_header.sort()
print >>rv, "eegpy F32-object\n----------------\n"
print >>rv, "Header-Information:"
for k in ks_header:
print >>rv, "%s: %s" %(str(k),str(self.header[k]))
print >>rv, "Channels:"
for i,c in enumerate(self.channel_names):
print >>rv, "%3i,%s" % (i,c)
return rv.getvalue()
def writeHeader(self):
"""Writing the header"""
        #Preparation: set channel names if they are empty
if self._channel_names == None:
self._channel_names = ["%i"%(i+1) for i in range(self._shape[1])]
self.numChannels = len(self._channel_names)
        #Preparation: set channel units if they are empty
if self._channel_units == None:
self._channel_units = ["" for i in self._channel_names]
self.f.seek(0)
s=struct.pack(fmtF32header, self.header["formatName"],self.header["formatVer"],int(self.header["sbChinfo"]),self.header["sbData"],self.header["numChannels"],self.header["numDatapoints"],self.header["samplRate"],self.header["tStart"],self.header["tEnd"],self.header["reservedString"], self.header["datatype"], self.header["samplesize"], self.header["minsample"], self.header["maxsample"],self.header["reserved2"])
self.f.write(s)
for i in range(len(self._channel_names)):
#print len(self._channel_names), i
s=struct.pack(fmtF32channelinfo, self._channel_names[i], self._channel_units[i], self.reserved3)
self.f.write(s)
if(not (self.f.tell()==self.header["sbData"])):
raise Exception, "Irgendwie ist die Position falsch, %i != %i" % (self.f.tell(),self.header["sbData"])
self.headerWritten = True
def set_channel_names(self, cns):
assert len(cns) == self.shape[1], "List of channelnames must contain exactly %i elements" % self.shape[1]
assert self._mode in ["r+", "w+"], "Cannot set channel_names: F32 is read-only"
try:
del self._mm
self._mm = None
except Exception:
pass
self._channel_names = [str(x) for x in cns]
self.f = open(self._fn, "r+")
self.writeHeader()
self.f.close()
self._mm = np.memmap(self._fn,dtype=np.float32,offset=self.header["sbData"],shape=self._shape,mode="r+")
class F32filtered(EEGfiltered, F32):
"""F32 read-only object with included frequency-filteres"""
def __init__(self,filename,filter_function, **kw_args):
F32.__init__(self,filename,"r+", **kw_args)
EEGfiltered.__init__(self, filter_function)
class F32ReaderFiltered(F32Reader):
"""F32 read-only object with included frequency-filteres.
Uses old F32Reader, without memmap."""
def __init__(self,filename,filter_function):
F32Reader.__init__(self,filename)
self._filter_function = filter_function
@property
def ff(self):
return self._filter_function
def getData(self,start,length,stride=1,channelList = None):
"""Calls method of super-class, filters the return value"""
return self.ff(F32Reader.getData(self,start,length,stride,channelList))
class F32filteredWithCache(F32):
"""F32 read-only object with included frequency-filteres"""
def __init__(self,filename,btype='lp',fl=None,fh=None,border=2,filter_windowed=False,dirname=None):
from eegpy.ui.eegpylab import freqfilt_eeg
fd, tmpfn = tempfile.mkstemp(dir=dirname)
freqfilt_eeg(filename,tmpfn,btype,fl,fh,border,filter_windowed)
F32.__init__(self,tmpfn)
def __del__(self):
self.close()
try:
os.unlink(self._fn)
except Exception,e:
print "Cannot remove file %s"%self._fn, e
__all__ = ["F32", "F32filtered", "F32Reader", "F32ReaderFiltered", "F32filteredWithCache", "F32Writer", "F32WriterAdvanced"]
|
{"hexsha": "698ae822015eca40ba0b4257f4199d2b7cb0b707", "size": 9607, "ext": "py", "lang": "Python", "max_stars_repo_path": "eegpy/formats/f32.py", "max_stars_repo_name": "thorstenkranz/eegpy", "max_stars_repo_head_hexsha": "0f9461456999874abbb774896ca832eb27740a9d", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2015-05-12T10:42:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T02:08:03.000Z", "max_issues_repo_path": "eegpy/formats/f32.py", "max_issues_repo_name": "thorstenkranz/eegpy", "max_issues_repo_head_hexsha": "0f9461456999874abbb774896ca832eb27740a9d", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-11-19T11:36:30.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-21T05:00:09.000Z", "max_forks_repo_path": "eegpy/formats/f32.py", "max_forks_repo_name": "thorstenkranz/eegpy", "max_forks_repo_head_hexsha": "0f9461456999874abbb774896ca832eb27740a9d", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-09-21T22:41:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-28T13:55:19.000Z", "avg_line_length": 44.8925233645, "max_line_length": 426, "alphanum_fraction": 0.6149682523, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2441}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 19 18:05:04 2017
@author: David Jarron
"""
# !/usr/bin/python
import cv2
import numpy as np
import matplotlib.pyplot as plt
#import hough_line_linker as hll
img = cv2.imread('IMG_3380.JPG')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
height, width = gray.shape
# can also use IMREAD_COLOR or IMREAD_UNCHANGED
#imshow using cv2 basic
#cv2.imshow('image',img)
#cv2.waitKey()
#plot using matplot lib
#plt.imshow(img, cmap='gray', interpolation = 'bicubic')
#plt.plot([500,1000],[800,1000],'c', linewidth = 3)
#plt.show()
#save image to file using cv2
#cv2.imwrite('testim.png',img)
# canny edge detection of image
edges = cv2.Canny(gray, 400, 50)
#edges = cv2.Canny(gray, 500, 50)
#kern = np.ones((2,2),np.uint8)
#
#edges = cv2.morphologyEx(edges, cv2.MORPH_OPEN,kern)
h2 = int(height / 2)
w2 = int(width / 2)
cv2.namedWindow("edges", cv2.WINDOW_NORMAL)
imS = cv2.resize(edges, (width, height))
cv2.imshow("edges", imS)
cv2.waitKey(10)
cv2.destroyAllWindows()
cv2.imwrite('edges.jpg', edges)
# Standard Hough transform: lines are returned in polar form (rho, theta);
# each is converted below to two distant endpoints along the line for drawing
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
for line in lines:
rho = line[0][0]
theta = line[0][1]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
cv2.imwrite('houghlines3.jpg',img)
# Probabilistic Hough Transform (pass minLineLength/maxLineGap by keyword;
# positionally they would bind to the optional output `lines` parameter)
plines = cv2.HoughLinesP(edges, 0.5, np.pi / 360, 30, minLineLength=100, maxLineGap=5)
img1 = cv2.imread('edges.jpg')
for pline in plines:
x1 = pline[0][0]
y1 = pline[0][1]
x2 = pline[0][2]
y2 = pline[0][3]
cv2.line(img1,(x1,y1),(x2,y2),(0,0,255),2)
cv2.imwrite('phough_1.jpg',img1)
# Two conditions work well:
# 1) use a noisy edge image and adjust the Hough parameters to be robust to
#    that noise, using params (edges, 0.5, pi/360, 30, 100, 25-50)
# OR
# 2) use a cleaner edge image, so that a smaller maximum line gap can be chosen;
#    this seems to let the Hough transform detect the majority of the edges in
#    the edge image. (edges, 0.5, np.pi/360, 30, 100, 5) are the utilized params.
# Robust line fitting on the edge pixels (cv2.fitLine with an L1 cost is an
# M-estimator fit rather than true RANSAC, but serves the same purpose here)
ex, ey = edges.shape  # ex = number of rows (y), ey = number of columns (x)
dim = ex * ey
edge_list = np.reshape(edges, [dim, 1])
# for a row-major flatten of shape (ex, ey): yi = i // ey, xi = i - yi*ey
yi = np.arange(dim) // ey
xi = np.arange(dim) - yi * ey
edge_w_coords = np.zeros([dim, 3])
edge_w_coords[:, 0] = edge_list.ravel()
edge_w_coords[:, 1] = yi
edge_w_coords[:, 2] = xi
edges_only = edge_w_coords[~(edge_w_coords[:, 0] == 0)]
# fitLine expects an Nx2 float array of (x, y) points
edge_coords_fin = np.float32(edges_only[:, [2, 1]])
[vx, vy, x, y] = cv2.fitLine(edge_coords_fin, cv2.DIST_L1, 0, 0.01, 0.01)
# (vx, vy) is a unit direction vector and (x, y) a point on the fitted line;
# extend in both directions to obtain drawable endpoints
pt1 = (int(x - 1000 * vx), int(y - 1000 * vy))
pt2 = (int(x + 1000 * vx), int(y + 1000 * vy))
cv2.line(img1, pt1, pt2, (0, 255, 0), 2)
cv2.imwrite('ransactest_1.jpg', img1)
|
{"hexsha": "9578e0025ee2ec5268d1fd092a3474b3145f85be", "size": 2870, "ext": "py", "lang": "Python", "max_stars_repo_path": "opencvtestfile.py", "max_stars_repo_name": "dmjarron/2D_3D", "max_stars_repo_head_hexsha": "5da8760b2df751f19c04fc2c1eeeeb8e6ad73e67", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "opencvtestfile.py", "max_issues_repo_name": "dmjarron/2D_3D", "max_issues_repo_head_hexsha": "5da8760b2df751f19c04fc2c1eeeeb8e6ad73e67", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opencvtestfile.py", "max_forks_repo_name": "dmjarron/2D_3D", "max_forks_repo_head_hexsha": "5da8760b2df751f19c04fc2c1eeeeb8e6ad73e67", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8558558559, "max_line_length": 103, "alphanum_fraction": 0.6459930314, "include": true, "reason": "import numpy", "num_tokens": 978}
|
import geopandas as gpd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
stations_list = ['SJOSC120',
'SJOSC119',
'SJOSC118',
'SJOSC117',
'SJOSC116',
'SJOSC115',
'SJO SC29',
'SJO SC28',
'SJO SC27',
'SJO SC26',
'SJO SC25',
'SJO SC24',
'SJO SC23',
'SJO SC22',
'SJO SC21',
'SJO SC20',
'SJO SC19',
'SJO SC18',
'SJO SC17',
'SJO SC16',
'SJO SC15',
'SJO SC14',
'SJO SC13',
'SJO SC12',
'SJO SC11',
'SJO SC10',
'SJO SC9',
'SJO SC8',
'SJO SC7',
'SJO SC1',
'SJO SC2',
'SJO SC3',
'SJO SC4',
'SJO SC5',
'SJO SC6',
'SJO SC30',
'SJO SC31',
'SJO SC32',
'SJO SC33',
'SJO SC35',
'SJO SC36',
'SJO SC37',
'SJO SC38',
'SJO SC39',
'SJOSC100',
'SJOSC101',
'SJOSC102',
'SJOSC103',
'SJOSC104',
'SJOSC105',
'SJOSC106',
'SJOSC107',
'SJOSC108',
'SJOSC109',
'SJOSC110',
'SJOSC111',
'SJOSC112',
'SJOSC113',
'SJOSC114',
' ABN55',
' ABN54',
' ABN53',
' ABN52',
' ABN51',
' ABN50',
' ABN49',
' ABN48',
' ABN47',
' ABN46',
' ABN45',
' ABN44',
' ABN43',
' ABN42',
' ABN41',
' ABN40',
' ABN39',
' ABN38',
' ABN37',
' ABN36',
' ABN35',
' ABN34',
' ABN33',
' ABN32',
' ABN31',
' ABN30',
' ABN29',
' ABN28',
' ABN27',
' ABN26',
' ABN25',
' ABN24',
' ABN23',
' ABN22',
' ABN21',
' ABN20',
' ABN19',
' ABN18',
' ABN17',
' ABN16',
' ABN15',
' ABN14',
' ABN13',
' ABN12',
' ABN11',
' ABN10',
' ABN 9',
' ABN 8',
' ABN 7',
' ABN 6',
' ABN 5',
' ABN 4',
' ABN 3',
' ABN 2',
' ABN 1']
if __name__ == "__main__":
gdf = gpd.read_file("../data/geojson/calif_nev_ncei_grav.geojson")
    # Boolean-mask filtering keeps the result a GeoDataFrame; concatenating
    # the individual Series from iterrows() would stack them into one long Series.
    gdf_subset = gdf[gdf["station_id"].isin(stations_list)]
    fig, ax = plt.subplots()
    ax.scatter(gdf_subset["latitude"], gdf_subset["isostatic_anom"])
    ax.set_xlabel("latitude")
    ax.set_ylabel("isostatic anomaly")
    fig.savefig("task2.png")
|
{"hexsha": "66ef2ddb5eb06a2d490516b59104f5e0e03b0791", "size": 3800, "ext": "py", "lang": "Python", "max_stars_repo_path": "task_scripts/task2.py", "max_stars_repo_name": "collincr/ini_team_13", "max_stars_repo_head_hexsha": "dca3d88fc31515ec0127cfd03963ce0b5ed735d8", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-28T20:43:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-20T16:59:24.000Z", "max_issues_repo_path": "task_scripts/task2.py", "max_issues_repo_name": "collincr/ini_team_13", "max_issues_repo_head_hexsha": "dca3d88fc31515ec0127cfd03963ce0b5ed735d8", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "task_scripts/task2.py", "max_forks_repo_name": "collincr/ini_team_13", "max_forks_repo_head_hexsha": "dca3d88fc31515ec0127cfd03963ce0b5ed735d8", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3582089552, "max_line_length": 96, "alphanum_fraction": 0.2752631579, "include": true, "reason": "import numpy", "num_tokens": 936}
|
"""
Modified From https://github.com/OpenNMT/OpenNMT-tf/blob/r1/examples/library/minimal_transformer_training.py
MIT License
Copyright (c) 2017-present The OpenNMT Authors.
This example demonstrates how to train a standard Transformer model using
OpenNMT-tf as a library in about 200 lines of code. While relatively short,
this example contains some advanced concepts such as dataset bucketing and
prefetching, token-based batching, gradients accumulation, beam search, etc.
Currently, the beam search part is not easily customizable. This is expected to
be improved for TensorFlow 2.0 which is eager first.
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Use opennmt-tf-1.25.1
import argparse
import copy
from datetime import datetime
import numpy as np
import os
import sys
import tensorflow as tf
import opennmt as onmt
from opennmt import constants
from opennmt.utils import misc
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding
from examples.tensorflow.decoding.utils.bleu_score import bleu_score
from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding
from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding
from examples.tensorflow.decoder.utils.common import DecodingArgumentNew
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument
from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument
from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt
from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt
NUM_HEADS = 8
NUM_LAYERS = 6
HIDDEN_UNITS = 512
SIZE_PER_HEAD = 64
FFN_INNER_DIM = 2048
encoder = onmt.encoders.SelfAttentionEncoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
decoder = onmt.decoders.SelfAttentionDecoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
def translate(args_dict):
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
model_dir = args_dict["model_dir"]
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
sampling_topk = args_dict['sampling_topk']
sampling_topp = args_dict['sampling_topp']
tf_datatype = tf.float32
max_ite = args_dict['max_iteration']
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
# Define the "base" Transformer model.
source_inputter = onmt.inputters.WordEmbedder("source_vocabulary", embedding_size=512, dtype=tf_datatype)
target_inputter = onmt.inputters.WordEmbedder("target_vocabulary", embedding_size=512, dtype=tf_datatype)
inputter = onmt.inputters.ExampleInputter(source_inputter, target_inputter)
inputter.initialize({
"source_vocabulary": args_dict["source_vocabulary"],
"target_vocabulary": args_dict["target_vocabulary"]
})
mode = tf.estimator.ModeKeys.PREDICT
np.random.seed(1)
tf.set_random_seed(1)
# Create the inference dataset.
dataset = inputter.make_inference_dataset(source_file, batch_size)
iterator = dataset.make_initializable_iterator()
source = iterator.get_next()
encoder_args = TransformerArgument(beam_width=1,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
remove_padding=True,
allow_gemm_test=False)
# Encode the source.
with tf.variable_scope("transformer/encoder"):
source_embedding = source_inputter.make_inputs(source)
source_embedding = tf.cast(source_embedding, tf_datatype)
        # Using onmt fp16 for encoder.encode leads to a significant accuracy
        # drop, so we rewrite the encoder ourselves.
# memory, _, _ = encoder.encode(source_embedding, source["length"], mode=mode)
memory = tf_encoder_opennmt(source_embedding, encoder_args, source["length"])
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = tf.cast(v, tf_datatype)
ft_encoder_result = ft_encoder_opennmt(inputs=source_embedding,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=source["length"])
# Generate the target.
with tf.variable_scope("transformer/decoder", reuse=tf.AUTO_REUSE):
target_inputter.build()
batch_size = tf.shape(memory)[0]
start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
end_token = constants.END_OF_SENTENCE_ID
target_embedding = tf.cast(target_inputter.embedding, tf_datatype)
target_ids, _, target_length, _ = decoder.dynamic_decode_and_search(
target_embedding,
start_tokens,
end_token,
vocab_size=target_inputter.vocabulary_size,
beam_width=beam_size,
memory=memory,
memory_sequence_length=source["length"],
maximum_iterations=max_seq_len)
target_vocab_rev = target_inputter.vocabulary_lookup_reverse()
target_tokens = target_vocab_rev.lookup(tf.cast(target_ids, tf.int64))
decoder_args = TransformerArgument(beam_width=beam_size,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
kernel_init_range=0.00,
bias_init_range=0.00)
decoder_args_2 = copy.deepcopy(decoder_args) # for beam search
decoder_args_2.__dict__ = copy.deepcopy(decoder_args.__dict__)
decoder_args_2.beam_width = 1 # for sampling
ft_decoder_beamsearch_args = DecodingBeamsearchArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args,
beam_search_diversity_rate)
ft_decoder_sampling_args = DecodingSamplingArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args_2,
sampling_topk,
sampling_topp)
decoding_beamsearch_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
beam_search_diversity_rate,
0,
0.0,
decoder_args)
decoding_sampling_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
0.0,
sampling_topk,
sampling_topp,
decoder_args_2)
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ft_target_ids, ft_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_beamsearch_args)
ft_target_tokens = target_vocab_rev.lookup(tf.cast(ft_target_ids, tf.int64))
ft_sampling_target_ids, ft_sampling_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_sampling_args)
ft_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_sampling_target_ids, tf.int64))
# ### TF Sampling Decoding ###
tf_sampling_target_ids, tf_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=0)
# tf_sampling_target_tokens: [batch_size, seq_len]
tf_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(tf_sampling_target_ids, tf.int64))
    # ### end of TF Sampling Decoding ###
### OP BeamSearch Decoder ###
ft_decoder_beamsearch_target_ids, ft_decoder_beamsearch_target_length, _, _, _ = tf_beamsearch_decoding(memory,
source["length"],
target_embedding,
ft_decoder_beamsearch_args,
decoder_type=1)
# ft_decoder_beamsearch_target_tokens: [batch_size, beam_width, seq_len]
ft_decoder_beamsearch_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_beamsearch_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
### OP Sampling Decoder ###
ft_decoder_sampling_target_ids, ft_decoder_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=1)
ft_decoder_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_sampling_target_ids, tf.int64))
    ### end of OP Sampling Decoder ###
class TranslationResult(object):
def __init__(self, token_op, length_op, name):
self.token_op = token_op
self.length_op = length_op
self.name = name
self.file_name = name + ".txt"
self.token_list = []
self.length_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.bleu_score = None
translation_result_list = []
if time_args != "":
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling-for-warmup"))
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult(
target_tokens, target_length, "tf-decoding-beamsearch"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_beamsearch_target_tokens, ft_decoder_beamsearch_target_length, "ft-decoder-beamsearch"))
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult(
ft_target_tokens, ft_target_length, "ft-decoding-beamsearch"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling"))
if time_args.find("4") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_sampling_target_tokens, ft_decoder_sampling_target_length, "ft-decoder-sampling"))
if time_args.find("5") != -1:
translation_result_list.append(TranslationResult(
ft_sampling_target_tokens, ft_sampling_target_length, "ft-decoding-sampling"))
# Iterates on the dataset.
float_checkpoint_path = tf.train.latest_checkpoint(model_dir)
half_checkpoint_path = tf.train.latest_checkpoint(model_dir + "_fp16")
float_var_list = []
half_var_list = []
for var in tf.global_variables():
if var.dtype.base_dtype == tf.float32:
float_var_list.append(var)
elif var.dtype.base_dtype == tf.float16:
half_var_list.append(var)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
for i in range(len(translation_result_list)):
with tf.Session(config=config) as sess:
if(len(float_var_list) > 0):
float_saver = tf.train.Saver(float_var_list)
float_saver.restore(sess, float_checkpoint_path)
if(len(half_var_list) > 0):
half_saver = tf.train.Saver(half_var_list)
half_saver.restore(sess, half_checkpoint_path)
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
t1 = datetime.now()
while True:
try:
batch_tokens, batch_length = sess.run([translation_result_list[i].token_op,
translation_result_list[i].length_op])
for tokens, length in zip(batch_tokens, batch_length):
# misc.print_bytes(b" ".join(tokens[0][:length[0] - 1]))
if translation_result_list[i].name.find("beamsearch") != -1:
translation_result_list[i].token_list.append(
b" ".join(tokens[0][:length[0] - 1]).decode("UTF-8"))
else:
translation_result_list[i].token_list.append(b" ".join(tokens[:length - 1]).decode("UTF-8"))
translation_result_list[i].batch_num += 1
if translation_result_list[i].name == "tf-decoding-sampling-for-warmup" and translation_result_list[i].batch_num > 20:
break
if translation_result_list[i].batch_num >= max_ite:
break
except tf.errors.OutOfRangeError:
break
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
translation_result_list[i].execution_time = time_sum
with open(translation_result_list[i].file_name, "w") as file_b:
for s in translation_result_list[i].token_list:
file_b.write(s)
file_b.write("\n")
ref_file_path = "./.ref_file.txt"
os.system("head -n %d %s > %s" % (len(translation_result_list[i].token_list), tgt_file, ref_file_path))
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].file_name, ref_file_path)
os.system("rm {}".format(ref_file_path))
for t in translation_result_list:
if t.name == "tf-decoding-sampling-for-warmup":
continue
print("[INFO] {} translates {} batches taking {:.2f} sec to translate {} tokens, BLEU score: {:.2f}, {:.0f} tokens/sec.".format(
t.name, t.batch_num, t.execution_time, t.bleu_score.sys_len, t.bleu_score.score, t.bleu_score.sys_len / t.execution_time))
return translation_result_list
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
help='max sequence length (default: 200)')
parser.add_argument("--source", default="../examples/tensorflow/decoding/utils/translation/test.en",
help="Path to the source file.")
parser.add_argument("--target", default="../examples/tensorflow/decoding/utils/translation/test.de",
help="Path to the target file.")
parser.add_argument("--source_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the source vocabulary.")
parser.add_argument("--target_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the target vocabulary.")
parser.add_argument("--model_dir", default="../translation/ckpt",
help="Directory where checkpoint are written.")
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
                        help='''
                        Test the runtime of the selected implementations (default: '' = test none);
                        '': test none
                        '0': test tf_decoding_beamsearch
                        '1': test op_decoder_beamsearch
                        '2': test op_decoding_beamsearch
                        '3': test tf_decoding_sampling
                        '4': test op_decoder_sampling
                        '5': test op_decoding_sampling
                        e.g., to test op_decoder_beamsearch and op_decoding_sampling,
                        use -time '15' ''')
    parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search (default: 0.0). When the diversity rate is 0, it is equivalent to naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16'])
    parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
                        help='Maximum number of iterations for translation; default is 100000 (large enough to run the whole test set).')
args = parser.parse_args()
translate(vars(args))
# example script
# python ../examples/tensorflow/decoding/translate_example.py --source ../examples/tensorflow/decoding/utils/translation/test.en --target ../examples/tensorflow/decoding/utils/translation/test.de --source_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --target_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --model_dir ../translation/ckpt/ -time 02
if __name__ == "__main__":
main()
|
{"hexsha": "5e2f1137ea89458380c94e9002f795712109dcc6", "size": 22291, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/tensorflow/decoding/translate_example.py", "max_stars_repo_name": "hieuhoang/FasterTransformer", "max_stars_repo_head_hexsha": "440695ccac874574b1d2e1121788e8fa674b4381", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/tensorflow/decoding/translate_example.py", "max_issues_repo_name": "hieuhoang/FasterTransformer", "max_issues_repo_head_hexsha": "440695ccac874574b1d2e1121788e8fa674b4381", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/tensorflow/decoding/translate_example.py", "max_forks_repo_name": "hieuhoang/FasterTransformer", "max_forks_repo_head_hexsha": "440695ccac874574b1d2e1121788e8fa674b4381", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.0738095238, "max_line_length": 405, "alphanum_fraction": 0.571755417, "include": true, "reason": "import numpy", "num_tokens": 4156}
|
import numpy as np
import pandas as pd
import argparse
def load_csv(data_dir, filename):
"""
    Loads a pandas DataFrame from `filename` (a CSV file) inside the `data_dir` folder.
    Portable across operating systems thanks to the pathlib module.
"""
import pandas as pd
from pathlib import Path
return pd.read_csv(Path(data_dir).joinpath(filename))
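# Hedged usage sketch (folder and file names below are illustrative only):
# df = load_csv('data', 'labeled_tweets.csv')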
def normalize_document(doc):
from nltk import WordPunctTokenizer
from nltk.corpus import stopwords
import re
wpt = WordPunctTokenizer()
stop_words = set(stopwords.words('portuguese'))
# lowercase and remove special characters\whitespace
    doc = re.sub(r'[^a-zA-Z\s]', '', doc, flags=re.I | re.A)
doc = doc.lower()
doc = doc.strip()
# tokenize document
tokens = wpt.tokenize(doc)
# filter stopwords out of document
filtered_tokens = [token for token in tokens if token not in stop_words]
# re-create document from filtered tokens
doc = ' '.join(filtered_tokens)
return doc
normalize_corpus = np.vectorize(normalize_document)
def normalize_series(series):
return normalize_corpus(series.to_numpy())
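# Hedged usage sketch, assuming the NLTK 'stopwords' corpus has been downloaded
# (e.g. via nltk.download('stopwords')); the sample tweets are illustrative:
# s = pd.Series(['Mais um tweet de teste!!', 'Olá, mundo'])
# print(normalize_series(s))  # roughly -> ['tweet teste' 'ol mundo']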
def classifier(df, text, target_label):
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
train_corpus, test_corpus, train_label_names, test_label_names = train_test_split(
df[text], df[target_label],
test_size=0.2, random_state=42
)
clf = Pipeline(
steps=[
('TF-IDF Vectorizer', TfidfVectorizer(use_idf=True, min_df=0.0, max_df=1.0)),
('RF Classifier', RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=42))
])
clf.fit(train_corpus, train_label_names)
print(f"model accuracy: {clf.score(test_corpus, test_label_names):.3f}")
print(classification_report(test_label_names, clf.predict(test_corpus)))
return clf
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='This script will use your data cleaned by data_prep.py and classify the tweets', add_help=True)
parser.add_argument(
'-i', '--input', help="string file path for the input labeled CSV file")
parser.add_argument(
'-l', '--label', help="string with the label column that should be used for training the classifier, i.e. the 0/1 column")
parser.add_argument(
'-d', '--data', help="string file path for the data CSV file that you want to generate predictions")
parser.add_argument(
'-o', '--output', help="string file path for the output CSV file with predictions")
args = parser.parse_args()
df = pd.read_csv(args.input)
df['text'] = normalize_series(df['text'])
clf = classifier(df, 'text', args.label)
full_df = pd.read_csv(args.data,
index_col=0,
names=['id', 'created_at', 'text', 'user_id', 'place', 'user_place', 'country', 'coordinates', 'undefined_col', 'undefined_col2', 'undefined_col3'])
# Clean NAs in text
print(f"We have {full_df.text.isna().sum()} NAs")
full_df.dropna(subset=['text'], inplace=True)
full_df['cleaned_text'] = normalize_series(full_df['text'])
full_df['predicted'] = clf.predict(full_df['cleaned_text'])
full_df['date'] = pd.to_datetime(full_df['created_at'])
full_df.groupby(full_df['date'].dt.date).agg(
{'predicted': 'sum'}).to_csv(args.output)
|
{"hexsha": "e8c19df01ed6e09ad1639d9045308af6b66156b1", "size": 3557, "ext": "py", "lang": "Python", "max_stars_repo_path": "tweet_classifier.py", "max_stars_repo_name": "codatmo/Brazil-Tweet-Classifier", "max_stars_repo_head_hexsha": "5aab126323df748e15189dd67f7e39a6ed3e678d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-21T22:18:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T22:18:13.000Z", "max_issues_repo_path": "tweet_classifier.py", "max_issues_repo_name": "codatmo/Brazil-Tweet-Classifier", "max_issues_repo_head_hexsha": "5aab126323df748e15189dd67f7e39a6ed3e678d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-21T10:22:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-21T21:10:18.000Z", "max_forks_repo_path": "tweet_classifier.py", "max_forks_repo_name": "codatmo/Brazil-Tweet-Classifier", "max_forks_repo_head_hexsha": "5aab126323df748e15189dd67f7e39a6ed3e678d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8850574713, "max_line_length": 172, "alphanum_fraction": 0.688782682, "include": true, "reason": "import numpy", "num_tokens": 825}
|
No push; let's try to reduce deprecated support in this.
Keep NEEDS just in case.
--- boost/system/config.hpp.orig 2016-09-21 17:33:27.000000000 +0300
+++ boost/system/config.hpp
@@ -10,6 +10,12 @@
#ifndef BOOST_SYSTEM_CONFIG_HPP
#define BOOST_SYSTEM_CONFIG_HPP
+#if defined(__DragonFly__) && !defined(BOOST_SYSTEM_NEEDS_DEPRECATED)
+#ifndef BOOST_SYSTEM_NO_DEPRECATED
+#define BOOST_SYSTEM_NO_DEPRECATED
+#endif
+#endif
+
#include <boost/config.hpp>
#include <boost/predef/platform.h>
#include <boost/system/api_config.hpp> // for BOOST_POSIX_API or BOOST_WINDOWS_API
|
{"hexsha": "690b7684d710d1fc7089efdf003db66f4a00afa4", "size": 595, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ports/devel/boost-libs/dragonfly/patch-boost_system_config.hpp", "max_stars_repo_name": "liweitianux/DeltaPorts", "max_stars_repo_head_hexsha": "b907de0ceb9c0e46ae8961896e97b361aa7c62c0", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 31.0, "max_stars_repo_stars_event_min_datetime": "2015-02-06T17:06:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T19:53:28.000Z", "max_issues_repo_path": "ports/devel/boost-libs/dragonfly/patch-boost_system_config.hpp", "max_issues_repo_name": "liweitianux/DeltaPorts", "max_issues_repo_head_hexsha": "b907de0ceb9c0e46ae8961896e97b361aa7c62c0", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 236.0, "max_issues_repo_issues_event_min_datetime": "2015-06-29T19:51:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-16T22:46:38.000Z", "max_forks_repo_path": "ports/devel/boost-libs/dragonfly/patch-boost_system_config.hpp", "max_forks_repo_name": "liweitianux/DeltaPorts", "max_forks_repo_head_hexsha": "b907de0ceb9c0e46ae8961896e97b361aa7c62c0", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 52.0, "max_forks_repo_forks_event_min_datetime": "2015-02-06T17:05:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-21T12:13:06.000Z", "avg_line_length": 31.3157894737, "max_line_length": 84, "alphanum_fraction": 0.7512605042, "num_tokens": 151}
|
"""
Print out a length distribution for used WAV files.
TODO: This module is not updated to the current TXT to CSV changes.
"""
import os
import pickle
import sys
from multiprocessing import Pool, Lock, cpu_count
import numpy as np
from scipy.io import wavfile
from tqdm import tqdm
from asr.params import MIN_EXAMPLE_LENGTH, MAX_EXAMPLE_LENGTH
from asr.util.matplotlib_helper import pyplot_display
__DATASETS_PATH = '../datasets/speech_data'
def calculate_dataset_stats(txt_path, show_buckets=0):
"""Gather mean and standard deviation values. Averaged for every file in the
training txt data file.
Args:
txt_path (str): Path to the `train.txt`.
show_buckets (int): Display additional bucketing markers if `show_buckets > 0`.
Returns:
Nothing.
"""
# Check if results are buffered.
tmp_path = '/tmp/sample_length_dump_{}.p'.format(os.path.split(txt_path)[1])
if not (os.path.exists(tmp_path) and os.path.isfile(tmp_path)):
sample_lengths = [] # Output buffer.
sample_lengths_sec = [] # Output buffer.
# Read train.txt file.
with open(txt_path, 'r') as file_handle:
lines = file_handle.readlines()
# Setup threadpool.
lock = Lock()
with Pool(processes=cpu_count()) as pool:
for length, length_sec in tqdm(
pool.imap_unordered(_stat_calculator, lines, chunksize=4),
desc='Reading audio samples', total=len(lines), file=sys.stdout,
unit='samples', dynamic_ncols=True):
lock.acquire()
sample_lengths.append(length)
sample_lengths_sec.append(length_sec)
lock.release()
pickle.dump(sample_lengths_sec, open(tmp_path, 'wb'))
print('Stored data to {}'.format(tmp_path))
total_len = np.sum(sample_lengths_sec)
print('Total sample length={:.3f}s (~{}h) of {}.'
.format(total_len, int(total_len / 60 / 60), txt_path))
print('Mean sample length={:.0f} ({:.3f})s.'
.format(np.mean(sample_lengths), np.mean(sample_lengths_sec)))
else:
print('Loading stored dump from {}'.format(tmp_path))
sample_lengths_sec = pickle.load(open(tmp_path, 'rb'))
# Add optional bucket markers.
buckets = _bucketing(show_buckets, sample_lengths_sec)
# Plot histogram of WAV length distribution.
_plot_wav_lengths(sample_lengths_sec, buckets=buckets)
print('Done.')
def _bucketing(number_buckets, sample_lengths):
if number_buckets <= 0:
return None
number_examples = len(sample_lengths)
step = number_examples // number_buckets
sorted_lengths = sorted(sample_lengths)
buckets = [sorted_lengths[i] for i in range(0, len(sorted_lengths), step)]
# Make sure the last bucket aligns with the highest value.
if buckets[-1] != sorted_lengths[-1]:
buckets[-1] = sorted_lengths[-1]
return buckets
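# Worked example (a sketch of the boundary selection above):
# >>> _bucketing(3, [1, 5, 2, 9, 4, 7])
# [1, 4, 9]
# step = 6 // 3 = 2 picks sorted indices 0, 2, 4 -> [1, 4, 7]; the last
# boundary is then raised to the maximum, giving [1, 4, 9].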
def _stat_calculator(line):
# Python multiprocessing helper method.
wav_path, _ = line.split(' ', 1)
wav_path = os.path.join(__DATASETS_PATH, wav_path)
if not os.path.isfile(wav_path):
raise ValueError('"{}" does not exist.'.format(wav_path))
# Load the audio files sample rate and data.
(sampling_rate, audio_data) = wavfile.read(wav_path)
length = len(audio_data)
length_sec = length / sampling_rate
if length_sec < MIN_EXAMPLE_LENGTH:
print('WARN: Too short example found: ', line, length_sec)
if length_sec > MAX_EXAMPLE_LENGTH:
print('WARN: Overlong example found: ', line, length_sec)
return length, length_sec
@pyplot_display
def _plot_wav_lengths(plt, sample_lengths_sec, buckets=None):
# Create figure.
fig = plt.figure(figsize=(7.0, 2.60))
plt.hist(sample_lengths_sec, bins=75, facecolor='green', alpha=0.75, histtype='bar')
if buckets is not None:
# plt.hist(buckets, bins=len(buckets), facecolor='red', alpha=0.75, stacked=False,
# histtype='bar', edgecolor='black', linewidth=0.6)
for bucket in buckets:
plt.axvline(bucket, color='red', linewidth=0.5, linestyle='-')
# Y axis ticks
# plt.yticks(range(0, 60000, 20000))
# plt.yscale('log')
ax = plt.gca()
import matplotlib
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), '5'))
)
plt.title('Sample Length in Seconds', visible=False)
plt.ylabel('Count', visible=True)
plt.xlabel('Length (s)', visible=True)
display_grid = buckets is None
plt.grid(b=True, which='major', axis='both', linestyle='dashed', linewidth=0.7, aa=False,
visible=display_grid)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
# Finish plot by tightening everything up.
plt.tight_layout()
# This line messes up the pyCharm preview image.
fig.savefig('/tmp/length-distribution-dev.pdf', bbox_inches='tight')
# fig.savefig('/tmp/bucketing-example.pdf', bbox_inches='tight')
return fig
if __name__ == '__main__':
    # Path to the `train.csv` file
__CSV_PATH = os.path.join('./data', 'train.csv')
# Display dataset stats.
calculate_dataset_stats(__CSV_PATH, show_buckets=0)
|
{"hexsha": "69ef2841d8c99da3b389749e15e48d9403af6284", "size": 5356, "ext": "py", "lang": "Python", "max_stars_repo_path": "asr/dataset/wav_lengths.py", "max_stars_repo_name": "aflyingwolf/ctc-asr", "max_stars_repo_head_hexsha": "6f1a17366b942c1a70230e170da4d0ae15fa52da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "asr/dataset/wav_lengths.py", "max_issues_repo_name": "aflyingwolf/ctc-asr", "max_issues_repo_head_hexsha": "6f1a17366b942c1a70230e170da4d0ae15fa52da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "asr/dataset/wav_lengths.py", "max_forks_repo_name": "aflyingwolf/ctc-asr", "max_forks_repo_head_hexsha": "6f1a17366b942c1a70230e170da4d0ae15fa52da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-19T13:11:53.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-19T13:11:53.000Z", "avg_line_length": 33.475, "max_line_length": 93, "alphanum_fraction": 0.6512322629, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1260}
|
include("../src/nn.jl")
using Test
Random.seed!(1)
"""
charge distribution -> potential (Poisson's eqn), electric field (Gauss's law)
"""
# input Scalar field
inranks = [0]
# output scalar field, vector field
outranks = [0, 1]
sz=(8,8,8)
dx = 0.1
dV=dx^3
rmax = 2dx
lmax = 1
# charge distribution
x = [zeros(sz...)]
ix=[5, 5, 5]
x[1][ix...] = 1.0
X=[x]
# generate data
# Green's fn for Poisson, Gauss
f1 = LinearOperator(:potential,dx;rmax=rmax)
f2 = LinearOperator(:field,dx;rmax=rmax)
y1 = f1(X[1])
y2 = f2(X[1])
# check
v=[1,1,1]
ix = ix.+v
r=norm(v)*dx
@test y1[1][ix...]≈dV/r
@test [y2[i][ix...] for i = 1:3]≈dV/r^2*ones(3)/sqrt(3)
##
# train
# linear layer: tensor field convolution
L = EquivConv(inranks, outranks, dx; rmax = rmax)
function nn(X)
L(X)
end
function loss()
y1hat, y2hat = nn(X)
l1 = Flux.mae(toArray(y1), toArray(y1hat))
l2 = Flux.mae(toArray(y2), toArray(y2hat))
l = l1 + l2
println(l)
l
end
loss()
##
ps = Flux.params(L)
data = [()]
opt = ADAM(0.1)
println("===\nTraining")
for i = 1:5
# global doplot = i % 50 == 0
Flux.train!(loss, ps, data, opt)
end
##
Random.seed!(1)
n=4
inranks=[0,0]
outranks=[0]
X=[[rand(n,n,n)],[rand(n,n,n)]]
y=[[X[1][1].*X[2][1]]]
# train
# linear layer: tensor field convolution
A = EquivAttn(inranks, outranks)
function nn(X)
A(X)
end
function loss()
yhat = nn(X)
l = Flux.mae(toArray(y[1]), toArray(yhat[1]))
println(l)
l
end
loss()
ps = Flux.params(A)
data = [()]
opt = ADAM(0.1)
println("===\nTraining")
for i = 1:10
# global doplot = i % 50 == 0
Flux.train!(loss, ps, data, opt)
end
|
{"hexsha": "57cfffa3198336379f7baf305a8af3edccae577b", "size": 1625, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "aced-differentiate/equivariant-operators-website", "max_stars_repo_head_hexsha": "62d65317d0f96c6fc8490a22d76ef307e585e972", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "aced-differentiate/equivariant-operators-website", "max_issues_repo_head_hexsha": "62d65317d0f96c6fc8490a22d76ef307e585e972", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "aced-differentiate/equivariant-operators-website", "max_forks_repo_head_hexsha": "62d65317d0f96c6fc8490a22d76ef307e585e972", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.1869158879, "max_line_length": 78, "alphanum_fraction": 0.5950769231, "num_tokens": 633}
|
import pandas as pd
import panel as pn
import numpy as np
from .config import filters, datastyles, categories, plot_data_path
def get_numbers(df, filt, datastyle):
if df is None:
return []
numbers = list(df.query(f'filt == "{filt}" and datastyle == "{datastyle}"').number.unique())
numbers.sort()
return numbers
def get_plots_list(df, filt, datastyle, number, category, compare=False, full_name=False):
"""Returns list of plot names given a datastyle, number, and category, and whether compare
"""
if df is None:
return []
q = f'filt == "{filt}" and datastyle == "{datastyle}" and number == {number} and category == "{category}" and compare == {compare}'
if full_name:
plots = list(df.query(q).basename)
else:
plots = list(df.query(q).content)
paths = list(df.query(q).filename)
inds = np.argsort(plots)
plots = list(np.array(plots)[inds])
paths = list(np.array(paths)[inds])
return plots, paths
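# Hedged usage sketch (assumes a plot-metadata DataFrame with the columns
# queried above -- filt, datastyle, number, category, compare, content,
# basename, filename; the argument values are illustrative only):
# df = pd.read_csv(plot_data_path)
# plots, paths = get_plots_list(df, 'HSC-R', 'coadd', 9813, 'photometry')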
|
{"hexsha": "237c5b0dbdea2130fb115090f6aab9dff8694caf", "size": 1003, "ext": "py", "lang": "Python", "max_stars_repo_path": "navigator/gui.py", "max_stars_repo_name": "timothydmorton/pipe-analysis-navigator", "max_stars_repo_head_hexsha": "85b05053324f693386a0c12b292ab38d452a01db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "navigator/gui.py", "max_issues_repo_name": "timothydmorton/pipe-analysis-navigator", "max_issues_repo_head_hexsha": "85b05053324f693386a0c12b292ab38d452a01db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "navigator/gui.py", "max_forks_repo_name": "timothydmorton/pipe-analysis-navigator", "max_forks_repo_head_hexsha": "85b05053324f693386a0c12b292ab38d452a01db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1081081081, "max_line_length": 135, "alphanum_fraction": 0.6440677966, "include": true, "reason": "import numpy", "num_tokens": 246}
|
import numpy as np
from flask import Flask
from flask import render_template,request
from sklearn.externals import joblib
from forms import CarForm
from config import Config
app = Flask("Car Price Prediction")
app.config.from_object(Config)
@app.route("/", methods=["GET"])
def home():
car_form = CarForm()
if car_form.validate_on_submit():
car_age = int(car_form.car_age.data)
car_fuel = car_form.car_fuel.data
car_doors = int(car_form.car_doors.data)
car_cc = int(car_form.car_cc.data)
car_horsepower = int(car_form.car_horsepower.data)
car_transmission = car_form.car_transmission.data
car_odometer = int(car_form.car_odometer.data)
car_weight = car_form.car_weight.data
car_color = car_form.car_color.data
return render_template("car.html", form=car_form)
@app.route("/", methods=["POST"])
def result():
try:
form = request.form
model = joblib.load("mlmodel/car_price_prediction.pkl")
if int(form['car_fuel']) == 1:
fuel = 1
else:
fuel = 0
if int(form['car_transmission']) == 1:
car_transmission = 1
else:
car_transmission = 0
if int(form['car_color']) == 1:
car_color = 0
else:
car_color = 1
new_car = np.array(
[int(form['car_odometer']), fuel, int(form['car_doors']), car_transmission,
int(form['car_horsepower']), car_color, int(form['car_cc']), int(form['car_weight']),
int(form['car_age'])]).reshape(1, -1)
predicted_price = model.predict(new_car)
if predicted_price < 0:
predicted_price = 0
return render_template("result.html", price=int(predicted_price))
except ValueError:
return render_template("error.html")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9090, debug=True)
|
{"hexsha": "5295b32d2293fe5eca60d4e39c1e0332034ecca2", "size": 1928, "ext": "py", "lang": "Python", "max_stars_repo_path": "Day 17/src/app.py", "max_stars_repo_name": "vgaurav3011/100-Days-of-ML", "max_stars_repo_head_hexsha": "ec302b03fd492c459cff2592b3a4f5e38f9c9d72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-03-30T15:10:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T06:04:01.000Z", "max_issues_repo_path": "Day 17/src/app.py", "max_issues_repo_name": "vgaurav3011/100-Days-of-ML", "max_issues_repo_head_hexsha": "ec302b03fd492c459cff2592b3a4f5e38f9c9d72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:34:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T03:25:23.000Z", "max_forks_repo_path": "Day 17/src/app.py", "max_forks_repo_name": "vgaurav3011/100-Days-of-ML", "max_forks_repo_head_hexsha": "ec302b03fd492c459cff2592b3a4f5e38f9c9d72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-13T09:51:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-28T07:37:36.000Z", "avg_line_length": 32.1333333333, "max_line_length": 98, "alphanum_fraction": 0.6286307054, "include": true, "reason": "import numpy", "num_tokens": 464}
|
import numpy as np
import matplotlib.pyplot as plt
from imageio import imwrite
import matplotlib.patches as patches
from sklearn.cluster import DBSCAN
import json
from PIL import Image
def read_json_coords(label):
'''Read a json file containing bounding boxes into coordinate arrays '''
coords, centres = [], []
for object in label['shapes']:
        # get the coordinates of two opposite corners of each bounding box
[[x1, y1], [x3, y3]] = object['points'][0], object['points'][1]
coords.append([[x1, y1], [x3, y3]])
# store centre coordinate of each bounding box for clustering
centres.append([(x1+x3)//2, (y1+y3)//2])
coords_array = np.array(coords)
centres_array = np.array(centres)
return coords_array, centres_array
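# A sketch of the bounding-box JSON layout read_json_coords() assumes
# (labelme-style; field names are taken from the accesses above, values are
# illustrative):
# {
#   "shapes": [
#     {"label": "whale", "points": [[120.0, 340.0], [180.0, 410.0]]},
#     {"label": "nonwhale", "points": [[500.0, 90.0], [540.0, 130.0]]}
#   ]
# }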
def get_bbox_info_aerial(box_path):
'''Convert json bounding box file to correct image format'''
with open(box_path, 'r') as f:
label = json.load(f)
coords, centres = read_json_coords(label)
## DB-Scan algorithm for clustering ##
eps = 1 # threshold distance between two points to be in the same 'neighbourhood'
dbscan = DBSCAN(min_samples=1, eps=eps)
y = dbscan.fit_predict(centres)
# storing coordinates of clusters, relative to boundaries of image (not tile)
info = {}
for i in range(y.max()+1):
# calculate the max and min coords of all the bounding boxes in the cluster
box_centres = centres[np.where(y==i)[0]]
min_x, max_x = box_centres[:, 0].min(), box_centres[:, 0].max()
min_y, max_y = box_centres[:, 1].min(), box_centres[:, 1].max()
# assign each cluster of objects as an item
item = {}
item['centre'] = [(min_x+max_x)//2, (min_y+max_y)//2]
item['object_boxes'] = coords[np.where(y==i)[0]].tolist()
for name in label['shapes']:
if name['label'] == "whale":
item['name'] = "whale"
if name['label'] == "nonwhale":
item['name']= "nonwhale"
info[i] = item
return(info)
def save_aerial_files(image_path, box_path, output_dir, input_name):
'''
save each image as .png
along with corresponding bounding box labels in YOLO format as .txt
'''
image = Image.open(image_path)
info = get_bbox_info_aerial(box_path)
# initialise a figure to visualise output tiles
fig = plt.figure(figsize=(20, 100))
n_tiles = len(info.keys())
for i, k in enumerate(info.keys()):
# get centre of each bounding box cluster
        x, y = info[k]['centre'][0], info[k]['centre'][1] # in pixels, with origin in the upper left
width, height = image.getbbox()[2], image.getbbox()[3] #2,3 are right and lower bounds
# save image
image_name = '{}/{}.png'.format(output_dir, input_name)
imwrite(image_name, image)
ax = fig.add_subplot(n_tiles, 4, k+1)
ax.imshow(image)
# save label file
file = open(image_name.replace('.png', '.txt'), 'a')
for j, box in enumerate(info[k]['object_boxes']):
# get coordinates of upper left (x1) and lower right (x3) corner of bounding box
            [[x1, y1], [x3, y3]] = box # in px, origin in the upper left
# define bounding box centre & width
box_centre_x = (x1+x3)//2
box_centre_y = (y1+y3)//2
box_width = x3-x1
box_height = y3-y1
# add bounding boxes to tile subplot
rect = patches.Rectangle((x1, y1), box_width, box_height, linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
title = str(k+1)
ax.set_title(title)
# write label to .txt file
if info[k]['name'] == 'whale' :
lab = '0 {} {} {} {}\n'.format(abs(box_centre_x/width), abs(box_centre_y/height), abs(box_width/width), abs(box_height/height))
if info[k]['name'] == 'nonwhale' :
lab = '1 {} {} {} {}\n'.format(abs(box_centre_x/width), abs(box_centre_y/height), abs(box_width/width), abs(box_height/height))
file.write(lab)
file.close()
plt.show()
|
{"hexsha": "ad7cbdd00daf97d63c4c09e57caa08f371c266ec", "size": 4422, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/preprocessing/aerialdataprocessing.py", "max_stars_repo_name": "KMacfarlaneGreen/mres_whales", "max_stars_repo_head_hexsha": "11b96a824d9d302afbcb192d24f5087e77721a30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/preprocessing/aerialdataprocessing.py", "max_issues_repo_name": "KMacfarlaneGreen/mres_whales", "max_issues_repo_head_hexsha": "11b96a824d9d302afbcb192d24f5087e77721a30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/preprocessing/aerialdataprocessing.py", "max_forks_repo_name": "KMacfarlaneGreen/mres_whales", "max_forks_repo_head_hexsha": "11b96a824d9d302afbcb192d24f5087e77721a30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1206896552, "max_line_length": 143, "alphanum_fraction": 0.5687471732, "include": true, "reason": "import numpy", "num_tokens": 1097}
|
import warnings
warnings.simplefilter(action='ignore',category=FutureWarning)
import cv2 ## openCV
import os
import numpy as np
import matplotlib.pyplot as plt
import operator
from IPython.display import Markdown, display
def printmd(string, color=None):
colorstr = "<span style='color:{}'>{}</span>".format(color, string)
display(Markdown(colorstr))
original_path = os.getcwd()
## OpenCV haarcascade model for "face detection"
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
## for input images
img_width = 550
img_height = 750
## for cropped face
img_col = 96
img_row = 96
## hyperparams
box_size_factor = 10 # bigger value allows smaller bounding box
face_recog_thresh = 0.70
## `embed_image` to embed processed face images into 128d vectors
def embed_image(face_img,model):
'''
embed the RGB cropped face (input) into 128d vector
use with `detect_face()`
'''
img = cv2.resize(face_img, (img_col,img_row)).astype('float32')
img /= 255.0
img = np.expand_dims(img,axis=0)
embedding = model.predict_on_batch(img)
return embedding
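# Hedged usage sketch: two embeddings can be compared with a Euclidean
# distance, as identify_single_face() does below (face_crop_a and face_crop_b
# are hypothetical RGB crops; model maps 96x96 RGB crops to 128-d vectors):
# e1 = embed_image(face_crop_a, model)
# e2 = embed_image(face_crop_b, model)
# is_same_person = np.linalg.norm(e1 - e2) < face_recog_thresh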
## `detect_face` to detect frontal faces in *gray* (higher accuracy than doing it in color)
def detect_face(img,fc=face_cascade,flag='db',plot=False):
'''
Receive BGR format as an input and return coordinate(s) of detected face(s)
default: flag = 'db' --> assume only one face is present in the image and return only 1 face
    flag = 'new' --> used when embedding new images (possibly multiple faces)
'''
img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
height,width = img_grey.shape
faces_raw = fc.detectMultiScale(img_grey) # higher accuracy for faces with black glasses
faces = []
# get rid of errorneous small boxes
for face in faces_raw:
if face[2] > (min(height,width)/box_size_factor):
faces.append(face)
if flag == 'db':
face_box = [0,0,0,0]
for (x,y,w,h) in faces:
if w > face_box[2]:
                face_box = [x,y,w,h] # keep only the largest face box for database embedding
(x,y,w,h) = face_box
faces = [face_box]
if flag == 'new':
faces = faces
if plot:
num_col = 5
img_color = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_color_for_crop = img_color.copy()
for (plot_x,plot_y,plot_w,plot_h) in faces:
img_color = cv2.rectangle(img_color, (plot_x,plot_y), (plot_x+plot_w,plot_y+plot_h), (255,0,0), 8)
plt.title('full image',fontdict={'fontsize':15,'fontweight':'bold'})
plt.imshow(img_color)
plt.axis('off')
if len(faces) == 1:
(plot_x,plot_y,plot_w,plot_h) = faces[0]
fig,ax=plt.subplots(1,1,figsize=(3,3))
cropped = img_color_for_crop[plot_y:plot_y+plot_h,plot_x:plot_x+plot_w]
ax.imshow(cropped)
ax.axis('off')
fig.suptitle('Cropped face image to be embedded',fontsize=15,fontweight='bold')
elif len(faces)<=num_col:
fig,axes=plt.subplots(1,len(faces),figsize=(3*len(faces),3))
for ax,(plot_x,plot_y,plot_w,plot_h) in zip(axes.flatten(),faces):
cropped = img_color_for_crop[plot_y:plot_y+plot_h,plot_x:plot_x+plot_w]
ax.imshow(cropped)
ax.axis('off')
fig.suptitle('Cropped face image to be embedded (not ordered)',fontsize=15,fontweight='bold')
else:
fig, axes = plt.subplots(int(np.ceil(len(faces)/num_col)),num_col,figsize=(15,3*int(np.ceil(len(faces)/num_col))))
fig.suptitle('Cropped face image to be embedded (not ordered)',fontsize=15,fontweight='bold')
for ax,(plot_x,plot_y,plot_w,plot_h) in zip(axes.flatten(),faces):
cropped = img_color_for_crop[plot_y:plot_y+plot_h,plot_x:plot_x+plot_w]
ax.imshow(cropped)
ax.axis('off')
if not len(faces)==len(axes.flatten()):
for i in axes.flatten()[len(faces)-len(axes.flatten()):]:
i.set_visible(False)
return faces
## `database_face_embedding` to process and embed images in the database
def database_face_embedding(model):
'''
embed the images in the database - folder name 'image_database' required
output = {'name':embedding,...}
'''
database_embeddings = {}
os.chdir(os.path.join(os.getcwd(),'image_database'))
for img_file in os.listdir():
name = img_file.split('.')[0]
image_file = cv2.imread(img_file)
image_file = cv2.resize(image_file,(img_width,img_height), interpolation = cv2.INTER_AREA)
faces = detect_face(image_file)
(x, y, w, h) = faces[0]
image_file = cv2.cvtColor(image_file, cv2.COLOR_BGR2RGB)
cropped = image_file[y:y+h,x:x+w]
database_embeddings[name] = embed_image(cropped, model)
os.chdir(original_path)
return database_embeddings
## `identify_single_face` to identify a person given a single face image
def identify_single_face(new_face,database_embeddings,model,face_recog_thresh,verbose=None):
'''
receive one new RGB face as an input
return name_label of that face as one of the registered members
'''
new_face_embedding = embed_image(new_face,model)
name_label = ''
result = {}
min_dist = 100
for (registered_name,registered_embedding) in database_embeddings.items():
euc_dist = np.linalg.norm(new_face_embedding-registered_embedding)
euc_dist = round(euc_dist,3)
result[registered_name] = euc_dist
if euc_dist < min_dist:
min_dist = euc_dist
name = registered_name
if min_dist < face_recog_thresh:
if verbose:
printmd('@@@ this is '+'**{}**'.format(name.upper())+'! @@@\n',color='red')
print('Distance from:')
for i in sorted(result.items(),key=operator.itemgetter(1)):
if i[0] == name:
printmd('**{}**'.format(i),color='red')
else:
print(i)
print('')
name_label = name
return name_label
else:
if verbose:
print('@@@ not registered! @@@\n')
print('Distance from:')
for i in sorted(result.items(),key=operator.itemgetter(1)):
print(i)
print('')
name_label = 'n/a'
return name_label
## `recog_face` to recognize multiple faces in a single frame (image)
def recog_face(img,database_embeddings,model,face_recog_thresh=0.7,fc=face_cascade,verbose=None):
'''
receive BGR image as an input
return image with overlayed bounding boxes and names of the registered members
'''
img_color = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
faces = detect_face(img,flag='new')
for (x, y, w, h) in faces:
cropped = img_color[y:y+h,x:x+w]
if verbose:
            name = identify_single_face(cropped,database_embeddings,model,face_recog_thresh,verbose=True)
if not verbose:
            name = identify_single_face(cropped,database_embeddings,model,face_recog_thresh)
text = '{}'.format(name)
(text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_PLAIN,3,5)[0]
text_offset_x = x-3
text_offset_y = y
box_coords = ((text_offset_x, text_offset_y+10), (text_offset_x+text_width,text_offset_y-text_height-10))
if name != 'n/a':
img_color = cv2.rectangle(img_color, (x, y), (x+w, y+h), (255,0,0), 8)
img_color = cv2.rectangle(img_color, box_coords[0], box_coords[1], (255,0,0), cv2.FILLED)
img_color = cv2.putText(img_color,text,(x,y),cv2.FONT_HERSHEY_PLAIN,3,(255,255,255),4)
if name == 'n/a':
img_color = cv2.rectangle(img_color, (x, y), (x+w, y+h), (0,0,255), 8)
img_color = cv2.rectangle(img_color, box_coords[0], box_coords[1], (0,0,255), cv2.FILLED)
img_color = cv2.putText(img_color,text,(x,y),cv2.FONT_HERSHEY_PLAIN,3,(255,255,255),4)
plt.figure(figsize=(8,8))
plt.imshow(img_color)
plt.axis('off')
## load and preprocess the image
def load_test_img(file):
img = cv2.imread(os.path.join('test',file))
img = cv2.resize(img,(img_width,img_height),interpolation=cv2.INTER_AREA)
return img
|
{"hexsha": "6ebf67c7c073223c5e6799beaef1c7cad9d142a1", "size": 8293, "ext": "py", "lang": "Python", "max_stars_repo_path": "helper_module.py", "max_stars_repo_name": "sungsujaing/Insight_workshop", "max_stars_repo_head_hexsha": "a951903794cc37fb21d1228a38a3ad6798a7987f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-19T06:05:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-21T15:28:22.000Z", "max_issues_repo_path": "helper_module.py", "max_issues_repo_name": "sungsujaing/Insight_workshop", "max_issues_repo_head_hexsha": "a951903794cc37fb21d1228a38a3ad6798a7987f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-10-20T21:16:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:29:28.000Z", "max_forks_repo_path": "helper_module.py", "max_forks_repo_name": "sungsujaing/Insight_workshop", "max_forks_repo_head_hexsha": "a951903794cc37fb21d1228a38a3ad6798a7987f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8701923077, "max_line_length": 126, "alphanum_fraction": 0.6402990474, "include": true, "reason": "import numpy", "num_tokens": 2169}
|
"""Tasks for generating Sample Similarity results."""
import numpy as np
from sklearn.manifold import TSNE
from app.extensions import celery
from app.display_modules.utils import persist_result_helper, scrub_category_val
from app.tool_results.kraken import KrakenResultModule
from app.tool_results.krakenhll import KrakenHLLResultModule
from app.tool_results.metaphlan2 import Metaphlan2ResultModule
from .models import SampleSimilarityResult
from .constants import MODULE_NAME
def get_clean_samples(sample_dict, no_zero_features=True, zero_threshold=0.00001):
"""
Clean sample feature data by filling in missing features.
Parameters
----------
sample_dict : dict
Dictionary of the form {<sample_id>: <features>}.
no_zero_features : bool
If True, features with total value across all samples less than the
threshold are removed from all samples.
zero_threshold : float
The threshold to use for removing features as described above.
Returns
-------
dict
Cleaned sample set
"""
# Collect all feature IDs (species names)
feature_ids = set([])
for features in sample_dict.values():
for feature_id in features:
feature_ids.add(feature_id)
ordered_feature_ids = list(feature_ids)
# Fill in missing feature values with 0.0
samples = {sample_id: {feature_id: features.get(feature_id, 0.0)
for feature_id in ordered_feature_ids}
for sample_id, features in sample_dict.items()}
# Filter out features with low total
if no_zero_features:
# Score all features
feature_total_score = {feature_id: 0 for feature_id in ordered_feature_ids}
for features in samples.values():
for feature_id, value in features.items():
feature_total_score[feature_id] += value
# Assign passing grade
        features_passing = {feature_id: value > zero_threshold
                            for feature_id, value in feature_total_score.items()}
# Filter features failing to meet threshold from all samples
samples = {sample_id: {feature_id: value
for feature_id, value in features.items()
if features_passing[feature_id]}
for sample_id, features in samples.items()}
ordered_feature_ids = [feature_id for feature_id, is_passing
in features_passing.items() if is_passing]
return samples
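# Hedged worked example: two samples with partially overlapping features; the
# missing feature is filled with 0.0 and both features pass the zero-total
# filter (dict ordering may differ):
# >>> get_clean_samples({'s1': {'a': 0.4, 'b': 0.6}, 's2': {'b': 0.9}})
# {'s1': {'a': 0.4, 'b': 0.6}, 's2': {'a': 0.0, 'b': 0.9}}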
def run_tsne(samples):
"""Run tSNE algorithm on array of features and return labeled results."""
feature_array = [[value for value in features.values()]
for features in samples.values()]
feature_array = np.array(feature_array)
params = {
'n_components': 2,
'perplexity': 30.0,
'early_exaggeration': 2.0,
'learning_rate': 120.0,
'n_iter': 1000,
'min_grad_norm': 1e-05,
'metric': 'euclidean',
}
return TSNE(**params).fit_transform(feature_array)
def label_tsne(tsne_results, sample_names, tool_label):
"""
Label tSNE results.
Parameters
----------
tsne_results : np.array
Output from run_tsne.
sample_names : list
List of sample names.
tool_label : str
The tool name to use for adding labels.
Returns
-------
dict
Dictionary of the form: {<sample_name>: <coordinate>}.
"""
tsne_labeled = {sample_names[i]: {f'{tool_label}_x': float(tsne_results[i][0]),
f'{tool_label}_y': float(tsne_results[i][1])}
for i in range(len(sample_names))}
return tsne_labeled
@celery.task()
def taxa_tool_tsne(samples, tool_name):
"""Run tSNE for tool results stored as 'taxa' property."""
tool = {
'x_label': f'{tool_name} tsne x',
'y_label': f'{tool_name} tsne y',
}
sample_dict = {sample['name']: sample[tool_name]['taxa']
for sample in samples}
samples = get_clean_samples(sample_dict)
taxa_tsne = run_tsne(samples)
sample_names = list(samples.keys())
tsne_labeled = label_tsne(taxa_tsne, sample_names, tool_name)
return (tool, tsne_labeled)
def update_data_records(samples, categories,
kraken_labeled, krakenhll_labeled, metaphlan_labeled):
"""Update data records."""
data_records = []
for sample in samples:
sample_id = sample['name']
data_record = {'SampleID': sample_id}
data_record.update(kraken_labeled[sample_id])
data_record.update(krakenhll_labeled[sample_id])
data_record.update(metaphlan_labeled[sample_id])
for category_name in categories.keys():
category_value = sample['metadata'].get(category_name, 'None')
category_value = scrub_category_val(category_value)
data_record[category_name] = category_value
data_records.append(data_record)
return data_records
@celery.task()
def sample_similarity_reducer(args, samples):
"""Combine Sample Similarity components."""
categories = args[0]
kraken_tool, kraken_labeled = args[1]
krakenhll_tool, krakenhll_labeled = args[2]
metaphlan_tool, metaphlan_labeled = args[3]
data_records = update_data_records(
samples,
categories,
kraken_labeled,
krakenhll_labeled,
metaphlan_labeled
)
tools = {
KrakenResultModule.name(): kraken_tool,
KrakenHLLResultModule.name(): krakenhll_tool,
Metaphlan2ResultModule.name(): metaphlan_tool,
}
result_data = {
'categories': categories,
'tools': tools,
'data_records': data_records,
}
return result_data
@celery.task(name='sample_similarity.persist_result')
def persist_result(result_data, analysis_result_id):
"""Persist Sample Similarity results."""
result = SampleSimilarityResult(**result_data)
persist_result_helper(result, analysis_result_id, MODULE_NAME)
|
{"hexsha": "9242b175e301bf9df4123201f6c613fd105d8e78", "size": 6071, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/display_modules/sample_similarity/tasks.py", "max_stars_repo_name": "MetaGenScope/metagenscope-server", "max_stars_repo_head_hexsha": "609cd57c626c857c8efde8237a1f22f4d1e6065d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app/display_modules/sample_similarity/tasks.py", "max_issues_repo_name": "MetaGenScope/metagenscope-server", "max_issues_repo_head_hexsha": "609cd57c626c857c8efde8237a1f22f4d1e6065d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/display_modules/sample_similarity/tasks.py", "max_forks_repo_name": "MetaGenScope/metagenscope-server", "max_forks_repo_head_hexsha": "609cd57c626c857c8efde8237a1f22f4d1e6065d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8162162162, "max_line_length": 83, "alphanum_fraction": 0.6555756877, "include": true, "reason": "import numpy", "num_tokens": 1354}
|
from __future__ import print_function
import time
import sys
from io import StringIO
import os
import shutil
import argparse
import csv
import json
import numpy as np
import pandas as pd
import logging
from sklearn.compose import ColumnTransformer
from sklearn.externals import joblib
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder
from sagemaker_containers.beta.framework import (
content_types, encoders, env, modules, transformer, worker)
# Since we get a headerless CSV file we specify the column names here.
feature_columns_names = [
'State',
'Account Length',
'Area Code',
'Phone',
"Int'l Plan",
'VMail Plan',
'VMail Message',
'Day Mins',
'Day Calls',
'Day Charge',
'Eve Mins',
'Eve Calls',
'Eve Charge',
'Night Mins',
'Night Calls',
'Night Charge',
'Intl Mins',
'Intl Calls',
'Intl Charge',
'CustServ Calls']
label_column = 'Churn?'
feature_columns_dtype = {
'State' : str,
'Account Length' : np.int64,
'Area Code' : str,
'Phone' : str,
"Int'l Plan" : str,
'VMail Plan' : str,
'VMail Message' : np.int64,
'Day Mins' : np.float64,
'Day Calls' : np.int64,
'Day Charge' : np.float64,
'Eve Mins' : np.float64,
'Eve Calls' : np.int64,
'Eve Charge' : np.float64,
'Night Mins' : np.float64,
'Night Calls' : np.int64,
'Night Charge' : np.float64,
'Intl Mins' : np.float64,
'Intl Calls' : np.int64,
'Intl Charge' : np.float64,
'CustServ Calls' : np.int64}
label_column_dtype = {'Churn?': str}
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def _is_inverse_label_transform():
"""Returns True if if it's running in inverse label transform."""
return os.getenv('TRANSFORM_MODE') == 'inverse-label-transform'
def _is_feature_transform():
"""Returns True if it's running in feature transform mode."""
return os.getenv('TRANSFORM_MODE') == 'feature-transform'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Sagemaker specific arguments. Defaults are set in the environment variables.
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
args = parser.parse_args()
input_files = [ os.path.join(args.train, file) for file in os.listdir(args.train) ]
if len(input_files) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the channel ({}) was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(args.train, "train"))
raw_data = [ pd.read_csv(
file,
header=None,
names=feature_columns_names + [label_column],
dtype=merge_two_dicts(feature_columns_dtype, label_column_dtype)) for file in input_files ]
concat_data = pd.concat(raw_data)
print(concat_data.head(5))
numeric_features = list([
'Account Length',
'VMail Message',
'Day Mins',
'Day Calls',
'Eve Mins',
'Eve Calls',
'Night Mins',
'Night Calls',
'Intl Mins',
'Intl Calls',
'CustServ Calls'])
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['State','Area Code',"Int'l Plan",'VMail Plan']
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)],
remainder="drop")
preprocessor.fit(concat_data)
joblib.dump(preprocessor, os.path.join(args.model_dir, "model.joblib"))
print("saved model!")
def input_fn(input_data, request_content_type):
"""Parse input data payload
We currently only take csv input. Since we need to process both labelled
and unlabelled data we first determine whether the label column is present
by looking at how many columns were provided.
"""
print("input_fn-request_content_type: ", request_content_type)
print("input_fn-type of input_data: ", type(input_data))
content_type = request_content_type.lower(
) if request_content_type else "text/csv"
content_type = content_type.split(";")[0].strip()
if isinstance(input_data, str):
str_buffer = input_data
else:
str_buffer = str(input_data,'utf-8')
if _is_feature_transform():
logging.info(f"Input_fn, Mode: feature_transform")
if content_type == 'text/csv':
# Read the raw input data as CSV.
df = pd.read_csv(StringIO(input_data), header=None)
if len(df.columns) == len(feature_columns_names) + 1:
# This is a labelled example, includes the label
df.columns = feature_columns_names + [label_column]
elif len(df.columns) == len(feature_columns_names):
# This is an unlabelled example.
df.columns = feature_columns_names
return df
else:
raise ValueError("{} not supported by script!".format(content_type))
if _is_inverse_label_transform():
if (content_type == 'text/csv' or content_type == 'text/csv; charset=utf-8'):
# Read the raw input data as CSV.
df = pd.read_csv(StringIO(str_buffer), header=None)
logging.info(f"input_fn, Mode: inverse_label_transform")
logging.info(f"Shape of the requested data: '{df.shape}'")
return df
else:
raise ValueError("{} not supported by script!".format(content_type))
def output_fn(prediction, accept):
"""Format prediction output
The default accept/content-type between containers for serial inference is JSON.
We also want to set the ContentType or mimetype as the same value as accept so the next
container can read the response payload correctly.
"""
logging.info(f"Output_fn: prdiction - '{prediction}' ")
# Set to text/csv
accept = 'text/csv'
if type(prediction) is not np.ndarray:
prediction=prediction.toarray()
print("output_fn-type of prediction: ", type(prediction))
if accept == "application/json": # Code in the case of future use
instances = []
for row in prediction.tolist():
instances.append({"features": row})
json_output = {"instances": instances}
return worker.Response(json.dumps(json_output), mimetype=accept)
elif accept == 'text/csv':
return worker.Response(encoders.encode(prediction, accept), mimetype=accept)
else:
        raise RuntimeError("{} accept type is not supported by this script.".format(accept))
def predict_fn(input_data, model):
"""Preprocess input data
We implement this because the default predict_fn uses .predict(), but our model is a preprocessor
so we want to use .transform().
    The output is returned with the label (if present) as the first column,
    followed by the features, either one-hot encoded or standardized.
"""
if _is_feature_transform():
logging.info(f"predict_fn, Mode: feature_transform")
features = model.transform(input_data)
print("After trainsformation")
print(features[0:2])
if label_column in input_data:
# Return the label (as the first column) and the set of features.
label_features = np.insert(features.toarray(), 0, pd.get_dummies(input_data[label_column])['True.'], axis=1)
print("After insering a label")
print(label_features[0:2])
return label_features
else:
# Return only the set of features
return features
if _is_inverse_label_transform():
logging.info(f"predict_fn, Mode: inverse_transform - input_data: '{input_data}'")
features = input_data.iloc[:,0]>0.5
features = features.values
logging.info(f"predict_fn, Mode: inverse_transform - features after transformation: '{features}'")
return features
def model_fn(model_dir):
"""Deserialize fitted model
"""
if _is_feature_transform():
logging.info(f"model_fn, Mode: feature_transform")
preprocessor = joblib.load(os.path.join(model_dir, "model.joblib"))
return preprocessor
if _is_inverse_label_transform():
logging.info(f"model_fn, Mode: inverse_transform")
|
{"hexsha": "4610395b3205173ac6dd5ae978bde489e6cbbe2c", "size": 9424, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing.py", "max_stars_repo_name": "gonsoomoon-ml/churn-prediction-workshop2", "max_stars_repo_head_hexsha": "54d80cd3df42868ec26e5b28f9a15fd32ae80a3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-14T13:41:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-14T13:41:17.000Z", "max_issues_repo_path": "preprocessing.py", "max_issues_repo_name": "gonsoomoon-ml/churn-prediction-workshop2", "max_issues_repo_head_hexsha": "54d80cd3df42868ec26e5b28f9a15fd32ae80a3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing.py", "max_forks_repo_name": "gonsoomoon-ml/churn-prediction-workshop2", "max_forks_repo_head_hexsha": "54d80cd3df42868ec26e5b28f9a15fd32ae80a3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4184397163, "max_line_length": 134, "alphanum_fraction": 0.6326400679, "include": true, "reason": "import numpy,from sage", "num_tokens": 2101}
|
################################################################################
# Script: sp.py
# Description: This script is for preparing all the fields for sample points
# Every city should be processed with this script first to pre-prepare its
# sample points before running the aggregation.
# Two major outputs:
# 1. average population and intersection density per sample point
# 2. accessibility, dailyliving and walkability score per sample point
import json
import os
import sys
import time
from multiprocessing import Pool, Value, cpu_count
import fiona
# notice: must close the geopackage connection in QGIS. Otherwise, an error occurs when reading
################################################################################
import geopandas as gpd
import numpy as np
import pandas as pd
import osmnx as ox
import setup_config as sc # import project config parameters
import setup_sp as ssp
if __name__ == "__main__":
    # use the script from the command line; change directory to the '/process' folder,
    # then run 'python sp.py odense.json' to process city-specific indicators
startTime = time.time()
# get the work directory
dirname = os.path.abspath("")
    # the configuration file should be put in the '/configuration' folder, located in the same folder as the scripts
    # load city-specific configuration file
jsonFile = os.path.join("configuration", sys.argv[1])
jsonPath = os.path.join(dirname, jsonFile)
try:
with open(jsonPath) as json_file:
config = json.load(json_file)
except Exception as e:
print("Failed to read json file.")
print(e)
# output the processing city name to users
print("Process city: {}".format(config["study_region"]))
# read projected graphml filepath
proj_graphml_filepath = os.path.join(dirname, config["folder"], config["graphmlProj_name"])
# define original graphml filepath
ori_graphml_filepath = os.path.join(dirname, config["folder"], config["graphmlName"])
G_proj = ssp.read_proj_graphml(proj_graphml_filepath, ori_graphml_filepath, config["to_crs"])
# geopackage path where to read all the required layers
gpkgPath = os.path.join(dirname, config["folder"], config["geopackagePath"])
# geopackage path where to save processing layers
gpkgPath_output = os.path.join(dirname, config["folder"], config["geopackagePath_output"])
# copy input geopackage to output geopackage, if not already exist
if not os.path.isfile(gpkgPath_output):
print("Create study region sample point output file")
for layer in fiona.listlayers(gpkgPath):
gpkgPath_input = gpd.read_file(gpkgPath, layer=layer)
gpkgPath_input.to_file(gpkgPath_output, layer=layer, driver="GPKG")
else:
print("Study region sample point output file exists")
# read hexagon layer of the city from disk, the hexagon layer is 250m*250m
# it should contain population estimates and intersection information
hexes = gpd.read_file(gpkgPath_output, layer=sc.parameters["hex250"])
# get nodes from the city projected graphml
gdf_nodes = ox.graph_to_gdfs(G_proj, nodes=True, edges=False)
gdf_nodes.osmid = gdf_nodes.osmid.astype(int)
gdf_nodes = gdf_nodes.drop_duplicates(subset="osmid")
# keep only the unique node id column
gdf_nodes_simple = gdf_nodes[["osmid"]].copy()
del gdf_nodes
    # calculate average population and intersection density for each sample point in study regions
# the steps are as follows:
    # 1. use the OSM pedestrian network (graphml on disk) to calculate the local 1600m neighborhood per urban
# sample points (in disk)
# 2. load 250m hex grid from disk with population and network intersections density data
# 3. then intersect 1600m sample point neighborhood with 250m hex grid
# to associate pop and intersections density data with sample points by averaging the hex-level density
# final result is urban sample point dataframe with osmid, pop density, and intersection density
    # read pop density and intersection density field names from the city-specific configuration file
pop_density = sc.samplePoint_fieldNames["sp_local_nh_avg_pop_density"]
intersection_density = sc.samplePoint_fieldNames["sp_local_nh_avg_intersection_density"]
    # read from disk if it exists
if os.path.isfile(os.path.join(dirname, config["folder"], config["tempCSV"])):
print("Read poplulation and intersection density from local file.")
gdf_nodes_simple = pd.read_csv(os.path.join(dirname, config["folder"], config["tempCSV"]))
    # otherwise, calculate using a single thread or multiprocessing
else:
print("Calculate average poplulation and intersection density.")
# Graph for Walkability analysis should not be directed
# (ie. it is assumed pedestrians are not influenced by one way streets)
# note that when you save the undirected G_proj feature, if you re-open it, it is directed again
#
# >>> G_proj = ox.load_graphml(proj_graphml_filepath)
# >>> nx.is_directed(G_proj)
# True
# >>> G_proj = ox.get_undirected(G_proj)
# >>> nx.is_directed(G_proj)
# False
# >>> ox.save_graphml(G_proj, proj_graphml_filepath)
# >>> G_proj = ox.load_graphml(proj_graphml_filepath)
# >>> nx.is_directed(G_proj)
# True
# so no point undirecting it before saving - you have to undirect again regardless
G_proj = ox.get_undirected(G_proj)
# read search distance from json file, the default should be 1600m
        # the search distance is used to define the radius of a sample point as a local neighborhood
distance = sc.parameters["search_distance"]
# get the nodes GeoDataFrame row length for use in later iteration
rows = gdf_nodes_simple.shape[0]
        # if 'true' is provided on the command line, use multiprocessing; otherwise, use a single thread
        # Notice: Melbourne has the largest number of sample points, which needs 13 GB of memory for docker using 3 cpus.
if len(sys.argv) > 2:
if sys.argv[2].lower() == "true":
# method1: new way to use multiprocessing
# get a list of nodes id for later iteration purpose
node_list = gdf_nodes_simple.osmid.tolist()
node_list.sort()
pool = Pool(cpu_count())
result_objects = pool.starmap_async(
ssp.calc_sp_pop_intect_density_multi,
[(G_proj, hexes, distance, rows, node, index) for index, node in enumerate(node_list)],
chunksize=1000,
).get()
pool.close()
pool.join()
gdf_nodes_simple = pd.DataFrame(result_objects, columns=["osmid", pop_density, intersection_density])
else:
# method 2: single thread, use pandas apply()
# create counter for loop
val = Value("i", 0)
df_result = gdf_nodes_simple["osmid"].apply(
ssp.calc_sp_pop_intect_density,
args=(G_proj, hexes, pop_density, intersection_density, distance, val, rows),
)
# Concatenate the average of population and intersections back to the df of sample points
gdf_nodes_simple = pd.concat([gdf_nodes_simple, df_result], axis=1)
# save the pop and intersection density to a CSV file
gdf_nodes_simple.to_csv(os.path.join(dirname, config["folder"], config["tempCSV"]))
# set osmid as index
gdf_nodes_simple.set_index("osmid", inplace=True, drop=False)
print("The time to finish average pop and intersection density is: {}".format(time.time() - startTime))
# Calculate accessibility to POI (supermarket,convenience,pt,pso) and
# walkability for sample points steps as follow:
    # 1. use the pandana package to calculate the access distance from sample
    # points to destinations (daily living destinations, public open space)
    # 2. calculate accessibility score per sample point: transform accessibility
    # distance to a binary measure: 1 if access <= 500m, 0 otherwise
    # 3. calculate daily living score by summing the accessibility scores to all
    # POIs (excluding pos)
    # 4. calculate walkability score per sample point: get z-scores for daily
    # living accessibility, population density and intersection density;
    # sum these three z-scores at the sample point level
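    # In formula form (a sketch; the actual computation is delegated to the
    # setup_sp helpers):
    #   z(x) = (x - mean(x)) / std(x)
    #   walkability = z(daily_living) + z(pop_density) + z(intersection_density)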
print("Calculate assessbility to POIs.")
# read accessibility distance from configuration file, which is 500m
distance = sc.parameters["accessibility_distance"]
# create the pandana network, use network nodes and edges
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G_proj)
net = ssp.create_pdna_net(gdf_nodes, gdf_edges, predistance=distance)
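# Hedged sketch: create_pdna_net presumably wraps pandana.Network roughly as
# below (the column names are assumptions, not the project's exact code):
#
#   import pandana as pdna
#   net = pdna.Network(gdf_nodes["x"], gdf_nodes["y"],
#                      gdf_edges["from"], gdf_edges["to"], gdf_edges[["length"]])
#   net.precompute(predistance + 10)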
# read 'daily living destinations' point layer (supermarket,convenience,pt) from disk
gdf_poi1 = gpd.read_file(gpkgPath_output, layer=sc.parameters["destinations"])
# read field names from json file
poi_names = [sc.parameters["supermarket"], sc.parameters["convenience"], sc.parameters["PT"]]
# define output field names from nearest nodes distance
dist_fieldNames1 = [
"sp_nearest_node_supermarket_dist",
"sp_nearest_node_convenience_dist",
"sp_nearest_node_pt_dist",
]
# zip the input and output field names
names1 = list(zip(poi_names, dist_fieldNames1))
# calculate the distance from each node to POI
gdf_poi_dist1 = ssp.cal_dist_node_to_nearest_pois(gdf_poi1, distance, net, *(names1))
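# Hedged sketch of what cal_dist_node_to_nearest_pois likely does with
# pandana's POI queries (the category name is an illustrative assumption):
#
#   net.set_pois("supermarket", distance, 1,
#                gdf_poi1.geometry.x, gdf_poi1.geometry.y)
#   dist = net.nearest_pois(distance, "supermarket", num_pois=1)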
# read open space 'aos_nodes_30m_line' layer from geopackage
gdf_poi2 = gpd.read_file(gpkgPath_output, layer=sc.parameters["pos"])
names2 = [(sc.parameters["pos"], "sp_nearest_node_pos_dist")]
# calculate the distance from each node to public open space,
# filterattr=False to indicate the layer is 'aos_nodes_30m_line'
gdf_poi_dist2 = ssp.cal_dist_node_to_nearest_pois(gdf_poi2, distance, net, *names2, filterattr=False)
# concatenate two dataframes into one
gdf_nodes_poi_dist = pd.concat([gdf_nodes, gdf_poi_dist1, gdf_poi_dist2], axis=1)
dist_fieldNames1.append("sp_nearest_node_pos_dist")
# set index of gdf_nodes_poi_dist, using 'osmid' as the index
gdf_nodes_poi_dist.set_index("osmid", inplace=True, drop=False)
# drop unneeded columns
gdf_nodes_poi_dist.drop(["geometry", "id", "lat", "lon", "y", "x", "highway", "ref"], axis=1, inplace=True)
# replace -999 values with NaN
gdf_nodes_poi_dist = round(gdf_nodes_poi_dist, 0).replace(-999, np.nan).astype("Int64")
# read sample points from disk (in city-specific geopackage)
samplePointsData = gpd.read_file(gpkgPath_output, layer=sc.parameters["samplePoints"])
# create 'hex_id' for sample points if it does not exist
if "hex_id" not in samplePointsData.columns.tolist():
samplePointsData = ssp.createHexid(samplePointsData, hexes)
samplePointsData.set_index("point_id", inplace=True)
fulldist_FieldNames = [
sc.samplePoint_fieldNames["sp_supermarket_dist_m"],
sc.samplePoint_fieldNames["sp_convenience_dist_m"],
sc.samplePoint_fieldNames["sp_pt_dist_m"],
sc.samplePoint_fieldNames["sp_pos_dist_m"],
]
full_nodes = ssp.create_full_nodes(
samplePointsData,
gdf_nodes_simple,
gdf_nodes_poi_dist,
dist_fieldNames1,
fulldist_FieldNames,
pop_density,
intersection_density,
)
# convert full distance to binary index
binary_FieldNames = [
sc.samplePoint_fieldNames["sp_access_supermarket_binary"],
sc.samplePoint_fieldNames["sp_access_convenience_binary"],
sc.samplePoint_fieldNames["sp_access_pt_binary"],
sc.samplePoint_fieldNames["sp_access_pos_binary"],
]
names3 = list(zip(fulldist_FieldNames, binary_FieldNames))
full_nodes = ssp.convert_dist_to_binary(full_nodes, *names3)
samplePointsData = samplePointsData[["hex_id", "edge_ogc_fid", "geometry"]].join(full_nodes, how="left")
daily_living = sc.samplePoint_fieldNames["sp_daily_living_score"]
samplePointsData[daily_living] = samplePointsData[binary_FieldNames[:-1]].sum(axis=1)
oriFieldNames = [
sc.samplePoint_fieldNames[pop_density],
sc.samplePoint_fieldNames[intersection_density],
sc.samplePoint_fieldNames[daily_living],
]
newFieldNames = [
sc.samplePoint_fieldNames["sp_zscore_local_nh_avgpopdensity"],
sc.samplePoint_fieldNames["sp_zscore_local_nh_avgintdensity"],
sc.samplePoint_fieldNames["sp_zscore_daily_living_score"],
]
samplePointsData = ssp.cal_zscores(samplePointsData, oriFieldNames, newFieldNames)
# sum these three zscores for walkability
walkability_index = sc.samplePoint_fieldNames["sp_walkability_index"]
samplePointsData[walkability_index] = samplePointsData[newFieldNames].sum(axis=1)
int_fields = ["hex_id", "edge_ogc_fid"]
float_fields = (
fulldist_FieldNames
+ binary_FieldNames
+ [daily_living]
+ [pop_density]
+ [intersection_density]
+ newFieldNames
+ [walkability_index]
)
samplePointsData[int_fields] = samplePointsData[int_fields].astype(int)
samplePointsData[float_fields] = samplePointsData[float_fields].astype(float)
# save the sample points with all the desired results to a new layer in geopackage
samplePointsData.reset_index().to_file(gpkgPath_output, layer=sc.parameters["samplepointResult"], driver="GPKG")
endTime = time.time() - startTime
print("Total time is : {:.2f} minutes".format(endTime / 60))
|
{"hexsha": "40588f1859fe952e0d5af42b67d7ec702bd69af7", "size": 13778, "ext": "py", "lang": "Python", "max_stars_repo_path": "process/sp.py", "max_stars_repo_name": "carlhiggs/global-indicators", "max_stars_repo_head_hexsha": "c2973aaf372fde3ae2290ad75f0766f6263a4e18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "process/sp.py", "max_issues_repo_name": "carlhiggs/global-indicators", "max_issues_repo_head_hexsha": "c2973aaf372fde3ae2290ad75f0766f6263a4e18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "process/sp.py", "max_forks_repo_name": "carlhiggs/global-indicators", "max_forks_repo_head_hexsha": "c2973aaf372fde3ae2290ad75f0766f6263a4e18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.4719471947, "max_line_length": 118, "alphanum_fraction": 0.6985774423, "include": true, "reason": "import numpy", "num_tokens": 3231}
|
from SkateUtils.KeyPoseState import State
import numpy as np
import pydart2 as pydart
import pickle
if __name__ == '__main__':
pydart.init()
world = pydart.World(1./1200., '../data/skel/skater_3dof_with_ground.skel')
skel = world.skeletons[1]
pelvis_pos_y = skel.dof_indices(["j_pelvis_pos_y"])
pelvis_x = skel.dof_indices(["j_pelvis_rot_x"])
pelvis = skel.dof_indices(["j_pelvis_rot_y", "j_pelvis_rot_z"])
upper_body = skel.dof_indices(["j_abdomen_x", "j_abdomen_y", "j_abdomen_z"])
spine = skel.dof_indices(["j_spine_x", "j_spine_y", "j_spine_z"])
right_leg = skel.dof_indices(["j_thigh_right_x", "j_thigh_right_y", "j_thigh_right_z", "j_shin_right_z"])
left_leg = skel.dof_indices(["j_thigh_left_x", "j_thigh_left_y", "j_thigh_left_z", "j_shin_left_z"])
knee = skel.dof_indices(["j_shin_left_x", "j_shin_right_x"])
arms = skel.dof_indices(["j_bicep_left_x", "j_bicep_right_x"])
arms_y = skel.dof_indices(["j_bicep_left_y", "j_bicep_right_y"])
arms_z = skel.dof_indices(["j_bicep_left_z", "j_bicep_right_z"])
elbows = skel.dof_indices(["j_forearm_left_x", "j_forearm_right_x"])
foot = skel.dof_indices(["j_heel_left_x", "j_heel_left_y", "j_heel_left_z", "j_heel_right_x", "j_heel_right_y", "j_heel_right_z"])
leg_y = skel.dof_indices(["j_thigh_left_y", "j_thigh_right_y"])
# blade = skel.dof_indices(["j_heel_right_2"])
# ===========pushing side to side new===========
s00q = np.zeros(skel.ndofs)
s00q[upper_body] = 0.0, -0., -0.1
s00q[left_leg] = 0., 0., 0., -0.
s00q[right_leg] = -0., -0., 0., -0.
state00 = State("state00", 2.0, 0.0, 0.2, s00q)
s_stable_q = np.zeros(skel.ndofs)
# s_stable_q[upper_body] = 0., 0., -0.4
# s_stable_q[spine] = 0.0, 0., 0.4
s_stable_q[left_leg] = 0., 0., 0.3, -0.5
s_stable_q[right_leg] = -0., -0., 0.3, -0.5
s_stable_q[foot] = 0., 0., 0.2, -0., -0., 0.2
state_stable = State("state_stable", 3.0, 2.2, 0.0, s_stable_q)
s_forward_q = np.zeros(skel.ndofs)
s_forward_q[upper_body] = 0., 0., -0.2
# s_forward_q[spine] = 0.0, 0., 0.4
s_forward_q[left_leg] = 0., 0., 0.3, -0.5
s_forward_q[right_leg] = -0., -0., 0.3, -0.5
s_forward_q[leg_y] = 0.4, -0.4
s_forward_q[foot] = 0., 0.4, 0.2, -0., -0.4, 0.2
state_forward = State("state_forward", 0.5, 2.2, 0.0, s_forward_q)
s_backward_q = np.zeros(skel.ndofs)
# s_backward_q[upper_body] = 0., 0., 0.2
s_backward_q[spine] = 0.0, 0., 0.4
s_backward_q[left_leg] = 0., 0., 0.3, -0.5
s_backward_q[right_leg] = -0., -0., 0.3, -0.5
s_backward_q[leg_y] = -0.4, 0.4
s_backward_q[foot] = 0., -0.4, 0.2, -0., 0.4, 0.2
state_backward = State("state_backward", 0.5, 2.2, 0.0, s_backward_q)
s_terminal_q = np.zeros(skel.ndofs)
state_t = State("state_t", 50., 2.0, 2.0, s_terminal_q)
state00.set_next(state_forward)
state_forward.set_next(state_backward)
state_backward.set_next(state_t)
states = [state00, state_forward, state_backward, state_t]
filename = 'skating_swizzle_simple.skkey'
with open(filename, 'wb') as f:
pickle.dump(states, f)
|
{"hexsha": "c551df35159f9b0c82577d91165fdbe764a29441", "size": 3146, "ext": "py", "lang": "Python", "max_stars_repo_path": "swizzle/make_skate_keyframe.py", "max_stars_repo_name": "snumrl/skate", "max_stars_repo_head_hexsha": "a57ec2dc81dc2502da8886b92b870d2c8d65b838", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "swizzle/make_skate_keyframe.py", "max_issues_repo_name": "snumrl/skate", "max_issues_repo_head_hexsha": "a57ec2dc81dc2502da8886b92b870d2c8d65b838", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "swizzle/make_skate_keyframe.py", "max_forks_repo_name": "snumrl/skate", "max_forks_repo_head_hexsha": "a57ec2dc81dc2502da8886b92b870d2c8d65b838", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8571428571, "max_line_length": 134, "alphanum_fraction": 0.6484424666, "include": true, "reason": "import numpy", "num_tokens": 1234}
|
import numpy.testing as npt
from cvdm.score import fremantle, Fremantle
def test_fremantle():
tmp = fremantle(59, True, True, 8, 0.92, 0.79, True, False)
npt.assert_almost_equal(tmp, 0.062, decimal=3)
def test_fremantle_json():
fr = Fremantle()
tmp = fr.score({"index_age": 59,
"male": True,
"cvd_hist": True,
"hba1c": 8,
"albumin_creat_mgmmol":0.92,
"chol_hdl_mmol": 0.79,
"SEuro": True,
"Abor": False})
npt.assert_almost_equal(tmp, 0.062, decimal=3)
|
{"hexsha": "77f00e3a539aa81f026dfacc448f5c9db7dba2b4", "size": 624, "ext": "py", "lang": "Python", "max_stars_repo_path": "cvdm/score/tests/test_fremantle.py", "max_stars_repo_name": "joyceho/cvdm", "max_stars_repo_head_hexsha": "df386290221fd1388bef06104db0dd07978f91d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-29T00:05:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-01T23:34:17.000Z", "max_issues_repo_path": "cvdm/score/tests/test_fremantle.py", "max_issues_repo_name": "joyceho/cvdm", "max_issues_repo_head_hexsha": "df386290221fd1388bef06104db0dd07978f91d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cvdm/score/tests/test_fremantle.py", "max_forks_repo_name": "joyceho/cvdm", "max_forks_repo_head_hexsha": "df386290221fd1388bef06104db0dd07978f91d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3636363636, "max_line_length": 63, "alphanum_fraction": 0.5224358974, "include": true, "reason": "import numpy", "num_tokens": 184}
|
// This file is part of libigl, a simple c++ geometry processing library.
//
// Copyright (C) 2014 Daniele Panozzo <daniele.panozzo@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
// obtain one at http://mozilla.org/MPL/2.0/.
#include "ViewerCore.h"
#include "ViewerData.h"
#include "gl.h"
#include "../quat_to_mat.h"
#include "../snap_to_fixed_up.h"
#include "../look_at.h"
#include "../frustum.h"
#include "../ortho.h"
#include "../massmatrix.h"
#include "../barycenter.h"
#include "../PI.h"
#include <Eigen/Geometry>
#include <iostream>
IGL_INLINE void igl::opengl::ViewerCore::align_camera_center(
const Eigen::MatrixXd& V,
const Eigen::MatrixXi& F)
{
if(V.rows() == 0)
return;
get_scale_and_shift_to_fit_mesh(V,F,camera_base_zoom,camera_base_translation);
// Rather than crash on empty mesh...
if(V.size() > 0)
{
object_scale = (V.colwise().maxCoeff() - V.colwise().minCoeff()).norm();
}
}
IGL_INLINE void igl::opengl::ViewerCore::get_scale_and_shift_to_fit_mesh(
const Eigen::MatrixXd& V,
const Eigen::MatrixXi& F,
float& zoom,
Eigen::Vector3f& shift)
{
if (V.rows() == 0)
return;
Eigen::MatrixXd BC;
if (F.rows() <= 1)
{
BC = V;
} else
{
igl::barycenter(V,F,BC);
}
return get_scale_and_shift_to_fit_mesh(BC,zoom,shift);
}
IGL_INLINE void igl::opengl::ViewerCore::align_camera_center(
const Eigen::MatrixXd& V)
{
if(V.rows() == 0)
return;
get_scale_and_shift_to_fit_mesh(V,camera_base_zoom,camera_base_translation);
// Rather than crash on empty mesh...
if(V.size() > 0)
{
object_scale = (V.colwise().maxCoeff() - V.colwise().minCoeff()).norm();
}
}
IGL_INLINE void igl::opengl::ViewerCore::get_scale_and_shift_to_fit_mesh(
const Eigen::MatrixXd& V,
float& zoom,
Eigen::Vector3f& shift)
{
if (V.rows() == 0)
return;
auto min_point = V.colwise().minCoeff();
auto max_point = V.colwise().maxCoeff();
auto centroid = (0.5*(min_point + max_point)).eval();
shift.setConstant(0);
shift.head(centroid.size()) = -centroid.cast<float>();
zoom = 2.0 / (max_point-min_point).array().abs().maxCoeff();
}
IGL_INLINE void igl::opengl::ViewerCore::clear_framebuffers()
{
// The glScissor call ensures we only clear this core's buffers
// (in case the user wants different background colors in each viewport).
glScissor(viewport(0), viewport(1), viewport(2), viewport(3));
glEnable(GL_SCISSOR_TEST);
glClearColor(background_color[0],
background_color[1],
background_color[2],
background_color[3]);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDisable(GL_SCISSOR_TEST);
}
IGL_INLINE void igl::opengl::ViewerCore::draw(
Eigen::Matrix4f &worldMat,
ViewerData& data,
bool update_matrices)
{
using namespace std;
using namespace Eigen;
if (depth_test)
glEnable(GL_DEPTH_TEST);
else
glDisable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
/* Bind and potentially refresh mesh/line/point data */
if (data.dirty)
{
data.updateGL(data, data.invert_normals, data.meshgl);
data.dirty = MeshGL::DIRTY_NONE;
}
data.meshgl.bind_mesh();
// Initialize uniform
glViewport(viewport(0), viewport(1), viewport(2), viewport(3));
if(update_matrices)
{
view = Eigen::Matrix4f::Identity();
proj = Eigen::Matrix4f::Identity();
norm = Eigen::Matrix4f::Identity();
float width = viewport(2);
float height = viewport(3);
// Set view
look_at( camera_eye, camera_center, camera_up, view);
view = view
* (trackball_angle * Eigen::Scaling(camera_zoom * camera_base_zoom)
* Eigen::Translation3f(camera_translation + camera_base_translation)).matrix() * worldMat * data.MakeTrans();
norm = view.inverse().transpose() ;
// Set projection
if (orthographic)
{
float length = (camera_eye - camera_center).norm();
float h = tan(camera_view_angle/360.0 * igl::PI) * (length);
ortho(-h*width/height, h*width/height, -h, h, camera_dnear, camera_dfar,proj);
}
else
{
float fH = tan(camera_view_angle / 360.0 * igl::PI) * camera_dnear;
float fW = fH * (double)width/(double)height;
frustum(-fW, fW, -fH, fH, camera_dnear, camera_dfar,proj);
}
}
// Send transformations to the GPU
GLint viewi = glGetUniformLocation(data.meshgl.shader_mesh,"view");
GLint proji = glGetUniformLocation(data.meshgl.shader_mesh,"proj");
GLint normi = glGetUniformLocation(data.meshgl.shader_mesh,"normal_matrix");
glUniformMatrix4fv(viewi, 1, GL_FALSE, view.data());
glUniformMatrix4fv(proji, 1, GL_FALSE, proj.data());
glUniformMatrix4fv(normi, 1, GL_FALSE, norm.data());
// Light parameters
GLint specular_exponenti = glGetUniformLocation(data.meshgl.shader_mesh,"specular_exponent");
GLint light_position_eyei = glGetUniformLocation(data.meshgl.shader_mesh,"light_position_eye");
GLint lighting_factori = glGetUniformLocation(data.meshgl.shader_mesh,"lighting_factor");
GLint fixed_colori = glGetUniformLocation(data.meshgl.shader_mesh,"fixed_color");
GLint texture_factori = glGetUniformLocation(data.meshgl.shader_mesh,"texture_factor");
glUniform1f(specular_exponenti, data.shininess);
glUniform3fv(light_position_eyei, 1, light_position.data());
glUniform1f(lighting_factori, lighting_factor); // enables lighting
glUniform4f(fixed_colori, 0.0, 0.0, 0.0, 0.0);
if (data.V.rows()>0)
{
// Render fill
if (is_set(data.show_faces))
{
// Texture
glUniform1f(texture_factori, is_set(data.show_texture) ? 1.0f : 0.0f);
data.meshgl.draw_mesh(true);
glUniform1f(texture_factori, 0.0f);
}
// Render wireframe
if (is_set(data.show_lines))
{
glLineWidth(data.line_width);
glUniform4f(fixed_colori,
data.line_color[0],
data.line_color[1],
data.line_color[2], 1.0f);
data.meshgl.draw_mesh(false);
glUniform4f(fixed_colori, 0.0f, 0.0f, 0.0f, 0.0f);
}
}
if (is_set(data.show_overlay))
{
if (is_set(data.show_overlay_depth))
glEnable(GL_DEPTH_TEST);
else
glDisable(GL_DEPTH_TEST);
if (data.lines.rows() > 0)
{
data.meshgl.bind_overlay_lines();
viewi = glGetUniformLocation(data.meshgl.shader_overlay_lines,"view");
proji = glGetUniformLocation(data.meshgl.shader_overlay_lines,"proj");
glUniformMatrix4fv(viewi, 1, GL_FALSE, view.data());
glUniformMatrix4fv(proji, 1, GL_FALSE, proj.data());
// This must be enabled, otherwise glLineWidth has no effect
glEnable(GL_LINE_SMOOTH);
glLineWidth(data.line_width);
data.meshgl.draw_overlay_lines();
}
if (data.points.rows() > 0)
{
data.meshgl.bind_overlay_points();
viewi = glGetUniformLocation(data.meshgl.shader_overlay_points,"view");
proji = glGetUniformLocation(data.meshgl.shader_overlay_points,"proj");
glUniformMatrix4fv(viewi, 1, GL_FALSE, view.data());
glUniformMatrix4fv(proji, 1, GL_FALSE, proj.data());
glPointSize(data.point_size);
data.meshgl.draw_overlay_points();
}
glEnable(GL_DEPTH_TEST);
}
}
IGL_INLINE void igl::opengl::ViewerCore::UpdateUniforms(Eigen::Matrix4f &worldMat, ViewerData& data, bool update_matrices)
{
}
IGL_INLINE void igl::opengl::ViewerCore::draw_buffer(Eigen::Matrix4f &worldMat, ViewerData& data,
bool update_matrices,
Eigen::Matrix<unsigned char,Eigen::Dynamic,Eigen::Dynamic>& R,
Eigen::Matrix<unsigned char,Eigen::Dynamic,Eigen::Dynamic>& G,
Eigen::Matrix<unsigned char,Eigen::Dynamic,Eigen::Dynamic>& B,
Eigen::Matrix<unsigned char,Eigen::Dynamic,Eigen::Dynamic>& A)
{
assert(R.rows() == G.rows() && G.rows() == B.rows() && B.rows() == A.rows());
assert(R.cols() == G.cols() && G.cols() == B.cols() && B.cols() == A.cols());
unsigned width = R.rows();
unsigned height = R.cols();
// https://learnopengl.com/Advanced-OpenGL/Anti-Aliasing
unsigned int framebuffer;
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
// create a multisampled color attachment texture
unsigned int textureColorBufferMultiSampled;
glGenTextures(1, &textureColorBufferMultiSampled);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, textureColorBufferMultiSampled);
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA, width, height, GL_TRUE);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D_MULTISAMPLE, textureColorBufferMultiSampled, 0);
// create a (also multisampled) renderbuffer object for depth and stencil attachments
unsigned int rbo;
glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, 4, GL_DEPTH24_STENCIL8, width, height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo);
assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// configure second post-processing framebuffer
unsigned int intermediateFBO;
glGenFramebuffers(1, &intermediateFBO);
glBindFramebuffer(GL_FRAMEBUFFER, intermediateFBO);
// create a color attachment texture
unsigned int screenTexture;
glGenTextures(1, &screenTexture);
glBindTexture(GL_TEXTURE_2D, screenTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, screenTexture, 0); // we only need a color buffer
assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
// Clear the buffer
glClearColor(background_color(0), background_color(1), background_color(2), 0.f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Save old viewport
Eigen::Vector4f viewport_ori = viewport;
viewport << 0,0,width,height;
// Draw
draw(worldMat,data,update_matrices);
// Restore viewport
viewport = viewport_ori;
glBindFramebuffer(GL_READ_FRAMEBUFFER, framebuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, intermediateFBO);
glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_FRAMEBUFFER, intermediateFBO);
// Copy back in the given Eigen matrices
GLubyte* pixels = (GLubyte*)calloc(width*height*4,sizeof(GLubyte));
glReadPixels(0, 0,width, height,GL_RGBA, GL_UNSIGNED_BYTE, pixels);
// Clean up
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteTextures(1, &screenTexture);
glDeleteTextures(1, &textureColorBufferMultiSampled);
glDeleteFramebuffers(1, &framebuffer);
glDeleteFramebuffers(1, &intermediateFBO);
glDeleteRenderbuffers(1, &rbo);
int count = 0;
for (unsigned j=0; j<height; ++j)
{
for (unsigned i=0; i<width; ++i)
{
R(i,j) = pixels[count*4+0];
G(i,j) = pixels[count*4+1];
B(i,j) = pixels[count*4+2];
A(i,j) = pixels[count*4+3];
++count;
}
}
// Clean up
free(pixels);
}
IGL_INLINE void igl::opengl::ViewerCore::set_rotation_type(
const igl::opengl::ViewerCore::RotationType & value)
{
using namespace Eigen;
using namespace std;
const RotationType old_rotation_type = rotation_type;
rotation_type = value;
if(rotation_type == ROTATION_TYPE_TWO_AXIS_VALUATOR_FIXED_UP &&
old_rotation_type != ROTATION_TYPE_TWO_AXIS_VALUATOR_FIXED_UP)
{
snap_to_fixed_up(Quaternionf(trackball_angle),trackball_angle);
}
}
IGL_INLINE void igl::opengl::ViewerCore::set(unsigned int &property_mask, bool value) const
{
if (!value)
unset(property_mask);
else
property_mask |= id;
}
IGL_INLINE void igl::opengl::ViewerCore::unset(unsigned int &property_mask) const
{
property_mask &= ~id;
}
IGL_INLINE void igl::opengl::ViewerCore::toggle(unsigned int &property_mask) const
{
property_mask ^= id;
}
IGL_INLINE bool igl::opengl::ViewerCore::is_set(unsigned int property_mask) const
{
return (property_mask & id);
}
IGL_INLINE igl::opengl::ViewerCore::ViewerCore()
{
// Default colors
background_color << 0.3f, 0.3f, 0.5f, 1.0f;
// Default lights settings
light_position << 0.0f, 0.3f, 0.0f;
lighting_factor = 1.0f; //on
// Default trackball
trackball_angle = Eigen::Quaternionf::Identity();
set_rotation_type(ViewerCore::ROTATION_TYPE_TWO_AXIS_VALUATOR_FIXED_UP);
// Camera parameters
camera_base_zoom = 1.0f;
camera_zoom = 1.0f;
orthographic = false;
camera_view_angle = 45.0;
camera_dnear = 1.0;
camera_dfar = 100.0;
camera_base_translation << 0, 0, 0;
camera_translation << 0, 0, 0;
camera_eye << 0, 0, 5;
camera_center << 0, 0, 0;
camera_up << 0, 1, 0;
depth_test = true;
is_animating = false;
animation_max_fps = 30.;
viewport.setZero();
}
IGL_INLINE void igl::opengl::ViewerCore::init()
{
}
IGL_INLINE void igl::opengl::ViewerCore::shut()
{
}
|
{"hexsha": "4f9a657ca510983950ca9f27d025dfdc11a8215e", "size": 13443, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "igl/opengl/ViewerCore.cpp", "max_stars_repo_name": "chenhadad/EngineI_GL_new_Final", "max_stars_repo_head_hexsha": "31fd37c617a6d82117e36676786bac8c0f04c278", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-11-25T16:41:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-25T16:41:45.000Z", "max_issues_repo_path": "igl/opengl/ViewerCore.cpp", "max_issues_repo_name": "Danielsadoun/EngineIGLnewFinal", "max_issues_repo_head_hexsha": "a051367a9217f91ed8682f4e4b1f61b746610145", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "igl/opengl/ViewerCore.cpp", "max_forks_repo_name": "Danielsadoun/EngineIGLnewFinal", "max_forks_repo_head_hexsha": "a051367a9217f91ed8682f4e4b1f61b746610145", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5563380282, "max_line_length": 127, "alphanum_fraction": 0.7128617124, "num_tokens": 3672}
|
'''Uses pushshift to pull data from farther back than Reddit allows us to go'''
import sys
import requests
import numpy as np
from datetime import datetime as dt
from dateutil import tz
joke_file_base = 'data_%%%%.csv'
record_file_base = 'data_%%%%.txt'
base_URL = 'https://api.pushshift.io/reddit/submission/search/?q=&size=1000&\
subreddit=%%%%&'
def _get_created_time(submission):
try:
created_utc = submission['created_utc']
except KeyError:
try:
created_utc = submission['created']
except:
print(f'Error processing timestamp for\n{submission}')
raise
return int(created_utc)
def _retrieve_item(submission, key):
try:
return submission[key]
except KeyError:
return ''
def get_list(base_URL, how, UTC, sr):
'''Get list of submissions, before or after previously recorded
submissions.'''
    if how not in ['before', 'after']:
        while True:
            which = input('Please select [b] before or [a] after: ')
            if 'a' in which:
                how = 'after'
                break
            elif 'b' in which:
                how = 'before'
                break
    while True:
        try:
            int(UTC)
            break
        except (TypeError, ValueError):
            UTC = input(f'Enter a valid UTC timestamp to search {how}: ')
    response = requests.get(base_URL.replace('%%%%', sr) + f'{how}={int(UTC)}')
try:
data = response.json()['data']
    except (ValueError, KeyError):
print(UTC)
print(response)
raise
else:
return data
def parse_joke(data, target, sr, parsed=None, num_subs=0):
if not parsed:
parsed = []
for submission in data:
try:
subID = submission['id']
except:
print(submission)
raise
created = _get_created_time(submission)
try:
parents = submission['crosspost_parent_list']
except KeyError:
parent = None
title = _clean_str(_retrieve_item(submission, 'title'))
selftext = _clean_str(_retrieve_item(submission, 'selftext'))
else:
parent_post = parents[0]
parent = _get_created_time(parent_post)
title = _clean_str(_retrieve_item(parent_post, 'title'))
selftext = _clean_str(_retrieve_item(parent_post, 'selftext'))
finally:
num_subs += 1
author = _retrieve_item(submission, 'author')
if target == 'sql':
score = _retrieve_item(submission, 'score')
comments = _retrieve_item(submission, 'num_comments')
parsed.append([subID, created, parent, author, title, selftext,
score, comments])
else:
score = str(_retrieve_item(submission, 'score'))
comments = str(_retrieve_item(submission, 'num_comments'))
with open(joke_file_base.replace('%%%%', sr), 'a') as jk, open(
record_file_base.replace('%%%%', sr), 'a') as rec:
jk.write('|'.join([subID, str(created), str(parent),
author, title, selftext, score,
comments]) + '\n')
rec.write(str(created) + '\n')
return created, parsed, num_subs
def get_cutoff(how, sr):
    '''Get the earliest or latest UTC timestamp previously recorded.'''
    if how not in ['before', 'after']:
        while True:
            which = input('Please select [b] before or [a] after: ')
            if 'a' in which:
                how = 'after'
                break
            elif 'b' in which:
                how = 'before'
                break
    utcs = []
    with open(record_file_base.replace('%%%%', sr), 'r') as rec_file:
        for line in rec_file:
            utcs.append(int(line))
    utcs.sort()
    if how == "before":
        cutoff = utcs[0]
    else:
        cutoff = utcs[-1]
    return cutoff
def _clean_str(val):
val = val.replace('"', "'")
new_val = val.replace('|', '^')
return _make_repr(new_val)
def _make_repr(val):
rep = repr(val)[1:-1]
return '"' + rep + '"'
if __name__ == "__main__":
    try:
        subreddit = sys.argv[1]
    except IndexError:
        subreddit = ''
        while not subreddit:
            subreddit = input('Which subreddit would you like to search? ')
    finally:
UTC = int(dt.utcnow().timestamp())
num_subs = 1000
while num_subs == 1000:
data = get_list(base_URL, 'before', UTC, subreddit)
UTC, parsed, num_subs = parse_joke(data, 'csv', subreddit)
last_datetime = dt.fromtimestamp(UTC, tz=tz.tzutc())
time_str = last_datetime.strftime('%Y-%m-%d %H:%M:%S')
print(f'{num_subs} jokes, ending at {time_str} (UTC: {str(UTC)})')
|
{"hexsha": "8414094985ec435ee2afb60a91886c207d27e867", "size": 4729, "ext": "py", "lang": "Python", "max_stars_repo_path": "subreddits/pushshift.py", "max_stars_repo_name": "jojordan3/dad-joke-ai", "max_stars_repo_head_hexsha": "a5e955fc78807e8810256d3fb758d3b3a089c136", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-18T23:07:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-18T23:07:35.000Z", "max_issues_repo_path": "subreddits/pushshift.py", "max_issues_repo_name": "jojordan3/dad-joke-ai", "max_issues_repo_head_hexsha": "a5e955fc78807e8810256d3fb758d3b3a089c136", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "subreddits/pushshift.py", "max_forks_repo_name": "jojordan3/dad-joke-ai", "max_forks_repo_head_hexsha": "a5e955fc78807e8810256d3fb758d3b3a089c136", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5266666667, "max_line_length": 83, "alphanum_fraction": 0.5500105731, "include": true, "reason": "import numpy", "num_tokens": 1098}
|
#!/usr/bin/env python
import numpy as np
import de421
from time import time
from jplephem import Ephemeris
from jplephem.spk import SPK
def main():
for size in 10, 1000, 100000:
jd = np.linspace(2414992.5, 2471184.50, size)
kernel = SPK.open('de421.bsp')
ephem = Ephemeris(de421)
mars = kernel[0,4]
print(size)
print('-- old code (2 successive runs):')
t0 = time()
ephem.position('mars', jd)
print(time() - t0)
t0 = time()
ephem.position('mars', jd)
print(time() - t0)
print('-- new SPK-powered code (2 successive runs):')
t0 = time()
mars.compute(jd)
print(time() - t0)
t0 = time()
mars.compute(jd)
print(time() - t0)
print()
if __name__ == '__main__':
main()
print(' Warmed up, running again '.center(72, '-'))
main()
|
{"hexsha": "9361df73e58a22f40f9e9ef28bcfae07500f1a80", "size": 906, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/benchmark.py", "max_stars_repo_name": "jayvdb/python-jplephem", "max_stars_repo_head_hexsha": "331f0fff156ddd83acd5d5a6d8e3be5e07d9dd04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2015-02-07T18:15:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T09:32:02.000Z", "max_issues_repo_path": "bin/benchmark.py", "max_issues_repo_name": "jayvdb/python-jplephem", "max_issues_repo_head_hexsha": "331f0fff156ddd83acd5d5a6d8e3be5e07d9dd04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2015-01-11T21:22:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-02T08:42:20.000Z", "max_forks_repo_path": "bin/benchmark.py", "max_forks_repo_name": "jayvdb/python-jplephem", "max_forks_repo_head_hexsha": "331f0fff156ddd83acd5d5a6d8e3be5e07d9dd04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-04-04T17:55:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-26T00:09:32.000Z", "avg_line_length": 21.0697674419, "max_line_length": 61, "alphanum_fraction": 0.5430463576, "include": true, "reason": "import numpy", "num_tokens": 254}
|
# TODO: error calculation
import numpy as np
def get_mse(feature_data, gt, function, *params):
prediction = function(feature_data, *params)
squared_difference = np.square(np.subtract(gt, prediction))
return squared_difference.mean()
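# Example usage with synthetic data (illustrative only): score a line fit
# y = a*x + b against a known ground truth.
if __name__ == "__main__":
    x = np.linspace(0.0, 1.0, 50)
    gt = 2.0 * x + 1.0
    line = lambda data, a, b: a * data + b
    print(get_mse(x, gt, line, 2.0, 1.0))  # ~0.0 at the true parameters
    print(get_mse(x, gt, line, 1.5, 1.0))  # grows as the parameters drift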
|
{"hexsha": "d4ad67cdaac3abcb2d07d7248fba278ed49049cd", "size": 245, "ext": "py", "lang": "Python", "max_stars_repo_path": "error_calc.py", "max_stars_repo_name": "Alice-OSENSE/feature_err_analysis", "max_stars_repo_head_hexsha": "39b0d79b9bd5b60a94851fc6a5b458c4d28427dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "error_calc.py", "max_issues_repo_name": "Alice-OSENSE/feature_err_analysis", "max_issues_repo_head_hexsha": "39b0d79b9bd5b60a94851fc6a5b458c4d28427dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "error_calc.py", "max_forks_repo_name": "Alice-OSENSE/feature_err_analysis", "max_forks_repo_head_hexsha": "39b0d79b9bd5b60a94851fc6a5b458c4d28427dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0, "max_line_length": 63, "alphanum_fraction": 0.7551020408, "include": true, "reason": "import numpy", "num_tokens": 53}
|
# Model
include_model("hopper")
# Dimensions
nq = 4 # configuration dimension
nu = 2 # control dimension
nc = 1 # number of contact points
nf = 2 # number of faces for friction cone
nb = nc * nf
ns = nq
# Parameters
g = 9.81 # gravity
μ = 1.0 # coefficient of friction
mb = 10.0 # body mass
ml = 1.0 # leg mass
Jb = 2.5 # body inertia
Jl = 0.25 # leg inertia
n = 2 * nq
m = nu + nc + nb + nc + nb + ns
d = 0
idx_u = (1:nu)
idx_λ = nu .+ (1:nc)
idx_b = nu + nc .+ (1:nb)
idx_ψ = nu + nc + nb .+ (1:nc)
idx_η = nu + nc + nb + nc .+ (1:nb)
idx_s = nu + nc + nb + nc + nb .+ (1:ns)
model = Hopper(n, m, d,
mb, ml, Jb, Jl,
μ, g,
qL, qU,
nq,
nu,
nc,
nf,
nb,
ns,
idx_u,
idx_λ,
idx_b,
idx_ψ,
idx_η,
idx_s)
# Free-time model
model_ft = free_time_model(model)
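# fd below encodes discrete Euler-Lagrange contact dynamics; the residual it
# returns is (sketched, matching the code):
#   (1/h) * [M(q1)(q2 - q1) - M(q2)(q3 - q2)]
#     + B(q3)'u + N(q3)'λ + P(q3)'b - h*G(q2) + s = 0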
function fd(model::Hopper, x⁺, x, u, w, h, t)
q3 = view(x⁺, model.nq .+ (1:model.nq))
q2⁺ = view(x⁺, 1:model.nq)
q2⁻ = view(x, model.nq .+ (1:model.nq))
q1 = view(x, 1:model.nq)
u_ctrl = view(u, model.idx_u)
λ = view(u, model.idx_λ)
b = view(u, model.idx_b)
s = view(u, model.idx_s)
h = u[end]
[q2⁺ - q2⁻;
((1.0 / h) * (M_func(model, q1) * (SVector{4}(q2⁺) - SVector{4}(q1))
- M_func(model, q2⁺) * (SVector{4}(q3) - SVector{4}(q2⁺)))
+ transpose(B_func(model, q3)) * SVector{2}(u_ctrl)
+ transpose(N_func(model, q3)) * SVector{1}(λ)
+ transpose(P_func(model, q3)) * SVector{2}(b)
- h * G_func(model, q2⁺)) + s]
end
function maximum_dissipation(model::Hopper, x⁺, u, h)
q3 = x⁺[model.nq .+ (1:model.nq)]
q2 = x⁺[1:model.nq]
ψ = u[model.idx_ψ]
ψ_stack = ψ[1] * ones(model.nb)
η = u[model.idx_η]
h = u[end]
return P_func(model, q3) * (q3 - q2) / h + ψ_stack - η
end
# Horizon
T = 21
# Time step
tf = 1.0
h = tf / (T - 1)
# Bounds
_uu = Inf * ones(model_ft.m)
_uu[model_ft.idx_u] .= 25.0
_uu[model_ft.idx_s] .= 0.0
_uu[end] = 1.0 * h
_ul = zeros(model_ft.m)
_ul[model_ft.idx_u] .= -25.0
_ul[model_ft.idx_s] .= 0.0
_ul[end] = 0.5 * h
ul, uu = control_bounds(model_ft, T, _ul, _uu)
# Initial and final states
q1 = [0.0, 0.5 , 0.0, 0.5]
q_right = [-0.25, 0.5 + 0.5, pi / 2.0, 0.25]
q_top = [-0.5, 0.5 + 1.0, pi, 0.25]
q_left = [-0.75, 0.5 + 0.5, 3.0 * pi / 2.0, 0.25]
qT = [-1.0, 0.5, 2.0 * pi, 0.5]
xl, xu = state_bounds(model_ft, T,
[model_ft.qL; model_ft.qL],
[model_ft.qU; model_ft.qU],
x1 = [q1; q1],
xT = [Inf * ones(model_ft.nq); qT])
q_ref = [linear_interpolation(q1, q_right, 6)...,
linear_interpolation(q_right, q_top, 6)[2:end]...,
linear_interpolation(q_top, q_left, 6)[2:end]...,
linear_interpolation(q_left, qT, 6)[2:end]...]
x_ref = configuration_to_state(q_ref)
# Objective
include_objective(["velocity"])
obj_tracking = quadratic_time_tracking_objective(
[Diagonal(1.0 * [1.0; 1.0; 0.0; 0.0; 1.0; 1.0; 0.0; 0.0]) for t = 1:T],
[Diagonal([1.0e-1, 1.0e-1, zeros(model_ft.m - model_ft.nu)...]) for t = 1:T-1],
[x_ref[t] for t = 1:T],
[zeros(model_ft.m) for t = 1:T-1],
1.0)
obj_velocity = velocity_objective([Diagonal(1.0 * ones(model_ft.nq)) for t = 1:T],
model_ft.nq, idx_angle = (3:3))
obj = MultiObjective([obj_tracking, obj_velocity])
# Constraints
include_constraints(["contact_al", "free_time"])
con_contact = contact_al_constraints(model_ft, T)
con_free_time = free_time_constraints(T)
con = multiple_constraints([con_free_time, con_contact])
# Problem
prob = trajectory_optimization_problem(model_ft,
obj,
T,
xl = xl,
xu = xu,
ul = ul,
uu = uu,
con = con)
# Trajectory initialization
x0 = deepcopy(x_ref) # linear interpolation on state
u0 = [[1.0e-5 * rand(model_ft.m - 1); h] for t = 1:T-1] # random controls
# Pack trajectories into vector
z0 = pack(x0, u0, prob)
using LinearAlgebra, ForwardDiff, SparseArrays, Optim, LineSearches
include(joinpath(pwd(),"src/solvers/augmented_lagrangian.jl"))
function f(x)
MOI.eval_objective(prob, x)
end
function g!(G, x)
MOI.eval_objective_gradient(prob, G, x)
nothing
end
function c!(c, x)
MOI.eval_constraint(prob, c, x)
c .*= -1.0
nothing
end
spar = sparsity_jacobian(prob)
global jac = zeros(length(spar))
global ii = [s[1] for s in spar]
global jj = [s[2] for s in spar]
function d!(D, x)
MOI.eval_constraint_jacobian(prob, jac, x)
D .= sparse(ii, jj, -1.0 .* jac)
end
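# f_al below evaluates the standard augmented Lagrangian (sketched here; the
# bound-constraint terms for cl and cu mirror the same form):
#   L_A(x, λ, ρ) = f(x) + λ'c(x) + (ρ/2) * Σᵢ aᵢ cᵢ(x)²
# where aᵢ is the active-set indicator for inequality constraints.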
function f_al(x, al::AugmentedLagrangian)
# evaluate constraints
c!(al.c, x)
bounds!(al, x)
active_set_update!(al)
# compute objective
J = f(x)
# add augmented Lagrangian terms
J += al.λ' * al.c + 0.5 * sum(al.as .* al.ρ .* (al.c.^2.0))
J += al.λl' * al.cl + 0.5 * sum(al.asl .* al.ρl .* (al.cl.^2.0))
J += al.λu' * al.cu + 0.5 * sum(al.asu .* al.ρu .* (al.cu.^2.0))
end
function g_al!(G, x, al::AugmentedLagrangian)
# compute objective gradient
g!(G, x)
# evaluate constraints
# ForwardDiff.jacobian!(al.∇c, c!, al.c, x)
c!(al.c, x)
d!(al.∇c, x)
bounds!(al, x)
active_set_update!(al)
# add augmented Lagrangian gradient terms
G .+= al.∇c' * (al.λ + al.as .* al.ρ .* al.c)
G[al.idx_l] -= (al.λl + al.asl .* al.ρl .* al.cl)
G[al.idx_u] += (al.λu + al.asu .* al.ρu .* al.cu)
return nothing
end
function solve(x, al; alg = :LBFGS, max_iter = 5, c_tol = 1.0e-3)
println("*augmented Lagrangian solve*")
# reset augmented Lagrangian
reset!(al)
println(" solving...")
for i = 1:max_iter
# update augmented Lagrangian methods
_f(z) = f_al(z, al)
_g!(G, z) = g_al!(G, z, al)
# solve
sol = Optim.optimize(_f, _g!, x, @eval $alg()) # linesearch = LineSearches.BackTracking()
# evaluate constraints
x = sol.minimizer
c_max = constraint_violation(al, x)
println(" iter: $i")
println(" c_max: $c_max")
# check for convergence -> update augmented Lagrangian
if c_max < c_tol
println("solve: success")
return x, sol
else
if i >= max_iter
println("solve: failed")
return x, sol
end
c!(al.c, x)
bounds!(al, x)
update!(al)
end
end
end
n = prob.num_var
m = prob.num_con
xl, xu = prob.primal_bounds
cl, cu = prob.constraint_bounds
idx_ineq = (1:m)[cu .> cl]
sum(isfinite.(xl))
sum(isfinite.(xu))
sum(cu .> cl)
al = augmented_lagrangian(n, m,
xl = xl, xu = xu, ρ0 = 1.0, s = 10.0,
idx_ineq = idx_ineq)
f_al(z0, al)
_c0 = zero(z0)
g_al!(_c0, z0, al)
_c0
@time x_sol_al, sol = solve(copy(z0), al,
    alg = :BFGS, max_iter = 2, c_tol = 1.0e-2)
# Visualize
using Plots
x̄, ū = unpack(x_sol_al, prob)
plot(hcat(x̄...)', width = 2.0)
plot(hcat(ū...)[1:model.nu, :]', width = 2.0, linetype = :steppost)
s̄ = [ū[t][model.idx_s] for t = 1:T-1]
plot(hcat(s̄...)', width = 2.0, linetype = :steppost)
# using Plots
tf, t, h = get_time(ū)
plot(t[1:end-1], hcat(ū...)[1:2,:]', linetype=:steppost,
xlabel="time (s)", ylabel = "control",
label = ["angle" "length"],
width = 2.0, legend = :top)
plot(t[1:end-1], h, linetype=:steppost)
#
include(joinpath(pwd(), "models/visualize.jl"))
vis = Visualizer()
open(vis)
visualize!(vis, model_ft, state_to_configuration(x̄), Δt = h[1])
|
{"hexsha": "f5c7ed84c34606d25bf3101fe6020915cc7a74a4", "size": 7329, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/development/hopper_flip_al.jl", "max_stars_repo_name": "jmichaux/motion_planning", "max_stars_repo_head_hexsha": "9a36f394261ff11ca8325d8a5e9d8a79f18b2744", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2021-02-07T10:46:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:30:20.000Z", "max_issues_repo_path": "examples/development/hopper_flip_al.jl", "max_issues_repo_name": "jmichaux/motion_planning", "max_issues_repo_head_hexsha": "9a36f394261ff11ca8325d8a5e9d8a79f18b2744", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-10-07T05:36:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-11T17:16:28.000Z", "max_forks_repo_path": "examples/development/hopper_flip_al.jl", "max_forks_repo_name": "thowell/motion_planning", "max_forks_repo_head_hexsha": "d42d80e705c1e64e45f5872917b96c6a980398cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-01-25T19:23:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T06:43:01.000Z", "avg_line_length": 24.9285714286, "max_line_length": 97, "alphanum_fraction": 0.5827534452, "num_tokens": 2759}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CLI Backend for the Analyzer Part of the Debugger.
The analyzer performs post hoc analysis of dumped intermediate tensors and
graph structure information from debugged Session.run() calls.
The other part of the debugger is the stepper (c.f. stepper_cli.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug import debug_data
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
# String constants for the depth-dependent hanging indent at the beginning
# of each line.
HANG_UNFINISHED = "| " # Used for unfinished recursion depths.
HANG_FINISHED = " "
HANG_SUFFIX = "|- "
# String constant for displaying depth and op type.
DEPTH_TEMPLATE = "(%d) "
OP_TYPE_TEMPLATE = "[%s] "
# String constants for control inputs/outputs, etc.
CTRL_LABEL = "(Ctrl) "
ELLIPSIS = "..."
class DebugAnalyzer(object):
"""Analyzer for debug data from dump directories."""
def __init__(self, debug_dump):
"""DebugAnalyzer constructor.
Args:
debug_dump: A DebugDumpDir object.
"""
self._debug_dump = debug_dump
# Initialize tensor filters state.
self._tensor_filters = {}
# Argument parsers for command handlers.
self._arg_parsers = {}
# Default threshold number of elements above which ellipses will be used
# when printing the value of the tensor.
self.default_ndarray_display_threshold = 2000
# Parser for list_tensors.
ap = argparse.ArgumentParser(
description="List dumped intermediate tensors.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-f",
"--tensor_filter",
dest="tensor_filter",
type=str,
default="",
help="List only Tensors passing the filter of the specified name")
ap.add_argument(
"-n",
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="filter node name by regex.")
ap.add_argument(
"-t",
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="filter op type by regex.")
self._arg_parsers["list_tensors"] = ap
# Parser for node_info.
ap = argparse.ArgumentParser(
description="Show information about a node.", usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an associated tensor, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-a",
"--attributes",
dest="attributes",
action="store_true",
help="Also list attributes of the node.")
ap.add_argument(
"-d",
"--dumps",
dest="dumps",
action="store_true",
help="Also list dumps available from the node.")
self._arg_parsers["node_info"] = ap
# Parser for list_inputs.
ap = argparse.ArgumentParser(
description="Show inputs to a node.", usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an output tensor from the node, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-c", "--control", action="store_true", help="Include control inputs.")
ap.add_argument(
"-d",
"--depth",
dest="depth",
type=int,
default=20,
help="Maximum depth of recursion used when showing the input tree.")
ap.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Show inputs to the node recursively, i.e., the input tree.")
ap.add_argument(
"-t",
"--op_type",
action="store_true",
help="Show op types of input nodes.")
self._arg_parsers["list_inputs"] = ap
# Parser for list_outputs.
ap = argparse.ArgumentParser(
description="Show the nodes that receive the outputs of given node.",
usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an output tensor from the node, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-c", "--control", action="store_true", help="Include control inputs.")
ap.add_argument(
"-d",
"--depth",
dest="depth",
type=int,
default=20,
help="Maximum depth of recursion used when showing the output tree.")
ap.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Show recipients of the node recursively, i.e., the output "
"tree.")
ap.add_argument(
"-t",
"--op_type",
action="store_true",
help="Show op types of recipient nodes.")
self._arg_parsers["list_outputs"] = ap
# Parser for print_tensor.
ap = argparse.ArgumentParser(
description="Print the value of a dumped tensor.",
usage=argparse.SUPPRESS)
ap.add_argument(
"tensor_name",
type=str,
help="Name of the tensor, followed by any slicing indices, "
"e.g., hidden1/Wx_plus_b/MatMul:0, "
"hidden1/Wx_plus_b/MatMul:0[1, :]")
ap.add_argument(
"-n",
"--number",
dest="number",
type=int,
default=-1,
help="0-based dump number for the specified tensor. "
"Required for tensor with multiple dumps.")
ap.add_argument(
"-r",
"--ranges",
dest="ranges",
type=str,
default="",
help="Numerical ranges to highlight tensor elements in. "
"Examples: -r 0,1e-8, -r [-0.1,0.1], "
"-r \"[[-inf, -0.1], [0.1, inf]]\"")
ap.add_argument(
"-a",
"--all",
dest="print_all",
action="store_true",
help="Print the tensor in its entirety, i.e., do not use ellipses.")
self._arg_parsers["print_tensor"] = ap
# TODO(cais): Implement list_nodes.
def _error(self, msg):
full_msg = "ERROR: " + msg
return debugger_cli_common.RichTextLines(
[full_msg], font_attr_segs={0: [(0, len(full_msg), "red")]})
def add_tensor_filter(self, filter_name, filter_callable):
"""Add a tensor filter.
A tensor filter is a named callable of the signature:
filter_callable(dump_datum, tensor),
wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying
metadata about the dumped tensor, including tensor name, timestamps, etc.
tensor is the value of the dumped tensor as a numpy.ndarray object.
The return value of the function is a bool.
This is the same signature as the input argument to
debug_data.DebugDumpDir.find().
Args:
filter_name: (str) name of the filter. Cannot be empty.
filter_callable: (callable) a filter function of the signature described
as above.
Raises:
ValueError: If filter_name is an empty str.
TypeError: If filter_name is not a str.
Or if filter_callable is not callable.
"""
if not isinstance(filter_name, str):
raise TypeError("Input argument filter_name is expected to be str, "
"but is not.")
# Check that filter_name is not an empty str.
if not filter_name:
raise ValueError("Input argument filter_name cannot be empty.")
# Check that filter_callable is callable.
if not callable(filter_callable):
raise TypeError(
"Input argument filter_callable is expected to be callable, "
"but is not.")
self._tensor_filters[filter_name] = filter_callable
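# Example (an illustrative sketch, not part of this module): register a
# filter that flags tensors containing inf or nan values.
#
#   def has_bad_values(datum, tensor):
#     return np.any(np.isinf(tensor)) or np.any(np.isnan(tensor))
#
#   analyzer.add_tensor_filter("has_bad_values", has_bad_values)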
def get_tensor_filter(self, filter_name):
"""Retrieve filter function by name.
Args:
filter_name: Name of the filter set during add_tensor_filter() call.
Returns:
The callable associated with the filter name.
Raises:
ValueError: If there is no tensor filter of the specified filter name.
"""
if filter_name not in self._tensor_filters:
raise ValueError("There is no tensor filter named \"%s\"" % filter_name)
return self._tensor_filters[filter_name]
def get_help(self, handler_name):
return self._arg_parsers[handler_name].format_help()
def list_tensors(self, args, screen_info=None):
"""Command handler for list_tensors.
List tensors dumped during debugged Session.run() call.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# TODO(cais): Add annotations of substrings for dumped tensor names, to
# facilitate on-screen highlighting/selection of node names.
_ = screen_info
parsed = self._arg_parsers["list_tensors"].parse_args(args)
output = []
filter_strs = []
if parsed.op_type_filter:
op_type_regex = re.compile(parsed.op_type_filter)
filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)
else:
op_type_regex = None
if parsed.node_name_filter:
node_name_regex = re.compile(parsed.node_name_filter)
filter_strs.append("Node name regex filter: \"%s\"" %
parsed.node_name_filter)
else:
node_name_regex = None
if parsed.tensor_filter:
try:
filter_callable = self.get_tensor_filter(parsed.tensor_filter)
except ValueError:
return self._error(
"There is no tensor filter named \"%s\"." % parsed.tensor_filter)
data_to_show = self._debug_dump.find(filter_callable)
else:
data_to_show = self._debug_dump.dumped_tensor_data
# TODO(cais): Implement filter by lambda on tensor value.
dump_count = 0
for dump in data_to_show:
if node_name_regex and not node_name_regex.match(dump.node_name):
continue
if op_type_regex:
op_type = self._debug_dump.node_op_type(dump.node_name)
if not op_type_regex.match(op_type):
continue
rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
output.append("[%.3f ms] %s:%d" % (rel_time, dump.node_name,
dump.output_slot))
dump_count += 1
output.insert(0, "")
output = filter_strs + output
if parsed.tensor_filter:
output.insert(0, "%d dumped tensor(s) passing filter \"%s\":" %
(dump_count, parsed.tensor_filter))
else:
output.insert(0, "%d dumped tensor(s):" % dump_count)
return debugger_cli_common.RichTextLines(output)
def node_info(self, args, screen_info=None):
"""Command handler for node_info.
Query information about a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# TODO(cais): Add annotation of substrings for node names, to facilitate
# on-screen highlighting/selection of node names.
_ = screen_info
parsed = self._arg_parsers["node_info"].parse_args(args)
# Get a node name, regardless of whether the input is a node name (without
# output slot attached) or a tensor name (with output slot attached).
node_name, unused_slot = debug_data.parse_node_or_tensor_name(
parsed.node_name)
if not self._debug_dump.node_exists(node_name):
return self._error(
"There is no node named \"%s\" in the partition graphs" % node_name)
# TODO(cais): Provide UI glossary feature to explain to users what the
# term "partition graph" means and how it is related to TF graph objects
# in Python. The information can be along the line of:
# "A tensorflow graph defined in Python is stripped of unused ops
# according to the feeds and fetches and divided into a number of
# partition graphs that may be distributed among multiple devices and
# hosts. The partition graphs are what's actually executed by the C++
# runtime during a run() call."
lines = ["Node %s" % node_name]
lines.append("")
lines.append(" Op: %s" % self._debug_dump.node_op_type(node_name))
lines.append(" Device: %s" % self._debug_dump.node_device(node_name))
# List node inputs (non-control and control).
inputs = self._debug_dump.node_inputs(node_name)
ctrl_inputs = self._debug_dump.node_inputs(node_name, is_control=True)
input_lines = self._format_neighbors("input", inputs, ctrl_inputs)
lines.extend(input_lines)
# List node output recipients (non-control and control).
recs = self._debug_dump.node_recipients(node_name)
ctrl_recs = self._debug_dump.node_recipients(node_name, is_control=True)
rec_lines = self._format_neighbors("recipient", recs, ctrl_recs)
lines.extend(rec_lines)
# Optional: List attributes of the node.
if parsed.attributes:
lines.extend(self._list_node_attributes(node_name))
# Optional: List dumps available from the node.
if parsed.dumps:
lines.extend(self._list_node_dumps(node_name))
return debugger_cli_common.RichTextLines(lines)
def list_inputs(self, args, screen_info=None):
"""Command handler for inputs.
Show inputs to a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# Screen info not currently used by this handler. Include this line to
# mute pylint.
_ = screen_info
# TODO(cais): Use screen info to format the output lines more prettily,
# e.g., hanging indent of long node names.
parsed = self._arg_parsers["list_inputs"].parse_args(args)
return self._list_inputs_or_outputs(
parsed.recursive,
parsed.node_name,
parsed.depth,
parsed.control,
parsed.op_type,
do_outputs=False)
def print_tensor(self, args, screen_info=None):
"""Command handler for print_tensor.
Print value of a given dumped tensor.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
parsed = self._arg_parsers["print_tensor"].parse_args(args)
if screen_info and "cols" in screen_info:
np_printoptions = {"linewidth": screen_info["cols"]}
else:
np_printoptions = {}
# Determine if any range-highlighting is required.
highlight_options = self._parse_ranges_highlight(parsed.ranges)
# Determine whether parsed.tensor_name contains any indexing (slicing).
if parsed.tensor_name.count("[") == 1 and parsed.tensor_name.endswith("]"):
tensor_name = parsed.tensor_name[:parsed.tensor_name.index("[")]
tensor_slicing = parsed.tensor_name[parsed.tensor_name.index("["):]
else:
tensor_name = parsed.tensor_name
tensor_slicing = ""
node_name, output_slot = debug_data.parse_node_or_tensor_name(tensor_name)
if output_slot is None:
return self._error("\"%s\" is not a valid tensor name" %
parsed.tensor_name)
if (self._debug_dump.loaded_partition_graphs and
not self._debug_dump.node_exists(node_name)):
return self._error(
"Node \"%s\" does not exist in partition graphs" % node_name)
watch_keys = self._debug_dump.debug_watch_keys(node_name)
# Find debug dump data that match the tensor name (node name + output
# slot).
matching_data = []
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
if datum.output_slot == output_slot:
matching_data.append(datum)
if not matching_data:
# No dump for this tensor.
return self._error(
"Tensor \"%s\" did not generate any dumps." % parsed.tensor_name)
elif len(matching_data) == 1:
# There is only one dump for this tensor.
if parsed.number <= 0:
return self._format_tensor(
matching_data[0].get_tensor(),
matching_data[0].watch_key,
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=highlight_options)
else:
return self._error(
"Invalid number (%d) for tensor %s, which generated one dump." %
(parsed.number, parsed.tensor_name))
else:
      # There is more than one dump for this tensor.
if parsed.number < 0:
lines = [
"Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
len(matching_data))
]
for i, datum in enumerate(matching_data):
rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))
lines.append("")
lines.append(
"Use the -n (--number) flag to specify which dump to print.")
lines.append("For example:")
lines.append(" print_tensor %s -n 0" % parsed.tensor_name)
return debugger_cli_common.RichTextLines(lines)
elif parsed.number >= len(matching_data):
return self._error(
"Specified number (%d) exceeds the number of available dumps "
"(%d) for tensor %s" %
(parsed.number, len(matching_data), parsed.tensor_name))
else:
return self._format_tensor(
matching_data[parsed.number].get_tensor(),
matching_data[parsed.number].watch_key + " (dump #%d)" %
parsed.number,
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=highlight_options)
def _parse_ranges_highlight(self, ranges_string):
"""Process ranges highlight string.
Args:
      ranges_string: (str) A string representing a numerical range or a list of
numerical ranges. See the help info of the -r flag of the print_tensor
command for more details.
Returns:
      An instance of tensor_format.HighlightOptions, if ranges_string is a valid
representation of a range or a list of ranges.
"""
ranges = None
def ranges_filter(x):
r = np.zeros(x.shape, dtype=bool)
for rng_start, rng_end in ranges:
r = np.logical_or(r, np.logical_and(x >= rng_start, x <= rng_end))
return r
if ranges_string:
ranges = command_parser.parse_ranges(ranges_string)
return tensor_format.HighlightOptions(
ranges_filter, description=ranges_string)
else:
return None
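  # Illustrative example (added note): for ranges_string "[0.0, 1.0]",
  # assuming command_parser.parse_ranges returns [[0.0, 1.0]], the returned
  # filter maps np.array([-1.0, 0.5, 2.0]) to array([False, True, False]).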
def _format_tensor(self,
tensor,
watch_key,
np_printoptions,
print_all=False,
tensor_slicing=None,
highlight_options=None):
"""Generate formatted str to represent a tensor or its slices.
Args:
tensor: (numpy ndarray) The tensor value.
watch_key: (str) Tensor debug watch key.
np_printoptions: (dict) Numpy tensor formatting options.
print_all: (bool) Whether the tensor is to be displayed in its entirety,
instead of printing ellipses, even if its number of elements exceeds
the default numpy display threshold.
(Note: Even if this is set to true, the screen output can still be cut
        off by the UI frontend if it consists of more lines than the frontend
can handle.)
tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
None, no slicing will be performed on the tensor.
highlight_options: (tensor_format.HighlightOptions) options to highlight
elements of the tensor. See the doc of tensor_format.format_tensor()
for more details.
Returns:
(str) Formatted str representing the (potentially sliced) tensor.
Raises:
      ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
"""
if tensor_slicing:
# Validate the indexing.
if not command_parser.validate_slicing_string(tensor_slicing):
raise ValueError("Invalid tensor-slicing string.")
value = eval("tensor" + tensor_slicing) # pylint: disable=eval-used
sliced_name = watch_key + tensor_slicing
else:
value = tensor
sliced_name = watch_key
if print_all:
np_printoptions["threshold"] = value.size
else:
np_printoptions["threshold"] = self.default_ndarray_display_threshold
return tensor_format.format_tensor(
value,
sliced_name,
include_metadata=True,
np_printoptions=np_printoptions,
highlight_options=highlight_options)
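  # Usage sketch (added note; the watch key and options are hypothetical):
  #   panel = self._format_tensor(np.eye(3), "node:0:DebugIdentity", {},
  #                               print_all=True)
  # With print_all=True the numpy "threshold" option is set to the element
  # count, so the full tensor is printed without ellipses.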
def list_outputs(self, args, screen_info=None):
"""Command handler for inputs.
Show inputs to a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# Screen info not currently used by this handler. Include this line to
# mute pylint.
_ = screen_info
# TODO(cais): Use screen info to format the output lines more prettily,
# e.g., hanging indent of long node names.
parsed = self._arg_parsers["list_outputs"].parse_args(args)
return self._list_inputs_or_outputs(
parsed.recursive,
parsed.node_name,
parsed.depth,
parsed.control,
parsed.op_type,
do_outputs=True)
def _list_inputs_or_outputs(self,
recursive,
node_name,
depth,
control,
op_type,
do_outputs=False):
"""Helper function used by list_inputs and list_outputs.
Format a list of lines to display the inputs or output recipients of a
given node.
Args:
recursive: Whether the listing is to be done recursively, as a boolean.
node_name: The name of the node in question, as a str.
depth: Maximum recursion depth, applies only if recursive == True, as an
int.
control: Whether control inputs or control recipients are included, as a
boolean.
op_type: Whether the op types of the nodes are to be included, as a
boolean.
      do_outputs: Whether recipients, instead of input nodes, are to be
listed, as a boolean.
Returns:
Input or recipient tree formatted as a RichTextLines object.
"""
if do_outputs:
tracker = self._debug_dump.node_recipients
type_str = "Recipients of"
short_type_str = "recipients"
else:
tracker = self._debug_dump.node_inputs
type_str = "Inputs to"
short_type_str = "inputs"
lines = []
# Check if this is a tensor name, instead of a node name.
node_name, _ = debug_data.parse_node_or_tensor_name(node_name)
# Check if node exists.
if not self._debug_dump.node_exists(node_name):
return self._error(
"There is no node named \"%s\" in the partition graphs" % node_name)
if recursive:
max_depth = depth
else:
max_depth = 1
if control:
include_ctrls_str = ", control %s included" % short_type_str
else:
include_ctrls_str = ""
lines.append("%s node \"%s\" (Depth limit = %d%s):" %
(type_str, node_name, max_depth, include_ctrls_str))
self._dfs_from_node(lines, node_name, tracker, max_depth, 1, [], control,
op_type)
# Include legend.
lines.append("")
lines.append("Legend:")
lines.append(" (d): recursion depth = d.")
if control:
lines.append(" (Ctrl): Control input.")
if op_type:
lines.append(" [Op]: Input node has op type Op.")
# TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.
return debugger_cli_common.RichTextLines(lines)
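  # Example rendering (added note; the exact branch markers depend on the
  # HANG_* and DEPTH_TEMPLATE constants defined elsewhere in this module):
  #   Inputs to node "d" (Depth limit = 2):
  #   |- (1) c
  #   |  |- (2) a
  #   |- (1) b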
def _dfs_from_node(self,
lines,
node_name,
tracker,
max_depth,
depth,
unfinished,
include_control=False,
show_op_type=False):
"""Perform depth-first search (DFS) traversal of a node's input tree.
Args:
lines: Text lines to append to, as a list of str.
node_name: Name of the node, as a str. This arg is updated during the
recursion.
tracker: A callable that takes one str as the node name input and
returns a list of str as the inputs/outputs.
        This makes this function general enough to be used with both
node-input and node-output tracking.
max_depth: Maximum recursion depth, as an int.
depth: Current recursion depth. This arg is updated during the
recursion.
unfinished: A stack of unfinished recursion depths, as a list of int.
include_control: Whether control dependencies are to be included as
inputs (and marked as such).
      show_op_type: Whether op types of the input nodes are to be displayed
        alongside the nodes' names.
"""
# Make a shallow copy of the list because it may be extended later.
all_inputs = copy.copy(tracker(node_name, is_control=False))
is_ctrl = [False] * len(all_inputs)
if include_control:
      # Sort control inputs or recipients in alphabetical order of the node
# names.
ctrl_inputs = sorted(tracker(node_name, is_control=True))
all_inputs.extend(ctrl_inputs)
is_ctrl.extend([True] * len(ctrl_inputs))
if not all_inputs:
if depth == 1:
lines.append(" [None]")
return
unfinished.append(depth)
# Create depth-dependent hanging indent for the line.
hang = ""
for k in xrange(depth):
if k < depth - 1:
if k + 1 in unfinished:
hang += HANG_UNFINISHED
else:
hang += HANG_FINISHED
else:
hang += HANG_SUFFIX
if all_inputs and depth > max_depth:
lines.append(hang + ELLIPSIS)
unfinished.pop()
return
hang += DEPTH_TEMPLATE % depth
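    # Added note: "unfinished" holds the recursion depths that still have
    # sibling inputs pending; a depth present in the stack renders as a
    # vertical continuation marker (HANG_UNFINISHED) in the hanging indent,
    # and the current depth is popped when its last input is reached below.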
for i in xrange(len(all_inputs)):
inp = all_inputs[i]
if is_ctrl[i]:
ctrl_str = CTRL_LABEL
else:
ctrl_str = ""
op_type_str = ""
if show_op_type:
op_type_str = OP_TYPE_TEMPLATE % self._debug_dump.node_op_type(inp)
if i == len(all_inputs) - 1:
unfinished.pop()
lines.append(hang + ctrl_str + op_type_str + inp)
# Recursive call.
# The input's/output's name can be a tensor name, in the case of node
# with >1 output slots.
inp_node_name, _ = debug_data.parse_node_or_tensor_name(inp)
self._dfs_from_node(
lines,
inp_node_name,
tracker,
max_depth,
depth + 1,
unfinished,
include_control=include_control,
show_op_type=show_op_type)
def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):
"""List neighbors (inputs or recipients) of a node.
Args:
neighbor_type: ("input" | "recipient")
non_ctrls: Non-control neighbor node names, as a list of str.
ctrls: Control neighbor node names, as a list of str.
Returns:
A list of text lines, as a list of str.
"""
# TODO(cais): Return RichTextLines instead, to allow annotation of node
# names.
lines = []
lines.append("")
lines.append(" %d %s(s) + %d control %s(s):" %
(len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))
lines.append(" %d %s(s):" % (len(non_ctrls), neighbor_type))
for non_ctrl in non_ctrls:
lines.append(" [%s] %s" %
(self._debug_dump.node_op_type(non_ctrl), non_ctrl))
if ctrls:
lines.append("")
lines.append(" %d control %s(s):" % (len(ctrls), neighbor_type))
for ctrl in ctrls:
lines.append(" [%s] %s" %
(self._debug_dump.node_op_type(ctrl), ctrl))
return lines
def _list_node_attributes(self, node_name):
"""List neighbors (inputs or recipients) of a node.
Args:
node_name: Name of the node of which the attributes are to be listed.
Returns:
A list of text lines, as a list of str.
"""
lines = []
lines.append("")
lines.append("Node attributes:")
attrs = self._debug_dump.node_attributes(node_name)
for attr_key in attrs:
lines.append(" %s:" % attr_key)
attr_val_str = repr(attrs[attr_key]).strip().replace("\n", " ")
lines.append(" %s" % attr_val_str)
lines.append("")
return lines
def _list_node_dumps(self, node_name):
"""List dumped tensor data from a node.
Args:
      node_name: Name of the node of which the dumps are to be listed.
Returns:
A list of text lines, as a list of str.
"""
lines = []
lines.append("")
watch_keys = self._debug_dump.debug_watch_keys(node_name)
dump_count = 0
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
dump_count += 1
lines.append(" Slot %d @ %s @ %.3f ms" %
(datum.output_slot, datum.debug_op,
(datum.timestamp - self._debug_dump.t0) / 1000.0))
lines.insert(1, "%d dumped tensor(s):" % dump_count)
return lines
|
{"hexsha": "e00f8c810ed28f442f17a44888c59b7bb723866c", "size": 31037, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/debug/cli/analyzer_cli.py", "max_stars_repo_name": "RMORIOKA/tensorflow", "max_stars_repo_head_hexsha": "6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 101, "max_stars_repo_stars_event_min_datetime": "2016-12-03T11:40:52.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-23T02:02:03.000Z", "max_issues_repo_path": "tensorflow/python/debug/cli/analyzer_cli.py", "max_issues_repo_name": "RMORIOKA/tensorflow", "max_issues_repo_head_hexsha": "6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2016-12-14T03:27:46.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-13T02:29:07.000Z", "max_forks_repo_path": "tensorflow/python/debug/cli/analyzer_cli.py", "max_forks_repo_name": "RMORIOKA/tensorflow", "max_forks_repo_head_hexsha": "6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 47, "max_forks_repo_forks_event_min_datetime": "2016-12-04T12:37:24.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-14T18:13:07.000Z", "avg_line_length": 32.9479830149, "max_line_length": 80, "alphanum_fraction": 0.6402036279, "include": true, "reason": "import numpy", "num_tokens": 6989}
|
""" OSC server ofr BITalino R-IoT
"""
import argparse
import math
from tornado import websocket, web, ioloop
import _thread as thread
import asyncio
import websockets
import json
import signal
import numpy
import sys, traceback, os, time, platform
import subprocess
#from os.path import expanduser
from pythonosc import dispatcher
from pythonosc import osc_server
import netifaces
##MIDO
import mido
import copy
def start_mido():
global current_note
global kcounter
global mido_port
global port_is_open
global note_switch
global D_note
global A_note
D_note = [0, 0, 0]
A_note = ["", "", ""]
note_switch = False
print("start mido")
current_note = 72
kcounter = 0
outports = mido.get_output_names()
print("MIDO ports:", outports)
mido_port = mido.open_output('loopMIDI Port 1')
port_is_open = True
def stop_mido(port_is_open):
global mido_port
global D_note
if port_is_open:
for i in range(len(D_note)):
mido_port.send(mido.Message('note_off', note = D_note[i], velocity = 80, time = 0))
mido_port.close()
net_interface_type = "en0"
riot_ip = '192.168.1.100'
riot_ssid = 'riot'
class Utils:
OS = None
device_data = [""] # 1 json string for each device
device_ids = [0]
num_devices = len(device_ids)
osc_server_started = False
ut = Utils()
def printJSON(decoded_json_input):
try:
# pretty printing of json-formatted string
print (json.dumps(decoded_json_input, sort_keys=True , indent=4))
except (ValueError, KeyError, TypeError):
print ("JSON format error")
def tostring(data):
"""
:param data: object to be converted into a JSON-compatible `str`
:type data: any
:return: JSON-compatible `str` version of `data`
Converts `data` from its native data type to a JSON-compatible `str`.
"""
dtype = type(data).__name__
if dtype == 'ndarray':
if numpy.shape(data) != ():
data = data.tolist() # data=list(data)
else:
data = '"' + data.tostring() + '"'
elif dtype == 'dict' or dtype == 'tuple':
try:
data = json.dumps(data)
except:
pass
elif dtype == 'NoneType':
data = ''
elif dtype == 'str' or dtype == 'unicode':
data = json.dumps(data)
return str(data)
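# Usage sketch (added note; illustrative values):
#   tostring(numpy.array([1, 2]))  ->  "[1, 2]"
#   tostring({"a": 1})             ->  '{"a": 1}'
#   tostring(None)                 ->  ""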
def new_device(n):
print ("new device connected!")
ut.device_ids.append(n)
ut.device_data.append("") #assign empty string to each device
def print_riot_data(unused_addr, *values):
d_id = (int(unused_addr[1]))
if d_id not in ut.device_ids:
new_device(d_id)
print("OSC Message %s from device %s" % (unused_addr, unused_addr[:2]))
print(values)
def assign_riot_data(unused_addr, *values):
d_id = (int(unused_addr[1]))
if d_id not in ut.device_ids: new_device(d_id)
channels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
labels = ["ACC_X", "ACC_Y", "ACC_Z", "GYRO_X", "GYRO_Y", "GYRO_Z", "MAG_X", "MAG_Y", "MAG_Z",
"TEMP", "IO", "A1", "A2", "C", "Q1", "Q2", "Q3", "Q4", "PITCH", "YAW", "ROLL", "HEAD"]
ch_mask = numpy.array(channels) - 1
try:
cols = numpy.arange(len(ch_mask))
res = "{"
for i in cols:
res += '"' + labels[i] + '":' + str(values[i]) + ','
res = res[:-1] + "}"
#if len(cl) > 0: cl[-1].write_message(res)
ut.device_data[d_id] = res
process_data(res)
except:
traceback.print_exc()
os._exit(0)
def assign_bitalino_data(unused_addr, *values):
d_id = (int(unused_addr[1]))
if d_id not in ut.device_ids: new_device(d_id)
channels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
labels = ["nSeq", "I1", "I2", "O1", "O2","A1","A2","A3","A4","A5","A6"]
ch_mask = numpy.array(channels) - 1
try:
cols = numpy.arange(len(ch_mask))
res = "{"
for i in cols:
res += '"' + labels[i] + '":' + str(values[i]) + ','
res = res[:-1] + "}"
#if len(cl) > 0: cl[-1].write_message(res)
ut.device_data[d_id] = res
except:
traceback.print_exc()
os._exit(0)
def process_data(data):
#TODO
data = json.loads(data)
# print(data["ACC_X"], data["ACC_Y"], data["ACC_Z"])
global kcounter
global note_switch
global D_note
global A_note
D_note_update = copy.deepcopy(D_note)
kcounter = kcounter + 1
# print(data["PITCH"], data["YAW"], data["ROLL"])
#On-off
if kcounter%20==0:
if not note_switch:
# mido_port.send(mido.Message('note_on', note = 80, velocity = 80, time = 0))
pass
if note_switch:
# mido_port.send(mido.Message('note_off', note = 80, velocity = 80, time = 0))
pass
note_switch = not note_switch
#Assign note C, G, E according to R-IoT PITCH val
D = [0,0,0]
D[0] = data["PITCH"]/180
D[1] = data["YAW"]/180
D[2] = data["ROLL"]/180
for i in range(len(D)):
if D[i] < -0.33:
D_note_update[i] = 60
elif D[i] > 0.33:
D_note_update[i] = 67
elif D[i] < 0.33:
D_note_update[i] = 64
D_note_update[2] = D_note_update[2]+12
D_note_update[1] = D_note_update[1]-12
for i in range(len(D)):
if D_note_update[i] != D_note[i]:
mido_port.send(mido.Message('note_off', note = D_note[i], velocity = 80, time = 0))
D_note[i] = D_note_update[i]
mido_port.send(mido.Message('note_on', note = D_note[i], velocity = 80, time = 0))
if (D_note[i]) == 60 or (D_note[i]) == 48 or (D_note[i]) == 72:
A_note[i] = "C"
elif (D_note[i]) == 64 or (D_note[i]) == 52 or (D_note[i]) == 76:
A_note[i] = "E"
elif (D_note[i]) == 67 or (D_note[i]) == 55 or (D_note[i]) == 79:
A_note[i] = "G"
# print(D_note)
print(A_note)
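# Added note: with PITCH/YAW/ROLL reported in degrees (roughly -180..180),
# D normalizes each angle to about [-1, 1]; e.g. PITCH = 90 gives D[0] = 0.5,
# which falls in the "> 0.33" bucket and selects MIDI note 67 (G). The ROLL
# voice is then shifted up and the YAW voice down by one octave (+/- 12).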
def riot_listener(ip, port):
# for d_id in ut.device_ids:
# recv_addr = str("/%i/raw"%d_id)
# riot_dispatcher = dispatcher.Dispatcher()
# riot_dispatcher.map(recv_addr, assign_riot_data)
riot_dispatcher = dispatcher.Dispatcher()
riot_dispatcher.map("/*/raw", assign_riot_data)
# riot_dispatcher.map("/*/bitalino", assign_bitalino_data)
# server = osc_server.ThreadingOSCUDPServer(
# (ip, port), riot_dispatcher)
server = osc_server.ThreadingOSCUDPServer(
(ip, port), riot_dispatcher)
print("Serving on {}".format(server.server_address))
ut.osc_server_started = True
server.serve_forever()
def detect_net_config(net_interface_type, OS):
if net_interface_type is not None:
net_interface_type, ssid = detect_wireless_interface([net_interface_type], OS)
while net_interface_type is None:
try:
print ("detecting wireless interface... (this can be set manually with --net)")
            net_interface_type, ssid = detect_wireless_interface(netifaces.interfaces(), OS)
print("Connected to wifi network: " + str(ssid))
except Exception as e:
print(e)
print ("could not retrieve ssid from %s" % net_interface_type)
print ("see available interfaces with: \n \
ifconfig -a (UNIX) \n ipconfig |findstr 'adapter' (WINDOWS)")
print ('{:^24s}'.format("====================="))
input ("please connect to a Wi-Fi network and press ENTER to continue")
return net_interface_type, ssid
def detect_wireless_interface(interface_list, OS):
det_interface = det_ssid = None
for interface in interface_list:
if ("linux" in OS or "Linux" in OS):
det_interface = os.popen('iwgetid').read()[:-1].split()[0]
det_ssid = os.popen('iwgetid -r').read()[:-1]
break
elif ("Windows" in OS):
det_interface = os.popen('netsh wlan show interfaces | findstr /r "^....Name"').read()[:-1].split()[-1]
det_ssid = os.popen('netsh wlan show interfaces | findstr /r "^....SSID"').read()[:-1].split()[-1]
break
else:
ssid = os.popen('networksetup -getairportnetwork ' + interface).read()[:-1]
print(ssid)
if '** Error: Error obtaining wireless information' not in ssid:
det_ssid = ssid[23:]
det_interface = interface
break
return det_interface, det_ssid
def detect_ipv4_address(net_interface_type, OS):
if "Windows" in OS:
ipv4_addr = os.popen('netsh interface ipv4 show config %s | findstr /r "^....IP Address"' % net_interface_type).read()[:-1].split()[-1]
print("Network interface %s address: %s" % (net_interface_type, ipv4_addr))
else:
addrs = netifaces.ifaddresses(net_interface_type)
ipv4_addr = addrs[netifaces.AF_INET][0]['addr']
print("Network interface %s address: %s" % (net_interface_type, ipv4_addr))
return ipv4_addr
def reconfigure_ipv4_address(riot_ip, ipv4_addr, OS):
if riot_ip not in ipv4_addr:
print ("The computer's IPv4 address must be changed to match")
if "Windows" in OS:
cmd = "netsh interface ip set address %s static %s 255.255.255.0 192.168.1.1" % (net_interface_type, args.ip)
input("press ENTER to auto re-configure network settings and continue. You may need to re-open R-IoT serverBIT")
try:
proc = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
timer(3)
return
except subprocess.CalledProcessError:
input ("There was an error, please run as administrator. You can also change the ipv4 address manually (see R-IoT guide) \
\nclose window and try again")
sys.exit(1)
else:
# UNIX ifconfig command with sudo
cmd = '"sudo ifconfig %s %s netmask 255.255.255.0"' % (net_interface_type, riot_ip)
if "Linix" in OS:
            # to run the command from a terminal; will prompt for the sudo password
print(">>> paste the following command: ")
print ( cmd )
input("OR press ENTER to auto re-configure network settings and continue. You may need to re-open R-IoT serverBIT")
else:
            # request OSX root privilege with a GUI prompt
cmd = "osascript -e 'do shell script %s with prompt %s with administrator privileges'" % (cmd, '"ServerBIT requires root access."')
try:
            # wait until the command has run before continuing
proc = subprocess.check_output(cmd, shell=True)
timer(3)
return
except subprocess.CalledProcessError:
print(cmd)
input ("There was an error running this command. You can also change the ipv4 address manually (see R-IoT guide) \
\nclose window and try again")
sys.exit(1)
def update_progress(count, total, status=''):
bar_len = 20
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
# As suggested by Rom Ruben (see: http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113#comment50529068_27871113)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush()
def timer(t, rate = 0.25, text=''):
tt=round((t+rate)/rate)
for i in range(tt):
update_progress(i, round(t/rate), text)
time.sleep(rate)
print("\n")
def reset():
time.sleep(0.5)
    os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)
async def webApp(ws, path):
# device_id = ws.port - 9001
print('LISTENING')
# print (ut.device_data[device_id])
# ws_path_id = path[-1]
while ut.device_data[0] != "":
await ws.send(ut.device_data[0])
await asyncio.sleep(0.1)
# for device_id in ut.device_ids:
# if True:
# # if ws_path_id.isdigit() and int(ws_path_id) == device_id:
# try:
# print ("streaming data from device %i to ws://%s:%i%s" % (device_id, ws.host, ws.port, path))
# while ut.device_data[device_id] != "":
# await ws.send(ut.device_data[device_id])
# await asyncio.sleep(0.1)
# except:
# pass
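# A minimal client sketch (added for illustration; the host/port are the
# defaults declared below and may differ in your setup):
#   import asyncio, websockets
#   async def recv_one():
#       async with websockets.connect("ws://127.0.0.1:9001") as ws:
#           print(await ws.recv())  # one JSON frame of device data
#   asyncio.get_event_loop().run_until_complete(recv_one())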
if __name__ == "__main__":
# -0- initialize mido
start_mido()
    # -1- parse arguments
OS = platform.system()
parser = argparse.ArgumentParser()
parser.add_argument("--id",
type=int, default=0, help="This dictates the OSC reveive address: /<id>/raw")
parser.add_argument("--ip",
default=riot_ip, help="The ip to listen on (usually set to 192.168.1.00)")
parser.add_argument("--port",
type=int, default=8888, help="The port to listen on")
parser.add_argument("--ssid",
default=riot_ssid, help="name of the wifi network which R-IoT device is streaming data to")
parser.add_argument("--net_interface",
default=None, help="name of the wireless interface which the computer is using")
parser.add_argument("--websockets_ip",
default='127.0.0.1', help="destination ip for websocket handler")
parser.add_argument("--websockets_port",
type=int, default=9001, help="destination port for websocket handler is the port + device ID")
parser.add_argument("--find_new",
type=int, default=1, help="find new devices in network")
args = parser.parse_args()
# -2- network config
# -2.1- get network interface and ssid & assign module ip
# net_interface_type, ssid = detect_net_config(args.net_interface, OS)
# -2.2- get serverBIT host ipv4 address
# ipv4_addr = detect_ipv4_address(net_interface_type, OS)
# -2.3- check host ssid matches that assigned to the R-IoT module
# while ssid not in args.ssid:
# print ('{:^24s}'.format("====================="))
# print ("currently connected to '%s', please connect to the same network as the R-IoT (%s)" % (ssid, args.ssid))
## print ("(target ssid can be changed with --ssid 'name_of_network')")
# input("please re-open R-IoT_ServerBIT or press ENTER to retry")
# # update network properties
# net_interface_type, ssid = detect_net_config(args.net_interface, OS)
# -2.4- change host ipv4 to match the R-IoT module if required
if riot_ip not in args.ip:
print ("IP address changed from R-IoT default (%s)" % riot_ip)
# reconfigure_ipv4_address(args.ip, ipv4_addr, OS)
print ("Starting riot_serverBIT...")
timer(2)
# -3- stream device data to network
try:
thread.start_new_thread(riot_listener, (args.ip, args.port)) # one thread to listen to all devices on the same ip & port
while not ut.osc_server_started : time.sleep(0.1)
if args.find_new == 1: timer(5, text="searching for devices on this network")
while ut.device_data[0] == "" or len(ut.device_ids) == 0:
print ("no devices found")
timer(5, text="searching for devices on this network")
print ("found %i device(s)" % len(ut.device_ids))
start_server = websockets.serve(webApp, args.websockets_ip, args.websockets_port)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
except Exception as e:
print (e)
finally:
#Mido stop
stop_mido(port_is_open)
print ()
sys.exit(1)
|
{"hexsha": "83d95cc63191408d778eda62faf583aefb4761ce", "size": 15748, "ext": "py", "lang": "Python", "max_stars_repo_path": "CEG_pyr/xriot_serverBIT.py", "max_stars_repo_name": "malfarasplux/sigaloud", "max_stars_repo_head_hexsha": "cf35dd51aaa332e9938db57c6aaf19470cc7675e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CEG_pyr/xriot_serverBIT.py", "max_issues_repo_name": "malfarasplux/sigaloud", "max_issues_repo_head_hexsha": "cf35dd51aaa332e9938db57c6aaf19470cc7675e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CEG_pyr/xriot_serverBIT.py", "max_forks_repo_name": "malfarasplux/sigaloud", "max_forks_repo_head_hexsha": "cf35dd51aaa332e9938db57c6aaf19470cc7675e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2293144208, "max_line_length": 148, "alphanum_fraction": 0.6014097028, "include": true, "reason": "import numpy", "num_tokens": 4222}
|
import numpy as np
# ----------
# Functions to compute in log-domain.
# ----------
def logzero():
return -np.inf
def safe_log(x):
if x == 0:
return logzero()
return np.log(x)
def logsum_pair(logx, logy):
"""
Return log(x+y), avoiding arithmetic underflow/overflow.
logx: log(x)
logy: log(y)
Rationale:
x + y = e^logx + e^logy
= e^logx (1 + e^(logy-logx))
log(x+y) = logx + log(1 + e^(logy-logx)) (1)
Likewise,
log(x+y) = logy + log(1 + e^(logx-logy)) (2)
The computation of the exponential overflows earlier and is less precise
for big values than for small values. Due to the presence of logy-logx
(resp. logx-logy), (1) is preferred when logx > logy and (2) is preferred
otherwise.
"""
if logx == logzero():
return logy
elif logx > logy:
return logx + np.log1p(np.exp(logy-logx))
else:
return logy + np.log1p(np.exp(logx-logy))
def logsum(logv):
"""
Return log(v[0]+v[1]+...), avoiding arithmetic underflow/overflow.
"""
res = logzero()
for val in logv:
res = logsum_pair(res, val)
return res
# ----------
# This implementation is faster, but may give problems with log(0), so I
# commented it out
# def logsum(logv):
# '''
# Return log(v[0]+v[1]+...), avoiding arithmetic underflow/overflow.
# '''
# c = np.max(logv)
# return c + np.log(np.sum(np.exp(logv - c)))
# ----------
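# ----------
# Minimal self-checks (a sketch added for illustration, not part of the
# original module). Note that np.logaddexp.reduce is a built-in alternative
# that also handles -inf (i.e. log(0)) correctly.
if __name__ == "__main__":
    v = [safe_log(0.1), safe_log(0.2), safe_log(0.3)]
    assert np.isclose(logsum(v), np.log(0.6))
    # logzero() entries are absorbed, matching log(0 + x) = log(x).
    assert np.isclose(logsum_pair(logzero(), np.log(0.5)), np.log(0.5))
    assert np.isclose(np.logaddexp.reduce(v), logsum(v))
    print("log-domain self-checks passed")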
|
{"hexsha": "22519b691fc9ad1a1c452a70a1f93b516915b9ae", "size": 1474, "ext": "py", "lang": "Python", "max_stars_repo_path": "lxmls/sequences/log_domain.py", "max_stars_repo_name": "mtreviso/lxmls-toolkit", "max_stars_repo_head_hexsha": "7b135d98c8bde592649fface8e6f24f112939937", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 183, "max_stars_repo_stars_event_min_datetime": "2015-01-04T22:43:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T03:30:44.000Z", "max_issues_repo_path": "lxmls/sequences/log_domain.py", "max_issues_repo_name": "mtreviso/lxmls-toolkit", "max_issues_repo_head_hexsha": "7b135d98c8bde592649fface8e6f24f112939937", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 85, "max_issues_repo_issues_event_min_datetime": "2015-05-18T23:24:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-14T18:56:49.000Z", "max_forks_repo_path": "lxmls/sequences/log_domain.py", "max_forks_repo_name": "mtreviso/lxmls-toolkit", "max_forks_repo_head_hexsha": "7b135d98c8bde592649fface8e6f24f112939937", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 190, "max_forks_repo_forks_event_min_datetime": "2015-01-04T22:43:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-09T11:06:28.000Z", "avg_line_length": 22.3333333333, "max_line_length": 77, "alphanum_fraction": 0.5671641791, "include": true, "reason": "import numpy", "num_tokens": 436}
|
[STATEMENT]
lemma convex_same_rel_interior_closure_straddle:
fixes S :: "'n::euclidean_space set"
shows "\<lbrakk>convex S; convex T\<rbrakk>
\<Longrightarrow> rel_interior S = rel_interior T \<longleftrightarrow>
rel_interior S \<subseteq> T \<and> T \<subseteq> closure S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>convex S; convex T\<rbrakk> \<Longrightarrow> (rel_interior S = rel_interior T) = (rel_interior S \<subseteq> T \<and> T \<subseteq> closure S)
[PROOF STEP]
by (simp add: closure_eq_between convex_same_rel_interior_closure)
|
{"llama_tokens": 202, "file": null, "length": 1}
|
using LinearAlgebra
using MAT
using Plots
using Statistics
file = matread("Data2.mat");
X = file["data"];
# Visualize the first two dimensions of the data
scatter(X[:,1], X[:,2], aspect_ratio=:equal, leg=false)
# input: X - data points
# output: E - distance matrix
#
function get_E(X)
n = size(X,1); # number of points
E = zeros(n,n); # the diagonal entries are 0
for ii=1:n
for jj=ii+1:n
E[ii,jj] = norm(X[ii,:] - X[jj,:]);
E[jj,ii] = E[ii,jj];
end
end
return E;
end
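# A minimal sanity check (added sketch): two points at Euclidean distance 5.
# X_toy = [0.0 0.0; 3.0 4.0];
# @assert get_E(X_toy) ≈ [0.0 5.0; 5.0 0.0]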
# get the distance matrix E
E = get_E(X);
# evaluate sigma
E_sort = sort(E, dims=2);
k = 7;
sigma_loc = E_sort[:, k+1];
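# Added note: sigma_loc[i] is the distance from point i to its k-th nearest
# neighbor (column k+1 of the sorted row, since column 1 holds the zero
# self-distance) -- the local-scaling bandwidth in the style of
# Zelnik-Manor & Perona.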
# input 1: E - distance matrix
# input 2: sigma - constant
#
# output: K - kernel matrix
#
function get_K(E, sigma_loc)
n = size(E,1);
K = ones(n,n);
for ii = 1:n
for jj = ii+1:n
K[ii,jj] = exp(-E[ii,jj]^2/(sigma_loc[ii]*sigma_loc[jj]));
K[jj,ii] = K[ii,jj]
end
end
return K;
end
# get the kernel matrix K
K = get_K(E, sigma_loc);
# input: K - kernel matrix
#
# output 1: Q
# output 2: d_sq - sqrt{D}
#
function get_Q(K)
n = size(K,1);
Q = zeros(n,n);
d_sq = zeros(n);
# d_sq = sqrt{D}
for ii = 1:n
d_sq[ii] = sqrt(sum(K[ii,:]));
end
# get components of Q
for ii = 1:n
for jj = 1:n
Q[ii,jj] = K[ii,jj]/(d_sq[ii]*d_sq[jj]);
end
end
return Q, d_sq;
end
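# Added note: componentwise this builds Q = D^(-1/2) K D^(-1/2) with
# D = diag(row sums of K). Q is symmetric, so its eigenvectors can be
# computed stably; get_eig below rescales them by d_sq to recover the
# eigenvectors of the random-walk operator D^(-1) K.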
# get Q and d_sq
Q, d_sq = get_Q(K);
# input 1: Q
# input 2: d_sq - sqrt{D}
#
# output 1: v - eigenvectors
# output 2: s - eigenvalues
#
function get_eig(Q, d_sq)
n = size(Q, 1);
    U,S,V = svd(Q); # U and S contain the eigenvectors and eigenvalues, respectively,
                    # arranged in descending order.
v = zeros(n,n);
for ii = 1 : n
V[ii,:] = V[ii,:]/d_sq[ii];
end
for ii = 1 : n
v[:,ii] = V[:,ii]/norm(V[:,ii]);
end
return v, S;
end
c = 5; # the desired reduced dimension
v, s = get_eig(Q , d_sq);
p1 = scatter(s[1:10], label="eigenvalues 1:10");
p2 = plot(log.(s), label="eigenvalues in log");
plot(p1, p2, layout=2)
function get_Y(v, S, c)
n = size(v,1);
Y = zeros(n,c);
# get components of diffusion map Y
for ii = 1:c
Y[:,ii] = v[:,ii+1].*S[ii+1];
end
return Y ;
end
Y = get_Y(v, s, c);
# plot the diffusion map
p1 = scatter(Y[:,1], Y[:,3], label="2D", aspect_ratio=:equal)
p2 = scatter(Y[:,1], Y[:,2], Y[:,3], label="3D", aspect_ratio=:equal)
plot(p1, p2, layout=2)
|
{"hexsha": "fc64a7c2288153876714b3cb69be5ce4991afad0", "size": 2533, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/diffuse_local.jl", "max_stars_repo_name": "teshenglin/diffusion_maps", "max_stars_repo_head_hexsha": "ee9ffff7773ebfa1953a833235e41eb13ddff9d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia/diffuse_local.jl", "max_issues_repo_name": "teshenglin/diffusion_maps", "max_issues_repo_head_hexsha": "ee9ffff7773ebfa1953a833235e41eb13ddff9d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/diffuse_local.jl", "max_forks_repo_name": "teshenglin/diffusion_maps", "max_forks_repo_head_hexsha": "ee9ffff7773ebfa1953a833235e41eb13ddff9d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.9645390071, "max_line_length": 79, "alphanum_fraction": 0.5428345835, "num_tokens": 878}
|
#! /usr/bin/env python
# by weil
# Sep 16, 2020
import pandas as pd
import numpy as np
import Cell_BLAST as cb
import scipy
import os
import scanpy as sc
from anndata import AnnData
from utils import construct_dataset
# expr_mat
# choose to use raw read counts, not processed data
expr_mat=pd.read_csv("../download/Lukowski/ae_exp_raw_all.tsv", sep="\t")
expr_mat=expr_mat.T
# meta_df
# use cell type annotation after batch effect correction by CCA
meta_df=pd.read_csv("../download/Lukowski/retina_wong_cellbc_cellid.csv", index_col=0)
meta_df=meta_df[["cell.id.cca"]]
meta_df.columns=["cell_type1"]
# cell curation based on cell_type1
cell_mask=meta_df["cell_type1"]!="Others CCA3"
cell_use=np.intersect1d(expr_mat.index, meta_df.index[cell_mask])
meta_df=meta_df.loc[cell_use]
expr_mat=expr_mat.loc[cell_use]
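# Added note: np.intersect1d returns the sorted common barcodes, so indexing
# both frames with cell_use leaves expression rows and metadata rows aligned
# in the same order.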
# datasets meta
datasets_meta=pd.read_csv("../ACA_datasets.csv", header=0, index_col=False)
# cell ontology
cell_ontology = pd.read_csv("../cell_ontology/retina_cell_ontology.csv",
usecols=["cell_type1", "cell_ontology_class", "cell_ontology_id"])
# gene_meta
gene_meta=pd.DataFrame(index=expr_mat.columns)
construct_dataset("../data/Lukowski", expr_mat, meta_df, gene_meta,
datasets_meta=datasets_meta, cell_ontology=cell_ontology)
|
{"hexsha": "f9eaa036053edfdded4984ad53d7bd19e51f399e", "size": 1300, "ext": "py", "lang": "Python", "max_stars_repo_path": "Datasets/collect/collect_lukowski.py", "max_stars_repo_name": "gao-lab/Cell_BLAST", "max_stars_repo_head_hexsha": "45b14bbd3385b8a7be0b48ef5ab42bc946f3558f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2019-04-12T17:31:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T11:46:29.000Z", "max_issues_repo_path": "Datasets/collect/collect_lukowski.py", "max_issues_repo_name": "gao-lab/Cell_BLAST", "max_issues_repo_head_hexsha": "45b14bbd3385b8a7be0b48ef5ab42bc946f3558f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2019-08-16T21:19:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T09:13:58.000Z", "max_forks_repo_path": "Datasets/collect/collect_lukowski.py", "max_forks_repo_name": "gao-lab/Cell_BLAST", "max_forks_repo_head_hexsha": "45b14bbd3385b8a7be0b48ef5ab42bc946f3558f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-11-14T06:22:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-11T08:01:45.000Z", "avg_line_length": 30.2325581395, "max_line_length": 94, "alphanum_fraction": 0.7607692308, "include": true, "reason": "import numpy,import scipy", "num_tokens": 343}
|