| text | meta |
|---|---|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
from copy import copy

cos = np.cos; sin = np.sin; pi = np.pi


def dh(d, theta, a, alpha):
    """
    Compute the homogeneous transformation matrix associated with the
    Denavit-Hartenberg parameters.
    The values d, theta, a, alpha are scalars.
    """
    T = np.array([
        [cos(theta), -cos(alpha)*sin(theta),  sin(alpha)*sin(theta), a*cos(theta)],
        [sin(theta),  cos(alpha)*cos(theta), -sin(alpha)*cos(theta), a*sin(theta)],
        [0.0,         sin(alpha),             cos(alpha),            d],
        [0.0,         0.0,                    0.0,                   1.0]])
    return T


def fkine_ur5(q):
    """
    Compute the forward kinematics of the UR5 robot given its joint values.
    q is a numpy vector of the form [q1, q2, q3, q4, q5, q6]
    """
    # Lengths (in meters): standard UR5 DH parameters (assumed values;
    # verify against your robot's datasheet)
    # DH matrices
    T1 = dh(0.089159, q[0],  0.0,     pi/2)
    T2 = dh(0.0,      q[1], -0.425,   0.0)
    T3 = dh(0.0,      q[2], -0.39225, 0.0)
    T4 = dh(0.10915,  q[3],  0.0,     pi/2)
    T5 = dh(0.09465,  q[4],  0.0,    -pi/2)
    T6 = dh(0.0823,   q[5],  0.0,     0.0)
    # End effector with respect to the base
    T = T1.dot(T2).dot(T3).dot(T4).dot(T5).dot(T6)
    return T


def jacobian_ur5(q, delta=0.0001):
    """
    Analytic Jacobian for position. Returns a 3x6 matrix and takes the joint
    configuration vector q=[q1, q2, q3, q4, q5, q6] as input.
    """
    # Memory allocation
    J = np.zeros((3, 6))
    # Initial homogeneous transformation (using q)
    T = fkine_ur5(q)
    # Iterate to compute the derivative for each column
    for i in range(6):
        # Copy the initial joint configuration
        dq = copy(q)
        # Increment the i-th joint using a delta
        dq[i] += delta
        # Homogeneous transformation after the increment (q+dq)
        Td = fkine_ur5(dq)
        # Approximation of the position Jacobian using finite differences
        J[:, i] = (Td[0:3, 3] - T[0:3, 3]) / delta
    return J


def ikine_ur5(xdes, q0):
    """
    Compute the inverse kinematics of the UR5 numerically, starting from the
    initial joint configuration q0.
    """
    epsilon = 0.001
    max_iter = 1000
    delta = 0.00001
    q = copy(q0)
    for i in range(max_iter):
        # Newton iteration: q <- q + pinv(J) (xdes - x)
        J = jacobian_ur5(q, delta)
        x = fkine_ur5(q)[0:3, 3]
        e = xdes - x
        if np.linalg.norm(e) < epsilon:
            break
        q = q + np.linalg.pinv(J).dot(e)
    return q
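
# Minimal usage sketch (hypothetical joint values; assumes the DH parameters above):
if __name__ == '__main__':
    q0 = np.array([0.0, -1.0, 1.7, -2.2, -1.6, 0.0])
    T = fkine_ur5(q0)
    print('End-effector position:', T[0:3, 3])
    # Solve for a nearby Cartesian target, starting from q0
    xdes = T[0:3, 3] + np.array([0.01, 0.0, 0.0])
    print('IK solution:', ikine_ur5(xdes, q0))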
|
{"hexsha": "83885ee19566b959f77b275b0eb7a519aae08828", "size": 1725, "ext": "py", "lang": "Python", "max_stars_repo_path": "lab4/src/lab4functions.py", "max_stars_repo_name": "andresperez86/LabRobotica20211", "max_stars_repo_head_hexsha": "3ba1e124f20ccb7e7de53c0742da5310d5013f64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab4/src/lab4functions.py", "max_issues_repo_name": "andresperez86/LabRobotica20211", "max_issues_repo_head_hexsha": "3ba1e124f20ccb7e7de53c0742da5310d5013f64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab4/src/lab4functions.py", "max_forks_repo_name": "andresperez86/LabRobotica20211", "max_forks_repo_head_hexsha": "3ba1e124f20ccb7e7de53c0742da5310d5013f64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.7831325301, "max_line_length": 110, "alphanum_fraction": 0.6208695652, "include": true, "reason": "import numpy", "num_tokens": 526}
|
/*
* Copyright (c) 2017-2018 Nicholas Corgan (n.corgan@gmail.com)
*
* Distributed under the MIT License (MIT) (See accompanying file LICENSE.txt
* or copy at http://opensource.org/licenses/MIT)
*/
#include "env.hpp"
#include "swig/cpp_wrappers/attribute_maps.hpp"
#include "swig/cpp_wrappers/breeding.hpp"
#include "swig/cpp_wrappers/daycare.hpp"
#include "swig/cpp_wrappers/daycare_helpers.hpp"
#include "swig/cpp_wrappers/item_slot.hpp"
#include "swig/cpp_wrappers/item_list.hpp"
#include "swig/cpp_wrappers/item_bag.hpp"
#include "swig/cpp_wrappers/pokedex.hpp"
#include "swig/cpp_wrappers/pokedex_helpers.hpp"
#include "swig/cpp_wrappers/pokemon.hpp"
#include "swig/cpp_wrappers/pokemon_helpers.hpp"
#include "swig/cpp_wrappers/pokemon_party.hpp"
#include "swig/cpp_wrappers/pokemon_box.hpp"
#include "swig/cpp_wrappers/pokemon_pc.hpp"
#include "swig/cpp_wrappers/game_save.hpp"
#include "swig/cpp_wrappers/time_duration.hpp"
#include <boost/filesystem.hpp>
#include <gtest/gtest.h>
namespace fs = boost::filesystem;
static const fs::path PKSAV_TEST_SAVES(pkmn_getenv("PKSAV_TEST_SAVES"));
TEST(cpp_swig_wrapper_test, test_attribute_maps)
{
fs::path save_filepath(PKSAV_TEST_SAVES / "firered_leafgreen" / "pokemon_firered.sav");
pkmn::game_save::sptr game_save = pkmn::game_save::from_file(save_filepath.string());
pkmn::swig::game_save game_save_swig(game_save);
// Numeric attributes
pkmn::swig::numeric_attribute_map<pkmn::game_save> game_save_numeric_attribute_map =
game_save_swig.get_numeric_attributes();
game_save->set_numeric_attribute("Casino coins", 500);
EXPECT_EQ(500, game_save_numeric_attribute_map.get_attribute("Casino coins"));
game_save_numeric_attribute_map.set_attribute("Textbox frame", 3);
EXPECT_EQ(3, game_save->get_numeric_attribute("Textbox frame"));
// String attributes
pkmn::swig::string_attribute_map<pkmn::game_save> game_save_string_attribute_map =
game_save_swig.get_string_attributes();
game_save->set_string_attribute("Sound output", "Stereo");
EXPECT_EQ("Stereo", game_save_string_attribute_map.get_attribute("Sound output"));
game_save_string_attribute_map.set_attribute("Battle style", "Set");
EXPECT_EQ("Set", game_save->get_string_attribute("Battle style"));
// Boolean attributes
pkmn::swig::boolean_attribute_map<pkmn::game_save> game_save_boolean_attribute_map =
game_save_swig.get_boolean_attributes();
game_save->set_boolean_attribute("National Dex unlocked?", false);
EXPECT_FALSE(game_save_boolean_attribute_map.get_attribute("National Dex unlocked?"));
game_save_boolean_attribute_map.set_attribute("Enable battle scene?", true);
EXPECT_TRUE(game_save->get_boolean_attribute("Enable battle scene?"));
// Test with a read-only attribute.
pkmn::pokemon::sptr pokemon = pkmn::pokemon::make(
pkmn::e_species::PIKACHU,
pkmn::e_game::RED,
"",
5
);
pkmn::swig::numeric_attribute_map<pkmn::pokemon> pokemon_numeric_attribute_map(pokemon);
EXPECT_EQ(190, pokemon_numeric_attribute_map.get_attribute("Catch rate"));
}
TEST(cpp_swig_wrapper_test, test_item_slot)
{
pkmn::item_list::sptr item_pocket = pkmn::item_list::make(
"Items",
pkmn::e_game::RED
);
pkmn::swig::item_slot first_slot(item_pocket, 0);
// Set the item name through the native class.
item_pocket->set_item(
0,
pkmn::e_item::POTION,
50
);
EXPECT_EQ(pkmn::e_item::POTION, first_slot.get_item());
// Set the item through the wrapper class.
first_slot.set_item(pkmn::e_item::MASTER_BALL);
EXPECT_EQ(pkmn::e_item::MASTER_BALL, item_pocket->at(0).item);
// Set the item amount through the native class.
item_pocket->set_item(
0,
pkmn::e_item::POTION,
20
);
EXPECT_EQ(20, first_slot.get_amount());
// Set the amount through the wrapper class.
first_slot.set_amount(99);
EXPECT_EQ(99, item_pocket->at(0).amount);
// Add a second item so we can test both methods of deletion.
item_pocket->set_item(
1,
pkmn::e_item::REPEL,
10
);
pkmn::swig::item_slot second_slot(item_pocket, 1);
ASSERT_EQ(pkmn::e_item::REPEL, second_slot.get_item());
ASSERT_EQ(10, second_slot.get_amount());
// Delete an item by setting the item to pkmn::e_item::NONE.
first_slot.set_item(pkmn::e_item::NONE);
EXPECT_EQ(pkmn::e_item::REPEL, first_slot.get_item());
EXPECT_EQ(10, first_slot.get_amount());
EXPECT_EQ(pkmn::e_item::NONE, second_slot.get_item());
EXPECT_EQ(0, second_slot.get_amount());
// Delete an item by setting the amount to 0.
first_slot.set_amount(0);
EXPECT_EQ(pkmn::e_item::NONE, first_slot.get_item());
EXPECT_EQ(0, first_slot.get_amount());
EXPECT_EQ(pkmn::e_item::NONE, second_slot.get_item());
EXPECT_EQ(0, second_slot.get_amount());
}
TEST(cpp_swig_wrapper_test, test_item_list)
{
pkmn::swig::item_list swig_item_list("Items", pkmn::e_game::RED);
EXPECT_EQ("Items", swig_item_list.get_name());
EXPECT_EQ(pkmn::e_game::RED, swig_item_list.get_game());
EXPECT_EQ(20, swig_item_list.get_capacity());
EXPECT_EQ(0, swig_item_list.get_num_items());
swig_item_list.add(pkmn::e_item::POTION, 1);
EXPECT_EQ(pkmn::e_item::POTION, swig_item_list.at(0).get_item());
EXPECT_EQ(1, swig_item_list.at(0).get_amount());
EXPECT_EQ(1, swig_item_list.get_num_items());
swig_item_list.at(0).set_amount(0);
EXPECT_EQ(pkmn::e_item::NONE, swig_item_list.at(0).get_item());
EXPECT_EQ(0, swig_item_list.at(0).get_amount());
EXPECT_EQ(0, swig_item_list.get_num_items());
std::vector<pkmn::e_item> valid_items = swig_item_list.get_valid_items();
EXPECT_FALSE(valid_items.empty());
std::vector<std::string> valid_item_names = swig_item_list.get_valid_item_names();
EXPECT_FALSE(valid_item_names.empty());
}
TEST(cpp_swig_wrapper_test, test_item_bag)
{
pkmn::swig::item_bag swig_item_bag(pkmn::e_game::COLOSSEUM);
EXPECT_EQ(pkmn::e_game::COLOSSEUM, swig_item_bag.get_game());
const std::vector<std::string>& pocket_names = swig_item_bag.get_pocket_names();
EXPECT_GT(pocket_names.size(), 0);
for(const std::string& pocket_name: pocket_names)
{
pkmn::swig::item_list pocket = swig_item_bag.get_pocket(pocket_name);
EXPECT_EQ(pocket_name, pocket.get_name());
EXPECT_EQ(pkmn::e_game::COLOSSEUM, pocket.get_game());
ASSERT_FALSE(pocket.get_valid_items().empty());
pkmn::e_item valid_item = pocket.get_valid_items().at(0);
swig_item_bag.add(valid_item, 5);
EXPECT_EQ(1, pocket.get_num_items());
EXPECT_EQ(valid_item, pocket.at(0).get_item());
EXPECT_EQ(5, pocket.at(0).get_amount());
// Set through the pocket and check through another copy from the bag.
pocket.at(0).set_amount(50);
EXPECT_EQ(valid_item, swig_item_bag.get_pocket(pocket_name).at(0).get_item());
EXPECT_EQ(50, swig_item_bag.get_pocket(pocket_name).at(0).get_amount());
// Set through another copy from the bag and check through the existing pocket.
swig_item_bag.get_pocket(pocket_name).at(0).set_amount(0);
EXPECT_EQ(pkmn::e_item::NONE, pocket.at(0).get_item());
EXPECT_EQ(0, pocket.at(0).get_amount());
}
}
TEST(cpp_swig_wrapper_test, test_pokedex)
{
pkmn::pokedex::sptr cpp_pokedex = pkmn::pokedex::make(pkmn::e_game::RED);
pkmn::swig::pokedex swig_pokedex(cpp_pokedex);
pkmn::swig::pokedex_has_seen_helper seen_helper = swig_pokedex.get_has_seen();
pkmn::swig::pokedex_has_caught_helper caught_helper = swig_pokedex.get_has_caught();
// Set from the internal class and query from the SWIG wrapper.
cpp_pokedex->set_has_seen(pkmn::e_species::BULBASAUR, true);
cpp_pokedex->set_has_caught(pkmn::e_species::MEWTWO, true);
EXPECT_TRUE(seen_helper.get_has_seen(pkmn::e_species::BULBASAUR));
EXPECT_TRUE(caught_helper.get_has_caught(pkmn::e_species::MEWTWO));
EXPECT_EQ(2, swig_pokedex.get_num_seen());
EXPECT_EQ(1, swig_pokedex.get_num_caught());
EXPECT_EQ(2ULL, swig_pokedex.get_all_seen().size());
EXPECT_EQ(1ULL, swig_pokedex.get_all_caught().size());
// Set from the SWIG wrapper and check from the internal class.
seen_helper.set_has_seen(pkmn::e_species::MEWTWO, false);
EXPECT_FALSE(cpp_pokedex->has_seen(pkmn::e_species::MEWTWO));
EXPECT_FALSE(cpp_pokedex->has_caught(pkmn::e_species::MEWTWO));
EXPECT_EQ(1, cpp_pokedex->get_num_seen());
EXPECT_EQ(0, cpp_pokedex->get_num_caught());
EXPECT_EQ(1ULL, cpp_pokedex->get_all_seen().size());
EXPECT_EQ(0ULL, cpp_pokedex->get_all_caught().size());
}
static void test_EV_IV_keys(
const std::vector<pkmn::e_stat>& map_keys
)
{
static const std::vector<pkmn::e_stat> expected_keys =
{
pkmn::e_stat::HP,
pkmn::e_stat::ATTACK,
pkmn::e_stat::DEFENSE,
pkmn::e_stat::SPEED,
pkmn::e_stat::SPECIAL_ATTACK,
pkmn::e_stat::SPECIAL_DEFENSE
};
EXPECT_EQ(expected_keys, map_keys);
}
TEST(cpp_swig_wrapper_test, test_pokemon_helpers)
{
pkmn::pokemon::sptr pokemon = pkmn::pokemon::make(
pkmn::e_species::BULBASAUR,
pkmn::e_game::FIRERED,
"",
5
);
pkmn::swig::EV_map EV_map(pokemon);
pkmn::swig::IV_map IV_map(pokemon);
pkmn::swig::marking_map marking_map(pokemon);
pkmn::swig::ribbon_map ribbon_map(pokemon);
pkmn::swig::contest_stat_map contest_stat_map(pokemon);
//
// EVs
//
// Test getting information on the EV map.
EXPECT_EQ(6ULL, EV_map.size());
test_EV_IV_keys(EV_map.keys());
// Set EV through the Pokémon.
pokemon->set_EV(pkmn::e_stat::ATTACK, 25);
EXPECT_EQ(25, pokemon->get_EVs().at(pkmn::e_stat::ATTACK));
EXPECT_EQ(25, EV_map.get_EV(pkmn::e_stat::ATTACK));
// Set EV through the wrapper class.
EV_map.set_EV(pkmn::e_stat::DEFENSE, 5);
EXPECT_EQ(5, EV_map.get_EV(pkmn::e_stat::DEFENSE));
EXPECT_EQ(5, pokemon->get_EVs().at(pkmn::e_stat::DEFENSE));
// Test has_key.
EXPECT_TRUE(EV_map.has_key(pkmn::e_stat::HP));
EXPECT_FALSE(EV_map.has_key(pkmn::e_stat::NONE));
//
// IVs
//
// Test getting information on the IV map.
EXPECT_EQ(6ULL, IV_map.size());
test_EV_IV_keys(IV_map.keys());
// Set IV through the Pokémon.
pokemon->set_IV(pkmn::e_stat::ATTACK, 11);
EXPECT_EQ(11, pokemon->get_IVs().at(pkmn::e_stat::ATTACK));
EXPECT_EQ(11, IV_map.get_IV(pkmn::e_stat::ATTACK));
// Set IV through the wrapper class.
IV_map.set_IV(pkmn::e_stat::DEFENSE, 2);
EXPECT_EQ(2, IV_map.get_IV(pkmn::e_stat::DEFENSE));
EXPECT_EQ(2, pokemon->get_IVs().at(pkmn::e_stat::DEFENSE));
// Test has_key.
EXPECT_TRUE(IV_map.has_key(pkmn::e_stat::HP));
EXPECT_FALSE(IV_map.has_key(pkmn::e_stat::NONE));
//
// Markings
//
static const std::vector<pkmn::e_marking> expected_markings =
{
pkmn::e_marking::CIRCLE,
pkmn::e_marking::TRIANGLE,
pkmn::e_marking::SQUARE,
pkmn::e_marking::HEART
};
// Test getting information on the marking map.
EXPECT_EQ(4ULL, marking_map.size());
EXPECT_EQ(expected_markings, marking_map.keys());
// Set marking through the Pokémon.
pokemon->set_marking(pkmn::e_marking::CIRCLE, true);
EXPECT_TRUE(pokemon->get_markings().at(pkmn::e_marking::CIRCLE));
EXPECT_TRUE(marking_map.get_marking(pkmn::e_marking::CIRCLE));
// Set marking through the wrapper class.
marking_map.set_marking(pkmn::e_marking::SQUARE, true);
EXPECT_TRUE(marking_map.get_marking(pkmn::e_marking::SQUARE));
EXPECT_TRUE(pokemon->get_markings().at(pkmn::e_marking::SQUARE));
// Test has_key.
EXPECT_TRUE(marking_map.has_key(pkmn::e_marking::CIRCLE));
EXPECT_FALSE(marking_map.has_key(pkmn::e_marking::NONE));
//
// Ribbons
//
// Test getting information on the ribbon map.
// Don't bother checking the full 32-entry key vector.
EXPECT_EQ(32ULL, ribbon_map.size());
// Set ribbon through the Pokémon.
pokemon->set_ribbon("Cool", true);
EXPECT_TRUE(pokemon->get_ribbons().at("Cool"));
EXPECT_TRUE(ribbon_map.get_ribbon("Cool"));
// Set ribbon through the wrapper class.
ribbon_map.set_ribbon("Champion", true);
EXPECT_TRUE(ribbon_map.get_ribbon("Champion"));
EXPECT_TRUE(pokemon->get_ribbons().at("Champion"));
// Test has_key.
EXPECT_TRUE(ribbon_map.has_key("Cool"));
EXPECT_FALSE(ribbon_map.has_key("Not a key"));
//
// Contest stats
//
static const std::vector<pkmn::e_contest_stat> expected_contest_stats =
{
pkmn::e_contest_stat::COOL,
pkmn::e_contest_stat::BEAUTY,
pkmn::e_contest_stat::CUTE,
pkmn::e_contest_stat::SMART,
pkmn::e_contest_stat::TOUGH,
pkmn::e_contest_stat::FEEL
};
// Test getting information on the contest stat map.
EXPECT_EQ(6ULL, contest_stat_map.size());
EXPECT_EQ(expected_contest_stats, contest_stat_map.keys());
// Set contest stat through the Pokémon.
pokemon->set_contest_stat(pkmn::e_contest_stat::BEAUTY, 10);
EXPECT_EQ(10, pokemon->get_contest_stats().at(pkmn::e_contest_stat::BEAUTY));
EXPECT_EQ(10, contest_stat_map.get_contest_stat(pkmn::e_contest_stat::BEAUTY));
// Set contest stat through the wrapper class.
pokemon->set_contest_stat(pkmn::e_contest_stat::TOUGH, 123);
EXPECT_EQ(123, contest_stat_map.get_contest_stat(pkmn::e_contest_stat::TOUGH));
EXPECT_EQ(123, pokemon->get_contest_stats().at(pkmn::e_contest_stat::TOUGH));
// Test has_key.
EXPECT_TRUE(contest_stat_map.has_key(pkmn::e_contest_stat::COOL));
EXPECT_FALSE(contest_stat_map.has_key(pkmn::e_contest_stat::NONE));
}
TEST(cpp_swig_wrapper_test, test_pokemon)
{
pkmn::swig::pokemon swig_pokemon(
pkmn::e_species::BULBASAUR,
pkmn::e_game::FIRERED,
"",
5
);
const std::map<pkmn::e_stat, int>& stats = swig_pokemon.get_stats();
EXPECT_EQ(6, stats.size());
EXPECT_EQ(pkmn::e_species::BULBASAUR, swig_pokemon.get_species());
EXPECT_EQ(pkmn::e_game::FIRERED, swig_pokemon.get_game());
EXPECT_EQ("Standard", swig_pokemon.get_form());
EXPECT_EQ("Bulbasaur", swig_pokemon.get_database_entry().get_species_name());
swig_pokemon.set_is_egg(true);
EXPECT_TRUE(swig_pokemon.is_egg());
swig_pokemon.set_condition(pkmn::e_condition::FROZEN);
EXPECT_EQ(pkmn::e_condition::FROZEN, swig_pokemon.get_condition());
swig_pokemon.set_nickname("12345");
EXPECT_EQ("12345", swig_pokemon.get_nickname());
swig_pokemon.set_gender(pkmn::e_gender::FEMALE);
EXPECT_EQ(pkmn::e_gender::FEMALE, swig_pokemon.get_gender());
swig_pokemon.set_shininess(true);
EXPECT_TRUE(swig_pokemon.is_shiny());
swig_pokemon.set_held_item(pkmn::e_item::ORAN_BERRY);
EXPECT_EQ(pkmn::e_item::ORAN_BERRY, swig_pokemon.get_held_item());
swig_pokemon.set_pokerus_duration(9);
EXPECT_EQ(9, swig_pokemon.get_pokerus_duration());
swig_pokemon.set_original_trainer_name("abcdef");
EXPECT_EQ("abcdef", swig_pokemon.get_original_trainer_name());
swig_pokemon.set_original_trainer_public_id(0x1351);
EXPECT_EQ(0x1351, swig_pokemon.get_original_trainer_public_id());
swig_pokemon.set_original_trainer_secret_id(0x2135);
EXPECT_EQ(0x2135, swig_pokemon.get_original_trainer_secret_id());
swig_pokemon.set_original_trainer_id(0xABCDEF12);
EXPECT_EQ(0xABCDEF12, swig_pokemon.get_original_trainer_id());
swig_pokemon.set_original_trainer_gender(pkmn::e_gender::FEMALE);
EXPECT_EQ(pkmn::e_gender::FEMALE, swig_pokemon.get_original_trainer_gender());
swig_pokemon.set_language(pkmn::e_language::JAPANESE);
EXPECT_EQ(pkmn::e_language::JAPANESE, swig_pokemon.get_language());
swig_pokemon.set_current_trainer_friendship(100);
EXPECT_EQ(100, swig_pokemon.get_current_trainer_friendship());
swig_pokemon.set_ability(pkmn::e_ability::OVERGROW);
EXPECT_EQ(pkmn::e_ability::OVERGROW, swig_pokemon.get_ability());
swig_pokemon.set_ball(pkmn::e_ball::ULTRA_BALL);
EXPECT_EQ(pkmn::e_ball::ULTRA_BALL, swig_pokemon.get_ball());
swig_pokemon.set_level_met(2);
EXPECT_EQ(2, swig_pokemon.get_level_met());
swig_pokemon.set_location_met("Rock Tunnel");
EXPECT_EQ("Rock Tunnel", swig_pokemon.get_location_met());
swig_pokemon.set_original_game(pkmn::e_game::SAPPHIRE);
EXPECT_EQ(pkmn::e_game::SAPPHIRE, swig_pokemon.get_original_game());
swig_pokemon.set_personality(0x87654321);
EXPECT_EQ(0x87654321, swig_pokemon.get_personality());
swig_pokemon.set_experience(500000);
EXPECT_EQ(500000, swig_pokemon.get_experience());
swig_pokemon.set_level(50);
EXPECT_EQ(50, swig_pokemon.get_level());
swig_pokemon.get_moves().get_move_slot(0).set_move(pkmn::e_move::RAZOR_LEAF);
EXPECT_EQ(pkmn::e_move::RAZOR_LEAF, swig_pokemon.get_moves().get_move_slot(0).get_move());
EXPECT_EQ(25, swig_pokemon.get_moves().get_move_slot(0).get_pp());
swig_pokemon.get_moves().get_move_slot(0).set_move(pkmn::e_move::FISSURE);
EXPECT_EQ(pkmn::e_move::FISSURE, swig_pokemon.get_moves().get_move_slot(0).get_move());
EXPECT_EQ(5, swig_pokemon.get_moves().get_move_slot(0).get_pp());
swig_pokemon.get_moves().get_move_slot(0).set_pp(2);
EXPECT_EQ(2, swig_pokemon.get_moves().get_move_slot(0).get_pp());
swig_pokemon.set_current_hp(stats.at(pkmn::e_stat::HP)-1);
EXPECT_EQ(stats.at(pkmn::e_stat::HP)-1, swig_pokemon.get_current_hp());
swig_pokemon.get_EVs().set_EV(pkmn::e_stat::ATTACK, 5);
EXPECT_EQ(5, swig_pokemon.get_EVs().get_EV(pkmn::e_stat::ATTACK));
swig_pokemon.get_IVs().set_IV(pkmn::e_stat::ATTACK, 5);
EXPECT_EQ(5, swig_pokemon.get_IVs().get_IV(pkmn::e_stat::ATTACK));
swig_pokemon.get_markings().set_marking(pkmn::e_marking::TRIANGLE, true);
EXPECT_TRUE(swig_pokemon.get_markings().get_marking(pkmn::e_marking::TRIANGLE));
swig_pokemon.get_ribbons().set_ribbon("Cool Hyper", true);
EXPECT_TRUE(swig_pokemon.get_ribbons().get_ribbon("Cool Hyper"));
swig_pokemon.get_contest_stats().set_contest_stat(pkmn::e_contest_stat::SMART, 5);
EXPECT_EQ(5, swig_pokemon.get_contest_stats().get_contest_stat(pkmn::e_contest_stat::SMART));
EXPECT_TRUE(fs::exists(swig_pokemon.get_icon_filepath()));
EXPECT_TRUE(fs::exists(swig_pokemon.get_sprite_filepath()));
}
TEST(cpp_swig_wrapper_test, test_pokemon_party)
{
pkmn::swig::pokemon_party swig_pokemon_party(pkmn::e_game::FIRERED);
EXPECT_EQ(pkmn::e_game::FIRERED, swig_pokemon_party.get_game());
EXPECT_EQ(0, swig_pokemon_party.get_num_pokemon());
for(int i = 0; i < 6; ++i)
{
EXPECT_EQ(pkmn::e_species::NONE, swig_pokemon_party.get_pokemon(i).get_species());
}
pkmn::swig::pokemon new_pokemon(pkmn::e_species::CHARMANDER, pkmn::e_game::FIRERED, "", 10);
swig_pokemon_party.set_pokemon(0, new_pokemon);
EXPECT_EQ(pkmn::e_species::CHARMANDER, swig_pokemon_party.get_pokemon(0).get_species());
}
TEST(cpp_swig_wrapper_test, test_pokemon_box)
{
pkmn::swig::pokemon_box swig_pokemon_box(pkmn::e_game::FIRERED);
EXPECT_EQ(pkmn::e_game::FIRERED, swig_pokemon_box.get_game());
EXPECT_EQ(0, swig_pokemon_box.get_num_pokemon());
int capacity = swig_pokemon_box.get_capacity();
for(int i = 0; i < capacity; ++i)
{
EXPECT_EQ(pkmn::e_species::NONE, swig_pokemon_box.get_pokemon(i).get_species());
}
pkmn::swig::pokemon new_pokemon(pkmn::e_species::CHARMANDER, pkmn::e_game::FIRERED, "", 10);
swig_pokemon_box.set_pokemon(0, new_pokemon);
EXPECT_EQ(pkmn::e_species::CHARMANDER, swig_pokemon_box.get_pokemon(0).get_species());
}
TEST(cpp_swig_wrapper_test, test_pokemon_pc)
{
pkmn::swig::pokemon_pc swig_pokemon_pc(pkmn::e_game::FIRERED);
EXPECT_EQ(pkmn::e_game::FIRERED, swig_pokemon_pc.get_game());
for(int i = 0; i < swig_pokemon_pc.get_num_boxes(); ++i)
{
EXPECT_EQ(
swig_pokemon_pc.get_box_names()[i],
swig_pokemon_pc.get_box(i).get_name()
);
}
swig_pokemon_pc.get_box(4).set_name("COOL BOX");
EXPECT_EQ("COOL BOX", swig_pokemon_pc.get_box_names()[4]);
swig_pokemon_pc.get_box(4).set_pokemon(4, pkmn::swig::pokemon(pkmn::e_species::CHARIZARD, pkmn::e_game::FIRERED, "", 50));
EXPECT_EQ(pkmn::e_species::CHARIZARD, swig_pokemon_pc.get_box(4).get_pokemon(4).get_species());
}
TEST(cpp_swig_wrapper_test, test_game_save)
{
fs::path save_filepath(PKSAV_TEST_SAVES / "firered_leafgreen" / "pokemon_firered.sav");
EXPECT_EQ(
pkmn::swig::e_game_save_type::FIRERED_LEAFGREEN,
pkmn::swig::game_save::detect_type(save_filepath.string())
);
pkmn::swig::game_save swig_game_save(save_filepath.string());
EXPECT_EQ(save_filepath.string(), swig_game_save.get_filepath());
EXPECT_EQ(pkmn::e_game::FIRERED, swig_game_save.get_game());
swig_game_save.get_time_played().set_hours(100);
EXPECT_EQ(100, swig_game_save.get_time_played().get_hours());
swig_game_save.get_time_played().set_minutes(50);
EXPECT_EQ(50, swig_game_save.get_time_played().get_minutes());
swig_game_save.get_time_played().set_seconds(20);
EXPECT_EQ(20, swig_game_save.get_time_played().get_seconds());
swig_game_save.get_time_played().set_frames(10);
EXPECT_EQ(10, swig_game_save.get_time_played().get_frames());
swig_game_save.set_trainer_name("foobar");
EXPECT_EQ("foobar", swig_game_save.get_trainer_name());
swig_game_save.set_trainer_id(0xABCD1234);
EXPECT_EQ(0xABCD1234, swig_game_save.get_trainer_id());
swig_game_save.set_trainer_public_id(0x9753);
EXPECT_EQ(0x9753, swig_game_save.get_trainer_public_id());
swig_game_save.set_trainer_secret_id(0xFCA0);
EXPECT_EQ(0xFCA0, swig_game_save.get_trainer_secret_id());
swig_game_save.set_trainer_gender(pkmn::e_gender::FEMALE);
EXPECT_EQ(pkmn::e_gender::FEMALE, swig_game_save.get_trainer_gender());
swig_game_save.set_rival_name("abcdef");
EXPECT_EQ("abcdef", swig_game_save.get_rival_name());
swig_game_save.set_money(12345);
EXPECT_EQ(12345, swig_game_save.get_money());
swig_game_save.get_pokedex().get_has_seen().set_has_seen(pkmn::e_species::BULBASAUR, true);
EXPECT_TRUE(swig_game_save.get_pokedex().get_has_seen().get_has_seen(pkmn::e_species::BULBASAUR));
swig_game_save.get_pokedex().get_has_caught().set_has_caught(pkmn::e_species::CHARMANDER, true);
EXPECT_TRUE(swig_game_save.get_pokedex().get_has_caught().get_has_caught(pkmn::e_species::CHARMANDER));
/*
* These are the underlying calls for a fairly representative use case. This is the
* equivalent C# code.
*
* PKMN.GameSave gameSave = new PKMN.GameSave(filepath);
* gameSave.PokemonParty[1].EVs[PKMN.Stat.ATTACK] = 20;
* gameSave.PokemonPC[5][20].IVs[PKMN.Stat.HP] = 5;
* gameSave.ItemBag["Items"][0].Item = PKMN.Item.REPEL;
*/
swig_game_save.get_pokemon_party().get_pokemon(1).get_EVs().set_EV(pkmn::e_stat::ATTACK, 20);
swig_game_save.get_pokemon_pc().get_box(5).get_pokemon(20).get_IVs().set_IV(pkmn::e_stat::HP, 5);
swig_game_save.get_item_bag().get_pocket("Items").at(0).set_item(pkmn::e_item::REPEL);
EXPECT_EQ(20, swig_game_save.get_pokemon_party().get_pokemon(1).get_EVs().get_EV(pkmn::e_stat::ATTACK));
EXPECT_EQ(5, swig_game_save.get_pokemon_pc().get_box(5).get_pokemon(20).get_IVs().get_IV(pkmn::e_stat::HP));
EXPECT_EQ(pkmn::e_item::REPEL, swig_game_save.get_item_bag().get_pocket("Items").at(0).get_item());
}
TEST(cpp_swig_wrapper_test, test_breeding)
{
pkmn::pokemon::sptr mother = pkmn::pokemon::make(pkmn::e_species::ILLUMISE, pkmn::e_game::RUBY, "", 50);
pkmn::pokemon::sptr father = pkmn::pokemon::make(pkmn::e_species::VOLBEAT, pkmn::e_game::RUBY, "", 50);
mother->set_move(pkmn::e_move::HELPING_HAND, 0);
father->set_move(pkmn::e_move::HELPING_HAND, 0);
father->set_move(pkmn::e_move::WATER_PULSE, 1);
pkmn::swig::pokemon mother_swig(mother);
pkmn::swig::pokemon father_swig(father);
// get_child_moves
const pkmn::e_species child_species = pkmn::e_species::ILLUMISE;
std::vector<pkmn::e_move> child_moves_cpp = pkmn::breeding::get_child_moves(
mother,
father,
child_species
);
std::vector<pkmn::e_move> child_moves_swig = pkmn::swig::breeding::get_child_moves(
mother_swig,
father_swig,
child_species
);
EXPECT_EQ(child_moves_cpp, child_moves_swig);
// get_ideal_child_IVs
const pkmn::e_gender child_gender = pkmn::e_gender::FEMALE;
std::map<pkmn::e_stat, int> ideal_child_IVs_cpp = pkmn::breeding::get_ideal_child_IVs(
mother,
father,
child_gender
);
std::map<pkmn::e_stat, int> ideal_child_IVs_swig = pkmn::swig::breeding::get_ideal_child_IVs(
mother_swig,
father_swig,
child_gender
);
EXPECT_EQ(ideal_child_IVs_cpp, ideal_child_IVs_swig);
}
TEST(cpp_swig_wrapper_test, test_daycare)
{
pkmn::daycare::sptr daycare = pkmn::daycare::make(pkmn::e_game::GOLD);
const pkmn::pokemon_list_t& levelup_pokemon_list =
daycare->get_levelup_pokemon_as_vector();
const pkmn::pokemon_list_t& breeding_pokemon_list =
daycare->get_breeding_pokemon_as_vector();
pkmn::swig::daycare daycare_swig(daycare);
pkmn::swig::daycare_levelup_pokemon levelup_pokemon_swig(daycare);
pkmn::swig::daycare_breeding_pokemon breeding_pokemon_swig(daycare);
ASSERT_EQ(
daycare->can_breed_pokemon(),
daycare_swig.can_breed_pokemon()
);
EXPECT_EQ(
daycare->get_levelup_pokemon_capacity(),
levelup_pokemon_swig.get_capacity()
);
EXPECT_EQ(
daycare->get_breeding_pokemon_capacity(),
breeding_pokemon_swig.get_capacity()
);
pkmn::pokemon::sptr new_pokemon_cpp = pkmn::pokemon::make(
pkmn::e_species::CHIKORITA,
pkmn::e_game::GOLD,
"",
10
);
pkmn::swig::pokemon new_pokemon_swig(
pkmn::e_species::CYNDAQUIL,
pkmn::e_game::GOLD,
"",
10
);
daycare->set_levelup_pokemon(0, new_pokemon_cpp);
EXPECT_EQ(
levelup_pokemon_list[0],
levelup_pokemon_swig.get_pokemon(0).get_internal()
);
breeding_pokemon_swig.set_pokemon(1, new_pokemon_swig);
EXPECT_EQ(
breeding_pokemon_swig.get_pokemon(0).get_internal(),
breeding_pokemon_list[0]
);
}
|
{"hexsha": "d1efe057ad71ca47d29d27bdbbc6c3d45e2a9db2", "size": 28283, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "testing/unit-tests/cpp/cpp_swig_wrapper_test.cpp", "max_stars_repo_name": "ncorgan/libpkmn", "max_stars_repo_head_hexsha": "c683bf8b85b03eef74a132b5cfdce9be0969d523", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2017-06-10T13:21:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-30T21:20:19.000Z", "max_issues_repo_path": "testing/unit-tests/cpp/cpp_swig_wrapper_test.cpp", "max_issues_repo_name": "PMArkive/libpkmn", "max_issues_repo_head_hexsha": "c683bf8b85b03eef74a132b5cfdce9be0969d523", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12.0, "max_issues_repo_issues_event_min_datetime": "2017-04-05T11:13:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-03T14:31:03.000Z", "max_forks_repo_path": "testing/unit-tests/cpp/cpp_swig_wrapper_test.cpp", "max_forks_repo_name": "PMArkive/libpkmn", "max_forks_repo_head_hexsha": "c683bf8b85b03eef74a132b5cfdce9be0969d523", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-01-22T21:02:31.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-30T21:20:20.000Z", "avg_line_length": 38.58526603, "max_line_length": 126, "alphanum_fraction": 0.6731605558, "num_tokens": 7898}
|
# Extended Kalman filter
This is an implementation of the example Kalman filter: [ExEKF.m](https://github.com/cybergalactic/MSS/blob/master/mssExamples/ExEKF.m).
ExEKF is a discrete-time extended Kalman filter (EKF) implementation demonstrating
how the "predictor-corrector representation" can be applied to the
nonlinear model:

$\dot{x}_1 = x_2$ <br>
$\dot{x}_2 = a \, x_2 \, |x_2| + b \, u + w$ <br>
$y = x_1 + \varepsilon$ <br>

where $w$ is white process noise and $\varepsilon$ is white measurement noise.
The GNSS position measurement frequency $f_{gnss}$ [Hz] can be chosen smaller than
or equal to the sampling frequency $f_s$ [Hz], and the ratio between the
frequencies must be an integer: $Z = f_s / f_{gnss} \ge 1$.
Author: Thor I. Fossen <br>
Date: 17 Oct, 2018 <br>
Revisions: 28 Feb. 2020, minor updates of notation <br>
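
The transition Jacobian built in the next cell follows from a forward-Euler
discretization of the model; a sketch of the reasoning, assuming step size $h$:

$$x_{k+1} = x_k + h \, f(x_k, u_k) \quad \Rightarrow \quad \Phi_k = \frac{\partial x_{k+1}}{\partial x_k} = I + h \left. \frac{\partial f}{\partial x} \right|_{x_k},$$

which is exactly the `sp.eye(2) + ... * h` expression below.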
```python
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
import sympy as sp
import src.visualization.book_format as book_format
book_format.set_style()
from src.substitute_dynamic_symbols import lambdify
from sympy import Matrix
```
```python
x_1,x_2, a, b, u, w, h = sp.symbols("x_1,x_2, a, b, u, w, h")
jac = sp.eye(2) + Matrix([x_2,
a * x_2 * x_2 + b * u + w]).jacobian([x_1,x_2])*h
jac
```
```python
lambda_jacobian = lambdify(jac)
```
```python
lambda_jacobian(a=1,h=0.1,x_2=3)
```
```python
f = Matrix([x_2,
a * x_2 * sp.Abs(x_2) + b * u + w])
lambda_f = lambdify(f)
```
```python
lambda_f
```
```python
from src.kalman_filter import (extended_kalman_filter_example,
extended_kalman_filter_parameter_estimation_example, simulate_model)
```
```python
# simulation parameters
N = 100 # no. of iterations
f_m = 1 # sampling frequency [Hz]
h_m = 1 / f_m  # sampling time: h_m = 1/f_m (s)
t = np.arange(0, N * h_m, h_m)
# initial values for x
x0 = np.array([[0, 0]]).T
us = 0.1 * np.sin(0.1 * t) # inputs
np.random.seed(42)
a_ = -0.9
ws = 0.1 * np.random.normal(scale=1, size=N) # process noise
df = simulate_model(x0=x0, us=us, ws=ws, t=t, a=a_)
fig, axes = plt.subplots(nrows=3)
df.plot(y="u", label="u (input)", ax=axes[0])
df.plot(y="x_1", ax=axes[1])
df.plot(y="x_2", ax=axes[2])
plt.show()
```
```python
## Measured yaw angle:
df["epsilon"] = 0.1 * np.random.normal(scale=3, size=N) # measurement noise
df["y"] = df["x_1"] + df["epsilon"]
ys = np.zeros((N, 1))  # single measurement channel
ys[:, 0] = df["y"].values
```
```python
## Discretized system matrices:
f_s = 10 # sampling frequency [Hz]
h_s = 1 / f_s # sampling time: h = 1/f_s (s)
# initialization of Kalman filter
x0 = np.array([[3.5, 0]]).T
P_prd = np.diag([1, 1])
Qd = 1
Rd = 10
df2 = extended_kalman_filter_example(
x0=x0, P_prd=P_prd, lambda_f=lambda_f,
lambda_jacobian=lambda_jacobian, h_m=h_m, h=h_s, us=us, ys=ys, Qd=Qd, Rd=Rd
)
```
```python
df2.head()
```
```python
fig,axes=plt.subplots(nrows=3)
df.plot(y='u', label='u (input)', ax=axes[0])
axes[1].set_ylabel('$x_1$')
df.plot(y='y', style='.', alpha=0.7, ax=axes[1])
df.plot(y='x_1', label='model', ax=axes[1])
df2.plot(y='x_1 predictor', label='predictor', style='--', ax=axes[1])
df2.plot(y='x_1', label='kalman', style=':', ax=axes[1])
axes[2].set_ylabel('$x_2$')
df.plot(y='x_2', label='model', ax=axes[2]);
df2.plot(y='x_2 predictor', label='predictor', style='--', ax=axes[2]);
df2.plot(y='x_2', label='kalman', style=':', ax=axes[2])
```
## Parameter estimation
The extended Kalman filter can also be used to estimate model parameters during the filtering.
### Estimating $a$
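The parameter is estimated with the standard state-augmentation trick: append $a$
to the state and model it as constant, so the filter tracks it alongside $x_1, x_2$
(a sketch of the augmented model assumed below):

$$\frac{d}{dt}\begin{bmatrix} x_1 \\ x_2 \\ a \end{bmatrix} = \begin{bmatrix} x_2 \\ a \, x_2 |x_2| + b u + w \\ 0 \end{bmatrix}.$$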
```python
x_1,x_2, a, b, u, w, h = sp.symbols("x_1,x_2, a, b, u, w, h")
jac_a = sp.eye(3) + Matrix([x_2,
a * x_2 * x_2 + b * u + w, 0]).jacobian([x_1,x_2,a])*h
jac_a
```
```python
lambda_jacobian_a = lambdify(jac_a)
```
```python
lambda_jacobian_a
```
```python
# Explicit Jacobian of the augmented model, including the derivative of
# x_2*|x_2| (this overrides the lambdified version above):
def lambda_jacobian_a(a, h, x_2):
jac = np.array([
[1, h, 0],
[0, 2*a*h*np.abs(x_2) + 1, h*x_2*np.abs(x_2)],
[0,0,1]
])
return jac
```
```python
f_a = Matrix([x_2,
a * x_2 * sp.Abs(x_2) + b * u + w,0])
lambda_f_a = lambdify(f_a)
```
```python
f_a
```
```python
# simulation parameters
N = 4000 # no. of iterations
#f_m = 1 # sampling frequency [Hz]
h_m = 0.05 # sampling time: h = 1/f_s (s)
t = np.arange(0, N * h_m, h_m)
# initial values for x
x0 = np.array([[0, 0]]).T
us = 0.1 * np.sin(0.1 * t) # inputs
np.random.seed(42)
process_noise = 0
ws = process_noise * np.random.normal(scale=1, size=N) # process noise
a_ = -0.9
df = simulate_model(x0=x0, us=us, ws=ws, t=t, a=a_)
```
```python
## Measured yaw angle:
noise = 0
df["epsilon"] = noise * np.random.normal(scale=3, size=N) # measurement noise
df["y"] = df["x_1"] + df["epsilon"]
ys = np.zeros((N, 1))  # single measurement channel
ys[:, 0] = df["y"].values
```
```python
## Discretized system matrices:
h_s = h_m
e=1
E = np.array([[0,0],
[e,0],
[0,e]])
Cd = np.array([[1, 0, 0]])
# initialization of Kalman filter
x0 = np.array([[0, 0, 0]]).T
P_prd = np.diag([1, 1, 1])
Qd = np.diag([1, 0.1]) # Q = diag( Q_x2 Q_a )
Rd = 10 # R = diag( R_x1 )
time_steps = extended_kalman_filter_parameter_estimation_example(
x0=x0, P_prd=P_prd, lambda_f=lambda_f_a,
lambda_jacobian=lambda_jacobian_a, h=h_s, us=us, ys=ys, Qd=Qd, Rd=Rd, E=E, Cd=Cd
)
```
```python
x_hats = np.array([time_step['x_hat'] for time_step in time_steps]).T
x_hats.shape
```
```python
kalman_gains = np.array([time_step['K'] for time_step in time_steps]).T
kalman_gains.shape
```
```python
time = np.array([time_step['time'] for time_step in time_steps]).T
time.shape
```
```python
fig,axes=plt.subplots(nrows=3)
ax=axes[0]
ax.set_ylabel('$x_1$')
df.plot(y='y', style='-', alpha=0.7, ax=ax)
df.plot(y='x_1', label='model', ax=ax)
ax.plot(time,x_hats[0,:], '--', label='kalman')
ax.legend()
ax=axes[1]
ax.set_ylabel('$x_2$')
df.plot(y='x_2', label='model', ax=ax);
ax.plot(time,x_hats[1,:], '--', label='kalman')
ax.legend()
ax=axes[2]
ax.set_ylabel('$a$')
ax.plot(time,x_hats[2,:], '--', label='kalman')
ax.plot([time[0], time[-1]], [a_,a_], label='$a$')
ax.legend()
```
```python
fig,ax=plt.subplots()
for k in kalman_gains:
ax.plot(time, k)
ax.legend(['$x_1$','$x_2$','$a$'])
ax.set_title('Kalman gains')
```
|
{"hexsha": "f1844f47136022c27405cbaee5ee49da2325171b", "size": 12738, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/15.31_EKF_fossen.ipynb", "max_stars_repo_name": "martinlarsalbert/wPCC", "max_stars_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/15.31_EKF_fossen.ipynb", "max_issues_repo_name": "martinlarsalbert/wPCC", "max_issues_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/15.31_EKF_fossen.ipynb", "max_forks_repo_name": "martinlarsalbert/wPCC", "max_forks_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5375, "max_line_length": 145, "alphanum_fraction": 0.5223739991, "converted": true, "num_tokens": 2290}
|
import numpy as np
import xeno
try:
    from sklearn.datasets import load_digits
except ImportError:
    print("scikit-learn needs to be installed (e.g. with a numpy+MKL wheel) :(\n")
# prepare
xeno.utils.random.set_seed(1234)
# data
digits = load_digits()
X_train = digits.data
X_train /= np.max(X_train)
Y_train = digits.target
n_classes = np.unique(Y_train).size
# model
model = xeno.model.Model()
model.add(xeno.layers.Dense(n_out=500, n_in=64, activation=xeno.activations.ReLU()))
model.add(xeno.layers.Dense(n_out=n_classes, activation=xeno.activations.Softmax()))
model.compile(loss=xeno.objectives.SCCE(), optimizer=xeno.optimizers.SGD(lr=0.005))
# train
model.fit(X_train, xeno.utils.data.one_hot(Y_train), max_iter=150, validation_split=0.1)
|
{"hexsha": "b65017cd98adc5bb77092a0b7383197718e2d0b6", "size": 742, "ext": "py", "lang": "Python", "max_stars_repo_path": "Machine Learning Projects/Xeno-Deep Learning library from scratch/test.py", "max_stars_repo_name": "TeacherManoj0131/HacktoberFest2020-Contributions", "max_stars_repo_head_hexsha": "c7119202fdf211b8a6fc1eadd0760dbb706a679b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 256, "max_stars_repo_stars_event_min_datetime": "2020-09-30T19:31:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-20T18:09:15.000Z", "max_issues_repo_path": "Machine Learning Projects/Xeno-Deep Learning library from scratch/test.py", "max_issues_repo_name": "TeacherManoj0131/HacktoberFest2020-Contributions", "max_issues_repo_head_hexsha": "c7119202fdf211b8a6fc1eadd0760dbb706a679b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 293, "max_issues_repo_issues_event_min_datetime": "2020-09-30T19:14:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-06T02:34:47.000Z", "max_forks_repo_path": "Machine Learning Projects/Xeno-Deep Learning library from scratch/test.py", "max_forks_repo_name": "TeacherManoj0131/HacktoberFest2020-Contributions", "max_forks_repo_head_hexsha": "c7119202fdf211b8a6fc1eadd0760dbb706a679b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1620, "max_forks_repo_forks_event_min_datetime": "2020-09-30T18:37:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T20:54:22.000Z", "avg_line_length": 28.5384615385, "max_line_length": 88, "alphanum_fraction": 0.7628032345, "include": true, "reason": "import numpy", "num_tokens": 199}
|
"""
Example for Anthropomorphic Arm.
"""
# Functions from the libraries used
from sympy import symbols, trigsimp, pprint
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.vector import ReferenceFrame, Vector
from sympy.physics.vector import time_derivative
# Symbolic variables
THETA_1, THETA_2, THETA_3 = dynamicsymbols('THETA_1 THETA_2 THETA_3')
L_1, L_2 = symbols('L_1 L_2', positive=True)
# Reference frames
# Fixed frame
B0 = ReferenceFrame('B0')
# Moving frame: THETA_1 about B0.y
B1 = ReferenceFrame('B1')
B1.orient(B0, 'Axis', [THETA_1, B0.y])
# Moving frame: THETA_2 about B1.z
B2 = ReferenceFrame('B2')
B2.orient(B1, 'Axis', [THETA_2, B1.z])
# Moving frame: THETA_3 about B2.z
B3 = ReferenceFrame('B3')
B3.orient(B2, 'Axis', [THETA_3, B2.z])
# Position vectors between the points
# Null vector
B0_R_OA = Vector(0)
# Vector from point A to point B, expressed in the moving frame B2
B2_R_AB = L_1 * B2.x
# Vector from point B to point C, expressed in the moving frame B3
B3_R_BC = L_2 * B3.x
# Kinematics of point A with respect to frame B0
R_A = B0_R_OA
V_A = time_derivative(R_A, B0)
A_A = time_derivative(V_A, B0)
# Kinematics of point B with respect to frame B0
R_B = R_A + B2_R_AB.express(B0)
V_B = time_derivative(R_B, B0)
A_B = time_derivative(V_B, B0)
# Kinematics of point C with respect to frame B0
R_C = R_B + B3_R_BC.express(B0)
V_C = (time_derivative(R_C, B0))
A_C = (time_derivative(V_C, B0))
# Simplification of the results
R_A = (R_A.to_matrix(B0)).applyfunc(trigsimp)
V_A = (V_A.to_matrix(B0)).applyfunc(trigsimp)
A_A = (A_A.to_matrix(B0)).applyfunc(trigsimp)
R_B = (R_B.to_matrix(B0)).applyfunc(trigsimp)
V_B = (V_B.to_matrix(B0)).applyfunc(trigsimp)
A_B = (A_B.to_matrix(B0)).applyfunc(trigsimp)
R_C = (R_C.to_matrix(B0)).applyfunc(trigsimp)
V_C = (V_C.to_matrix(B0)).applyfunc(trigsimp)
A_C = (A_C.to_matrix(B0)).applyfunc(trigsimp)
# Results for point C
pprint(R_C)
pprint(V_C)
pprint(A_C)
|
{"hexsha": "25b95e3e59e4e21a2d9e4aa753aabedeb1b2e14e", "size": 1981, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/antropomorphic_robot/script_kinematics_3dof_anthropomorphic.py", "max_stars_repo_name": "abhikamath/pydy", "max_stars_repo_head_hexsha": "0d11df897c40178bb0ffd9caa9e25bccd1d8392a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 298, "max_stars_repo_stars_event_min_datetime": "2015-01-31T11:43:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T02:18:21.000Z", "max_issues_repo_path": "examples/antropomorphic_robot/script_kinematics_3dof_anthropomorphic.py", "max_issues_repo_name": "abhikamath/pydy", "max_issues_repo_head_hexsha": "0d11df897c40178bb0ffd9caa9e25bccd1d8392a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 359, "max_issues_repo_issues_event_min_datetime": "2015-01-17T16:56:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T05:27:08.000Z", "max_forks_repo_path": "examples/antropomorphic_robot/script_kinematics_3dof_anthropomorphic.py", "max_forks_repo_name": "pydy/pydy", "max_forks_repo_head_hexsha": "4a2c46faae44d06017b64335e48992ee8c53e1b6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 109, "max_forks_repo_forks_event_min_datetime": "2015-02-03T13:02:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-21T12:57:21.000Z", "avg_line_length": 30.4769230769, "max_line_length": 69, "alphanum_fraction": 0.7476022211, "include": true, "reason": "from sympy", "num_tokens": 704}
|
"""
Client side code to perform a single API call to a tensorflow model up and running.
"""
import json
import numpy as np
import requests
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
import object_detection.utils.ops as utils_ops
from PIL import Image
import os
from flask import Flask, request, jsonify, send_from_directory, send_file
from flask_cors import CORS
import uuid
import glob
app = Flask(__name__)
cors = CORS(app, resources={r"*": {"origins": "*"}})
SERVER_URL = 'http://localhost:8501/v1/models/my_model:predict'
PATH_TO_LABELS = './xray/labels.pbtxt'
@app.route('/')
def hello_world():
return 'X-RAY Bone Fracture Detection API v1.0'
@app.route('/sample/<string:pid>')
def sample(pid):
dir = os.getcwd() + "/xray/image/"
for name in glob.glob(dir + pid):
return send_file(name)
return json.dumps({'success': False}), 400, {'ContentType': 'application/json'}
@app.route('/treatment/<string:pid>')
def treatment(pid):
dir = os.getcwd() + "/xray/treatment/"
for name in glob.glob(dir + pid):
return send_file(name)
return json.dumps({'success': False}), 400, {'ContentType': 'application/json'}
@app.route('/supporting/<string:pid>')
def supporting(pid):
dir = os.getcwd() + "/xray/supporting/"
for name in glob.glob(dir + pid):
return send_file(name)
return json.dumps({'success': False}), 400, {'ContentType': 'application/json'}
@app.route('/output/<string:pid>')
def output(pid):
dir = os.getcwd() + "/images/output/"
for name in glob.glob(dir + pid + '.*'):
return send_file(name)
return json.dumps({'success': False}), 400, {'ContentType': 'application/json'}
@app.route('/input/<string:pid>')
def input(pid):
input_dir = os.getcwd() + "/images/input/"
for name in glob.glob(input_dir + pid + '.*'):
return send_file(name)
return json.dumps({'success': False}), 400, {'ContentType': 'application/json'}
@app.route('/predict', methods=['POST'])
def predict():
data = {}
print(request)
if not request.files["image"]:
return jsonify({"status": 400, "message": 'No image passed'})
img = request.files["image"]
extension = img.filename.split('.')[-1]
new_filename = str(uuid.uuid1()) + "." + extension
new_fullpath = os.path.join(os.getcwd() + "/images/input", new_filename)
img.save(new_fullpath)
output_filename = process(fullpath=new_fullpath, filename=new_filename)
return jsonify({"status": 200, "data": {
"file_name" : img.filename,
"url" : 'http://178.128.105.21:5000/output/' + output_filename
}})
print(output_filename)
print(os.getcwd())
full_output_path = os.path.join(os.getcwd(), output_filename)
print(full_output_path)
# if os.path.exists(full_output_path):
# return 'sucess'
# else:
# return ' failed'
return send_from_directory(os.getcwd(), output_filename)
def format_mask(detection_masks, detection_boxes, N, image_size):
"""
Format the m*m detection soft masks as full size binary masks.
Args:
detection_masks (np.array): of size N * m * m
detection_boxes (np.array): of size N * 4 with the normalized bow coordinates.
Coordinates are written as [y_min, x_min, y_max, x_max]
N (int): number of detections in the image
image_size (tuple(int))
Returns:
detection_masks (np.array): of size N * H * W where H and W are the image Height and Width.
"""
(height, width, _) = image_size
output_masks = np.zeros((N, image_size[0], image_size[1]))
# Process the masks related to the N objects detected in the image
for i in range(N):
normalized_mask = detection_masks[i].astype(np.float32)
normalized_mask = Image.fromarray(normalized_mask, 'F')
# Boxes are expressed with 4 scalars - normalized coordinates [y_min, x_min, y_max, x_max]
[y_min, x_min, y_max, x_max] = detection_boxes[i]
# Compute absolute boundary of box
box_size = (int((x_max - x_min) * width),
int((y_max - y_min) * height))
# Resize the mask to the box size using LANCZOS appoximation
resized_mask = normalized_mask.resize(box_size, Image.LANCZOS)
# Convert back to array
resized_mask = np.array(resized_mask).astype(np.float32)
        # Binarize the mask using a fixed threshold
        binary_mask_box = (resized_mask >= 0.5).astype(np.uint8)
# Replace the mask in the context of the original image size
binary_mask = np.zeros((height, width))
x_min_at_scale = int(x_min * width)
y_min_at_scale = int(y_min * height)
d_x = int((x_max - x_min) * width)
d_y = int((y_max - y_min) * height)
        binary_mask[y_min_at_scale:y_min_at_scale + d_y,
                    x_min_at_scale:x_min_at_scale + d_x] = binary_mask_box
# Update the masks array
output_masks[i][:][:] = binary_mask
# Cast mask as integer
output_masks = output_masks.astype(np.uint8)
return output_masks
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def pre_process(image_path):
"""
Pre-process the input image to return a json to pass to the tf model
Args:
image_path (str): Path to the jpeg image
Returns:
formatted_json_input (str)
"""
image = Image.open(image_path).convert("RGB")
    image_np = load_image_into_numpy_array(image)
# Expand dims to create bach of size 1
image_tensor = np.expand_dims(image_np, 0)
formatted_json_input = json.dumps(
{"signature_name": "serving_default", "instances": image_tensor.tolist()})
return formatted_json_input
def post_process(server_response, image_size):
"""
Post-process the server response
Args:
server_response (requests.Response)
image_size (tuple(int))
Returns:
post_processed_data (dict)
"""
response = json.loads(server_response.text)
output_dict = response['predictions'][0]
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'])
output_dict['detection_classes'] = np.array(
[int(class_id) for class_id in output_dict['detection_classes']])
output_dict['detection_boxes'] = np.array(output_dict['detection_boxes'])
output_dict['detection_scores'] = np.array(output_dict['detection_scores'])
# Process detection mask
if 'detection_masks' in output_dict:
        # Determine a threshold above which a pixel is considered to belong
        # to the mask (thresh = 0.5, applied inside format_mask)
output_dict['detection_masks'] = np.array(
output_dict['detection_masks'])
output_dict['detection_masks'] = format_mask(
output_dict['detection_masks'], output_dict['detection_boxes'], output_dict['num_detections'], image_size)
return output_dict
def process(fullpath, filename):
# image_path = os.path.join(os.getcwd(), filename)
print('image_path', fullpath)
# image_path = args.image_path
# output_image = os.path.join(os.getcwd(), 'OUTPUT.JSON')
# save_output_image = os.path.join(os.getcwd(), 'OUTPUT.PNG')
# path_to_labels = args.label_map
# Build input data
print(f'\n\nPre-processing input file {fullpath}...\n')
formatted_json_input = pre_process(fullpath)
print('Pre-processing done! \n')
# Call tensorflow server
headers = {"content-type": "application/json"}
print(f'\n\nMaking request to {SERVER_URL}...\n')
server_response = requests.post(
SERVER_URL, data=formatted_json_input, headers=headers)
print(f'Request returned\n')
# Post process output
print(f'\n\nPost-processing server response...\n')
image = Image.open(fullpath).convert("RGB")
image_np = load_image_into_numpy_array(image)
output_dict = post_process(server_response, image_np.shape)
print(f'Post-processing done!\n')
# Save output on disk
# print(f'\n\nSaving output to {output_image}\n\n')
# with open(output_image, 'w+') as outfile:
# json.dump(json.loads(server_response.text), outfile)
# print(f'Output saved!\n')
# if save_output_image:
# Save output on disk
category_index = label_map_util.create_category_index_from_labelmap(
PATH_TO_LABELS, use_display_name=True)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8,
)
output_with_no_extension = filename.split('.', 1)[0]
output_image = ''.join([output_with_no_extension, '.jpeg'])
print(output_image)
img = Image.fromarray(image_np)
output_full_path = os.path.join(
os.getcwd() + "/images/output/", output_image)
img.save(output_full_path)
print('\n\nImage saved\n\n')
return output_with_no_extension
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=True)
|
{"hexsha": "6dd015a1af21731612341548f3a7f592c8f427a9", "size": 9872, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "fakhrul/xray_detection_client", "max_stars_repo_head_hexsha": "802606023d97f747e593bf93510124fc2d829106", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "fakhrul/xray_detection_client", "max_issues_repo_head_hexsha": "802606023d97f747e593bf93510124fc2d829106", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "fakhrul/xray_detection_client", "max_forks_repo_head_hexsha": "802606023d97f747e593bf93510124fc2d829106", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9066666667, "max_line_length": 118, "alphanum_fraction": 0.663897893, "include": true, "reason": "import numpy", "num_tokens": 2345}
|
Base.write(io::IO, ::MIME"text/plain", ::Void) = nothing
function Base.read(io::IO, ::MIME"text/plain", ::Type{Void})
readline(io)
return nothing
end
Base.write(io::IO, ::MIME"text/plain", i::Integer) = print(io, i)
Base.read{I<:Integer}(io::IO, ::MIME"text/plain", ::Type{I}) = parse(I, readline(io))
|
{"hexsha": "dbb78611e6a5299a4715b3fb20af14ed715e75a6", "size": 314, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/common.jl", "max_stars_repo_name": "corydoras69dev/Records.jl", "max_stars_repo_head_hexsha": "84853787eeba3aefcdfca1316a18f87d20af9790", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/common.jl", "max_issues_repo_name": "corydoras69dev/Records.jl", "max_issues_repo_head_hexsha": "84853787eeba3aefcdfca1316a18f87d20af9790", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/common.jl", "max_forks_repo_name": "corydoras69dev/Records.jl", "max_forks_repo_head_hexsha": "84853787eeba3aefcdfca1316a18f87d20af9790", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5454545455, "max_line_length": 85, "alphanum_fraction": 0.6369426752, "num_tokens": 99}
|
using BinDeps
@BinDeps.setup
world = library_dependency("libworld", aliases=["libworld", "world-0"])
const version = "0.3.1"
# TODO: add support for 32-bit Windows
if Sys.iswindows() && Sys.WORD_SIZE == 32
error("Your platform isn't supported yet.")
end
github_root = "https://github.com/r9y9/World-cmake"
arch = Sys.WORD_SIZE == 64 ? "x86_64" : "i686"
major = version[1]
provides(Binaries,
URI("$(github_root)/releases/download/v$(version)/WORLD-$(major)_mingw$(Sys.WORD_SIZE)_$(arch).zip"),
world, unpacked_dir = "usr/lib", os = :Windows)
provides(Sources,
URI("$(github_root)/archive/v$(version).tar.gz"),
world,
unpacked_dir="World-cmake-$(version)")
prefix = joinpath(BinDeps.depsdir(world), "usr")
srcdir = joinpath(BinDeps.depsdir(world), "src", "World-cmake-$(version)")
builddir = joinpath(srcdir, "build")
provides(SimpleBuild,
(@build_steps begin
GetSources(world)
@build_steps begin
ChangeDirectory(builddir)
`cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=ON -DCMAKE_INSTALL_PREFIX=$prefix ..`
`make`
`make install`
end
end), world, os = :Unix)
@BinDeps.install Dict(:libworld => :libworld)
|
{"hexsha": "1a4a4ef38e1d71f49ca23012a143348ad0ab31aa", "size": 1271, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deps/build.jl", "max_stars_repo_name": "giordano/WORLD.jl", "max_stars_repo_head_hexsha": "bf927a5213f76222e4abae19dc3975f659ebdb2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2015-02-10T22:52:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-23T06:19:31.000Z", "max_issues_repo_path": "deps/build.jl", "max_issues_repo_name": "giordano/WORLD.jl", "max_issues_repo_head_hexsha": "bf927a5213f76222e4abae19dc3975f659ebdb2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2015-02-11T09:00:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-19T02:33:49.000Z", "max_forks_repo_path": "deps/build.jl", "max_forks_repo_name": "giordano/WORLD.jl", "max_forks_repo_head_hexsha": "bf927a5213f76222e4abae19dc3975f659ebdb2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-10-18T18:55:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T12:09:18.000Z", "avg_line_length": 30.2619047619, "max_line_length": 110, "alphanum_fraction": 0.6223446105, "num_tokens": 351}
|
/*
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
/**
* \file cgs_lda.cpp
*
* \brief This file contains a GraphLab based implementation of the
* Collapsed Gibbs Sampler (CGS) for the Latent Dirichlet Allocation
* (LDA) model.
*
*
*
* \author Joseph Gonzalez, Diana Hu
*/
#include <vector>
#include <set>
#include <algorithm>
#include "util/atomic.hpp"
#include <boost/math/special_functions/gamma.hpp>
#include <vector>
#include <algorithm>
#include <boost/algorithm/string.hpp>
#include <boost/config/warning_disable.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/phoenix_core.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_stl.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/input_sequence.hpp>
// Global Types
// ============================================================================
typedef int count_type;
/**
* \brief The factor type is used to store the counts of tokens in
* each topic for words, documents, and assignments.
*
* Atomic counts are used because we violate the abstraction by
* modifying adjacent vertex data on scatter. As a consequence
* multiple threads on the same machine may try to update the same
 * vertex data at the same time. The graphchi::atomic type ensures
* that multiple increments are serially consistent.
*/
typedef std::vector< graphchi::atomic<count_type> > factor_type;
/**
* \brief We use the factor type in accumulators and so we define an
* operator+=
*/
inline factor_type& operator+=(factor_type& lvalue,
const factor_type& rvalue) {
if(!rvalue.empty()) {
if(lvalue.empty()) lvalue = rvalue;
else {
for(size_t t = 0; t < lvalue.size(); ++t) lvalue[t] += rvalue[t];
}
}
return lvalue;
} // end of operator +=
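// Illustrative only (names here are hypothetical, not part of this file):
// merging two per-topic count vectors with the operator+= above.
//
//   factor_type a(10), b(10);
//   ++a[3]; ++b[3]; ++b[7];
//   a += b;   // now a[3] == 2 and a[7] == 1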
/**
* \brief The latent topic id of a token is the smallest reasonable
* type.
*/
typedef uint16_t topic_id_type;
// We require a null topic to represent the topic assignment for
// tokens that have not yet been assigned.
#define NULL_TOPIC (topic_id_type(-1))
#define NTOPICS 20
/**
* \brief The assignment type is used on each edge to store the
* assignments of each token. There can be several occurrences of the
 * same word in a given document, so a fixed-size array is used to store
 * the assignments of each occurrence.
*/
typedef uint16_t assignment_type[NTOPICS];
// Global Variables
// ============================================================================
/**
* \brief The alpha parameter determines the sparsity of topics for
* each document.
*/
double ALPHA = 1;
/**
* \brief the Beta parameter determines the sparsity of words in each
* document.
*/
double BETA = 0.1;
/**
 * \brief The total number of topics is fixed at compile time by the
 * NTOPICS macro defined above.
 */
/**
* \brief The total number of words in the dataset.
*/
size_t NWORDS = 0;
/**
* \brief The total number of docs in the dataset.
*/
size_t NDOCS = 0;
/**
* \brief The total number of tokens in the corpus
*/
size_t NTOKENS = 0;
/**
* \brief The number of top words to display during execution (from
* each topic).
*/
size_t TOPK = 5;
/**
* \brief The interval to display topics during execution.
*/
size_t INTERVAL = 10;
/**
* \brief The global variable storing the global topic count across
* all machines. This is maintained periodically using aggregation.
*/
factor_type GLOBAL_TOPIC_COUNT;
/**
* \brief A dictionary of words used to print the top words during
* execution.
*/
std::vector<std::string> DICTIONARY;
/**
 * \brief The maximum occurrences allowed for an individual term-doc
* pair. (edge data)
*/
size_t MAX_COUNT = 100;
/**
 * \brief The burn-in time in seconds after which sampling stops. If
 * less than zero the sampler runs until sampling is explicitly disabled.
*/
float BURNIN = -1;
// Graph Types
// ============================================================================
/**
* \brief The vertex data represents each term and document in the
* corpus and contains the counts of tokens in each topic.
*/
struct vertex_data {
///! The total number of updates
uint32_t nupdates;
///! The total number of changes to adjacent tokens
uint32_t nchanges;
///! The count of tokens in each topic
factor_type factor;
vertex_data() : nupdates(0), nchanges(0), factor(NTOPICS) { }
}; // end of vertex_data
/**
* \brief The edge data represents the individual tokens (word,doc)
* pairs and their assignment to topics.
*/
struct edge_data {
///! The number of changes on the last update
uint16_t nchanges;
///! The assignment of all tokens
assignment_type assignment;
edge_data(size_t ntokens = 0) : nchanges(0) {
    for(int i=0; i<NTOPICS; i++) assignment[i] = NULL_TOPIC; // start unassigned
}
}; // end of edge_data
typedef graphlab::distributed_graph<vertex_data, edge_data> graph_type;
static void parse(edge_data &x, const char * s) {
size_t count = atol(s);
count = std::min(count, MAX_COUNT);
x = (edge_data(count));
}
/**
* \brief Edge data parser used in graph.load_json
*
* Make sure that the edge file list
* has docids from -2 to -(total #docid) and wordids 0 to (total #words -1)
*/
bool eparser(edge_data& ed, const std::string& line){
const int BASE = 10;
char* next_char_ptr = NULL;
size_t count = strtoul(line.c_str(), &next_char_ptr, BASE);
  if(next_char_ptr == line.c_str()) return false; // no digits were parsed
//threshold count
count = std::min(count, MAX_COUNT);
ed = (edge_data(count));
return true;
}
/**
* \brief Vertex data parser used in graph.load_json
*/
bool vparser(vertex_data& vd, const std::string& line){
vd = vertex_data();
return true;
}
/**
* \brief Determine if the given vertex is a word vertex or a doc
* vertex.
*
* For simplicity we connect docs --> words and therefore if a vertex
* has in edges then it is a word.
*/
inline bool is_word(const graph_type::vertex_type& vertex) {
  return vertex.num_in_edges() > 0;
}
/**
* \brief Determine if the given vertex is a doc vertex
*
* For simplicity we connect docs --> words and therefore if a vertex
* has out edges then it is a doc
*/
inline bool is_doc(const graph_type::vertex_type& vertex) {
  return vertex.num_out_edges() > 0;
}
/**
* \brief return the number of tokens on a particular edge.
*/
inline size_t count_tokens(const graph_type::edge_type& edge) {
  // assignment is a fixed-size C array in this port, so it has no size();
  // every edge carries NTOPICS assignment slots
  return NTOPICS;
}
/**
* \brief Get the other vertex in the edge.
*/
inline graph_type::vertex_type
get_other_vertex(const graph_type::edge_type& edge,
const graph_type::vertex_type& vertex) {
return vertex.id() == edge.source().id()? edge.target() : edge.source();
}
// ========================================================
// The Collapsed Gibbs Sampler Function
/**
* \brief The gather type for the collapsed Gibbs sampler is used to
* collect the topic counts on adjacent edges so that the apply
* function can compute the correct topic counts for the center
* vertex.
*
*/
struct gather_type {
factor_type factor;
uint32_t nchanges;
gather_type() : nchanges(0) { };
gather_type(uint32_t nchanges) : factor(NTOPICS), nchanges(nchanges) { };
gather_type& operator+=(const gather_type& other) {
factor += other.factor;
nchanges += other.nchanges;
return *this;
}
}; // end of gather type
/**
* \brief The collapsed Gibbs sampler vertex program updates the topic
* counts for the center vertex and then draws new topic assignments
 * for each edge during the scatter phase.
*
*/
class cgs_lda_vertex_program :
public graphlab::ivertex_program<graph_type, gather_type> {
public:
/**
* \brief At termination we want to disable sampling to allow the
* correct final counts to be computed.
*/
static bool DISABLE_SAMPLING;
/** \brief gather on all edges */
edge_dir_type gather_edges(icontext_type& context,
const vertex_type& vertex) const {
return graphlab::ALL_EDGES;
} // end of gather_edges
/**
* \brief Collect the current topic count on each edge.
*/
gather_type gather(icontext_type& context, const vertex_type& vertex,
edge_type& edge) const {
gather_type ret(edge.data().nchanges);
const assignment_type& assignment = edge.data().assignment;
foreach(topic_id_type asg, assignment) {
if(asg != NULL_TOPIC) ++ret.factor[asg];
}
return ret;
} // end of gather
/**
* \brief Update the topic count for the center vertex. This
* ensures that the center vertex has the correct topic count before
* resampling the topics for each token along each edge.
*/
void apply(icontext_type& context, vertex_type& vertex,
const gather_type& sum) {
const size_t num_neighbors = vertex.num_in_edges() + vertex.num_out_edges();
ASSERT_GT(num_neighbors, 0);
// There should be no new edge data since the vertex program has been cleared
vertex_data& vdata = vertex.data();
ASSERT_EQ(sum.factor.size(), NTOPICS);
ASSERT_EQ(vdata.factor.size(), NTOPICS);
vdata.nupdates++;
vdata.nchanges = sum.nchanges;
vdata.factor = sum.factor;
} // end of apply
/**
* \brief Scatter on all edges if the computation is on-going.
   * Computation stops after burn-in or when DISABLE_SAMPLING is set to
   * true.
*/
edge_dir_type scatter_edges(icontext_type& context,
const vertex_type& vertex) const {
return (DISABLE_SAMPLING || (BURNIN > 0 && context.elapsed_seconds() > BURNIN))?
graphlab::NO_EDGES : graphlab::ALL_EDGES;
  } // end of scatter_edges
/**
* \brief Draw new topic assignments for each edge token.
*
* Note that we exploit the GraphLab caching model here by DIRECTLY
* modifying the topic counts of adjacent vertices. Making the
* changes immediately visible to any adjacent vertex programs
* running on the same machine. However, these changes will be
* overwritten during the apply step and are only used to accelerate
* sampling. This is a potentially dangerous violation of the
* abstraction and should be taken with caution. In our case all
* vertex topic counts are preallocated and atomic operations are
* used. In addition during the sampling phase we must be careful
* to guard against potentially negative temporary counts.
*/
void scatter(icontext_type& context, const vertex_type& vertex,
edge_type& edge) const {
factor_type& doc_topic_count = is_doc(edge.source()) ?
edge.source().data().factor : edge.target().data().factor;
factor_type& word_topic_count = is_word(edge.source()) ?
edge.source().data().factor : edge.target().data().factor;
ASSERT_EQ(doc_topic_count.size(), NTOPICS);
ASSERT_EQ(word_topic_count.size(), NTOPICS);
// run the actual gibbs sampling
std::vector<double> prob(NTOPICS);
assignment_type& assignment = edge.data().assignment;
edge.data().nchanges = 0;
foreach(topic_id_type& asg, assignment) {
const topic_id_type old_asg = asg;
if(asg != NULL_TOPIC) { // construct the cavity
--doc_topic_count[asg];
--word_topic_count[asg];
--GLOBAL_TOPIC_COUNT[asg];
}
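      // Collapsed Gibbs conditional for this token: p(z = t | rest) is
      // proportional to (ALPHA + n_dt) * (BETA + n_wt) / (BETA * NWORDS + n_t),
      // where n_dt, n_wt, and n_t are the doc-topic, word-topic, and global
      // topic counts with the current token removed (the cavity above).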
for(size_t t = 0; t < NTOPICS; ++t) {
const double n_dt =
std::max(count_type(doc_topic_count[t]), count_type(0));
const double n_wt =
std::max(count_type(word_topic_count[t]), count_type(0));
const double n_t =
std::max(count_type(GLOBAL_TOPIC_COUNT[t]), count_type(0));
prob[t] = (ALPHA + n_dt) * (BETA + n_wt) / (BETA * NWORDS + n_t);
}
asg = graphlab::random::multinomial(prob);
// asg = std::max_element(prob.begin(), prob.end()) - prob.begin();
++doc_topic_count[asg];
++word_topic_count[asg];
++GLOBAL_TOPIC_COUNT[asg];
if(asg != old_asg) {
++edge.data().nchanges;
}
} // End of loop over each token
    // signal the other vertex
context.signal(get_other_vertex(edge, vertex));
} // end of scatter function
}; // end of cgs_lda_vertex_program
bool cgs_lda_vertex_program::DISABLE_SAMPLING = false;
/**
* \brief The icontext type associated with the cgs_lda_vertex program
* is needed for all aggregators.
*/
typedef cgs_lda_vertex_program::icontext_type icontext_type;
// ========================================================
// Aggregators
/**
* \brief The topk aggregator is used to periodically compute and
* display the topk most common words in each topic.
*
* The number of words is determined by the global variable \ref TOPK
* and the interval is determined by the global variable \ref INTERVAL.
*
*/
class topk_aggregator {
typedef std::pair<float, graphlab::vertex_id_type> cw_pair_type;
private:
std::vector< std::set<cw_pair_type> > top_words;
size_t nchanges, nupdates;
public:
topk_aggregator(size_t nchanges = 0, size_t nupdates = 0) :
nchanges(nchanges), nupdates(nupdates) { }
topk_aggregator& operator+=(const topk_aggregator& other) {
nchanges += other.nchanges;
nupdates += other.nupdates;
if(other.top_words.empty()) return *this;
if(top_words.empty()) top_words.resize(NTOPICS);
for(size_t i = 0; i < top_words.size(); ++i) {
// Merge the topk
top_words[i].insert(other.top_words[i].begin(),
other.top_words[i].end());
// Remove excess elements
while(top_words[i].size() > TOPK)
top_words[i].erase(top_words[i].begin());
}
return *this;
} // end of operator +=
static topk_aggregator map(icontext_type& context,
const graph_type::vertex_type& vertex) {
topk_aggregator ret_value;
const vertex_data& vdata = vertex.data();
ret_value.nchanges = vdata.nchanges;
ret_value.nupdates = vdata.nupdates;
if(is_word(vertex)) {
const graphlab::vertex_id_type wordid = vertex.id();
ret_value.top_words.resize(vdata.factor.size());
for(size_t i = 0; i < vdata.factor.size(); ++i) {
const cw_pair_type pair(vdata.factor[i], wordid);
ret_value.top_words[i].insert(pair);
}
}
return ret_value;
} // end of map function
static void finalize(icontext_type& context,
const topk_aggregator& total) {
if(context.procid() != 0) return;
for(size_t i = 0; i < total.top_words.size(); ++i) {
std::cout << "Topic " << i << ": ";
rev_foreach(cw_pair_type pair, total.top_words[i]) {
std::cout << DICTIONARY[pair.second]
<< "(" << pair.first << ")" << ", ";
}
std::cout << std::endl;
}
std::cout << "\nNumber of token changes: " << total.nchanges << std::endl;
std::cout << "\nNumber of updates: " << total.nupdates << std::endl;
} // end of finalize
}; // end of topk_aggregator struct
/**
* \brief The global counts aggregator computes the total number of
* tokens in each topic across all words and documents and then
* updates the \ref GLOBAL_TOPIC_COUNT variable.
*
*/
struct global_counts_aggregator {
typedef graph_type::vertex_type vertex_type;
static factor_type map(icontext_type& context, const vertex_type& vertex) {
return vertex.data().factor;
} // end of map function
static void finalize(icontext_type& context, const factor_type& total) {
size_t sum = 0;
for(size_t t = 0; t < total.size(); ++t) {
GLOBAL_TOPIC_COUNT[t] =
std::max(count_type(total[t]/2), count_type(0));
sum += GLOBAL_TOPIC_COUNT[t];
}
context.cout() << "Total Tokens: " << sum << std::endl;
} // end of finalize
}; // end of global_counts_aggregator struct
/**
* \brief The Likelihood aggregators maintains the current estimate of
* the log-likelihood of the current token assignments.
*
* llik_words_given_topics = ...
* ntopics * (gammaln(nwords * beta) - nwords * gammaln(beta)) - ...
* sum_t(gammaln( n_t + nwords * beta)) +
* sum_w(sum_t(gammaln(n_wt + beta)));
*
* llik_topics = ...
* ndocs * (gammaln(ntopics * alpha) - ntopics * gammaln(alpha)) + ...
* sum_d(sum_t(gammaln(n_td + alpha)) - gammaln(sum_t(n_td) + ntopics * alpha));
*/
class likelihood_aggregator : public graphlab::IS_POD_TYPE {
typedef graph_type::vertex_type vertex_type;
double lik_words_given_topics;
double lik_topics;
public:
likelihood_aggregator() : lik_words_given_topics(0), lik_topics(0) { }
likelihood_aggregator& operator+=(const likelihood_aggregator& other) {
lik_words_given_topics += other.lik_words_given_topics;
lik_topics += other.lik_topics;
return *this;
} // end of operator +=
static likelihood_aggregator
map(icontext_type& context, const vertex_type& vertex) {
using boost::math::lgamma;
const factor_type& factor = vertex.data().factor;
ASSERT_EQ(factor.size(), NTOPICS);
likelihood_aggregator ret;
if(is_word(vertex)) {
for(size_t t = 0; t < NTOPICS; ++t) {
const double value = std::max(count_type(factor[t]), count_type(0));
ret.lik_words_given_topics += lgamma(value + BETA);
}
} else { ASSERT_TRUE(is_doc(vertex));
double ntokens_in_doc = 0;
for(size_t t = 0; t < NTOPICS; ++t) {
const double value = std::max(count_type(factor[t]), count_type(0));
ret.lik_topics += lgamma(value + ALPHA);
ntokens_in_doc += factor[t];
}
ret.lik_topics -= lgamma(ntokens_in_doc + NTOPICS * ALPHA);
}
return ret;
} // end of map function
static void finalize(icontext_type& context, const likelihood_aggregator& total) {
using boost::math::lgamma;
// Address the global sum terms
double denominator = 0;
for(size_t t = 0; t < NTOPICS; ++t) {
denominator += lgamma(GLOBAL_TOPIC_COUNT[t] + NWORDS * BETA);
} // end of for loop
const double lik_words_given_topics =
NTOPICS * (lgamma(NWORDS * BETA) - NWORDS * lgamma(BETA)) -
denominator + total.lik_words_given_topics;
const double lik_topics =
NDOCS * (lgamma(NTOPICS * ALPHA) - NTOPICS * lgamma(ALPHA)) +
total.lik_topics;
const double lik = lik_words_given_topics + lik_topics;
context.cout() << "Likelihood: " << lik << std::endl;
} // end of finalize
}; // end of likelihood_aggregator struct
/**
* \brief The selective signal functions are used to signal only the
* vertices corresponding to words or documents. This is done by
* using the iengine::map_reduce_vertices function.
*/
struct signal_only {
/**
* \brief Signal only the document vertices and skip the word
* vertices.
*/
static graphlab::empty
docs(icontext_type& context, const graph_type::vertex_type& vertex) {
if(is_doc(vertex)) context.signal(vertex);
return graphlab::empty();
} // end of signal_docs
/**
* \brief Signal only the word vertices and skip the document
* vertices.
*/
static graphlab::empty
words(icontext_type& context, const graph_type::vertex_type& vertex) {
if(is_word(vertex)) context.signal(vertex);
return graphlab::empty();
} // end of signal_words
}; // end of signal_only
/**
* \brief Load the dictionary global variable from the file containing
* the terms (one term per line).
*
* Note that while graphs can be loaded from multiple files the
* dictionary must be in a single file. The dictionary is loaded
* entirely into memory and used to display word clouds and the top
* terms in each topic.
*
* \param [in] fname the file containing the dictionary data. The
* data can be located on HDFS and can also be gzipped (must end in
* ".gz").
*
*/
bool load_dictionary(const std::string& fname) {
// std::cout << "staring load on: "
// << graphlab::get_local_ip_as_str() << std::endl;
const bool gzip = boost::ends_with(fname, ".gz");
// test to see if the graph_dir is an hadoop path
std::cout << "opening: " << fname << std::endl;
std::ifstream in_file(fname.c_str(),
std::ios_base::in | std::ios_base::binary);
boost::iostreams::filtering_stream<boost::iostreams::input> fin;
  if(gzip) fin.push(boost::iostreams::gzip_decompressor());
  fin.push(in_file);
  if(!in_file.good() || !fin.good()) {
logstream(LOG_ERROR) << "Error loading dictionary: "
<< fname << std::endl;
return false;
}
std::string term;
std::cout << "Loooping" << std::endl;
while(std::getline(fin, term).good()) DICTIONARY.push_back(term);
fin.pop();
in_file.close();
// std::cout << "Finished load on: "
// << graphlab::get_local_ip_as_str() << std::endl;
std::cout << "Dictionary Size: " << DICTIONARY.size() << std::endl;
return true;
} // end of load dictionary
struct count_saver {
bool save_words;
count_saver(bool save_words) : save_words(save_words) { }
typedef graph_type::vertex_type vertex_type;
typedef graph_type::edge_type edge_type;
std::string save_vertex(const vertex_type& vertex) const {
// Skip saving vertex data if the vertex type is not consistent
// with the save type
if((save_words && is_doc(vertex)) ||
(!save_words && is_word(vertex))) return "";
// Proceed to save
std::stringstream strm;
if(save_words) {
const graphlab::vertex_id_type vid = vertex.id();
strm << vid << '\t';
} else { // save documents
const graphlab::vertex_id_type vid = (-vertex.id()) - 2;
strm << vid << '\t';
}
const factor_type& factor = vertex.data().factor;
for(size_t i = 0; i < factor.size(); ++i) {
strm << factor[i];
if(i+1 < factor.size()) strm << '\t';
}
strm << '\n';
return strm.str();
}
std::string save_edge(const edge_type& edge) const {
return ""; //nop
}
}; // end of count_saver
|
{"hexsha": "b82ed5dd370aa2360539f0b427444a32480c20e1", "size": 22556, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "graphlab_toolkit_ports/lda/cgs_lda_vertexprogram.hpp", "max_stars_repo_name": "libingzheren/GraphChi", "max_stars_repo_head_hexsha": "cd960212ebc171bacbd3169e4c9bcd44e680aadb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2020-09-02T08:49:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T06:51:44.000Z", "max_issues_repo_path": "graphlab_toolkit_ports/lda/cgs_lda_vertexprogram.hpp", "max_issues_repo_name": "mrgloom/graphchi-cpp", "max_issues_repo_head_hexsha": "7dfaa37a9da8f5d8901fc95435f98fb8844c7217", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphlab_toolkit_ports/lda/cgs_lda_vertexprogram.hpp", "max_forks_repo_name": "mrgloom/graphchi-cpp", "max_forks_repo_head_hexsha": "7dfaa37a9da8f5d8901fc95435f98fb8844c7217", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2021-01-02T06:17:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T18:54:05.000Z", "avg_line_length": 29.6399474376, "max_line_length": 85, "alphanum_fraction": 0.6575190637, "num_tokens": 5598}
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
from PIL import Image
image_data = np.load('../preprocessing/ImageData.npy')
labels = np.load('../preprocessing/labels.npy')
flipped_image_data = np.load('../imageAugmentation/flipped_ImageData.npy')
flipped_labels = np.load('../imageAugmentation/flipped_labels.npy')
mirrored_image_data = np.load('../imageAugmentation/mirrored_ImageData.npy')
mirrored_labels = np.load('../imageAugmentation/mirrored_labels.npy')
blurred_image_data = np.load('../imageAugmentation/blurred_ImageData.npy')
blurred_labels = np.load('../imageAugmentation/blurred_labels.npy')
train_test_split = 0.8  # fraction of each array used for training
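# Note: each source array (original, flipped, mirrored, blurred) is split with
# the same ratio below and the training/test halves are concatenated
# separately, so, assuming the augmented arrays preserve the original frame
# order, augmented variants of a test image do not leak into training.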
image_data_train = image_data[:int(image_data.shape[0]*(train_test_split))]
image_data_test = image_data[int(image_data.shape[0]*train_test_split):]
labels_train = labels[:int(labels.shape[0]*(train_test_split))]
labels_test = labels[int(labels.shape[0]*train_test_split):]
flipped_image_data_train = flipped_image_data[:int(flipped_image_data.shape[0]*(train_test_split))]
flipped_image_data_test = flipped_image_data[int(flipped_image_data.shape[0]*train_test_split):]
flipped_labels_train = flipped_labels[:int(flipped_labels.shape[0]*(train_test_split))]
flipped_labels_test = flipped_labels[int(flipped_labels.shape[0]*train_test_split):]
mirrored_image_data_train = mirrored_image_data[:int(mirrored_image_data.shape[0]*(train_test_split))]
mirrored_image_data_test = mirrored_image_data[int(mirrored_image_data.shape[0]*train_test_split):]
mirrored_labels_train = mirrored_labels[:int(mirrored_labels.shape[0]*(train_test_split))]
mirrored_labels_test = mirrored_labels[int(mirrored_labels.shape[0]*train_test_split):]
blurred_image_data_train = blurred_image_data[:int(blurred_image_data.shape[0]*(train_test_split))]
blurred_image_data_test = blurred_image_data[int(blurred_image_data.shape[0]*train_test_split):]
blurred_labels_train = blurred_labels[:int(blurred_labels.shape[0]*(train_test_split))]
blurred_labels_test = blurred_labels[int(blurred_labels.shape[0]*train_test_split):]
X_train = np.concatenate((image_data_train, flipped_image_data_train, mirrored_image_data_train, blurred_image_data_train), axis=0)
X_test = np.concatenate((image_data_test, flipped_image_data_test, mirrored_image_data_test, blurred_image_data_test), axis=0)
# for i in range(5):
# img = Image.fromarray((X_train[i+250] * 255).astype(np.uint8), mode='RGB')
# img.show()
# print(X_train.shape)
y_train = np.concatenate((labels_train, flipped_labels_train, mirrored_labels_train, blurred_labels_train), axis=0)
y_test = np.concatenate((labels_test, flipped_labels_test, mirrored_labels_test, blurred_labels_test), axis=0)
# print(y_train.shape)
model = models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(128, 128, 3)))
model.add(layers.MaxPool2D(2,2))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPool2D(2,2))
model.add(layers.Conv2D(128, (3,3), activation='relu'))
model.add(layers.MaxPool2D(2,2))
model.add(layers.Conv2D(128, (3,3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(2, activation='softmax'))
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
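# The final Dense layer already applies softmax, so from_logits=False above is
# the matching setting for SparseCategoricalCrossentropy.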
training_history = model.fit(X_train, y_train, epochs=20, validation_data=(X_test, y_test))
predictions = model.predict(X_test)
error = []
for i in range(predictions.shape[0]):
if y_test[i] == 1:
error.append(1 - predictions[i][1])
else:
error.append(1 - predictions[i][0])
# for i in range(len(error)):
# if error[i] > 0.5:
# array = X_test[i]
# img = Image.fromarray(array, 'RGB')
# img.show()
results = model.evaluate(X_test, y_test)
print('\nTesting Accuracy: {:.2f}%'.format(results[1]*100))
plt.plot(training_history.history['accuracy'], label='accuracy')
plt.plot(training_history.history['val_accuracy'], label='val_accuracy')
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(loc='lower right')
plt.show()
|
{"hexsha": "7f7808a02cb3d2170930b276415df134f6089679", "size": 4277, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/convNN.py", "max_stars_repo_name": "PranavPusarla/intoxication-identifier", "max_stars_repo_head_hexsha": "72091955440791e23673267ab0cc9e0eeb714cbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/convNN.py", "max_issues_repo_name": "PranavPusarla/intoxication-identifier", "max_issues_repo_head_hexsha": "72091955440791e23673267ab0cc9e0eeb714cbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/convNN.py", "max_forks_repo_name": "PranavPusarla/intoxication-identifier", "max_forks_repo_head_hexsha": "72091955440791e23673267ab0cc9e0eeb714cbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.5301204819, "max_line_length": 131, "alphanum_fraction": 0.7720364742, "include": true, "reason": "import numpy", "num_tokens": 1047}
|
# -*- coding: utf-8 -*-
# Portions Copyright 2021 Huawei Technologies Co., Ltd
# Portions Copyright 2017 The OpenFermion Developers.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This module is generated the Fermion Operator."""
import ast
import json
from functools import lru_cache
import numpy as np
from scipy.sparse import csr_matrix, kron
from mindquantum.core.parameterresolver import ParameterResolver
from mindquantum.utils.type_value_check import _check_input_type, _check_int_type
from ._base_operator import _Operator
@lru_cache()
def _n_sz(n):
if n == 0:
return csr_matrix(np.array([1]), dtype=np.complex128)
tmp = [csr_matrix(np.array([[1, 0], [0, -1]], dtype=np.complex128)) for _ in range(n)]
for i in tmp[1:]:
tmp[0] = kron(tmp[0], i)
return tmp[0]
@lru_cache()
def _n_identity(n):
"""N_identity."""
if n == 0:
return csr_matrix(np.array([1]), dtype=np.complex128)
tmp = [csr_matrix(np.array([[1, 0], [0, 1]], dtype=np.complex128)) for _ in range(n)]
for i in tmp[1:]:
tmp[0] = kron(tmp[0], i)
return tmp[0]
@lru_cache()
def _single_fermion_word(idx, dag, n_qubits):
"""Single_fermion_word."""
m = csr_matrix(np.array([[0, 1], [0, 0]], dtype=np.complex128))
if dag:
m = csr_matrix(np.array([[0, 0], [1, 0]], dtype=np.complex128))
return kron(_n_identity(n_qubits - 1 - idx), kron(m, _n_sz(idx)))
@lru_cache()
def _two_fermion_word(idx1, dag1, idx2, dag2, n_qubits):
"""Two_fermion_word."""
return _single_fermion_word(idx1, dag1, n_qubits) * _single_fermion_word(idx2, dag2, n_qubits)
def _check_valid_fermion_operator_term(term):
"""Check valid fermion operator term."""
if term is not None and term != '':
if not isinstance(term, (str, tuple)):
raise ValueError('Fermion operator requires a string or a tuple, but get {}'.format(type(term)))
if isinstance(term, str):
terms = term.split(' ')
for t in terms:
if (t.endswith('^') and not t[:-1].isdigit()) or (not t.endswith('^') and not t.isdigit()):
if t:
raise ValueError('Invalid fermion operator term {}'.format(t))
if isinstance(term, tuple):
for t in term:
if (
len(t) != 2
or not isinstance(t[0], int)
or not isinstance(t[1], int)
or t[0] < 0
or t[1] not in [0, 1]
):
raise ValueError('Invalid fermion operator term {}'.format(t))
class FermionOperator(_Operator):
r"""
Definition of a Fermion Operator.
    A fermion operator such as FermionOperator(' 4^ 3 9 3^ ') is used to represent :math:`a_4^\dagger a_3 a_9
    a_3^\dagger`.
    These are the basic operators used to describe a fermionic system, such as a molecular system.
    The FermionOperator follows the fermionic anti-commutation relations.
Args:
terms (str): The input term of fermion operator. Default: None.
coefficient (Union[numbers.Number, str, ParameterResolver]): The coefficient for the corresponding single
            operators. Default: 1.0.
Examples:
>>> from mindquantum.core.operators import FermionOperator
>>> a_p_dagger = FermionOperator('1^')
>>> a_p_dagger
1.0 [1^]
>>> a_q = FermionOperator('0')
>>> a_q
1.0 [0]
>>> zero = FermionOperator()
>>> zero
0
>>> identity= FermionOperator('')
>>> identity
1.0 []
>>> para_op = FermionOperator('0 1^', 'x')
>>> para_op
x [0 1^]
>>> para_dt = {'x':2}
>>> op = para_op.subs(para_dt)
>>> op
2 [0 1^]
"""
__hash__ = None
def __init__(self, term=None, coefficient=1.0):
"""Initialize a FermionOperator object."""
super(FermionOperator, self).__init__(term, coefficient)
_check_valid_fermion_operator_term(term)
self.operators = {1: '^', 0: '', '^': '^', '': ''}
self.gates_number = 0
self.qubit_type = False
if term is not None:
if term == '':
term = self._parse_term(())
else:
term = self._parse_term(term)
self.terms[term] = self.coefficient
def _simplify(self, terms, coefficient=1.0):
"""Simplify a term."""
return coefficient, tuple(terms)
def _parse_string(self, terms_string):
"""
Parse a term given as a string type.
e.g. For FermionOperator:
4^ 3 -> ((4, 1),(3, 0))
        Note that the '1' and '0' in the second column represent the creation and annihilation operators, respectively.
Returns:
tuple, return a tuple list, such as ((4, 1),(3, 0))
Raises:
            TypeError, if the term string is not properly formatted,
            e.g. '1.5 4^ 3'.
"""
def map_operator_to_integer_rep(operator):
"""Map operator to integer."""
return 1 if operator == '^' else 0
terms = terms_string.split()
terms_to_tuple = []
for sub_term in terms:
index = int(sub_term[0])
operator = sub_term[1:]
# Handle such cases: 10^, 100^, ...
if len(sub_term) >= 2:
if '^' in sub_term:
operator = '^'
index = int(sub_term[: sub_term.index(operator)])
else:
operator = ''
index = int(sub_term)
if operator not in self.operators:
raise ValueError(
'Invalid type of operator {}.'
'The Fermion operator should be one of this {}'.format(operator, self.operators)
)
if index < 0:
raise ValueError(
"Invalid index {}.The qubit index should be\
non negative integer".format(
self.operators
)
)
terms_to_tuple.append((index, map_operator_to_integer_rep(operator)))
# check the commutate terms with same index in the list and
# replace it with the corresponding commutation relationship
return tuple(terms_to_tuple)
def to_openfermion(self):
"""Convert fermion operator to openfermion format."""
from openfermion import FermionOperator as OFFermionOperator
terms = {}
for k, v in self.terms.items():
if not v.is_const():
raise ValueError("Cannot convert parameteized fermion operator to openfermion format")
terms[k] = v.const
of = OFFermionOperator()
of.terms = terms
return of
def __str__(self):
"""Return an easy-to-read string representation of the FermionOperator."""
if not self.terms:
return '0'
string_rep = ''
term_cnt = 0
for term, coeff in sorted(self.terms.items()):
term_cnt += 1
if isinstance(coeff, ParameterResolver):
tmp_string = '{} ['.format(coeff.expression()) # begin of the '['
else:
tmp_string = '{} ['.format(coeff) # begin of the '['
# deal with this situation (1,'X') or [1, 'X']
            if term == ():
                # identity term: nothing goes between the brackets; the
                # closing bracket is appended below
                pass
elif isinstance(term[0], int):
index, operator = term
if operator in self.operators:
tmp_string += '{}{} '.format(index, self.operators[operator])
else:
for sub_term in term:
index, operator = sub_term
# check validity, if checked before,
# then we can take away this step
if operator in self.operators:
tmp_string += '{}{} '.format(index, self.operators[operator])
if term_cnt < len(self.terms):
string_rep += '{}] +\n'.format(tmp_string.strip()) # end of the ']'
else:
string_rep += '{}] '.format(tmp_string.strip()) # end of the ']'
return string_rep
def __repr__(self):
"""Return a string representation of the object."""
return str(self)
def matrix(self, n_qubits=None):
"""
Convert this fermion operator to csr_matrix under jordan_wigner mapping.
Args:
n_qubits (int): The total qubit of final matrix. If None, the value will be
the maximum local qubit number. Default: None.
"""
from mindquantum.core.operators.utils import count_qubits
if not self.terms:
raise ValueError("Cannot convert empty fermion operator to matrix")
n_qubits_local = count_qubits(self)
if n_qubits_local == 0 and n_qubits is None:
raise ValueError("You should specific n_qubits for converting a identity fermion operator.")
if n_qubits is None:
n_qubits = n_qubits_local
_check_int_type("n_qubits", n_qubits)
if n_qubits < n_qubits_local:
raise ValueError(
f"Given n_qubits {n_qubits} is small than qubit of fermion operator, which is {n_qubits_local}."
)
out = 0
for term, coeff in self.terms.items():
if not coeff.is_const():
raise RuntimeError("Cannot convert a parameterized fermion operator to matrix.")
coeff = coeff.const
if not term:
out += csr_matrix(np.identity(2**n_qubits, dtype=np.complex128)) * coeff
else:
tmp = 1
group = [[]]
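                # Group the term's (index, dagger) pairs two operators at a
                # time (four scalars per group) so consecutive operators can
                # use the cached two-operator kernel; a trailing single
                # operator falls through to the one-operator kernel below.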
for idx, dag in term:
if len(group[-1]) < 4:
group[-1].append(idx)
group[-1].append(dag)
if len(group[-1]) == 4:
group.append([])
for g in group:
if g:
if len(g) == 4:
tmp *= _two_fermion_word(g[0], g[1], g[2], g[3], n_qubits)
else:
tmp *= _single_fermion_word(g[0], g[1], n_qubits)
out += tmp * coeff
return out
@property
def imag(self):
"""
Convert the coeff to its imag part.
Returns:
FermionOperator, the imag part of this fermion operator.
Examples:
>>> from mindquantum.core.operators import FermionOperator
>>> f = FermionOperator('0', 1 + 2j) + FermionOperator('0^', 'a')
>>> f.imag.compress()
2.0 [0]
"""
out = FermionOperator()
for k, v in self.terms.items():
out.terms[k] = v.imag
return out
@property
def real(self):
"""
Convert the coeff to its real part.
Returns:
FermionOperator, the real part of this fermion operator.
Examples:
>>> from mindquantum.core.operators import FermionOperator
>>> f = FermionOperator('0', 1 + 2j) + FermionOperator('0^', 'a')
>>> f.real.compress()
1.0 [0] +
a [0^]
"""
out = FermionOperator()
for k, v in self.terms.items():
out.terms[k] = v.real
return out
def normal_ordered(self):
"""
Return the normal ordered form of the Fermion Operator.
Returns:
FermionOperator, the normal ordered FermionOperator.
Examples:
>>> from mindquantum.core.operators import FermionOperator
>>> origin = FermionOperator('0 1^')
>>> origin
1.0 [0 1^]
>>> origin.normal_ordered()
-1.0 [1^ 0]
"""
ordered_op = self.__class__()
for term, coeff in self.terms.items():
ordered_op += _normal_ordered_term(term, coeff)
return ordered_op
def dumps(self, indent=4):
r"""
Dump FermionOperator into JSON(JavaScript Object Notation).
Args:
indent (int): Then JSON array elements and object members will be
pretty-printed with that indent level. Default: 4.
Returns:
JSON (str), the JSON strings of FermionOperator
Examples:
>>> from mindquantum.core.operators import FermionOperator
>>> f = FermionOperator('0', 1 + 2j) + FermionOperator('0^', 'a')
>>> print(f.dumps())
{
"((0, 0),)": "(1+2j)",
"((0, 1),)": "{"a": 1, "__class__": "ParameterResolver", "__module__": \
"parameterresolver.parameterresolver", "no_grad_parameters": []}",
"__class__": "FermionOperator",
"__module__": "operators.fermion_operator"
}
"""
if indent is not None:
_check_int_type('indent', indent)
d = self.terms
# Convert key type from tuple to str
key_list = list(d.keys())
for i, k in enumerate(key_list):
if isinstance(k, tuple):
key_list[i] = k.__str__()
# Convert value type from complex/PR into str
value_list = list(d.values())
for j, v in enumerate(value_list):
if isinstance(v, (complex, int, float)):
value_list[j] = str(v)
elif isinstance(v, ParameterResolver):
value_list[j] = v.dumps(None)
else:
raise ValueError(
"Coefficient must be a complex/int/float type or a ParameterResolver, \
but get {}.".format(
type(v)
)
)
dic = dict(zip(key_list, value_list))
dic['__class__'] = self.__class__.__name__
dic['__module__'] = self.__module__
return json.dumps(dic, indent=indent)
@staticmethod
def loads(strs):
"""
Load JSON(JavaScript Object Notation) into FermionOperator.
Args:
strs (str): The dumped fermion operator string.
Returns:
FermionOperator, the FermionOperator load from strings
Examples:
>>> from mindquantum.core.operators import FermionOperator
            >>> strings = '{"((0, 0),)": "(1+2j)", "((0, 1),)": {"a": 1}, \
"__class__": "FermionOperator", "__module__": "__main__"}'
>>> obj = FermionOperator.loads(strings)
>>> print(obj)
(1+2j) [0] + a [0^]
"""
_check_input_type('strs', str, strs)
dic = json.loads(strs)
if '__class__' in dic:
class_name = dic.pop('__class__')
if class_name == 'FermionOperator':
module_name = dic.pop('__module__')
module = __import__(module_name)
class_ = getattr(module, class_name)
# Convert key type from str into tuple
key_list = list(dic.keys())
for i, k in enumerate(key_list):
key_list[i] = tuple(ast.literal_eval(k))
# Convert value type from str into ParameterResolver/complex
value_list = list(dic.values())
for j, v in enumerate(value_list):
if isinstance(v, str):
if '__class__' in v:
value_list[j] = ParameterResolver.loads(v)
else:
value_list[j] = complex(v)
terms = dict(zip(key_list, value_list))
f_op = FermionOperator()
for key, value in terms.items():
f_op += class_(key, value)
else:
raise TypeError("Require a FermionOperator class, but get {} class".format(class_name))
else:
raise ValueError("Expect a '__class__' in strings, but not found")
return f_op
def _normal_ordered_term(term, coefficient):
r"""
    Return the normal ordered form of the term, with higher indices and creation operators in front.
eg. :math:`a_3\dagger a_2\dagger a_1 a_0`
"""
term = list(term)
ordered_term = FermionOperator()
for i in range(1, len(term)):
for j in range(i, 0, -1):
left_sub_term = term[j - 1]
right_sub_term = term[j]
# Swap operators if left operator is a and right operator is
# a^\dagger
if not left_sub_term[1] and right_sub_term[1]:
term[j], term[j - 1] = left_sub_term, right_sub_term
coefficient = -1 * coefficient
                # If the indices are the same, employ the anti-commutation
                # relationship and generate the new term
if left_sub_term[0] == right_sub_term[0]:
new_term = term[: (j - 1)] + term[(j + 1) :] # noqa: E203
ordered_term += _normal_ordered_term(new_term, -1 * coefficient)
elif left_sub_term[1] == right_sub_term[1]:
                # If the indices are the same, the term evaluates to zero.
if left_sub_term[0] == right_sub_term[0]:
return ordered_term
# Swap them if same operator but lower index on left
if left_sub_term[0] < right_sub_term[0]:
term[j], term[j - 1] = left_sub_term, right_sub_term
coefficient = -1 * coefficient
# Add the term and return.
ordered_term += FermionOperator(_fermion_tuple_to_string(tuple(term)), coefficient)
return ordered_term
def _fermion_tuple_to_string(term):
s = []
for i in term:
if i[1] == 1:
s.append('{}^'.format(i[0]))
else:
s.append(str(i[0]))
return ' '.join(s)
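# Illustrative only (not part of the module API): the string helper above maps
# a normal-ordered tuple back to the operator syntax, e.g.
#   _fermion_tuple_to_string(((3, 1), (2, 0)))  ->  '3^ 2'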
|
{"hexsha": "9b8e00dc7b8db30dc6a71c87c54fdf77db1c7f01", "size": 18707, "ext": "py", "lang": "Python", "max_stars_repo_path": "mindquantum/core/operators/fermion_operator.py", "max_stars_repo_name": "Takishima/mindquantum", "max_stars_repo_head_hexsha": "e90dfe474b759023d7ae18281b9a87cb8d223d04", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mindquantum/core/operators/fermion_operator.py", "max_issues_repo_name": "Takishima/mindquantum", "max_issues_repo_head_hexsha": "e90dfe474b759023d7ae18281b9a87cb8d223d04", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mindquantum/core/operators/fermion_operator.py", "max_forks_repo_name": "Takishima/mindquantum", "max_forks_repo_head_hexsha": "e90dfe474b759023d7ae18281b9a87cb8d223d04", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.837164751, "max_line_length": 113, "alphanum_fraction": 0.5383546266, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4296}
|
from omnibelt import unspecified_argument
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import timm
# from ..framework import util
from ..framework import Extractor, Rooted, Device
from . import spaces
# class Extractor(nn.Module):
# def get_encoder_fingerprint(self):
# raise NotImplementedError
#
#
# def encode(self, x):
# return self(x)
#
#
# def extract(self, x):
# return self.encode(x)
def get_inceptionV3(): # TODO: enable different block indices
	return Timm_Extractor('inception_v3')  # canonical timm name (matches the auto-resize table below)
class Timm_Extractor(Rooted, Device, Extractor, nn.Module):
def __init__(self, model_name=None, pool_features=True, din=None,
auto_resize=unspecified_argument, resize_mode='bilinear',
pretrained=True, checkpoint_path='', global_pool='avg', drop_rate=0.,
create_kwargs=None, **kwargs):
super().__init__(din=din, **kwargs)
self.model_name = model_name
if create_kwargs is None:
create_kwargs = {}
create_kwargs['model_name'] = model_name
create_kwargs['pretrained'] = create_kwargs.get('pretrained', pretrained)
create_kwargs['global_pool'] = create_kwargs.get('global_pool', global_pool)
create_kwargs['drop_rate'] = create_kwargs.get('drop_rate', drop_rate)
self._create_kwargs = create_kwargs
create_kwargs['checkpoint_path'] = checkpoint_path
model, pool = None, None
if model_name is not None:
model = timm.create_model(**create_kwargs)
pool = self._find_global_pool(model) if pool_features else None
self.model = model
self.pool = pool
if auto_resize is unspecified_argument and din is None:
auto_resize = self._infer_auto_resize_inputs(model_name)
if auto_resize is not None and isinstance(auto_resize, int):
auto_resize = auto_resize, auto_resize
self.auto_resize_input = auto_resize
self.auto_resize_mode = resize_mode
self._fix_channels = self.auto_resize_input is None
self.din, self.dout = self._infer_dim(din)
self.to(self.device)
def _infer_auto_resize_inputs(self, model_name):
return {'inception_v3': 299, }.get(model_name, 128)
def _to(self, device, **kwargs):
return super(Device, self).to(device)
def extract(self, observation):
return self(observation)
def get_extractor_key(self):
return self._create_kwargs.copy()
@classmethod
def _find_global_pool(cls, model):
for n, c in model.named_children():
if n == 'global_pool':
return c
else:
out = cls._find_global_pool(c)
if out is not None:
return out
def _infer_dim(self, din=None):
dout = None
Cout = self.model.num_features
if din is not None:
Cin, Hin, Win = din.shape if isinstance(din, spaces.Dim) else din
if Cin != 3:
self._fix_channels = True
factor = self.model.feature_info[-1]['reduction']
			Hout = None if Hin is None else int(np.ceil(Hin / factor))
			Wout = None if Win is None else int(np.ceil(Win / factor))
din = din if isinstance(din, spaces.Image) else spaces.Image(Cin, Hin, Win)
dout = spaces.Image(Cout, Hout, Wout)
if self.pool:
dout = spaces.Unbound(shape=(Cout,))
return din, dout
def get_classifier(self):
return self.model.get_classifier()
def forward(self, x):
device = x.device
x = x.to(self.device)
if self._fix_channels and x.shape[1] != 3:
if x.shape[1] == 1:
x = torch.cat([x]*3, 1)
else:
assert x.shape[1] > 3, f'bad shape: {x.shape}'
x = x[:, :3]
if self.auto_resize_input is not None and x.shape[-2:] != self.auto_resize_input:
x = F.interpolate(x, self.auto_resize_input, mode=self.auto_resize_mode)
f = self.model.forward_features(x)
if self.pool is not None:
f = self.pool(f)
f = f.view(f.size(0), -1)
return f.to(device)
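# A minimal usage sketch (illustrative; the model name, input size, and the
# assumption that the Rooted/Device mixins need no extra kwargs are all
# hypothetical, not guaranteed by this module):
#
#   ext = Timm_Extractor('resnet18', din=(3, 128, 128))
#   feats = ext.extract(torch.randn(8, 3, 128, 128))   # -> shape (8, ext.dout.shape[0])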
|
{"hexsha": "63516503c11ae6150fd9a1020598695ab68c7fbf", "size": 3717, "ext": "py", "lang": "Python", "max_stars_repo_path": "plethora/framework/extractors.py", "max_stars_repo_name": "felixludos/plethora", "max_stars_repo_head_hexsha": "ceaf13065a182923ef2721d3060a39f42bbea594", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plethora/framework/extractors.py", "max_issues_repo_name": "felixludos/plethora", "max_issues_repo_head_hexsha": "ceaf13065a182923ef2721d3060a39f42bbea594", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plethora/framework/extractors.py", "max_forks_repo_name": "felixludos/plethora", "max_forks_repo_head_hexsha": "ceaf13065a182923ef2721d3060a39f42bbea594", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9347826087, "max_line_length": 83, "alphanum_fraction": 0.7075598601, "include": true, "reason": "import numpy", "num_tokens": 985}
|
import os
from others.amdegroot.data.coco import COCO_ROOT, COCODetection
from others.amdegroot.data.voc0712 import VOC_ROOT, VOCDetection
from others.amdegroot.data.uad import UAD_ROOT, UADDetection
from others.amdegroot.utils.augmentations import SSDAugmentation
from others.amdegroot.data.config import *
from loaders.uadetrac_loader import UADetracLoader
from logger import Logger
import numpy as np
available_datasets = {'COCO': COCO_ROOT
, 'VOC': VOC_ROOT
, 'UAD': UAD_ROOT
, 'JNET': UAD_ROOT}
logger = Logger()
def create_dataset2():
"""
Concat the original images with output of the JNET to train the network
:return:
"""
logger.info("We are loading UADetrac!!")
cfg = uad
dataset = UADDetection(transform=SSDAugmentation(cfg['min_dim'], MEANS))
loader = UADetracLoader()
images = loader.load_cached_images(name='uad_train_images.npy', vi_name='uad_train_vi.npy')
boxes = loader.load_cached_boxes(name='uad_train_boxes.npy')
labels = loader.load_cached_labels(name='uad_train_labels.npy')
labels = labels['vehicle']
images, labels, boxes = loader.filter_input3(images, labels, boxes)
images2 = loader.load_cached_images(name='jnet_train-200-300.npy', vi_name = 'uad_train_vi.npy')
boxes2 = loader.load_cached_boxes(name = 'uad_train_boxes.npy')
labels2 = loader.load_cached_labels(name = 'uad_train_labels.npy')
labels2 = labels2['vehicle']
images2, labels2, boxes2 = loader.filter_input3(images2, labels2, boxes2)
final_images = np.concatenate((images, images2), axis = 0)
final_boxes = np.concatenate((boxes, boxes2), axis = 0)
final_labels = np.concatenate((labels, labels2), axis = 0)
logger.info(f"original: {images.shape}, {len(labels)}, {len(boxes)}")
logger.info(f"final: {final_images.shape}, {final_boxes.shape}, {final_labels.shape}")
    dataset.set_images(final_images)
    dataset.set_labels(final_labels)
    dataset.set_boxes(final_boxes)
return dataset, cfg
def create_dataset(dataset_name, is_train=None, cache_name = None):
if dataset_name not in available_datasets.keys():
logger.error(f"dataset: {dataset_name} not in {available_datasets.keys()}")
    if (dataset_name == 'UAD' or dataset_name == 'JNET') and (is_train is None):
logger.error(f"Must specify training or testing for UAD and JNET!")
if dataset_name == 'COCO':
if not os.path.exists(COCO_ROOT):
logger.error('Must specify dataset_root if specifying dataset')
cfg = coco
dataset = COCODetection(root=COCO_ROOT,
transform=SSDAugmentation(cfg['min_dim'],
MEANS))
elif dataset_name == 'VOC':
cfg = voc
dataset = VOCDetection(root=VOC_ROOT,
transform=SSDAugmentation(cfg['min_dim'],
MEANS))
elif dataset_name == 'UAD' and is_train:
logger.info("We are loading UADetrac!!")
cfg = uad
dataset = UADDetection(transform=SSDAugmentation(cfg['min_dim'], MEANS))
loader = UADetracLoader()
images = loader.load_cached_images(name='uad_train_images.npy', vi_name = 'uad_train_vi.npy' )
boxes = loader.load_cached_boxes(name = 'uad_train_boxes.npy')
labels = loader.load_cached_labels(name = 'uad_train_labels.npy')
labels = labels['vehicle']
images, labels, boxes = loader.filter_input3(images, labels, boxes)
dataset.set_images(images)
dataset.set_labels(labels)
dataset.set_boxes(boxes)
elif dataset_name == 'UAD' and not is_train:
logger.info("We are loading UADetrac!!")
cfg = uad
dataset = UADDetection(transform=SSDAugmentation(cfg['min_dim'], MEANS))
loader = UADetracLoader()
images = loader.load_cached_images(name = 'uad_test_images.npy', vi_name = 'uad_test_vi.npy')
boxes = loader.load_cached_boxes(name = 'uad_test_boxes.npy')
labels = loader.load_cached_labels(name = 'uad_test_labels.npy')
labels = labels['vehicle']
images, labels, boxes = loader.filter_input3(images, labels, boxes)
images = images[:4000]
labels = labels[:4000]
boxes = boxes[:4000]
dataset.set_images(images)
dataset.set_labels(labels)
dataset.set_boxes(boxes)
elif dataset_name == 'JNET' and is_train:
if cache_name is None:
logger.error("Cache name is required for JNET!! returning...")
return None
logger.info("We are loading JNET - UADetrac!!")
cfg = uad
dataset = UADDetection(transform=SSDAugmentation(cfg['min_dim'], MEANS))
loader = UADetracLoader()
images = loader.load_cached_images(name=cache_name, vi_name = 'uad_train_vi.npy')
labels = loader.load_cached_labels(name='uad_train_labels.npy')
boxes = loader.load_cached_boxes(name='uad_train_boxes.npy')
labels = labels['vehicle']
images, labels, boxes = loader.filter_input3(images, labels, boxes)
logger.info(f"images shape is {images.shape}")
logger.info(f"labels length is {len(labels)}")
logger.info(f"boxes length is {len(boxes)}")
assert(images.shape[0] == len(labels))
assert(len(labels) == len(boxes))
dataset.set_images(images)
dataset.set_labels(labels)
dataset.set_boxes(boxes)
elif dataset_name == 'JNET' and not is_train:
if cache_name is None:
logger.error("Cache name is required for JNET! returning....")
return
logger.info("We are loading JNET - UADetrac!!")
cfg = uad
dataset = UADDetection(transform=SSDAugmentation(cfg['min_dim'], MEANS))
loader = UADetracLoader()
images = loader.load_cached_images(name = cache_name, vi_name = 'uad_test_vi.npy')
labels = loader.load_cached_labels(name='uad_test_labels.npy')
boxes = loader.load_cached_boxes(name = 'uad_test_boxes.npy')
labels = labels['vehicle']
images, labels, boxes = loader.filter_input3(images, labels, boxes)
###FIXED: we will make this really small so that numbers appear fast
images = images[:2000]
labels = labels[:2000]
boxes = boxes[:2000]
logger.info(f"images shape is {images.shape}")
logger.info(f"labels length is {len(labels)}")
logger.info(f"boxes length is {len(boxes)}")
dataset.set_images(images)
dataset.set_labels(labels)
dataset.set_boxes(boxes)
return dataset, cfg
if __name__ == "__main__":
### let's save some things
### save the images, labels, boxes for all test and train
logger.info("starting.....")
loader = UADetracLoader()
"""
images = loader.load_images(dir='/nethome/jbang36/eva_jaeho/data/ua_detrac/4_images')
labels, boxes = loader.load_labels('/nethome/jbang36/eva_jaeho/data/ua_detrac/4_xml')
assert(len(images) == len(boxes))
loader.save_images(name = 'uad_train_images.npy', vi_name='uad_train_vi.npy')
loader.save_labels(name = 'uad_train_labels.npy')
loader.save_boxes(name = 'uad_train_boxes.npy')
logger.info("Saved all train data!")
"""
test_images = loader.load_images(dir='/nethome/jbang36/eva_jaeho/data/ua_detrac/5_images')
test_labels, test_boxes = loader.load_labels('/nethome/jbang36/eva_jaeho/data/ua_detrac/5_xml')
assert(len(test_images) == len(test_boxes))
loader.save_images(name='uad_test_images.npy', vi_name='uad_test_vi.npy')
loader.save_labels(name='uad_test_labels.npy')
loader.save_boxes(name='uad_test_boxes.npy')
logger.info("Saved all test data!")
|
{"hexsha": "312e765af5435718b8bc33dc433de2d61776e456", "size": 7830, "ext": "py", "lang": "Python", "max_stars_repo_path": "others/amdegroot/data/create_dataset_wrapper.py", "max_stars_repo_name": "jaehobang/Eva", "max_stars_repo_head_hexsha": "e7f649990b8bca3bc29b3832c0ecf32efb402647", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "others/amdegroot/data/create_dataset_wrapper.py", "max_issues_repo_name": "jaehobang/Eva", "max_issues_repo_head_hexsha": "e7f649990b8bca3bc29b3832c0ecf32efb402647", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "others/amdegroot/data/create_dataset_wrapper.py", "max_forks_repo_name": "jaehobang/Eva", "max_forks_repo_head_hexsha": "e7f649990b8bca3bc29b3832c0ecf32efb402647", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1538461538, "max_line_length": 102, "alphanum_fraction": 0.6607918263, "include": true, "reason": "import numpy", "num_tokens": 1885}
|
__author__ = ['gleicher', 'cbodden']
"""
an attempt to define spacetime problems
at one level, all a spacetime problem is is a function that, given a vector
(the KeyVariables - see states.py), returns the value of the objective function
and the vectors of constraint values (well, two - one for eqs, one for ineqs)
to do this, it needs to keep a lot of stuff
"""
from itertools import chain
import numpy as N
import trajopt.utilities.adInterface as AD
import trajopt.spacetime.states as ST
from trajopt.utilities.timer import Timer
def returnListOrNone(lst):
try:
if len(lst) > 0:
return lst
except:
pass
print "Zero Length Constraint List May Be a Problem!"
return []
def pointVels(a, b, _noZ):
if _noZ:
return [(a[i][0] - b[i][0], a[i][1] - b[i][1]) for i in range(len(a))]
else:
return [(a[i][0] - b[i][0], a[i][1] - b[i][1], a[i][2] - b[i][2]) for i in range(len(a))]
def pointAccs(a, b, c, _noZ):
if _noZ:
return [(a[i][0] - b[i][0] * 2 + c[i][0], a[i][1] - b[i][1] * 2 + c[i][1]) for i in range(len(a))]
else:
return [(a[i][0] - b[i][0] * 2 + c[i][0], a[i][1] - b[i][1] * 2 + c[i][1], a[i][2] - b[i][2] * 2 + c[i][2]) for
i in range(len(a))]
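# Both helpers above are plain finite differences over consecutive frames
# (first differences for velocities, second differences for accelerations);
# with _noZ set, the z component is dropped for planar problems.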
class Spacetime:
def __init__(self, robot, nstates):
self.excludeKeys = []
# things that define the spacetime problem
self.robot = robot
self.interpolationScheme = None
# we need a state vector for injecting the keyvariables into
self.defaultState = ST.KeyVector(nstates, self.robot.nvars)
for i in range(len(self.defaultState)):
self.defaultState[i] = robot.default
# the constraints and objectives that make up the problem
# note: these should be added by the "add" functions - so that
# appropriate checking happens - that's why the names have the underscores
# constraints
self._pointConstraints = [] # tuples (time,constraint)
self._allTimesConstraints = [] # point constraints applied to all times
# objective functions - needs to be a tuple (weight, PointObjTerm)
self._pointObjectives = [] # applied to ALL times (where the derivatives exist)
# we need to know whether or not we have different kinds of constraints
# before we ever gather them up (for example, in setting up the solver)
self.hasIneq = False
self.hasEq = False
# it's also useful to know if we need derivatives in evaluation (so we can save
# effort)
self.maxStateDeriv = 0
self.maxPointDeriv = 0
# keep track of these kinds of things
self.evalTimer = Timer("eval")
self.nobjGTimer = Timer("nObjG")
self.evalGTimer = Timer("evalG")
def __len__(self):
return len(self.defaultState)
def makeBlankState(self):
"""
this makes something like the default state, but since the default state is special,
the code is redundant
:return: a state vector with the correct initial configuration
"""
newstate = ST.KeyVector(len(self), self.robot.nvars)
for i in range(len(self)):
newstate[i] = self.robot.default
return newstate
def addConstraint(self, t, cons):
self._pointConstraints.append((t, cons))
if cons.eqs: self.hasEq = True
if cons.ineqs: self.hasIneq = True
self.maxStateDeriv = max(self.maxStateDeriv, cons.usesStateDerivatives)
self.maxPointDeriv = max(self.maxPointDeriv, cons.usesPointDerivatives)
def addAllTimeConstraint(self, cons):
self._allTimesConstraints.append(cons)
if cons.eqs: self.hasEq = True
if cons.ineqs: self.hasIneq = True
self.maxStateDeriv = max(self.maxStateDeriv, cons.usesStateDerivatives)
self.maxPointDeriv = max(self.maxPointDeriv, cons.usesPointDerivatives)
def addPointObjective(self, tupleOrObjective, weight=1.0):
try:
ptObjective = tupleOrObjective[0]
weight = tupleOrObjective[1]
except:
ptObjective = tupleOrObjective
self._pointObjectives.append((ptObjective, weight))
self.maxStateDeriv = max(self.maxStateDeriv, ptObjective.usesStateDerivatives)
self.maxPointDeriv = max(self.maxPointDeriv, ptObjective.usesPointDerivatives)
def changeWeight(self, objective, newWeight):
changed = None
# since we cannot change the tuple, we have to go through the list
# ugly, and non Pythonic
for i in range(len(self._pointObjectives)):
if self._pointObjectives[i][0] == objective:
changed = True
self._pointObjectives[i] = (objective, newWeight)
if changed is None:
raise KeyError("didn't find objective")
def makeStateVector(self, keyvariables):
"""
this makes a state vector (an array of state variables) from a key vector
(an array of variables, with only the active variables)
:param keyvariables: remember this takes a KEYVARIABLES (see states.py)
:return:
"""
# make the state vector
keyvec = self.defaultState.inject(keyvariables, self.excludeKeys)
# turn this into a state sequence
        states = keyvec if self.interpolationScheme is None else self.interpolationScheme(keyvec)
return states
def getStates(self, keyvariablesOrStateVector):
"""
if you're passed either key variables or a state vector, make good use of it
:param keyvariablesOrStateVector:
:return: a state vector appropriate for this spacetime problem
"""
# if we're passed a state vector, allow us to evaluate it
nstates = len(self.defaultState)
try:
if keyvariablesOrStateVector.nkeys == nstates:
states = keyvariablesOrStateVector
else:
raise IndexError("Wrong size State Vector to Spacetime Eval")
except AttributeError:
states = self.makeStateVector(keyvariablesOrStateVector)
return states
def getVarBounds(self):
nstates = len(self) - len(self.excludeKeys)
upper = N.empty(nstates * self.robot.nvars)
lower = N.empty(nstates * self.robot.nvars)
for i in range(nstates):
upper[i * self.robot.nvars:(i + 1) * self.robot.nvars] = self.robot.xUBounds
lower[i * self.robot.nvars:(i + 1) * self.robot.nvars] = self.robot.xLBounds
return (lower, upper)
def eval(self, keyvariablesOrStateVector):
"""
evaluate the spacetime problem from a given state vector
:param keyvariablesOrStateVector:
        :return: three values: a scalar (objective) and two lists, the eqs and the ineqs
"""
self.evalTimer.start()
# keep this around and handy
nstates = len(self.defaultState)
states = self.getStates(keyvariablesOrStateVector)
# compute the velocity and acceleration vectors
# just in case we need them
# note: the ends might not be useful - but we compute something anyway
stvels = None if self.maxStateDeriv < 1 else [
(states[i] - states[i - 1] if i > 0 else states[i + 1] - states[i]) for i in range(nstates)]
stacc = None if self.maxStateDeriv < 2 else \
[states[1] * 2 - states[0] - states[2]] + \
[(states[i] * 2 - states[i - 1] - states[i + 1]) for i in range(1, nstates - 1)] + \
[states[nstates - 2] * 2 - states[nstates - 3] - states[nstates - 1]]
# compute the point position for each point for each time frame
# might be a little wasteful, but can serve as a caching strategy if they are used
# note: the ends may be bogus, so don't use them if you really care
# points = [self.robot(state) for state in states]
points = []
frames = []
for state in states:
p, f = self.robot.getFrames(state)
points.append(p)
frames.append(f)
ptvels = None if self.maxPointDeriv < 1 else \
[pointVels(points[1], points[0], self.robot.noZ)] + \
[pointVels(points[i], points[i - 1], self.robot.noZ) for i in range(1, nstates)]
ptacc = None if self.maxPointDeriv < 2 else \
[pointAccs(points[0], points[1], points[2], self.robot.noZ)] + \
[pointAccs(points[i - 1], points[i], points[i + 1], self.robot.noZ) for i in range(1, nstates - 1)] + \
[pointAccs(points[nstates - 3], points[nstates - 2], points[nstates - 1], self.robot.noZ)]
#######################
# now gather up all of the constraints - point constraints are at specific
# times
eqs = []
ineqs = []
# CB - pass constraints values similar to objectives
conTerms = {"states": states, "points": points, "t": 0}
# first let the robot add the constraints it wants to
for t in range(nstates):
if t not in self.excludeKeys:
conTerms["t"] = t
conTerms["state"] = states[t]
conTerms["points"] = points[t]
conTerms["frames"] = frames[t]
if stvels: conTerms["stvel"] = stvels[t]
if stacc: conTerms["stacc"] = stacc[t]
if ptvels: conTerms["ptvel"] = ptvels[t]
if ptacc: conTerms["ptacc"] = ptacc[t]
e, i = self.robot.constraint(**conTerms)
eqs.append(e)
ineqs.append(i)
# now add the point constraints
# check to avoid things on excluded keys
for t, c in self._pointConstraints:
if t not in self.excludeKeys:
conTerms["t"] = t
conTerms["state"] = states[t]
conTerms["points"] = points[t]
conTerms["frames"] = frames[t]
if stvels: conTerms["stvel"] = stvels[t]
if stacc: conTerms["stacc"] = stacc[t]
if ptvels: conTerms["ptvel"] = ptvels[t]
if ptacc: conTerms["ptacc"] = ptacc[t]
e, i = c.constraint(**conTerms)
eqs.append(e)
ineqs.append(i)
# now add the all times constraints
# note that we skip times with excluded keys
for t in range(nstates):
if t not in self.excludeKeys:
for c in self._allTimesConstraints:
conTerms["t"] = t
conTerms["state"] = states[t]
conTerms["points"] = points[t]
conTerms["frames"] = frames[t]
if stvels: conTerms["stvel"] = stvels[t]
if stacc: conTerms["stacc"] = stacc[t]
if ptvels: conTerms["ptvel"] = ptvels[t]
if ptacc: conTerms["ptacc"] = ptacc[t]
e, i = c.constraint(**conTerms)
eqs.append(e)
ineqs.append(i)
#######################
# now make the objective function
# warning - rather than += (have obj be a number) collect all the terms
# as a list and use sum - this way automatic differentiation can look at
# all terms together
# this did not actually seem to make a difference in terms of performance
# so maybe it could be switched back
# the issue is that sum just seems to use radd - so we could provide a better
# implementation of sum someday
        # because objTerms is built at each time step, we want to loop over time steps
objlist = []
objTerms = {"states": states, "points": points, "t": 0}
for t in range(nstates):
objTerms["t"] = t
objTerms["state"] = states[t]
objTerms["points"] = points[t]
objTerms["frames"] = frames[t]
if stvels: objTerms["stvel"] = stvels[t]
if stacc: objTerms["stacc"] = stacc[t]
if ptvels: objTerms["ptvel"] = ptvels[t]
if ptacc: objTerms["ptacc"] = ptacc[t]
for po in self._pointObjectives:
                # we assume that it's a tuple (obj, weight)
try:
p = po[0]
w = po[1]
except:
p = po
w = 1
# in the event that the end derivatives aren't useful, avoid using them
# for first derivatives, the 0 time is suspect, for 2nd derivatives, the end is as well
dmax = max(p.usesPointDerivatives, p.usesStateDerivatives)
if dmax < 1 or t > 0: # if we use derivatives skip the first
if dmax < 2 or t < nstates - 1: # if we do second derivatives, skip the last
objlist.append(p(**objTerms) * w)
# ad does this in a naive way
obj = AD.fsum(objlist)
self.lastKeyVariables = keyvariablesOrStateVector
self.lastStates = states
self.lastPoints = points
self.lastFrames = frames
self.evalTimer.end()
return obj, list(chain.from_iterable(eqs)), list(chain.from_iterable(ineqs))
# evaluation with derivatives
def evalG(self, x):
self.evalGTimer.start()
v = self.makeAdVars(x)
fv, ev, iv = self.eval(v)
f = fv.x if isinstance(fv, AD.ADF) else fv
fg = fv.gradient(v) if isinstance(fv, AD.ADF) else N.zeros((len(x)))
        if ev is not None and len(ev):
e = [(c.x if isinstance(c, AD.ADF) else c) for c in ev]
el = [(c.gradient(v) if isinstance(c, AD.ADF) else N.zeros((len(x)))) for c in ev]
eg = N.vstack(el)
else:
e = []
eg = []
        if iv is not None and len(iv):
i = [(c.x if isinstance(c, AD.ADF) else c) for c in iv]
ig = N.vstack([(c.gradient(v) if isinstance(c, AD.ADF) else N.zeros((len(x)))) for c in iv])
else:
i = []
ig = []
self.evalGTimer.stop()
return f, e, i, fg, eg, ig
def makeAdVars(self, vector):
"""
this makes a keyvariables vector - but makes each of the variables an adnumber
        so we can take derivatives of it. Each is assigned a meaningful name.
:param vector: the initial state of the variables
:return: a vector of adnumbers
"""
stateIds = [i for i in range(self.defaultState.nkeys) if i not in self.excludeKeys]
adv = []
c = 0
for t in stateIds:
for (i, vn) in enumerate(self.robot.varnames):
adv.append(AD.adnumber(vector[c], "%d:%s" % (t, vn)))
c += 1
return N.array(adv)
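# Usage sketch (the robot object, objective, and key variables below are
# hypothetical, shown only to illustrate the intended call sequence):
#   st = Spacetime(robot, nstates=20)
#   st.addPointObjective(myObjective, weight=2.0)
#   obj, eqs, ineqs = st.eval(keyvars)            # plain evaluation
#   f, e, i, fg, eg, ig = st.evalG(flat_keyvars)  # evaluation with AD gradients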
|
{"hexsha": "999567180b6525386c5653526329ea8170d4b7a2", "size": 14850, "ext": "py", "lang": "Python", "max_stars_repo_path": "trajopt/spacetime/spacetime.py", "max_stars_repo_name": "uwgraphics/trajectoryoptimizer-public", "max_stars_repo_head_hexsha": "51a5f7c183184c033f2f12964e7dd935532331ff", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trajopt/spacetime/spacetime.py", "max_issues_repo_name": "uwgraphics/trajectoryoptimizer-public", "max_issues_repo_head_hexsha": "51a5f7c183184c033f2f12964e7dd935532331ff", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trajopt/spacetime/spacetime.py", "max_forks_repo_name": "uwgraphics/trajectoryoptimizer-public", "max_forks_repo_head_hexsha": "51a5f7c183184c033f2f12964e7dd935532331ff", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8309859155, "max_line_length": 119, "alphanum_fraction": 0.577979798, "include": true, "reason": "import numpy", "num_tokens": 3720}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 25 12:30:44 2017
@author: Big Pigeon
"""
import pdb
import os
import keras
import h5py
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
import numpy as np
import imagenet_utils
import time
from scipy import ndimage
from scipy import misc
import pickle
import shutil
#Update Theano
#sudo pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps
#To change backend: edit .keras/keras.json, use either theano or tensorflow
#Implementation of the VGG16 model using Keras
#Uses pre-trained weights that need to be loaded from a specific path
#dropFinalCNN pops off the final convolutional block to gain access to embedding vectors; enable with the True flag
def keras_VGG16(weights_path, dropFinalCNN=False):
print("loading neural net")
fstart = time.time()
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
if not dropFinalCNN:
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
    f = h5py.File(weights_path, 'r')
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
# we don't look at the last (fully-connected) layers in the savefile
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')
fend = time.time()
print(fend - fstart)
return model
#Preprocessing steps for VGG models so that test images are resized according to
#the size images the model was trained on
def preprocess_image(img_path, model):
# scale the image, according to the format used during VGG training on ImageNet
# if model == 'InceptionV3':
# im = image.load_img(img_path, target_size=(299, 299))
# else:
im = image.load_img(img_path, target_size=(224, 224))
# plt.figure(figsize=(4, 4))
# plt.axis("off")
# plt.imshow(im)
x = image.img_to_array(im)
x = np.expand_dims(x, axis=0)
x = imagenet_utils.preprocess_input(x)
return x
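#Usage sketch (the paths are placeholders, not files shipped with this script):
#load the truncated network once, then embed an image by running the
#preprocessed array through it.
#    model = keras_VGG16("vgg16_weights.h5", dropFinalCNN=True)
#    x = preprocess_image("some_image.jpg", "th")
#    embedding = model.predict(x)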
datagen = ImageDataGenerator(
rotation_range=360,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=False,
fill_mode='nearest')
def dataGen(srcdir, targetdir, n):
shutil.rmtree(targetdir, ignore_errors=True, onerror=None)
os.mkdir(targetdir)
for a in set([im.split("_")[0] for im in os.listdir(srcdir)]):
os.mkdir(os.path.join(targetdir, a))
for im in os.listdir(srcdir):
img = load_img(os.path.join(srcdir, im))
        x = img_to_array(img)  # this is a Numpy array with shape (channels, height, width)
        x = x.reshape((1,) + x.shape)  # this is a Numpy array with shape (1, channels, height, width)
i = 0
splits = im.split("_")
cap_class = im.split("_")[0]
if len(splits) == 3:
SAVE_PREFIX = cap_class + "_" + splits[1] + "_" + splits[2].split(".")[0]
else:
SAVE_PREFIX = cap_class + "_" + splits[1].split(".")[0]
        for batch in datagen.flow(x, batch_size=1,
                                  save_to_dir=os.path.join(targetdir, cap_class), save_prefix=SAVE_PREFIX, save_format='jpeg'):
i += 1
if i > n - 1:
break
def getTraining(imgRootDir):
    trainX = []
    trainY = []
    for im in os.listdir(imgRootDir):
        class_dir = os.path.join(imgRootDir, im)
        preprocessed = np.ndarray((len(os.listdir(class_dir)), 3, 224, 224), dtype=np.float32)
        count = 0
        for img in os.listdir(class_dir):
            scaledIm = preprocess_image(os.path.join(class_dir, img), "th")
            preprocessed[count] = scaledIm
            count = count + 1
        trainX.extend(preprocessed)
        ys = np.tile(im, len(preprocessed))
        trainY.extend(ys)
    trainData = {"data": trainX,
                 "label": trainY}
    return trainData
def computeBottleneck(model, imgRootDir, pklFileName):
    trainX = []
    trainY = []
    for im in os.listdir(imgRootDir):
        class_dir = os.path.join(imgRootDir, im)
        preprocessed = np.ndarray((len(os.listdir(class_dir)), 3, 224, 224), dtype=np.float32)
        count = 0
        for img in os.listdir(class_dir):
            scaledIm = preprocess_image(os.path.join(class_dir, img), "th")
            preprocessed[count] = scaledIm
            count = count + 1
        print(preprocessed.shape)
        p = model.predict(preprocessed, verbose=1)
        trainX.extend(p)
        ys = np.tile(im, len(preprocessed))
        trainY.extend(ys)
    trainData = {"data": trainX,
                 "label": trainY}
    output = open(pklFileName, 'wb')
    pickle.dump(trainData, output)
    output.close()
    return trainData
def computeBottleneckTest(model, imgRootDir, pklFileName):
    trainX = []
    trainY = []
    preprocessed = np.ndarray((len(os.listdir(imgRootDir)), 3, 224, 224), dtype=np.float32)
    count = 0
    for im in os.listdir(imgRootDir):
        scaledIm = preprocess_image(os.path.join(imgRootDir, im), "th")
        preprocessed[count] = scaledIm
        ys = np.tile(im.split("_")[0], 1)
        trainY.extend(ys)
        count = count + 1
    print(preprocessed.shape)
    p = model.predict(preprocessed, verbose=1)
    trainX.extend(p)
    trainData = {"data": trainX,
                 "label": trainY}
    output = open(pklFileName, 'wb')
    pickle.dump(trainData, output)
    output.close()
    return trainData
def createClassWeights(trainDir, images):
    classWeights = {}
    count = 0
    classes = len(os.listdir(trainDir))
    for im in os.listdir(trainDir):
        classWeights[count] = images / classes / len(os.listdir(os.path.join(trainDir, im)))
        count = count + 1
    return classWeights
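#Example (illustrative numbers, assuming Python 3 true division): with 1000
#images in total over 4 classes, a class folder holding 100 files gets weight
#1000 / 4 / 100 = 2.5, so under-represented classes are up-weighted in training.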
def train_top_model(trainPklFile, testPklFiles, topModelWeightsPath, trainOutDir, epochs):
trainInput = open(trainPklFile, 'rb')
trainData = pickle.load(trainInput)
y_all = LabelEncoder().fit_transform(trainData["label"])
y_all = np_utils.to_categorical(y_all)
print(len(y_all), "label size")
trainNp = np.array(trainData["data"])
model = Sequential()
model.add(Flatten(input_shape=trainNp.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(os.listdir(trainOutDir)), activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
trainNp, y_all = shuffle(trainNp, y_all, random_state=0)
model.fit(trainNp, y_all,
nb_epoch=epochs, batch_size=1024,validation_split=0.2, shuffle=True,
verbose=1, class_weight = createClassWeights(trainOutDir, len(y_all)))
for testPklFile in testPklFiles:
testInput = open(testPklFile, 'rb')
testData = pickle.load(testInput)
preds = model.predict(np.array(testData["data"]), verbose = 1)
model.save_weights(topModelWeightsPath)
class_names = sorted(set(testData["label"]))
correct = 0
class_truth = {}
for cn in class_names:
class_truth[cn] = {'correct':0, 'incorrect':0}
for i in range(len(preds)):
maxIndex = np.argmax(preds[i])
this_truth = testData["label"][i]
if (this_truth == class_names[maxIndex]):
correct += 1
class_truth[this_truth]['correct'] += 1
else:
class_truth[this_truth]['incorrect'] += 1
print(" ")
for i in class_names:
print(i, " accuracy: ", class_truth[i]['correct'] / (class_truth[i]['correct'] + class_truth[i]['incorrect']))
print("accuracy: ", correct / len(testData["label"]))
|
{"hexsha": "3f7b313f1816154beee3599a30ff74cc240ff6da", "size": 10482, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "hprovyn/keras-experiments", "max_stars_repo_head_hexsha": "630d6edcc4662e11e23321e6a498d8430cc4a8b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model.py", "max_issues_repo_name": "hprovyn/keras-experiments", "max_issues_repo_head_hexsha": "630d6edcc4662e11e23321e6a498d8430cc4a8b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "hprovyn/keras-experiments", "max_forks_repo_head_hexsha": "630d6edcc4662e11e23321e6a498d8430cc4a8b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6789667897, "max_line_length": 127, "alphanum_fraction": 0.5823316161, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2649}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_toeplitz_spectral [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_toeplitz_spectral&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_toeplitz_spectral).
import numpy as np
from numpy import linalg
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz
from arpym.tools import pca_cov, add_logo
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_toeplitz_spectral-parameters)
b = 0.5
sigma2_eps = 1-b**2
tvec = range(200, 600)
delta_orth = np.zeros((len(tvec), 1))
delta_decomp = np.zeros((len(tvec), 1))
delta_spectrum = np.zeros((len(tvec), 1))
for t in range(1, len(tvec)+1):
# ## Step 1: Compute Autocovariance function
k_x = sigma2_eps*b**(np.arange(2*t+1))/(1-b**2) # autocovariance fun
cv_x = toeplitz(k_x) # autocovariance matrix
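    # (For reference: this is the standard AR(1) autocovariance,
    # $k_X(h)=\sigma_\varepsilon^2\, b^{|h|}/(1-b^2)$, which the array above
    # implements for lags $h=0,\dots,2t$.)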
# ## Step 2: Compute theoretical eigenvectors
omega_vec = np.zeros((2*t+1, 1))
for j in range(1, 2*t+2):
omega_vec[j-1] = ((-1)**(j-1))*j*np.pi/(2*t+1)
omega_vec = np.sort(omega_vec, axis=None)
delta_omega = 2*np.pi/(2*t+1)
s = np.zeros((2*t+1, t))
c = np.zeros((2*t+1, t+1))
for j in range(t):
s[:, j] = np.sin(omega_vec[j]*np.linspace(-t, t, 2*t+1))
for j in range(t+1):
c[:, j] = np.cos(omega_vec[j+t]*np.linspace(-t, t, 2*t+1))
p = np.c_[s, c]
# ## Step 3: Compute spectral density of the AR(1)
ktilde_x = sigma2_eps/(1-2*b*np.cos(omega_vec) + b**2)
    # ## Step 4: Compute empirical eigenvalues and eigenvectors
e, lambda2 = pca_cov(cv_x)
if b < 0:
ind_asc = np.argsort(lambda2)
lambda2 = lambda2[ind_asc]
e = e[:, ind_asc]
lambda2_new = []
        ind_e = []
for n in range(1, 2*t+2):
if n % 2 == 1:
lambda2_new = np.append(lambda2_new, lambda2[n-1])
ind_e = np.append(ind_e, n-1)
else:
lambda2_new = np.append(lambda2[n-1], lambda2_new)
ind_e = np.append(n-1, ind_e)
ind_e1 = [int(i) for i in ind_e]
lambda2 = lambda2_new
e = e[:, ind_e1]
# ## Step 5: Compute spectrum error
delta_spectrum[t-1] = linalg.norm(lambda2-ktilde_x)/linalg.norm(ktilde_x)
# ## Step 6: Compute decomposition error
cv_x_recov = p@np.diag(ktilde_x)@p.T
eta = np.sqrt(np.pi/(delta_omega))*e
delta_decomp[t-1] = linalg.norm(eta@np.diag(lambda2)@eta.T-cv_x_recov)/linalg.norm(cv_x_recov)
# ## Step 7: Compute orthogonalization error
delta_orth[t-1] = linalg.norm(p.T@p-np.pi/(delta_omega)*np.eye(2*t+1))/linalg.norm(np.pi/(delta_omega)*np.eye(2*t+1))
# ## Plots
# +
plt.style.use('arpm')
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'STIXGeneral:italic'
plt.rcParams['mathtext.bf'] = 'STIXGeneral:italic:bold'
darkgreen = [0, 0.7, 0]
darkred = [.9, 0, 0]
darkgrey = [.1, .1, .1]
mydpi = 72.0
f = plt.figure(figsize=(1280.0/mydpi, 720.0/mydpi), dpi=mydpi)
taulim = [tvec[0], tvec[-1]]
plt.plot(tvec, delta_spectrum, color=darkgreen, linewidth=1)
plt.plot(tvec, delta_decomp, color=darkred, linewidth=1)
plt.plot(tvec, delta_orth, color=darkgrey, linewidth=1)
plt.xlabel('$t$', fontsize=17)
plt.legend([r'Spectrum error', r'Decomposition error', r'Orthogonalization error'])
plt.title('Spectral theorem for Toeplitz matrices', fontsize=20)
add_logo(f, location=4)
plt.tight_layout()
|
{"hexsha": "2392d1a0f5b1b64ec550e0ec2507c683b0b5d27c", "size": 3787, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/sources/s_toeplitz_spectral.py", "max_stars_repo_name": "dpopadic/arpmRes", "max_stars_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-04-10T13:24:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T08:20:42.000Z", "max_issues_repo_path": "scripts/sources/s_toeplitz_spectral.py", "max_issues_repo_name": "dpopadic/arpmRes", "max_issues_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/sources/s_toeplitz_spectral.py", "max_forks_repo_name": "dpopadic/arpmRes", "max_forks_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-08-13T22:02:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T17:49:12.000Z", "avg_line_length": 32.6465517241, "max_line_length": 209, "alphanum_fraction": 0.6416688672, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1274}
|
import sys
import random
from collections import deque
import time
import numpy as np
import torch
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool, AgentRenderVariant
# make sure the root path is in system path
from pathlib import Path
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
from importlib_resources import path
import fc_treeobs.nets
from fc_treeobs.dueling_double_dqn import Agent
from fc_treeobs.utils import normalize_observation
def main(argv):
random.seed(1)
np.random.seed(1)
# Initialize a random map with a random number of agents
x_dim = np.random.randint(20, 40)
y_dim = np.random.randint(20, 40)
n_agents = np.random.randint(3, 4)
n_goals = n_agents + np.random.randint(0, 3)
min_dist = int(0.75 * min(x_dim, y_dim))
tree_depth = 4
# Get an observation builder and predictor
predictor = ShortestPathPredictorForRailEnv()
observation_helper = TreeObsForRailEnv(max_depth=tree_depth, predictor=predictor)
    # Use the malfunction generator to break agents from time to time
stochastic_data = {'prop_malfunction': 0.0, # Percentage of defective agents
'malfunction_rate': 0, # Rate of malfunction occurrence
'min_duration': 3, # Minimal duration of malfunction
'max_duration': 20 # Max duration of malfunction
}
# Different agent types (trains) with different speeds.
speed_ration_map = {1.: 0.25, # Fast passenger train
1. / 2.: 0.25, # Fast freight train
1. / 3.: 0.25, # Slow commuter train
1. / 4.: 0.25} # Slow freight train
env = RailEnv(width=x_dim,
height=y_dim,
rail_generator=sparse_rail_generator(max_num_cities=3,
# Number of cities in map (where train stations are)
seed=1, # Random seed
grid_mode=False,
max_rails_between_cities=2,
max_rails_in_city=3),
schedule_generator=sparse_schedule_generator(speed_ration_map),
number_of_agents=n_agents,
stochastic_data=stochastic_data, # Malfunction data generator
obs_builder_object=observation_helper)
env.reset(True, True)
# Initiate the renderer
env_renderer = RenderTool(env, gl="PILSVG",
agent_render_variant=AgentRenderVariant.AGENT_SHOWS_OPTIONS_AND_BOX,
show_debug=False,
screen_height=1000, # Adjust these parameters to fit your resolution
screen_width=1000) # Adjust these parameters to fit your resolution
handle = env.get_agent_handles()
num_features_per_node = env.obs_builder.observation_dim
nr_nodes = 0
for i in range(tree_depth + 1):
nr_nodes += np.power(4, i)
state_size = 2 * num_features_per_node * nr_nodes
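    # For tree_depth = 4 this gives nr_nodes = 1 + 4 + 16 + 64 + 256 = 341, so
    # with two stacked time steps state_size = 2 * observation_dim * 341.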
action_size = 5
n_trials = 10
observation_radius = 10
max_steps = int(3 * (env.height + env.width))
action_dict = dict()
time_obs = deque(maxlen=2)
agent_obs = [None] * env.get_num_agents()
# Init and load agent
agent = Agent(state_size, action_size)
with path(fc_treeobs.nets, "multi_agent_2ts_checkpoint200.pth") as file_in:
agent.qnetwork_local.load_state_dict(torch.load(file_in))
# Vars used to record agent performance
record_images = False
frame_step = 0
for trials in range(1, n_trials + 1):
# Reset environment
obs, info = env.reset(True, True)
env_renderer.reset()
# Build first two-time step observation
for a in range(env.get_num_agents()):
obs[a] = normalize_observation(obs[a], tree_depth, observation_radius=10)
# Accumulate two time steps of observation (Here just twice the first state)
for i in range(2):
time_obs.append(obs)
        # Build the agent-specific double time step observation
for a in range(env.get_num_agents()):
agent_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
# Run episode
for step in range(max_steps):
time.sleep(0.01)
env_renderer.render_env(show=True, show_observations=False, show_predictions=True)
if record_images:
env_renderer.gl.save_image("./Images/Avoiding/flatland_frame_{:04d}.bmp".format(frame_step))
frame_step += 1
# Perform action for each agent
for a in range(env.get_num_agents()):
action = agent.act(agent_obs[a], eps=0)
action_dict.update({a: action})
# Environment step
next_obs, all_rewards, done, _ = env.step(action_dict)
# Collect observation after environment step
for a in range(env.get_num_agents()):
next_obs[a] = normalize_observation(next_obs[a], tree_depth, observation_radius=10)
# Add new obs to the obs vector
# Since time_obs is a deque of max_len = 2, an append on the right side when the deque is full
# provokes a pop of the element from the left side
time_obs.append(next_obs)
# Create obs using obs at time step t-1 and ob at time step t
for a in range(env.get_num_agents()):
agent_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
if done['__all__']:
break
if __name__ == '__main__':
main(sys.argv[1:])
|
{"hexsha": "b77bacd88de5c4aefce4d7520d82ac38cbd7c61f", "size": 6180, "ext": "py", "lang": "Python", "max_stars_repo_path": "fc_treeobs/inference_2ts.py", "max_stars_repo_name": "giulic3/flatland-challenge-marl", "max_stars_repo_head_hexsha": "391197188c9ddf56cfac7a03f48bb3bbf8e53dd5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-05-02T15:55:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T07:33:50.000Z", "max_issues_repo_path": "fc_treeobs/inference_2ts.py", "max_issues_repo_name": "giulic3/flatland-challenge-marl", "max_issues_repo_head_hexsha": "391197188c9ddf56cfac7a03f48bb3bbf8e53dd5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-20T12:41:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-01T10:40:23.000Z", "max_forks_repo_path": "fc_treeobs/inference_2ts.py", "max_forks_repo_name": "giulic3/flatland-challenge-marl", "max_forks_repo_head_hexsha": "391197188c9ddf56cfac7a03f48bb3bbf8e53dd5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-03T09:41:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-03T09:41:59.000Z", "avg_line_length": 40.6578947368, "max_line_length": 108, "alphanum_fraction": 0.6231391586, "include": true, "reason": "import numpy", "num_tokens": 1379}
|
[STATEMENT]
lemma Ri_effective:
assumes
in_\<gamma>: "\<gamma> \<in> \<Gamma>" and
concl_of_in_n_un_rf_n: "concl_of \<gamma> \<in> N \<union> Rf N"
shows "\<gamma> \<in> Ri N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
obtain CC D E where
\<gamma>: "\<gamma> = Infer CC D E"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>CC D E. \<gamma> = Infer CC D E \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases \<gamma>)
[PROOF STATE]
proof (state)
this:
\<gamma> = Infer CC D E
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<gamma> = Infer CC D E
[PROOF STEP]
have cc: "CC = side_prems_of \<gamma>" and d: "D = main_prem_of \<gamma>" and e: "E = concl_of \<gamma>"
[PROOF STATE]
proof (prove)
using this:
\<gamma> = Infer CC D E
goal (1 subgoal):
1. CC = side_prems_of \<gamma> &&& D = main_prem_of \<gamma> &&& E = concl_of \<gamma>
[PROOF STEP]
unfolding \<gamma>
[PROOF STATE]
proof (prove)
using this:
Infer CC D E = Infer CC D E
goal (1 subgoal):
1. CC = side_prems_of (Infer CC D E) &&& D = main_prem_of (Infer CC D E) &&& E = concl_of (Infer CC D E)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
CC = side_prems_of \<gamma>
D = main_prem_of \<gamma>
E = concl_of \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
note e_in_n_un_rf_n = concl_of_in_n_un_rf_n[folded e]
[PROOF STATE]
proof (state)
this:
E \<in> N \<union> Rf N
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
E \<in> N \<union> Rf N
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
assume "E \<in> N"
[PROOF STATE]
proof (state)
this:
E \<in> N
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
E \<in> N
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
have "E < D"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. E < D
[PROOF STEP]
using \<Gamma>_reductive e d in_\<gamma>
[PROOF STATE]
proof (prove)
using this:
?\<gamma> \<in> \<Gamma> \<Longrightarrow> concl_of ?\<gamma> < main_prem_of ?\<gamma>
E = concl_of \<gamma>
D = main_prem_of \<gamma>
\<gamma> \<in> \<Gamma>
goal (1 subgoal):
1. E < D
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
E < D
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
E \<in> N
E < D
[PROOF STEP]
have
"set_mset {#E#} \<subseteq> N" and "\<forall>I. I \<Turnstile>m {#E#} + CC \<longrightarrow> I \<Turnstile> E" and "\<forall>D'. D' \<in># {#E#} \<longrightarrow> D' < D"
[PROOF STATE]
proof (prove)
using this:
E \<in> N
E < D
goal (1 subgoal):
1. set_mset {#E#} \<subseteq> N &&& \<forall>I. I \<Turnstile>m {#E#} + CC \<longrightarrow> I \<Turnstile> E &&& \<forall>D'. D' \<in># {#E#} \<longrightarrow> D' < D
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
set_mset {#E#} \<subseteq> N
\<forall>I. I \<Turnstile>m {#E#} + CC \<longrightarrow> I \<Turnstile> E
\<forall>D'. D' \<in># {#E#} \<longrightarrow> D' < D
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
set_mset {#E#} \<subseteq> N
\<forall>I. I \<Turnstile>m {#E#} + CC \<longrightarrow> I \<Turnstile> E
\<forall>D'. D' \<in># {#E#} \<longrightarrow> D' < D
[PROOF STEP]
have "redundant_infer N \<gamma>"
[PROOF STATE]
proof (prove)
using this:
set_mset {#E#} \<subseteq> N
\<forall>I. I \<Turnstile>m {#E#} + CC \<longrightarrow> I \<Turnstile> E
\<forall>D'. D' \<in># {#E#} \<longrightarrow> D' < D
goal (1 subgoal):
1. redundant_infer N \<gamma>
[PROOF STEP]
using redundant_infer_def cc d e
[PROOF STATE]
proof (prove)
using this:
set_mset {#E#} \<subseteq> N
\<forall>I. I \<Turnstile>m {#E#} + CC \<longrightarrow> I \<Turnstile> E
\<forall>D'. D' \<in># {#E#} \<longrightarrow> D' < D
redundant_infer ?N ?\<gamma> = (\<exists>DD. set_mset DD \<subseteq> ?N \<and> (\<forall>I. I \<Turnstile>m DD + side_prems_of ?\<gamma> \<longrightarrow> I \<Turnstile> concl_of ?\<gamma>) \<and> (\<forall>D. D \<in># DD \<longrightarrow> D < main_prem_of ?\<gamma>))
CC = side_prems_of \<gamma>
D = main_prem_of \<gamma>
E = concl_of \<gamma>
goal (1 subgoal):
1. redundant_infer N \<gamma>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
redundant_infer N \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
E \<in> N \<Longrightarrow> redundant_infer N \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
E \<in> N \<Longrightarrow> redundant_infer N \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
E \<in> N \<Longrightarrow> redundant_infer N \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
assume "E \<in> Rf N"
[PROOF STATE]
proof (state)
this:
E \<in> Rf N
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
E \<in> Rf N
[PROOF STEP]
obtain DD where
dd_sset: "set_mset DD \<subseteq> N" and
dd_imp_e: "\<forall>I. I \<Turnstile>m DD \<longrightarrow> I \<Turnstile> E" and
dd_lt_e: "\<forall>C'. C' \<in># DD \<longrightarrow> C' < E"
[PROOF STATE]
proof (prove)
using this:
E \<in> Rf N
goal (1 subgoal):
1. (\<And>DD. \<lbrakk>set_mset DD \<subseteq> N; \<forall>I. I \<Turnstile>m DD \<longrightarrow> I \<Turnstile> E; \<forall>C'. C' \<in># DD \<longrightarrow> C' < E\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding Rf_def
[PROOF STATE]
proof (prove)
using this:
E \<in> {C. \<exists>DD. set_mset DD \<subseteq> N \<and> (\<forall>I. I \<Turnstile>m DD \<longrightarrow> I \<Turnstile> C) \<and> (\<forall>D. D \<in># DD \<longrightarrow> D < C)}
goal (1 subgoal):
1. (\<And>DD. \<lbrakk>set_mset DD \<subseteq> N; \<forall>I. I \<Turnstile>m DD \<longrightarrow> I \<Turnstile> E; \<forall>C'. C' \<in># DD \<longrightarrow> C' < E\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
set_mset DD \<subseteq> N
\<forall>I. I \<Turnstile>m DD \<longrightarrow> I \<Turnstile> E
\<forall>C'. C' \<in># DD \<longrightarrow> C' < E
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
from dd_lt_e
[PROOF STATE]
proof (chain)
picking this:
\<forall>C'. C' \<in># DD \<longrightarrow> C' < E
[PROOF STEP]
have "\<forall>Da. Da \<in># DD \<longrightarrow> Da < D"
[PROOF STATE]
proof (prove)
using this:
\<forall>C'. C' \<in># DD \<longrightarrow> C' < E
goal (1 subgoal):
1. \<forall>Da. Da \<in># DD \<longrightarrow> Da < D
[PROOF STEP]
using d e in_\<gamma> \<Gamma>_reductive less_trans
[PROOF STATE]
proof (prove)
using this:
\<forall>C'. C' \<in># DD \<longrightarrow> C' < E
D = main_prem_of \<gamma>
E = concl_of \<gamma>
\<gamma> \<in> \<Gamma>
?\<gamma> \<in> \<Gamma> \<Longrightarrow> concl_of ?\<gamma> < main_prem_of ?\<gamma>
\<lbrakk>?x < ?y; ?y < ?z\<rbrakk> \<Longrightarrow> ?x < ?z
goal (1 subgoal):
1. \<forall>Da. Da \<in># DD \<longrightarrow> Da < D
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<forall>Da. Da \<in># DD \<longrightarrow> Da < D
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>Da. Da \<in># DD \<longrightarrow> Da < D
[PROOF STEP]
have "redundant_infer N \<gamma>"
[PROOF STATE]
proof (prove)
using this:
\<forall>Da. Da \<in># DD \<longrightarrow> Da < D
goal (1 subgoal):
1. redundant_infer N \<gamma>
[PROOF STEP]
using redundant_infer_def dd_sset dd_imp_e cc d e
[PROOF STATE]
proof (prove)
using this:
\<forall>Da. Da \<in># DD \<longrightarrow> Da < D
redundant_infer ?N ?\<gamma> = (\<exists>DD. set_mset DD \<subseteq> ?N \<and> (\<forall>I. I \<Turnstile>m DD + side_prems_of ?\<gamma> \<longrightarrow> I \<Turnstile> concl_of ?\<gamma>) \<and> (\<forall>D. D \<in># DD \<longrightarrow> D < main_prem_of ?\<gamma>))
set_mset DD \<subseteq> N
\<forall>I. I \<Turnstile>m DD \<longrightarrow> I \<Turnstile> E
CC = side_prems_of \<gamma>
D = main_prem_of \<gamma>
E = concl_of \<gamma>
goal (1 subgoal):
1. redundant_infer N \<gamma>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
redundant_infer N \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
E \<in> Rf N \<Longrightarrow> redundant_infer N \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
E \<in> N \<Longrightarrow> redundant_infer N \<gamma>
E \<in> Rf N \<Longrightarrow> redundant_infer N \<gamma>
[PROOF STEP]
show "\<gamma> \<in> Ri N"
[PROOF STATE]
proof (prove)
using this:
E \<in> N \<Longrightarrow> redundant_infer N \<gamma>
E \<in> Rf N \<Longrightarrow> redundant_infer N \<gamma>
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
using in_\<gamma> e_in_n_un_rf_n
[PROOF STATE]
proof (prove)
using this:
E \<in> N \<Longrightarrow> redundant_infer N \<gamma>
E \<in> Rf N \<Longrightarrow> redundant_infer N \<gamma>
\<gamma> \<in> \<Gamma>
E \<in> N \<union> Rf N
goal (1 subgoal):
1. \<gamma> \<in> Ri N
[PROOF STEP]
unfolding Ri_def
[PROOF STATE]
proof (prove)
using this:
E \<in> N \<Longrightarrow> redundant_infer N \<gamma>
E \<in> Rf N \<Longrightarrow> redundant_infer N \<gamma>
\<gamma> \<in> \<Gamma>
E \<in> N \<union> Rf N
goal (1 subgoal):
1. \<gamma> \<in> {\<gamma> \<in> \<Gamma>. redundant_infer N \<gamma>}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<gamma> \<in> Ri N
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4162, "file": "Ordered_Resolution_Prover_Standard_Redundancy", "length": 44}
|
[STATEMENT]
lemma card_Mi_le_floor_div_2_Vi:
assumes "OSC L E \<and> matching V E M \<and> i > 1"
shows "card (matching_i i V E M L) \<le> (card (V_i i V E M L)) div 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card (matching_i i V E M L) \<le> card (V_i i V E M L) div 2
[PROOF STEP]
using card_Mi_twice_card_Vi[OF assms]
[PROOF STATE]
proof (prove)
using this:
2 * card (matching_i i V E M L) = card (V_i i V E M L)
goal (1 subgoal):
1. card (matching_i i V E M L) \<le> card (V_i i V E M L) div 2
[PROOF STEP]
by arith
|
{"llama_tokens": 257, "file": "Max-Card-Matching_Matching", "length": 2}
|
# Aims to provide functions for fast periodic daubechies transforms (forward and inverse) in 2D
# https://github.com/amitgroup/amitgroup/tree/master/amitgroup/util/wavelet
import numpy as np
import scipy
import scipy.sparse
SPARSITY_THRESHOLD = 256
def _populate(W, filtr, yoffset):
N = len(filtr)
for i in range(W.shape[1]//2):
for j in range(N):
W[yoffset+i, (-(N-2)//2+2*i+j)%W.shape[1]] += filtr[j]
def _create_W(shape, level, filter_low, filter_high):
d = 1 << (level-1)
sh = (shape[0]//d, shape[1]//d)
if np.min(sh) >= SPARSITY_THRESHOLD:
W = scipy.sparse.lil_matrix(sh)
else:
W = np.asmatrix(np.zeros(sh))
_populate(W, filter_low, 0)
_populate(W, filter_high, shape[0]//(2*d))
if scipy.sparse.issparse(W):
return W.tocsr() # Faster if you're not changing it anymore
else:
return W
def _create_single(shape, level, filtr):
d = 1 << (level-1)
sh = (shape[0]//(2*d), shape[1]//d)
if np.min(sh) >= SPARSITY_THRESHOLD:
GH = scipy.sparse.lil_matrix(sh)
else:
GH = np.asmatrix(np.zeros(sh))
_populate(GH, filtr, 0)
if scipy.sparse.issparse(GH):
return GH.tocsr() # Faster if you're not changing it anymore
else:
return GH
def _qdot(X, A):
return X * A * X.T
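# (Note: _qdot expresses the separable 2-D step X A X^T; the 2-D transforms
# below apply the same pattern inline, e.g. Wgs[levels] * A * WgsT[levels].)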
_db_kernels = {
'haar': np.array([0.70710678118654757, 0.70710678118654757]),
'db1': np.array([0.70710678118654757, 0.70710678118654757]),
'db2': np.array([0.48296291314469025, 0.83651630373746899, 0.22414386804185735, -0.12940952255092145]), # D4
'db3': np.array([0.33267055295095688, 0.80689150931333875, 0.45987750211933132, -0.13501102001039084, -0.085441273882241486, 0.035226291882100656]),
'db4': np.array([0.23037781330885523, 0.71484657055254153, 0.63088076792959036, -0.027983769416983849, -0.18703481171888114, 0.030841381835986965, 0.032883011666982945, -0.010597401784997278]),
'db5': np.array([0.16010239797412501, 0.60382926979747287, 0.72430852843857441, 0.13842814590110342, -0.24229488706619015, -0.03224486958502952, 0.077571493840065148, -0.0062414902130117052, -0.012580751999015526, 0.0033357252850015492]),
'db6': np.array([0.11154074335008017, 0.49462389039838539, 0.75113390802157753, 0.3152503517092432, -0.22626469396516913, -0.12976686756709563, 0.097501605587079362, 0.027522865530016288, -0.031582039318031156, 0.0005538422009938016, 0.0047772575110106514, -0.0010773010849955799]),
'db7': np.array([0.077852054085062364, 0.39653931948230575, 0.72913209084655506, 0.4697822874053586, -0.14390600392910627, -0.22403618499416572, 0.071309219267050042, 0.080612609151065898, -0.038029936935034633, -0.01657454163101562, 0.012550998556013784, 0.00042957797300470274, -0.0018016407039998328, 0.00035371380000103988]),
'db8': np.array([0.054415842243081609, 0.31287159091446592, 0.67563073629801285, 0.58535468365486909, -0.015829105256023893, -0.28401554296242809, 0.00047248457399797254, 0.12874742662018601, -0.017369301002022108, -0.044088253931064719, 0.013981027917015516, 0.0087460940470156547, -0.0048703529930106603, -0.00039174037299597711, 0.00067544940599855677, -0.00011747678400228192]),
'db9': np.array([0.038077947363167282, 0.24383467463766728, 0.6048231236767786, 0.65728807803663891, 0.13319738582208895, -0.29327378327258685, -0.096840783220879037, 0.14854074933476008, 0.030725681478322865, -0.067632829059523988, 0.00025094711499193845, 0.022361662123515244, -0.004723204757894831, -0.0042815036819047227, 0.0018476468829611268, 0.00023038576399541288, -0.00025196318899817888, 3.9347319995026124e-005]),
'db10': np.array([0.026670057900950818, 0.18817680007762133, 0.52720118893091983, 0.68845903945259213, 0.28117234366042648, -0.24984642432648865, -0.19594627437659665, 0.12736934033574265, 0.093057364603806592, -0.071394147165860775, -0.029457536821945671, 0.033212674058933238, 0.0036065535669883944, -0.010733175482979604, 0.0013953517469940798, 0.0019924052949908499, -0.00068585669500468248, -0.0001164668549943862, 9.3588670001089845e-005, -1.3264203002354869e-005]),
'db11': np.array([0.018694297761470441, 0.14406702115061959, 0.44989976435603013, 0.68568677491617847, 0.41196436894789695, -0.16227524502747828, -0.27423084681792875, 0.066043588196690886, 0.14981201246638268, -0.04647995511667613, -0.066438785695020222, 0.031335090219045313, 0.020840904360180039, -0.015364820906201324, -0.0033408588730145018, 0.0049284176560587777, -0.00030859285881515924, -0.00089302325066623663, 0.00024915252355281426, 5.4439074699366381e-005, -3.4634984186983789e-005, 4.4942742772363519e-006]),
'db12': np.array([0.013112257957229239, 0.10956627282118277, 0.37735513521420411, 0.65719872257929113, 0.51588647842780067, -0.044763885653777619, -0.31617845375277914, -0.023779257256064865, 0.18247860592758275, 0.0053595696743599965, -0.09643212009649671, 0.010849130255828966, 0.041546277495087637, -0.01221864906974642, -0.012840825198299882, 0.0067114990087955486, 0.0022486072409952287, -0.0021795036186277044, 6.5451282125215034e-006, 0.00038865306282092672, -8.8504109208203182e-005, -2.4241545757030318e-005, 1.2776952219379579e-005, -1.5290717580684923e-006]),
'db13': np.array([0.0092021335389622788, 0.082861243872901946, 0.31199632216043488, 0.61105585115878114, 0.58888957043121193, 0.086985726179645007, -0.31497290771138414, -0.12457673075080665, 0.17947607942935084, 0.072948933656788742, -0.10580761818792761, -0.026488406475345658, 0.056139477100276156, 0.0023799722540522269, -0.023831420710327809, 0.0039239414487955773, 0.0072555894016171187, -0.002761911234656831, -0.0013156739118922766, 0.00093232613086724904, 4.9251525126285676e-005, -0.00016512898855650571, 3.0678537579324358e-005, 1.0441930571407941e-005, -4.7004164793608082e-006, 5.2200350984547998e-007]),
'db14': np.array([0.0064611534600864905, 0.062364758849384874, 0.25485026779256437, 0.55430561794077093, 0.63118784910471981, 0.21867068775886594, -0.27168855227867705, -0.21803352999321651, 0.13839521386479153, 0.13998901658445695, -0.086748411568110598, -0.071548955503983505, 0.05523712625925082, 0.026981408307947971, -0.030185351540353976, -0.0056150495303375755, 0.012789493266340071, -0.00074621898926387534, -0.003849638868019787, 0.001061691085606874, 0.00070802115423540481, -0.00038683194731287514, -4.1777245770370672e-005, 6.875504252695734e-005, -1.0337209184568496e-005, -4.3897049017804176e-006, 1.7249946753674012e-006, -1.7871399683109222e-007]),
'db15': np.array([0.0045385373615773762, 0.046743394892750617, 0.20602386398692688, 0.49263177170797529, 0.64581314035721027, 0.33900253545462167, -0.19320413960907623, -0.28888259656686216, 0.065282952848765688, 0.19014671400708816, -0.039666176555733602, -0.11112093603713753, 0.033877143923563204, 0.054780550584559995, -0.025767007328366939, -0.020810050169636805, 0.015083918027862582, 0.0051010003604228726, -0.0064877345603061454, -0.00024175649075894543, 0.0019433239803823459, -0.00037348235413726472, -0.00035956524436229364, 0.00015589648992055726, 2.579269915531323e-005, -2.8133296266037558e-005, 3.3629871817363823e-006, 1.8112704079399406e-006, -6.3168823258794506e-007, 6.1333599133037138e-008]),
'db16': np.array([0.0031892209253436892, 0.034907714323629047, 0.1650642834886438, 0.43031272284545874, 0.63735633208298326, 0.44029025688580486, -0.089751089402363524, -0.32706331052747578, -0.02791820813292813, 0.21119069394696974, 0.027340263752899923, -0.13238830556335474, -0.0062397227521562536, 0.075924236044457791, -0.0075889743686425939, -0.036888397691556774, 0.010297659641009963, 0.013993768859843242, -0.0069900145633907508, -0.0036442796214883506, 0.00312802338120381, 0.00040789698084934395, -0.00094102174935854332, 0.00011424152003843815, 0.00017478724522506327, -6.103596621404321e-005, -1.394566898819319e-005, 1.133660866126152e-005, -1.0435713423102517e-006, -7.3636567854418147e-007, 2.3087840868545578e-007, -2.1093396300980412e-008]),
'db17': np.array([0.0022418070010387899, 0.025985393703623173, 0.13121490330791097, 0.37035072415288578, 0.61099661568502728, 0.5183157640572823, 0.027314970403312946, -0.32832074836418546, -0.12659975221599248, 0.19731058956508457, 0.10113548917744287, -0.12681569177849797, -0.057091419631858077, 0.081105986654080822, 0.022312336178011833, -0.046922438389378908, -0.0032709555358783646, 0.022733676583919053, -0.0030429899813869555, -0.0086029215203478147, 0.0029679966915180638, 0.0023012052421511474, -0.001436845304805, -0.00032813251941022427, 0.00043946542776894542, -2.5610109566546042e-005, -8.2048032024582121e-005, 2.3186813798761639e-005, 6.9906009850812941e-006, -4.5059424772259631e-006, 3.0165496099963414e-007, 2.9577009333187617e-007, -8.4239484460081536e-008, 7.2674929685663697e-009]),
'db18': np.array([0.0015763102184365595, 0.019288531724094969, 0.10358846582214751, 0.31467894133619284, 0.57182680776508177, 0.57180165488712198, 0.14722311196952223, -0.29365404073579809, -0.21648093400458224, 0.14953397556500755, 0.16708131276294505, -0.092331884150304119, -0.10675224665906288, 0.064887216212358198, 0.057051247739058272, -0.04452614190225633, -0.023733210395336858, 0.026670705926689853, 0.0062621679544386608, -0.013051480946517112, 0.00011863003387493042, 0.0049433436054565939, -0.0011187326669886426, -0.0013405962983313922, 0.00062846568296447147, 0.0002135815619103188, -0.00019864855231101547, -1.5359171230213409e-007, 3.7412378807308472e-005, -8.5206025374234635e-006, -3.3326344788769603e-006, 1.7687129836228861e-006, -7.691632689865049e-008, -1.1760987670250871e-007, 3.0688358630370302e-008, -2.5079344549419292e-009]),
'db19': np.array([0.0011086697631864314, 0.01428109845082521, 0.08127811326580564, 0.26438843174202237, 0.52443637746688621, 0.60170454913009164, 0.26089495265212009, -0.22809139421653665, -0.28583863175723145, 0.074652269708066474, 0.21234974330662043, -0.033518541903202262, -0.14278569504021468, 0.027584350624887129, 0.086906755555450702, -0.026501236250778635, -0.045674226277784918, 0.021623767409452484, 0.019375549889114482, -0.013988388678695632, -0.0058669222811121953, 0.0070407473670804953, 0.00076895435922424884, -0.0026875518007344408, 0.00034180865344939543, 0.0007358025205041731, -0.00026067613568119951, -0.00012460079173506306, 8.7112704672504432e-005, 5.1059504870906939e-006, -1.6640176297224622e-005, 3.0109643163099385e-006, 1.5319314766978769e-006, -6.8627556577981102e-007, 1.4470882988040879e-008, 4.6369377758023682e-008, -1.1164020670405678e-008, 8.6668488390344833e-010]),
'db20': np.array([0.00077995361366591117, 0.010549394624937735, 0.063423780459005291, 0.21994211355113222, 0.47269618531033147, 0.61049323893785579, 0.36150229873889705, -0.13921208801128787, -0.32678680043353758, -0.016727088308801888, 0.22829105082013823, 0.039850246458519104, -0.15545875070604531, -0.024716827337521424, 0.10229171917513397, 0.0056322468576854544, -0.061722899624668884, 0.0058746818113949465, 0.032294299530119162, -0.0087893249245557647, -0.013810526137727442, 0.0067216273018096935, 0.0044205423867663502, -0.003581494259744107, -0.00083156217287724745, 0.0013925596193045254, -5.3497598443404532e-005, -0.0003851047486990061, 0.00010153288973669777, 6.7742808283730477e-005, -3.7105861833906152e-005, -4.3761438621821972e-006, 7.2412482876637907e-006, -1.0119940100181473e-006, -6.847079596993149e-007, 2.633924226266962e-007, 2.0143220235374613e-010, -1.8148432482976221e-008, 4.05612705554717e-009, -2.9988364896157532e-010]),
}
def _get_filters(wavelet):
global _db_kernels
try:
lowpass = _db_kernels[wavelet]
except KeyError:
raise ValueError("Wavelet type not supported: ('{0}')".format(wavelet))
highpass = lowpass[::-1].copy()
highpass[1::2] *= -1
return lowpass, highpass
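# Example: for 'haar' the lowpass kernel is [c, c] with c = 1/sqrt(2); the
# quadrature-mirror construction above reverses it and negates every other
# tap, giving the highpass kernel [c, -c].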
def _arrange_filter_matrices(shape, wavelet):
filter_low, filter_high = _get_filters(wavelet)
assert len(shape) == 2
assert shape[0] == shape[1], "Shape must be square (at least for now)"
levels_list = range(1, int(np.log2(shape[0]))+1)
max_level = int(np.log2(max(shape)))
# Setup matrices
Ws = [_create_W(shape, level, filter_low, filter_high) for level in levels_list]
Wgs = []
# Combine all the matrices for the steps where we throw away the coefficients.
Wg = np.asmatrix(np.eye(shape[0], shape[1]))
for l in range(0, max_level):
new_M = Ws[l] * Wg
if np.min(new_M.shape) >= SPARSITY_THRESHOLD:
new_M = scipy.sparse.csr_matrix(new_M)
Wgs.append(new_M)
Wg = _create_single(shape, l+1, filter_low) * Wg
Wgs.append(Wg)
Wgs = Wgs[::-1]
WsT = [W.T for W in Ws]
WgsT = [Wg.T for Wg in Wgs]
return Wgs, WgsT, Ws, WsT, max_level
def daubechies_factory(shape, wavelet='db2'):
"""
Creates a forward and an inverse discrete wavelet transform function.
    The functions are specialized for a specific size and wavelet.
.. seealso::
:ref:`wavelet2d`
Parameters
----------
shape : tuple
A tuple describing the size of the input, for instance ``(32, 32)``. Values must be powers of two.
wavelet : str
Type of wavelet described as a string. Supported values are ``'db1'``, ``'db2'``, ... ``'db20'``. What is called, for instance, `db3` is what is generally called the D6 wavelet (since it uses a kernel of size 6). The string ``'haar'`` is a synonym for ``'db1'``.
Returns
-------
wavedec2 : func(A[, levels])
Returns a function that takes an argument, `A`, input data that must be of the size specified above. It also takes an optional argument `levels`, where you can specify how many coefficient levels you plan to use. It will return an array with the coefficients of shape ``(2**levels, 2**levels)``.
waverec2 : func(coefs)
Returns a function that takes a single argument, `coefs`, the coefficients to use to reconstruct the spatial information.
Examples
--------
>>> import amitgroup as ag
>>> import amitgroup.util.wavelet
>>> import matplotlib.pylab as plt
>>> face = ag.io.load_example('faces')[0]
To compress a face and then inspect the results, let's first create the transform functions:
>>> wavedec2, waverec2 = ag.util.wavelet.daubechies_factory(face.shape, 'db8')
And then deconstruct a face to coefficients and the reconstruct it again. Since we only used 4 coefficient levels, information will be lost.
>>> new_face = waverec2(wavedec2(face, levels=4))
>>> ag.plot.images([face, new_face])
>>> plt.show()
"""
if isinstance(shape, int): # One dimensional!
Wgs, WgsT, Ws, WsT, max_level = _arrange_filter_matrices((shape, shape), wavelet)
def wavedec(A, levels=np.inf):
A = A.reshape((len(A), 1))
levels = min(max_level, levels)
coefs = Wgs[levels] * A
for l in range(levels-1, 0, -1):
N = 1 << l
coefs[:N] = Ws[max_level-l] * coefs[:N]
return np.asarray(coefs).flatten()
def waverec(coefs):
levels = int(np.log2(coefs.shape[0]))
A = coefs.reshape((len(coefs), 1)).copy()
for l in range(1, levels):
N = 1 << l
A[:N] = WsT[max_level-l] * A[:N]
A = WgsT[levels] * A
return np.asarray(A).flatten()
return wavedec, waverec
elif isinstance(shape, tuple) and len(shape) == 2: # 2 dimensional!
Wgs, WgsT, Ws, WsT, max_level = _arrange_filter_matrices(shape, wavelet)
def wavedec2(A, levels=np.inf):
levels = min(max_level, levels)
coefs = Wgs[levels] * A * WgsT[levels]
for l in range(levels-1, 0, -1):
N = 1 << l
L = max_level-l
coefs[:N,:N] = Ws[L] * coefs[:N,:N] * WsT[L]
return np.asarray(coefs)
        def waverec2(coefs, levels=np.inf):
            # the number of levels is inferred from the coefficient array; the
            # argument is kept for signature compatibility but is ignored
            levels = int(np.log2(coefs.shape[0]))
            A = coefs.copy()
for l in range(1, levels):
N = 1 << l
L = max_level-l
A[:N,:N] = WsT[L] * A[:N,:N] * Ws[L]
return np.asarray(WgsT[levels] * A * Wgs[levels])
return wavedec2, waverec2
else:
raise ValueError("Shape must be either integer or tuple of size two")
# CACHED 1-D
################################################################################
_db_wavedec_cache = {}
_db_waverec_cache = {}
def wavedec(A, wavelet='db2', levels=np.inf, length=None):
"""
Performs a 1D wavelet decomposition (forward transform).
.. note::
        This function runs :func:`daubechies_factory` for you and caches the result, so the first time you call it,
        performance will be slower than expected. Each call also incurs a dictionary lookup, which may cost on the order of 100 ns.
.. seealso::
:ref:`wavelet1d`
Parameters
----------
    A : ndarray
        1D input data. Length must be a power of two.
wavelet : str
Wavelet type. See :func:`daubechies_factory`.
levels : int
        Specify how many levels of coefficients you plan to use. The default is ``np.inf``, which will default to the maximum number possible, making the coefficient array the same length as `A`. Notice that `levels` is zero-based, in the sense that entering 0 is valid and will make the transform operate only on the energy-level coefficient.
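    Examples
    --------
    A minimal round-trip sketch (hypothetical data; with an orthogonal Daubechies
    filter the reconstruction recovers the input up to floating-point error):

    >>> import numpy as np
    >>> x = np.random.randn(64)
    >>> coefs = wavedec(x, 'db2')
    >>> x_rec = waverec(coefs, 'db2')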
"""
global _db_wavedec_cache, _db_waverec_cache
tup = (length or len(A), wavelet)
try:
dec = _db_wavedec_cache[tup]
except KeyError:
dec, rec = daubechies_factory(*tup)
_db_wavedec_cache[tup] = dec
_db_waverec_cache[tup] = rec
return dec(A, levels)
def waverec(coefs, wavelet='db2', length=None):
"""
Performs a 1D wavelet reconstruction (inverse transform).
In :func:`wavedec`, you specify `levels`, which is not done in this function since it can be inferred from the shape of `coefs`.
.. note::
        This function runs :func:`daubechies_factory` for you and caches the result, so the first time you call it,
        performance will be slower than expected. Each call also incurs a dictionary lookup, which may cost on the order of 100 ns.
.. seealso::
:ref:`wavelet1d`
Parameters
----------
    coefs : ndarray
        1D array of wavelet coefficients. Length must be a power of two.
wavelet : str
Wavelet type. See :func:`daubechies_factory`.
"""
global _db_wavedec_cache, _db_waverec_cache
tup = (length or len(coefs), wavelet)
try:
rec = _db_waverec_cache[tup]
except KeyError:
dec, rec = daubechies_factory(*tup)
_db_wavedec_cache[tup] = dec
_db_waverec_cache[tup] = rec
return rec(coefs)
# CACHED 2-D
################################################################################
_db_wavedec2_cache = {}
_db_waverec2_cache = {}
def wavedec2(A, wavelet='db2', levels=np.inf, shape=None):
"""
Performs a 2D wavelet decomposition (forward transform).
.. note::
        This function runs :func:`daubechies_factory` for you and caches the result, so the first time you call it,
        performance will be slower than expected. Each call also incurs a dictionary lookup, which may cost on the order of 100 ns.
.. seealso::
:ref:`wavelet2d`
Parameters
----------
A : ndarray
2D input data. Shape must be powers of two and square.
wavelet : str
Wavelet type. See :func:`daubechies_factory`.
levels : int
        Specify how many levels of coefficients you plan to use. The default is ``np.inf``, which will default to the maximum number possible, making the coefficient array the same size as `A`. Notice that `levels` is zero-based, in the sense that entering 0 is valid and will make the transform operate only on the energy-level coefficient.
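    Examples
    --------
    A minimal sketch (hypothetical data; per the contract above, four levels
    yield a ``(16, 16)`` coefficient array):

    >>> import numpy as np
    >>> A = np.random.randn(32, 32)
    >>> coefs = wavedec2(A, 'db2', levels=4)
    >>> coefs.shape
    (16, 16)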
"""
global _db_wavedec2_cache, _db_waverec2_cache
tup = (shape or A.shape, wavelet)
try:
dec = _db_wavedec2_cache[tup]
except KeyError:
dec, rec = daubechies_factory(*tup)
_db_wavedec2_cache[tup] = dec
_db_waverec2_cache[tup] = rec
return dec(A, levels)
def waverec2(coefs, wavelet='db2', shape=None):
"""
Performs a 2D wavelet reconstruction (inverse transform).
In :func:`wavedec2`, you specify `levels`, which is not done in this function since it can be inferred from the shape of `coefs`.
.. note::
        This function runs :func:`daubechies_factory` for you and caches the result, so the first time you call it,
        performance will be slower than expected. Each call also incurs a dictionary lookup, which may cost on the order of 100 ns.
.. seealso::
:ref:`wavelet2d`
Parameters
----------
    coefs : ndarray
        2D array of wavelet coefficients. Shape must be powers of two and square.
wavelet : str
Wavelet type. See :func:`daubechies_factory`.
"""
global _db_wavedec2_cache, _db_waverec2_cache
tup = (shape or coefs.shape, wavelet)
try:
rec = _db_waverec2_cache[tup]
except KeyError:
dec, rec = daubechies_factory(*tup)
_db_wavedec2_cache[tup] = dec
_db_waverec2_cache[tup] = rec
return rec(coefs)
# HELPER FUNCTIONS
################################################################################
def smart_flatten(coefficients):
"""
This flattens 2D coefficients in a smart way, so that all coefficients levels are grouped into contiguous blocks, starting from the low-frequency coefficients going to the high-frequency ones.
Notice that 1D coefficients are already flat and sorted.
Parameters
----------
coefficients : ndarray
Wavelet coefficients returned by :func:`wavedec2`.
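    Examples
    --------
    A minimal sketch (hypothetical coefficients; only ``(8, 8)`` input is
    currently supported):

    >>> import numpy as np
    >>> coefs = wavedec2(np.ones((8, 8)), 'db2')
    >>> smart_flatten(coefs).shape
    (64,)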
"""
assert coefficients.shape == (8, 8), "TODO: Has not been generalized, only works with shape (8, 8), not {0}".format(coefficients.shape)
olds = np.zeros(64)
olds[0] = coefficients[0,0]
olds[1] = coefficients[1,0]
olds[2] = coefficients[0,1]
olds[3] = coefficients[1,1]
olds[4:8] = coefficients[2:4,0:2].flatten()
olds[8:12] = coefficients[0:2,2:4].flatten()
olds[12:16] = coefficients[2:4,2:4].flatten()
olds[16:32] = coefficients[4:8,0:4].flatten()
olds[32:48] = coefficients[0:4,4:8].flatten()
olds[48:64] = coefficients[4:8,4:8].flatten()
return olds
def smart_deflatten(flatcoefs):
"""
Inverse function of :func:`smart_flatten`.
Parameters
----------
flatcoefs : ndarray
Flat array of coefficients returned by :func:`smart_flatten`.
"""
N = int(np.sqrt(len(flatcoefs)))
A = np.arange(N*N, dtype=int).reshape(N, N)
indices = new2old(A).astype(int)
new_indices = np.empty(indices.shape, dtype=int)
for i, index in enumerate(indices):
new_indices[index] = i
    news = flatcoefs[new_indices].reshape(N, N).copy()
return news
def structured_to_contiguous(structured_coefs):
"""
Converts a structured list-of-tuples-of-arrays-of-coefficients to a contiguous block.
The input format follows `PyWavelets <http://www.pybytes.com/pywavelets/>`_.
Works for both 1D and 2D coefficients.
Parameters
----------
structured_coefs : list
List of coefficients.
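    Examples
    --------
    A minimal 1D sketch (hypothetical coefficients in PyWavelets ordering:
    scaling coefficient first, then one array per wavelet level):

    >>> import numpy as np
    >>> structured = [np.array([1.0]), np.array([2.0]), np.array([3.0, 4.0])]
    >>> structured_to_contiguous(structured)
    array([1., 2., 3., 4.])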
"""
in2d = structured_coefs[0][0].ndim == 2
if in2d:
N = 1 << (len(structured_coefs)-1)
u = np.zeros((N, N))
u[0,0] = float(structured_coefs[0])
for level, c in enumerate(structured_coefs):
if level != 0:
S = len(c[0])
u[S:2*S,:S] = c[0]
u[:S,S:2*S] = c[1]
u[S:2*S,S:2*S] = c[2]
return u
else:
N = 1 << (len(structured_coefs)-1)
u = np.zeros(N)
u[0] = float(structured_coefs[0])
for level, c in enumerate(structured_coefs):
if level != 0:
S = len(c)
u[S:2*S] = c
return u
def contiguous_to_structured(contiguous_coefs, levels=np.inf):
"""
Convert from continguous array to a structured format (identical to the one used in PyWavelets).
Works for both 1D and 2D coefficients.
Parameters
----------
contiguous_coefs : ndarray
Coefficients as returned by our wavelet functions.
levels : int, optional
If you don't want all levels, you can set this value to specify how many you want. Notice that this
refers to levels of `wavelet` coefficients, which means that the scaling coefficient is not included and
will always be returned, even if `levels` is set to zero.
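    Examples
    --------
    A minimal 1D sketch (the inverse of the example in
    :func:`structured_to_contiguous`):

    >>> import numpy as np
    >>> contiguous_to_structured(np.array([1., 2., 3., 4.]))
    [array([1.]), array([2.]), array([3., 4.])]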
"""
    in2d = contiguous_coefs.ndim == 2
    N = int(np.log2(len(contiguous_coefs)))
coefs = []
if in2d:
coefs.append( contiguous_coefs[:1,:1] )
for level in range(min(levels, N)):
S = 1 << level
coefs.append( (contiguous_coefs[S:2*S,:S], contiguous_coefs[:S,S:2*S], contiguous_coefs[S:2*S,S:2*S]) )
else:
coefs.append( contiguous_coefs[:1] )
for level in range(min(levels, N)):
S = 1 << level
coefs.append( contiguous_coefs[S:2*S] )
return coefs
|
{"hexsha": "eaf47ba0f80a5fd970e220861856bd4f3effceff", "size": 25382, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyiacsun/sparse/wavelet.py", "max_stars_repo_name": "aasensio/pyiacsun", "max_stars_repo_head_hexsha": "56bdaca98461be7b927f8d5fbbc9e64517c889fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2015-10-30T17:38:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-04T19:11:34.000Z", "max_issues_repo_path": "pyiacsun/sparse/wavelet.py", "max_issues_repo_name": "aasensio/pyiacsun", "max_issues_repo_head_hexsha": "56bdaca98461be7b927f8d5fbbc9e64517c889fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-10-15T21:55:46.000Z", "max_issues_repo_issues_event_max_datetime": "2015-10-16T19:04:54.000Z", "max_forks_repo_path": "pyiacsun/sparse/wavelet.py", "max_forks_repo_name": "aasensio/pyiacsun", "max_forks_repo_head_hexsha": "56bdaca98461be7b927f8d5fbbc9e64517c889fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-10-18T17:20:31.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-10T00:47:36.000Z", "avg_line_length": 55.9074889868, "max_line_length": 957, "alphanum_fraction": 0.6966354109, "include": true, "reason": "import numpy,import scipy", "num_tokens": 8840}
|
libc.so.6`strdup
|
{"hexsha": "9b09daa0c12b4b98c50c3a5242077934139ad92e", "size": 53, "ext": "r", "lang": "R", "max_stars_repo_path": "test/unittest/vars/tst.ucaller.r", "max_stars_repo_name": "alan-maguire/dtrace-utils", "max_stars_repo_head_hexsha": "53b33a89ef7eaeba5ce06d50a4c73fe91c1fa99e", "max_stars_repo_licenses": ["UPL-1.0"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2018-04-16T14:28:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T14:36:05.000Z", "max_issues_repo_path": "test/unittest/vars/tst.ucaller.r", "max_issues_repo_name": "tjfontaine/dtrace-utils", "max_issues_repo_head_hexsha": "1bd5b3825ca0dd641694f795734b9bbbfd3f2ebb", "max_issues_repo_licenses": ["UPL-1.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-01-06T16:28:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-10T18:46:58.000Z", "max_forks_repo_path": "test/unittest/vars/tst.ucaller.r", "max_forks_repo_name": "tjfontaine/dtrace-utils", "max_forks_repo_head_hexsha": "1bd5b3825ca0dd641694f795734b9bbbfd3f2ebb", "max_forks_repo_licenses": ["UPL-1.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2018-07-23T22:35:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T01:04:36.000Z", "avg_line_length": 26.5, "max_line_length": 52, "alphanum_fraction": 0.2452830189, "num_tokens": 10}
|
# Developed for the LSST System Integration, Test and Commissioning Team.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
import asyncio
import math
from lsst_efd_client import EfdClient
import numpy as np
from .. import utils
__all__ = ('main',)
async def run(opts):
efd = EfdClient(opts.location)
cscs = utils.CSC.get_from_list(",".join(utils.OFFLINE_CSCS))
summary_state = 4 # OFFLINE
time_window = 120.0 # seconds
time_format = '%Y-%m-%dT%H:%M:%S.%f'
print("#########################################################")
print("# OFFLINE Report #")
print("#########################################################")
for csc in cscs:
ss_df = await efd.select_top_n(csc.efd_topic("logevent_summaryState"),
["private_sndStamp", "summaryState"],
1, csc.index)
is_camera = False
ods_topic = None
if "Camera" in csc.name:
ods_topic = "offlineDetailedState"
ods_df = await efd.select_top_n(csc.efd_topic(f"logevent_{ods_topic}"),
["private_sndStamp", "substate"],
2, csc.index)
is_camera = True
else:
ods_topic = "commandableByDDS"
ods_df = await efd.select_top_n(csc.efd_topic(f"logevent_{ods_topic}"),
["private_sndStamp", "state"],
1, csc.index)
sv_df = await efd.select_top_n(csc.efd_topic("logevent_softwareVersions"),
"*",
1, csc.index)
print("---------------------------------------------------------")
print(f"CSC: {csc.full_name}")
try:
ss_df = utils.convert_timestamps(ss_df, ["private_sndStamp"])
if ss_df.summaryState[0] != summary_state:
print("CSC not in OFFLINE State")
else:
print("CSC in OFFLINE State")
print(f"Time of Summary State: {ss_df.private_sndStamp[0].strftime(time_format)}")
except (AttributeError, KeyError):
print("summaryState event not present")
try:
ods_df = utils.convert_timestamps(ods_df, ["private_sndStamp"])
delta = utils.time_delta(ods_df.private_sndStamp.values[0],
ss_df.private_sndStamp.values[0])
if math.fabs(delta) > time_window:
print(f"Large delay in {ods_topic} publish: {delta:.1f} seconds")
if is_camera:
substate_order = np.array([1, 2])
ss_order = ods_df.substate.values
does_transition = np.all(ss_order == substate_order)
if does_transition:
print("Offline Detailed States Order Correct!")
else:
print(f"Incorrect Offline Detailed States Order: {ss_order}")
else:
if bool(ods_df.state.values[0]):
print("CSC Ready for DDS Commands!")
else:
print("CSC NOT Ready for DDS Commands!")
except (AttributeError, KeyError):
print(f"{ods_topic} event not present")
try:
sv_df = utils.convert_timestamps(sv_df, ["private_sndStamp"])
delta = utils.time_delta(utils.get_now(), sv_df.private_sndStamp.values[0])
print("softwareVersions present")
print(f"Publication time gap: {delta:.1f} seconds")
utils.check_correct_value(opts.xml, sv_df["xmlVersion"][0], "XML version")
utils.check_correct_value(opts.sal, sv_df["salVersion"][0], "SAL version")
except (AttributeError, KeyError):
print("softwareVersions event not present")
def main():
parser = utils.create_parser()
args = parser.parse_args()
asyncio.run(run(args))
|
{"hexsha": "ae15b8f05b3deba37910594df49a0003999ddedb", "size": 4317, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/lsst/integration_test_reporting/bin/offlinereport.py", "max_stars_repo_name": "lsst-sitcom/integration_test_reporting", "max_stars_repo_head_hexsha": "1d8790d03e87c0f1a3824116170ad389bb6944a8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/lsst/integration_test_reporting/bin/offlinereport.py", "max_issues_repo_name": "lsst-sitcom/integration_test_reporting", "max_issues_repo_head_hexsha": "1d8790d03e87c0f1a3824116170ad389bb6944a8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/lsst/integration_test_reporting/bin/offlinereport.py", "max_forks_repo_name": "lsst-sitcom/integration_test_reporting", "max_forks_repo_head_hexsha": "1d8790d03e87c0f1a3824116170ad389bb6944a8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9126213592, "max_line_length": 98, "alphanum_fraction": 0.5350938151, "include": true, "reason": "import numpy", "num_tokens": 908}
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures.env_wrappers import GymEnv
from rlalgos.tools import weight_init
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from rlalgos.reinforce_diayn.agent import (
DIAYNAgent,
DIAYNActionModel,
DIAYNBaselineModel,
DIAYNModel,
)
from rlalgos.reinforce_diayn.reinforce_diayn import Reinforce
import gym
from gym.wrappers import TimeLimit
# We write the 'create_env' and 'create_agent' function in the main file to allow these functions to be used with pickle when creating the batcher processes
def create_gym_env(env_name):
return gym.make(env_name)
# Create a rlstructures.VecEnv from multiple gym.Env, limiting the number of steps
def create_env(n_envs, env_name=None, max_episode_steps=None, seed=None):
envs = []
for k in range(n_envs):
e = create_gym_env(env_name)
e = TimeLimit(e, max_episode_steps=max_episode_steps)
envs.append(e)
return GymEnv(envs, seed)
# Create a rlstructures.Agent
def create_agent(model, n_actions=1):
return DIAYNAgent(model=model, n_actions=n_actions)
class Experiment(Reinforce):
def __init__(self, config, create_env, create_agent):
super().__init__(config, create_env, create_agent)
def _create_model(self):
action_model = DIAYNActionModel(
self.obs_dim, self.n_actions, 16, self.config["n_policies"]
)
baseline_model = DIAYNBaselineModel(self.obs_dim, 16, self.config["n_policies"])
return DIAYNModel(action_model, baseline_model)
def _create_discriminator(self):
classifier = nn.Linear(self.obs_dim, self.config["n_policies"])
classifier.apply(weight_init)
return classifier
if __name__ == "__main__":
print(
"DISCLAIMER: DIAYN is just provided as an example. It has not been tested deeply !!"
)
# We use spawn mode such that most of the environment will run in multiple processes
import torch.multiprocessing as mp
mp.set_start_method("spawn")
config = {
"env_name": "CartPole-v0",
"n_envs": 4,
"max_episode_steps": 100,
"env_seed": 42,
"n_processes": 4,
"n_evaluation_processes": 2,
"n_evaluation_envs": 128,
"time_limit": 3600,
"lr": 0.01,
"lr_discriminator": 0.01,
"discount_factor": 0.9,
"baseline_coef": 0.1,
"discriminator_coef": 1.0,
"entropy_coef": 0.01,
"reinforce_coef": 1.0,
"evaluation_mode": "stochastic",
"logdir": "./results",
"n_policies": 5,
}
exp = Experiment(config, create_env, create_agent)
exp.run()
|
{"hexsha": "3e80bc9cb0e987045062da00cc260c1212eac4f2", "size": 2865, "ext": "py", "lang": "Python", "max_stars_repo_path": "rlalgos/reinforce_diayn/run_diayn.py", "max_stars_repo_name": "Purple-PI/rlstructures", "max_stars_repo_head_hexsha": "9b201b083715bbda2f3534b010c84e11dfc0a1c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 281, "max_stars_repo_stars_event_min_datetime": "2021-01-13T14:20:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T08:46:56.000Z", "max_issues_repo_path": "rlalgos/reinforce_diayn/run_diayn.py", "max_issues_repo_name": "Purple-PI/rlstructures", "max_issues_repo_head_hexsha": "9b201b083715bbda2f3534b010c84e11dfc0a1c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-01-22T23:28:34.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-29T22:05:42.000Z", "max_forks_repo_path": "rlalgos/reinforce_diayn/run_diayn.py", "max_forks_repo_name": "Purple-PI/rlstructures", "max_forks_repo_head_hexsha": "9b201b083715bbda2f3534b010c84e11dfc0a1c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2021-01-15T14:53:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T11:12:54.000Z", "avg_line_length": 30.1578947368, "max_line_length": 156, "alphanum_fraction": 0.6869109948, "include": true, "reason": "import numpy", "num_tokens": 740}
|
import re
import json
from datetime import timedelta
import pandas as pd
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import argparse
import os
class ProcessTweets(object):
def __init__(self, filename, outname):
self.filename = filename
self.outname = outname
json_file = open(filename)
json_str = json_file.read()
self.json = json.loads(json_str)
self.sid = SentimentIntensityAnalyzer()
def clean_tweet(self, tweet):
        return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def get_sentiment(self, tweet):
polarity_scores = self.sid.polarity_scores(tweet)
return polarity_scores['neg'], polarity_scores['pos'], polarity_scores['neu']
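    # Note: VADER's polarity_scores also returns a 'compound' score in [-1, 1];
    # only the neg/pos/neu components are used here.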
def get_tweets(self):
df = pd.DataFrame.from_dict(self.json)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.sort_values(by=['timestamp'], inplace=True, ascending=True)
df.reset_index(inplace=True)
self.json = df.to_dict()
timestamps = self.json['timestamp']
start_date = pd.to_datetime(timestamps[0])
end_date = start_date + timedelta(hours=1)
sentiments = dict()
temp = []
tweets = self.json['text']
for count, tweet in enumerate(tweets, start=0):
tweet = tweets[tweet]
curr_time = timestamps[count]
            if not isinstance(tweet, str):
                # skip malformed entries that are not text
                continue
if curr_time >= start_date and curr_time < end_date:
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
temp.append([neg, pos, neu])
else:
means = np.mean(np.asarray(temp), axis=0)
obj = {'neg': means[0], 'pos': means[1], 'neu': means[2]}
sentiments[start_date.strftime("%Y-%m-%d %H:%M:%S")] = obj
temp = []
start_date = end_date
end_date = start_date + timedelta(hours=1)
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
            temp.append([neg, pos, neu])
        # flush the final (possibly partial) window, which the loop above never writes
        if temp:
            means = np.mean(np.asarray(temp), axis=0)
            sentiments[start_date.strftime("%Y-%m-%d %H:%M:%S")] = {'neg': means[0], 'pos': means[1], 'neu': means[2]}
        tmp_df = pd.DataFrame.from_dict(sentiments)
neg = tmp_df.loc['neg', :]
pos = tmp_df.loc['pos', :]
neu = tmp_df.loc['neu', :]
df = pd.DataFrame()
df['neg'] = neg
df['pos'] = pos
df['neu'] = neu
df = df.set_index(pd.to_datetime(tmp_df.columns.values))
df.index.name = 'date'
df.to_csv(self.outname, sep=',')
def main():
ap = argparse.ArgumentParser()
ap.add_argument('--input_dir', required=True, help='directory containing json files from twitterscraper')
ap.add_argument('--output_dir', required=True, help='directory of resulting sentiment csv files')
args = ap.parse_args()
if(not os.path.exists(args.output_dir)):
os.makedirs(args.output_dir)
files = []
for dirpath, dirnames, filenames in os.walk(args.input_dir):
for f in filenames:
if f.split('.')[-1] == 'json':
files.append((f.split('.')[0], os.path.join(dirpath, f)))
for f in files:
ProcessTweets(f[1], os.path.join(args.output_dir, f[0] + '.csv')).get_tweets()
if __name__ == "__main__":
main()
|
{"hexsha": "815601f799cb4e0c512db183200d6102c4717299", "size": 3396, "ext": "py", "lang": "Python", "max_stars_repo_path": "hisa/learn/sentiment/sentiment.py", "max_stars_repo_name": "rittikaadhikari/stock-recommendation", "max_stars_repo_head_hexsha": "1f14276a955301b1c6fa1c00bd88b00cf5668d8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hisa/learn/sentiment/sentiment.py", "max_issues_repo_name": "rittikaadhikari/stock-recommendation", "max_issues_repo_head_hexsha": "1f14276a955301b1c6fa1c00bd88b00cf5668d8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hisa/learn/sentiment/sentiment.py", "max_forks_repo_name": "rittikaadhikari/stock-recommendation", "max_forks_repo_head_hexsha": "1f14276a955301b1c6fa1c00bd88b00cf5668d8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.96, "max_line_length": 109, "alphanum_fraction": 0.5948174323, "include": true, "reason": "import numpy", "num_tokens": 789}
|
#ifndef MPLLIBS_METAMONAD_V1_IF__HPP
#define MPLLIBS_METAMONAD_V1_IF__HPP
// Copyright Abel Sinkovics (abel@sinkovics.hu) 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <mpllibs/metamonad/v1/fwd/if_.hpp>
#include <boost/mpl/eval_if.hpp>
namespace mpllibs
{
namespace metamonad
{
namespace v1
{
// MPLLIBS_V1_METAFUNCTION depends on it
template <class C, class T, class F>
struct if_ : boost::mpl::eval_if<typename C::type, T, F> {};
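      // Usage sketch (hypothetical arguments; C must be a nullary boolean
      // metafunction, e.g. boost::mpl::true_):
      //   if_<boost::mpl::true_, boost::mpl::int_<1>, boost::mpl::int_<2>>::type
      //   // -> boost::mpl::int_<1>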
}
}
}
#endif
|
{"hexsha": "f7b7b727cf59be2f5164fcea12635cf02241e023", "size": 625, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "mpllibs/metamonad/v1/if_.hpp", "max_stars_repo_name": "sabel83/mpllibs", "max_stars_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 70.0, "max_stars_repo_stars_event_min_datetime": "2015-01-15T09:05:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T15:49:31.000Z", "max_issues_repo_path": "mpllibs/metamonad/v1/if_.hpp", "max_issues_repo_name": "sabel83/mpllibs", "max_issues_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2015-06-18T19:25:34.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-13T19:49:51.000Z", "max_forks_repo_path": "mpllibs/metamonad/v1/if_.hpp", "max_forks_repo_name": "sabel83/mpllibs", "max_forks_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2015-07-10T08:18:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T07:17:57.000Z", "avg_line_length": 22.3214285714, "max_line_length": 66, "alphanum_fraction": 0.696, "num_tokens": 192}
|
def n: ℕ := 1
lemma p: 0 = 0 := eq.refl 0
lemma s: "Hello Lean!" = "Hello Lean!" := eq.refl "Hello Lean!"
lemma s1: tt = tt := eq.refl tt
/-def p' : 0 = 0 := eq.refl 1-/
theorem s' : 0 = 0 := eq.refl 0
lemma oeqo : 1 = 1 := eq.refl 1
lemma teqt: 2 = 1 + 1 := eq.refl (1+1)
lemma h : "Hello" = "He" ++ "llo" := rfl
lemma pp : 3*3 + 4*4 = 5*5 := rfl
lemma tthof : 2 + 3 = 1 + 4 := rfl
lemma hpleqhl : "Hello " ++ "Lean!" = "Hello Lean!" := rfl
#check "Hello" = "Hello"
|
{"author": "hanzhi713", "repo": "lean-proofs", "sha": "4d8356a878645b9ba7cb036f87737f3f1e68ede5", "save_path": "github-repos/lean/hanzhi713-lean-proofs", "path": "github-repos/lean/hanzhi713-lean-proofs/lean-proofs-4d8356a878645b9ba7cb036f87737f3f1e68ede5/src/lessons/lesson1.lean"}
|
jrz may add some information about himself or herself here.
20091203 23:14:18 Hello, I fixed your comment on the Starbucks page. In order to add a link you have to use square brackets. You put the URL, a space, and then the text you want after the space. Users/hankim
20091203 23:29:54 Great, thanks hankim! Users/jrz
|
{"hexsha": "feb85484d410a83bc967179457eee0abfc8fd0f5", "size": 337, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/jrz.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/jrz.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/jrz.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.1428571429, "max_line_length": 218, "alphanum_fraction": 0.7685459941, "num_tokens": 94}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def cuda(x):
if torch.cuda.is_available():
return x.cuda()
else :
return x
class LossMulti:
def __init__(self, jaccard_weight=0, num_classes=2):
#self.nll_loss = nn.CrossEntropyLoss()
self.nll_loss = nn.NLLLoss()
self.jaccard_weight = jaccard_weight
self.num_classes = num_classes
    def __call__(self, outputs, targets):
        # cross-entropy term: log-softmax followed by negative log-likelihood
        output = F.log_softmax(outputs, dim=1)
        loss = self.nll_loss(output, targets)
        eps = 1e-7
        num_classes = outputs.shape[1]
        # one-hot encode the integer targets: (B, H, W) -> (B, C, H, W)
        true_1_hot = torch.eye(num_classes)[targets.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        probas = F.softmax(outputs, dim=1)
        true_1_hot = true_1_hot.type(outputs.type())
        # sum over the batch and spatial dims, keeping the class dim
        # (probas.ndimension() is used so this also works for (B, H, W) targets)
        dims = (0,) + tuple(range(2, probas.ndimension()))
        intersection = torch.sum(probas * true_1_hot, dims)
        cardinality = torch.sum(probas + true_1_hot, dims)
        # soft Dice score 2|A.B|/(|A|+|B|) and Jaccard index |A.B|/|A u B|
        score = 2. * intersection / (cardinality + eps)
        jaccard = intersection / (cardinality - intersection + eps)
        dice_loss = 1. - score.mean()
        return loss, dice_loss, jaccard.mean()
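# A minimal usage sketch (hypothetical shapes: batch of 2, 3 classes, 4x4 maps;
# integer class-index targets as assumed by NLLLoss):
# criterion = LossMulti(num_classes=3)
# outputs = torch.randn(2, 3, 4, 4)
# targets = torch.randint(0, 3, (2, 4, 4))
# ce, dice_loss, mean_jaccard = criterion(outputs, targets)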
|
{"hexsha": "f68283674565491976e39c5d0d339d1bdf371962", "size": 1365, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/losses.py", "max_stars_repo_name": "bharat3012/Attention_LadderNet", "max_stars_repo_head_hexsha": "66d9bc2b389540fc297d22e7a35e200480b63764", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-02-23T13:32:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T08:37:41.000Z", "max_issues_repo_path": "src/losses.py", "max_issues_repo_name": "bharat3012/LadNetx", "max_issues_repo_head_hexsha": "66d9bc2b389540fc297d22e7a35e200480b63764", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/losses.py", "max_forks_repo_name": "bharat3012/LadNetx", "max_forks_repo_head_hexsha": "66d9bc2b389540fc297d22e7a35e200480b63764", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-28T04:54:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-28T04:54:25.000Z", "avg_line_length": 29.0425531915, "max_line_length": 66, "alphanum_fraction": 0.6395604396, "include": true, "reason": "import numpy", "num_tokens": 353}
|
#include <boost/hana/fwd/concept/sequence.hpp>
|
{"hexsha": "4dbd302f55e3a3f524f0214dfde5ea7b6d1107e6", "size": 47, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_hana_fwd_concept_sequence.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_hana_fwd_concept_sequence.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_hana_fwd_concept_sequence.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 23.5, "max_line_length": 46, "alphanum_fraction": 0.7872340426, "num_tokens": 13}
|
[STATEMENT]
lemma PD7: "Der_1 \<phi> \<Longrightarrow> Der_2 \<phi> \<Longrightarrow> \<forall>A. \<phi>(\<phi>\<^sup>d A) \<^bold>\<preceq> \<phi>(\<phi> A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Der_1 \<phi>; Der_2 \<phi>\<rbrakk> \<Longrightarrow> \<forall>A. contains (\<phi> (\<phi> A)) (\<phi> (\<phi>\<^sup>d A))
[PROOF STEP]
by (metis (mono_tags, lifting) PC1 PD1 PD6)
|
{"llama_tokens": 167, "file": "Topological_Semantics_topo_operators_derivative", "length": 1}
|
import os
import matplotlib.pyplot as plt
import numpy as np
class Logger(object):
def __init__(self, result_dir, model) -> None:
self.result_dir = result_dir
self.model = model
self.prepare_log_file()
def prepare_log_file(self):
log_path = self.result_dir / "log.csv"
if not os.path.isfile(log_path):
os.makedirs(self.result_dir, exist_ok=True)
with open(log_path, "w") as f:
header_key = [
"w_1hot",
"w_key_unique",
"beta",
"gamma",
"num_sweeps",
"trotter",
"num_reads",
"energy_avg",
"energy_std",
"energy_min",
"valids",
"cost_min",
"keys",
]
f.write(",".join(header_key) + "\n")
def log(self, weight, annealing_params, states):
energies = np.array([self.model.energy(state, **weight) for state in states])
valids = np.array([self.model.validate(state) for state in states])
valid_states = states[valids]
valid_costs = energies[valids]
# write statics
with open(self.result_dir / "log.csv", "a") as f:
values = [
weight["w_1hot"],
weight["w_key_unique"],
annealing_params["beta"],
annealing_params["gamma"],
annealing_params["num_sweeps"],
annealing_params["trotter"],
annealing_params["num_reads"],
np.average(energies),
np.std(energies),
np.min(energies),
sum(valids),
]
values_str = [str(v) for v in values]
f.write(",".join(values_str) + ",")
if len(valid_states) == 0:
print("There are no valid results.")
with open(self.result_dir / "log.csv", "a") as f:
f.write(",\n")
return
cost_min = valid_costs.min()
min_state = valid_states[valid_costs.argmin()]
keymap = self.model.keys_from_state(min_state)
print("cost_min:", cost_min)
print("key_map:")
for row in keymap:
print(row)
with open(self.result_dir / "log.csv", "a") as f:
f.write(str(cost_min) + ',"' + "".join(keymap.flatten()) + '"\n')
detail_dir = self.result_dir / str(cost_min)
if not os.path.isdir(detail_dir):
os.makedirs(detail_dir, exist_ok=True)
self.save_keymap(keymap, detail_dir)
    def save_keymap(self, keys, out_dir):
ly, lx = keys.shape
fig = plt.figure(figsize=(lx, ly))
ax = fig.add_subplot()
ax.set_aspect("equal")
# plot keyboard grid
plt.xticks(range(lx + 1))
plt.yticks(range(ly + 1))
ax.invert_yaxis()
ax.grid()
for y, row in enumerate(keys):
for x, char in enumerate(row):
ax.text(
x + 0.5,
y + 0.5,
char,
size=20,
horizontalalignment="center",
verticalalignment="center",
)
plt.tight_layout()
# plt.show()
        plt.savefig(out_dir / "keymap.png")
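# A minimal usage sketch (hypothetical objects; `model` must provide
# energy(state, **weight), validate(state) and keys_from_state(state),
# and `result_dir` is a pathlib.Path):
# logger = Logger(Path("./results"), model)
# logger.log(weight, annealing_params, states)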
|
{"hexsha": "68105a08bec83b1bc774e67f8a57787aef04eb23", "size": 3443, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantum_keymap/logger.py", "max_stars_repo_name": "aoirohn/quantum_keymap", "max_stars_repo_head_hexsha": "61a7c817c81d75b4fc4ccf4ed55573f4f38b18f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-22T14:21:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-23T11:59:04.000Z", "max_issues_repo_path": "quantum_keymap/logger.py", "max_issues_repo_name": "aoirohn/quantum_keymap", "max_issues_repo_head_hexsha": "61a7c817c81d75b4fc4ccf4ed55573f4f38b18f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quantum_keymap/logger.py", "max_forks_repo_name": "aoirohn/quantum_keymap", "max_forks_repo_head_hexsha": "61a7c817c81d75b4fc4ccf4ed55573f4f38b18f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-21T15:06:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-21T15:06:59.000Z", "avg_line_length": 31.3, "max_line_length": 85, "alphanum_fraction": 0.4844612257, "include": true, "reason": "import numpy", "num_tokens": 743}
|
"""
Mock / synthetic data objects for use in testing.
"""
import numpy as np
from sourcefinder.accessors.dataaccessor import DataAccessor
from sourcefinder.utility.coordinates import WCS
import datetime
class Mock(object):
def __init__(self, returnvalue=None):
self.callcount = 0
self.callvalues = []
self.returnvalue = returnvalue
def __call__(self, *args, **kwargs):
self.callcount += 1
self.callvalues.append((args, kwargs))
return self.returnvalue
def make_wcs(crval=None,
cdelt=None,
crpix=None
):
"""
Make a WCS object for insertion into a synthetic image.
Args:
crval (tuple): Tuple of (RA, Dec) in decimal degrees at the reference
position.
        cdelt (tuple): Tuple of (cdelt0, cdelt1) in decimal degrees.
            This is the pixel width in degrees of arc, but not necessarily
            aligned to RA, Dec unless `crota` is (0,0). If that *is* the case,
            then typically cdelt0 is negative since the x-axis is in direction
            of West (decreasing RA).
        crpix (tuple): Tuple of (x,y) co-ordinates describing the reference
            pixel location corresponding to the crval sky-position.
"""
# For any arguments not set we simply assign an arbitrary valid value:
if crval is None:
crval = 100., 45.
if cdelt is None:
pixel_width_arcsec = 40
pixel_width_deg = pixel_width_arcsec / 3600.
cdelt = (-pixel_width_deg, pixel_width_deg)
if crpix is None:
crpix = (256.0, 256.0)
wcs = WCS()
wcs.cdelt = cdelt
wcs.crota = (0.0, 0.0)
wcs.crpix = crpix
wcs.crval = crval
wcs.ctype = ('RA---SIN', 'DEC--SIN')
wcs.cunit = ('deg', 'deg')
return wcs
class SyntheticImage(DataAccessor):
def __init__(self,
wcs=None,
data=None,
beam=(1.5,1.5,0),
freq_eff=150e6,
freq_bw=2e6,
tau_time=1800,
taustart_ts=datetime.datetime(2015,1,1)
):
"""
Generate a synthetic image for use in tests
Args:
wcs (tkp.utility.coordinates.WCS): WCS for the image.
data (array_like): Data for the image. Default is a 512x512 array of
zeroes.
            beam (tuple): Beam semi-major axis (in pixels), semi-minor axis
                (pixels) and position angle (radians).
            freq_eff (float): Effective frequency of the image in Hz.
                That is, the mean frequency of all the visibility data which
                comprises this image.
            freq_bw (float): The frequency bandwidth of this image in Hz.
            tau_time (float): Total time on sky in seconds.
            taustart_ts (datetime.datetime): Timestamp of the first integration
                which constitutes part of this image.
"""
self.url = "SyntheticImage"
self.wcs = wcs
if self.wcs is None:
self.wcs = make_wcs()
self.data = data
if self.data is None:
self.data = np.zeros((512,512))
self.beam = beam
self.freq_eff = freq_eff
self.freq_bw = freq_bw
self.tau_time = tau_time
self.taustart_ts = taustart_ts
self.pixelsize = self.parse_pixelsize()
self.centre_ra, self.centre_decl = self.calculate_phase_centre()
def calculate_phase_centre(self):
x, y = self.data.shape
centre_ra, centre_decl = self.wcs.p2s((x / 2, y / 2))
return centre_ra, centre_decl
|
{"hexsha": "6a6426a093b4297828d8843b82a78d7e2d576a83", "size": 3645, "ext": "py", "lang": "Python", "max_stars_repo_path": "sourcefinder/testutil/mock.py", "max_stars_repo_name": "transientskp/PySE", "max_stars_repo_head_hexsha": "9a59e2f5b4ec50ff5b0d735cfe76c6b9eeaa88ae", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-07-02T17:34:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T11:43:00.000Z", "max_issues_repo_path": "sourcefinder/testutil/mock.py", "max_issues_repo_name": "transientskp/PySE", "max_issues_repo_head_hexsha": "9a59e2f5b4ec50ff5b0d735cfe76c6b9eeaa88ae", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2017-05-31T15:09:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-06T08:43:10.000Z", "max_forks_repo_path": "sourcefinder/testutil/mock.py", "max_forks_repo_name": "transientskp/PySE", "max_forks_repo_head_hexsha": "9a59e2f5b4ec50ff5b0d735cfe76c6b9eeaa88ae", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-05-31T14:25:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-24T09:32:49.000Z", "avg_line_length": 34.7142857143, "max_line_length": 83, "alphanum_fraction": 0.5917695473, "include": true, "reason": "import numpy", "num_tokens": 884}
|
[STATEMENT]
lemma ru_t_event: "reaches_on ru_t t ts t' \<Longrightarrow> t = l_t0 \<Longrightarrow> ru_t t' = Some (t'', x) \<Longrightarrow>
\<exists>rho e tt. t' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length ts) \<and>
x = \<tau> \<sigma> (length ts)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>reaches_on ru_t t ts t'; t = l_t0; ru_t t' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. t' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length ts) \<and> x = \<tau> \<sigma> (length ts)
[PROOF STEP]
proof (induction t ts t' arbitrary: t'' x rule: reaches_on_rev_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s t'' x. \<lbrakk>s = l_t0; ru_t s = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length []) \<and> x = \<tau> \<sigma> (length [])
2. \<And>s s' v vs s'' t'' x. \<lbrakk>reaches_on ru_t s vs s'; \<And>t'' x. \<lbrakk>s = l_t0; ru_t s' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> x = \<tau> \<sigma> (length vs); ru_t s' = Some (s'', v); s = l_t0; ru_t s'' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
case (1 s)
[PROOF STATE]
proof (state)
this:
s = l_t0
ru_t s = Some (t'', x)
goal (2 subgoals):
1. \<And>s t'' x. \<lbrakk>s = l_t0; ru_t s = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length []) \<and> x = \<tau> \<sigma> (length [])
2. \<And>s s' v vs s'' t'' x. \<lbrakk>reaches_on ru_t s vs s'; \<And>t'' x. \<lbrakk>s = l_t0; ru_t s' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> x = \<tau> \<sigma> (length vs); ru_t s' = Some (s'', v); s = l_t0; ru_t s'' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>rho e tt. s = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length []) \<and> x = \<tau> \<sigma> (length [])
[PROOF STEP]
using 1 reaches_on_run_hd[OF reaches_on.intros(1)]
[PROOF STATE]
proof (prove)
using this:
s = l_t0
ru_t s = Some (t'', x)
run_hd init_hd = Some (?s', ?t, ?X) \<Longrightarrow> ?t = \<tau> \<sigma> (length []) \<and> ?X = \<Gamma> \<sigma> (length [])
goal (1 subgoal):
1. \<exists>rho e tt. s = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length []) \<and> x = \<tau> \<sigma> (length [])
[PROOF STEP]
by (auto simp: t0_def split: option.splits intro!: reaches_on.intros)
[PROOF STATE]
proof (state)
this:
\<exists>rho e tt. s = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length []) \<and> x = \<tau> \<sigma> (length [])
goal (1 subgoal):
1. \<And>s s' v vs s'' t'' x. \<lbrakk>reaches_on ru_t s vs s'; \<And>t'' x. \<lbrakk>s = l_t0; ru_t s' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> x = \<tau> \<sigma> (length vs); ru_t s' = Some (s'', v); s = l_t0; ru_t s'' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s s' v vs s'' t'' x. \<lbrakk>reaches_on ru_t s vs s'; \<And>t'' x. \<lbrakk>s = l_t0; ru_t s' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> x = \<tau> \<sigma> (length vs); ru_t s' = Some (s'', v); s = l_t0; ru_t s'' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
case (2 s s' v vs s'')
[PROOF STATE]
proof (state)
this:
reaches_on ru_t s vs s'
ru_t s' = Some (s'', v)
\<lbrakk>s = l_t0; ru_t s' = Some (?t'', ?x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> ?x = \<tau> \<sigma> (length vs)
s = l_t0
ru_t s'' = Some (t'', x)
goal (1 subgoal):
1. \<And>s s' v vs s'' t'' x. \<lbrakk>reaches_on ru_t s vs s'; \<And>t'' x. \<lbrakk>s = l_t0; ru_t s' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> x = \<tau> \<sigma> (length vs); ru_t s' = Some (s'', v); s = l_t0; ru_t s'' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
obtain rho e tt where rho_def: "s' = Some (e, tt)" "reaches_on run_hd init_hd rho e"
"length rho = Suc (length vs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>e tt rho. \<lbrakk>s' = Some (e, tt); reaches_on run_hd init_hd rho e; length rho = Suc (length vs)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using 2(3)[OF 2(4,2)]
[PROOF STATE]
proof (prove)
using this:
\<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> v = \<tau> \<sigma> (length vs)
goal (1 subgoal):
1. (\<And>e tt rho. \<lbrakk>s' = Some (e, tt); reaches_on run_hd init_hd rho e; length rho = Suc (length vs)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
s' = Some (e, tt)
reaches_on run_hd init_hd rho e
length rho = Suc (length vs)
goal (1 subgoal):
1. \<And>s s' v vs s'' t'' x. \<lbrakk>reaches_on ru_t s vs s'; \<And>t'' x. \<lbrakk>s = l_t0; ru_t s' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length vs) \<and> x = \<tau> \<sigma> (length vs); ru_t s' = Some (s'', v); s = l_t0; ru_t s'' = Some (t'', x)\<rbrakk> \<Longrightarrow> \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
s' = Some (e, tt)
reaches_on run_hd init_hd rho e
length rho = Suc (length vs)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
s' = Some (e, tt)
reaches_on run_hd init_hd rho e
length rho = Suc (length vs)
goal (1 subgoal):
1. \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
using 2(2,5) reaches_on_app[OF rho_def(2)] reaches_on_run_hd[OF rho_def(2)]
[PROOF STATE]
proof (prove)
using this:
s' = Some (e, tt)
reaches_on run_hd init_hd rho e
length rho = Suc (length vs)
ru_t s' = Some (s'', v)
ru_t s'' = Some (t'', x)
run_hd e = Some (?s'', ?v) \<Longrightarrow> reaches_on run_hd init_hd (rho @ [?v]) ?s''
run_hd e = Some (?s', ?t, ?X) \<Longrightarrow> ?t = \<tau> \<sigma> (length rho) \<and> ?X = \<Gamma> \<sigma> (length rho)
goal (1 subgoal):
1. \<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
[PROOF STEP]
by (fastforce split: option.splits)
[PROOF STATE]
proof (state)
this:
\<exists>rho e tt. s'' = Some (e, tt) \<and> reaches_on run_hd init_hd rho e \<and> length rho = Suc (length (vs @ [v])) \<and> x = \<tau> \<sigma> (length (vs @ [v]))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3679, "file": "VYDRA_MDL_Monitor", "length": 15}
|
# -*- coding: utf-8 -*-
import numpy as np
import talib # pylint: skip-file
import config
from app.ta.helpers import indicator, nan_to_null
Config = config.BaseConfig()
# Elliott Wave Oscillator:
@indicator("EWO", ["ewo"])
def EWO(data, limit, fast=5, slow=35):
close = data.close
real = talib.EMA(close, fast) - talib.EMA(close, slow)
return {"ewo": nan_to_null(real.tolist()[-limit:]), "info": []}
# Keltner channels:
@indicator("KC", ["upper_band", "middle_band", "lower_band"])
def KC(data, limit):
    # Keltner Channels (classic variant: this implementation uses a simple
    # moving average for the middle line, while many modern definitions use an EMA)
    # Middle Line: 20-period simple moving average
    # Upper Channel Line: middle line + (2 x ATR(10))
    # Lower Channel Line: middle line - (2 x ATR(10))
close = data.close
high = data.high
low = data.low
mid = talib.SMA(close, 20)
upperch = mid + (2 * talib.ATR(high, low, close, 10))
lowerch = mid - (2 * talib.ATR(high, low, close, 10))
return {
"middle_band": nan_to_null(mid.tolist()[-limit:]),
"upper_band": nan_to_null(upperch.tolist()[-limit:]),
"lower_band": nan_to_null(lowerch.tolist()[-limit:]),
"info": [],
}
# Ichimoku Cloud:
@indicator("ICM", ["leading_span_a", "leading_span_b", "base_line"])
def ICM(data, limit):
margin = Config.MARGIN
high, low, close = data.high, data.low, data.close
close_size = close.size
# Tenkan-sen (Conversion Line): (9-period high + 9-period low)/2))
n1 = 9
conversion_line = [0] * (n1 - 1)
for i in range(n1, close_size):
conversion_line.append((np.max(high[i - n1 : i]) + np.min(low[i - n1 : i])) / 2)
conversion_line = np.array(conversion_line)
# Kijun-sen (Base Line): (26-period high + 26-period low)/2))
n2 = 26
base_line = [0] * (n2 - 1)
for i in range(n2, close_size):
base_line.append((np.max(high[i - n2 : i]) + np.min(low[i - n2 : i])) / 2)
base_line = np.array(base_line)
# Senkou Span A (Leading Span A): (Conversion Line + Base Line)/2))
leading_span_a = (conversion_line + base_line) / 2
# Senkou Span B (Leading Span B): (52-period high + 52-period low)/2))
n3 = 52
leading_span_b = [0] * (n3 - 1)
for i in range(n3, close_size):
leading_span_b.append((np.max(high[i - n3 : i]) + np.min(low[i - n3 : i])) / 2)
leading_span_b = np.array(leading_span_b)
    # Trim to the requested window plus a margin used for the token checks below
leading_span_a = leading_span_a[-(limit + margin) :]
leading_span_b = leading_span_b[-(limit + margin) :]
# Tokens
info = []
actual_a = leading_span_a[-margin]
actual_b = leading_span_b[-margin]
if actual_a >= actual_b and close[-1] < actual_a:
if close[-1] < actual_b:
info.append("PIERCED_UP")
else:
info.append("IN_CLOUD_UP")
elif actual_b > actual_a and close[-1] > actual_a:
if close[-1] > actual_b:
info.append("PIERCED_DOWN")
else:
info.append("IN_CLOUD_DOWN")
width = np.abs(leading_span_a - leading_span_b)
    # np.percentile expects percentages in [0, 100], so use 80 and 25, not fractions
    p1 = np.percentile(width, 80)
    p2 = np.percentile(width, 25)
if width[-margin] >= p1:
info.append("CLOUD_WIDE")
elif width[-margin] <= p2:
info.append("CLOUD_THIN")
return {
"leading_span_a": nan_to_null(leading_span_a.tolist()),
"leading_span_b": nan_to_null(leading_span_b.tolist()),
"base_line": nan_to_null(base_line.tolist()[-limit:]),
"info": info,
}
|
{"hexsha": "06102507c0d4395bf8ac96405a0106eebb916074", "size": 3458, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/app/ta/indicators/custom.py", "max_stars_repo_name": "bitreport-org/Bitreport", "max_stars_repo_head_hexsha": "f480c410d340e57645a9a23d12fe2a2d3d2add39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-29T10:50:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T03:29:42.000Z", "max_issues_repo_path": "core/app/ta/indicators/custom.py", "max_issues_repo_name": "bitreport-org/Bitreport", "max_issues_repo_head_hexsha": "f480c410d340e57645a9a23d12fe2a2d3d2add39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-10-11T09:15:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T01:46:38.000Z", "max_forks_repo_path": "core/app/ta/indicators/custom.py", "max_forks_repo_name": "bitreport-org/Bitreport", "max_forks_repo_head_hexsha": "f480c410d340e57645a9a23d12fe2a2d3d2add39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-15T10:46:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-15T10:46:04.000Z", "avg_line_length": 31.4363636364, "max_line_length": 88, "alphanum_fraction": 0.6098901099, "include": true, "reason": "import numpy", "num_tokens": 1037}
|
import os
import torch
def discount(sequence, gamma=0.99):
    """Compute discounted returns: R_t = r_t + gamma * R_{t+1}."""
    R = 0
    returns = []
    for r in sequence[::-1]:
        R = r + gamma * R
        returns.insert(0, R)
    return returns
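# A quick hand-checked sanity example for discount():
# discount([1, 1, 1], gamma=0.5) == [1.75, 1.5, 1.0]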
def makedir():
if not os.path.exists('./exp'):
os.makedirs('./exp/model')
os.makedirs('./exp/logs')
def save_model(model, iteration_time):
path = './exp/model/model'+str(iteration_time)+'.pkl'
torch.save(model.state_dict(), path)
|
{"hexsha": "bba9404ae01e04d5767f565bfe5cfafc3f879eb6", "size": 571, "ext": "py", "lang": "Python", "max_stars_repo_path": "Char6 AlphaGo/utils.py", "max_stars_repo_name": "rh01/Deep-reinforcement-learning-with-pytorch", "max_stars_repo_head_hexsha": "fd1853495b885514927c82834f562d2a4df06b28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-30T17:47:31.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-30T17:47:31.000Z", "max_issues_repo_path": "Char6 AlphaGo/utils.py", "max_issues_repo_name": "rh01/Deep-reinforcement-learning-with-pytorch", "max_issues_repo_head_hexsha": "fd1853495b885514927c82834f562d2a4df06b28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Char6 AlphaGo/utils.py", "max_forks_repo_name": "rh01/Deep-reinforcement-learning-with-pytorch", "max_forks_repo_head_hexsha": "fd1853495b885514927c82834f562d2a4df06b28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-25T06:40:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-20T12:38:55.000Z", "avg_line_length": 18.4193548387, "max_line_length": 57, "alphanum_fraction": 0.6374781086, "include": true, "reason": "import numpy", "num_tokens": 140}
|
import copy
import time
import torch
import numpy as np
import matplotlib.pyplot as plt
from models.test import test_img
from models.CNN import CNN_mnist, CNN_cifar
from models.MLP import MLP
from utilities.arguments import parser
from utilities.grouping import mnist_iid, mnist_noniid, cifar_iid
from torchvision import datasets, transforms
from models.weight_update import LocalUpdate, FedAvg
if __name__ == '__main__':
# parse args
args = parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST("./data/mnist/", train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST("./data/mnist/", train=False, download=True, transform=trans_mnist)
# sample users
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10("./data/cifar", train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10("./data/cifar", train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
# build model
if args.model == 'cnn':
if args.dataset == 'cifar':
net_glob = CNN_cifar(args=args).to(args.device)
elif args.dataset == 'mnist':
net_glob = CNN_mnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in dataset_train[0][0].shape:
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
net_glob.train()
# copy weights
w_glob = net_glob.state_dict()
# training
loss_train = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
val_acc_list, net_list = [], []
start = time.process_time()
if args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(args.num_users)]
    for epoch in range(args.epochs):
loss_locals = []
if not args.all_clients:
w_locals = []
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
local = LocalUpdate(args, dataset_train, dict_users[idx])
w, loss = local.train(model=copy.deepcopy(net_glob).to(args.device))
if args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_glob = FedAvg(w_locals)
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
        print('Epoch {:d}, Average loss {:.3f}'.format(epoch, loss_avg))
loss_train.append(loss_avg)
end = time.process_time()
# plot loss curve
plt.figure()
plt.plot(range(len(loss_train)), loss_train)
plt.ylabel('train_loss')
plt.savefig('./save/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
# testing
net_glob.eval()
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train))
print("Testing accuracy: {:.2f}".format(acc_test))
print("Training time: {:.2f}".format(end-start))
|
{"hexsha": "9432d43d9c59fb6f1fd81d9e700b0952bccb455b", "size": 3754, "ext": "py", "lang": "Python", "max_stars_repo_path": "main_fed.py", "max_stars_repo_name": "DimensionPrism/Federated-Learning", "max_stars_repo_head_hexsha": "e5c7c7c0d3a08c2f8fea83ff26cd701c6349750f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main_fed.py", "max_issues_repo_name": "DimensionPrism/Federated-Learning", "max_issues_repo_head_hexsha": "e5c7c7c0d3a08c2f8fea83ff26cd701c6349750f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main_fed.py", "max_forks_repo_name": "DimensionPrism/Federated-Learning", "max_forks_repo_head_hexsha": "e5c7c7c0d3a08c2f8fea83ff26cd701c6349750f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2212389381, "max_line_length": 116, "alphanum_fraction": 0.7213638785, "include": true, "reason": "import numpy", "num_tokens": 1016}
|
SUBROUTINE SFHSTAT(pos,model,ssfr6,ssfr7,ssfr8,ave_age)
!compute basic statistics given a parameterized star formation history.
!required inputs are the parameter set and a single element output
!structure from compsp
USE sps_vars
IMPLICIT NONE
TYPE(PARAMS), INTENT(in) :: pos
TYPE(COMPSPOUT), INTENT(in) :: model
REAL(SP), INTENT(inout) :: ssfr6,ssfr7,ssfr8,ave_age
REAL(SP) :: dt
!---------------------------------------------------------------!
!---------------------------------------------------------------!
dt = 10**model%age/1E9 - pos%sf_start
IF (dt.LT.0.) THEN
WRITE(*,*) 'SFHSTAT ERROR: dt<0.0, stopping....'
STOP
ENDIF
!compute mass-weighted stellar age of tau component
IF (pos%sfh.EQ.1) THEN
ave_age = pos%tau*(1.-EXP(-dt/pos%tau)*(dt/pos%tau+1.))/&
(1.-EXP(-dt/pos%tau))
ssfr6 = (EXP(-(dt-1E-3)/pos%tau)-EXP(-dt/pos%tau))/(1.-EXP(-dt/pos%tau))
ssfr7 = (EXP(-(dt-1E-2)/pos%tau)-EXP(-dt/pos%tau))/(1.-EXP(-dt/pos%tau))
ssfr8 = (EXP(-(dt-1E-1)/pos%tau)-EXP(-dt/pos%tau))/(1.-EXP(-dt/pos%tau))
ELSE IF (pos%sfh.EQ.4) THEN
ave_age = (2.-EXP(-dt/pos%tau)*(dt/pos%tau*(dt/pos%tau+2.)+2.))*&
pos%tau/(1.-EXP(-dt/pos%tau)*(dt/pos%tau+1))
ssfr6 = (EXP(-(dt-1E-3)/pos%tau)*((dt-1E-3)/pos%tau)-&
EXP(-dt/pos%tau)*(dt/pos%tau))/&
(1.-EXP(-dt/pos%tau)*(dt/pos%tau+1))
ssfr7 = (EXP(-(dt-1E-2)/pos%tau)*((dt-1E-2)/pos%tau)-&
EXP(-dt/pos%tau)*(dt/pos%tau))/&
(1.-EXP(-dt/pos%tau)*(dt/pos%tau+1))
ssfr8 = (EXP(-(dt-1E-1)/pos%tau)*((dt-1E-1)/pos%tau)-&
EXP(-dt/pos%tau)*(dt/pos%tau))/&
(1.-EXP(-dt/pos%tau)*(dt/pos%tau+1))
ELSE
WRITE(*,*) 'SFHSTAT ERROR: you should not be calling sfhstat '//&
'for sfh types NE 1 or 4, stopping....'
STOP
ENDIF
!add constant component
ave_age = ave_age*(1.-pos%const) + pos%const*dt/2.
ssfr6 = ssfr6 *(1.-pos%const) + pos%const*1E-3/dt
ssfr7 = ssfr7 *(1.-pos%const) + pos%const*1E-2/dt
ssfr8 = ssfr8 *(1.-pos%const) + pos%const*1E-1/dt
!convert to lookback time
ave_age = dt - ave_age
!only add the burst if the burst has happened
IF (10**model%age/1E9.GT.pos%tburst) &
ave_age = (1.-pos%fburst)*ave_age + pos%fburst*pos%tburst
IF (dt-pos%tburst.LE.1E-3) ssfr6 = ssfr6 + pos%fburst
IF (dt-pos%tburst.LE.1E-2) ssfr7 = ssfr7 + pos%fburst
IF (dt-pos%tburst.LE.1E-1) ssfr8 = ssfr8 + pos%fburst
!convert from integral(SFR) to log(<SSFR>), in 1/Gyr
ssfr6 = LOG10(MAX(ssfr6/model%mass_csp/1E-3,tiny_number))
ssfr7 = LOG10(MAX(ssfr7/model%mass_csp/1E-2,tiny_number))
ssfr8 = LOG10(MAX(ssfr8/model%mass_csp/1E-1,tiny_number))
END SUBROUTINE SFHSTAT
|
{"hexsha": "936e134e352c01eb9957796c9efd7ac56a6bf66d", "size": 2732, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/sfhstat.f90", "max_stars_repo_name": "christopherlovell/fsps", "max_stars_repo_head_hexsha": "1c09a47d7b0fb15a7f245ee3e9b2a7c54122ffdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 55, "max_stars_repo_stars_event_min_datetime": "2015-04-17T18:36:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T01:47:28.000Z", "max_issues_repo_path": "src/sfhstat.f90", "max_issues_repo_name": "christopherlovell/fsps", "max_issues_repo_head_hexsha": "1c09a47d7b0fb15a7f245ee3e9b2a7c54122ffdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2015-11-04T03:19:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-15T22:03:19.000Z", "max_forks_repo_path": "src/sfhstat.f90", "max_forks_repo_name": "christopherlovell/fsps", "max_forks_repo_head_hexsha": "1c09a47d7b0fb15a7f245ee3e9b2a7c54122ffdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 43, "max_forks_repo_forks_event_min_datetime": "2015-04-29T09:24:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T19:31:46.000Z", "avg_line_length": 36.9189189189, "max_line_length": 77, "alphanum_fraction": 0.5761346999, "num_tokens": 1071}
|
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import cirtorch.layers.functional as LF
from cirtorch.layers.normalization import L2N
# --------------------------------------
# Pooling layers
# --------------------------------------
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.shape[0], x.shape[1], -1)
def __repr__(self):
return self.__class__.__name__ + '()'
class MAC(nn.Module):
def __init__(self):
super(MAC, self).__init__()
def forward(self, x):
return LF.mac(x)
def __repr__(self):
return self.__class__.__name__ + '()'
class SPoC(nn.Module):
def __init__(self):
super(SPoC, self).__init__()
def forward(self, x):
return LF.spoc(x)
def __repr__(self):
return self.__class__.__name__ + '()'
class GeM(nn.Module):
def __init__(self, p=3.0, eps=1e-6, freeze_p=True):
super(GeM, self).__init__()
self.p = p if freeze_p else Parameter(torch.ones(1) * p)
self.eps = eps
def forward(self, x):
return LF.gem(x, p=self.p, eps=self.eps)
def __repr__(self):
if isinstance(self.p, float):
p = self.p
else:
p = self.p.data.tolist()[0]
return self.__class__.__name__ +\
'(' + 'p=' + '{:.4f}'.format(p) +\
', ' + 'eps=' + str(self.eps) + ')'
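# For reference, LF.gem typically computes a generalized mean over the spatial
# dimensions (a sketch, assuming the standard formulation; the actual code
# lives in cirtorch.layers.functional):
#   x.clamp(min=eps).pow(p).mean(dim=(-2, -1)).pow(1. / p)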
class RMAC(nn.Module):
def __init__(self, L=3, eps=1e-6):
super(RMAC, self).__init__()
self.L = L
self.eps = eps
def forward(self, x):
return LF.rmac(x, L=self.L, eps=self.eps)
def __repr__(self):
return self.__class__.__name__ + '(' + 'L=' + '{}'.format(self.L) + ')'
class Rpool(nn.Module):
def __init__(self, rpool, whiten=None, L=3, eps=1e-6):
super(Rpool, self).__init__()
self.rpool = rpool
self.L = L
self.whiten = whiten
self.norm = L2N()
self.eps = eps
def forward(self, x, aggregate=True):
# features -> roipool
o = LF.roipool(x, self.rpool, self.L, self.eps) # size: #im, #reg, D, 1, 1
# concatenate regions from all images in the batch
s = o.size()
o = o.view(s[0] * s[1], s[2], s[3], s[4]) # size: #im x #reg, D, 1, 1
# rvecs -> norm
o = self.norm(o)
# rvecs -> whiten -> norm
if self.whiten is not None:
o = self.norm(self.whiten(o.squeeze(-1).squeeze(-1)))
# reshape back to regions per image
o = o.view(s[0], s[1], s[2], s[3], s[4]) # size: #im, #reg, D, 1, 1
# aggregate regions into a single global vector per image
if aggregate:
# rvecs -> sumpool -> norm
o = self.norm(o.sum(1, keepdim=False)) # size: #im, D, 1, 1
return o
def __repr__(self):
return super(Rpool, self).__repr__() + '(' + 'L=' + '{}'.format(self.L) + ')'
class CompactBilinearPooling(nn.Module):
"""
    Compute compact bilinear pooling of the bottom input with itself
    (both count sketches are applied to the same feature map).
Args:
output_dim: output dimension for compact bilinear pooling.
sum_pool: (Optional) If True, sum the output along height and width
dimensions and return output shape [batch_size, output_dim].
Otherwise return [batch_size, height, width, output_dim].
Default: True.
rand_h_1: (Optional) an 1D numpy array containing indices in interval
`[0, output_dim)`. Automatically generated from `seed_h_1`
if is None.
rand_s_1: (Optional) an 1D numpy array of 1 and -1, having the same shape
as `rand_h_1`. Automatically generated from `seed_s_1` if is
None.
rand_h_2: (Optional) an 1D numpy array containing indices in interval
`[0, output_dim)`. Automatically generated from `seed_h_2`
if is None.
rand_s_2: (Optional) an 1D numpy array of 1 and -1, having the same shape
as `rand_h_2`. Automatically generated from `seed_s_2` if is
None.
"""
def __init__(self, input_dim=2048, output_dim=2048,
sum_pool=True, cuda=True,
rand_h_1=None, rand_s_1=None, rand_h_2=None, rand_s_2=None):
super(CompactBilinearPooling, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.sum_pool = sum_pool
if rand_h_1 is None:
np.random.seed(1)
rand_h_1 = np.random.randint(output_dim, size=self.input_dim)
if rand_s_1 is None:
np.random.seed(3)
rand_s_1 = 2 * np.random.randint(2, size=self.input_dim) - 1
self.sparse_sketch_matrix1 = self.generate_sketch_matrix(
rand_h_1, rand_s_1, self.output_dim)
if rand_h_2 is None:
np.random.seed(5)
rand_h_2 = np.random.randint(output_dim, size=self.input_dim)
if rand_s_2 is None:
np.random.seed(7)
rand_s_2 = 2 * np.random.randint(2, size=self.input_dim) - 1
self.sparse_sketch_matrix2 = self.generate_sketch_matrix(
rand_h_2, rand_s_2, self.output_dim)
if cuda:
self.sparse_sketch_matrix1 = self.sparse_sketch_matrix1.cuda()
self.sparse_sketch_matrix2 = self.sparse_sketch_matrix2.cuda()
def forward(self, bottom):
batch_size, _, height, width = bottom.size()
bottom_flat = bottom.permute(0, 2, 3, 1).contiguous().view(-1, self.input_dim)
sketch_1 = bottom_flat.mm(self.sparse_sketch_matrix1)
sketch_2 = bottom_flat.mm(self.sparse_sketch_matrix2)
im_zeros_1 = torch.zeros(sketch_1.size()).to(sketch_1.device)
im_zeros_2 = torch.zeros(sketch_2.size()).to(sketch_2.device)
fft1 = torch.fft(torch.cat([sketch_1.unsqueeze(-1), im_zeros_1.unsqueeze(-1)], dim=-1), 1)
fft2 = torch.fft(torch.cat([sketch_2.unsqueeze(-1), im_zeros_2.unsqueeze(-1)], dim=-1), 1)
fft_product_real = fft1[..., 0].mul(fft2[..., 0]) - fft1[..., 1].mul(fft2[..., 1])
fft_product_imag = fft1[..., 0].mul(fft2[..., 1]) + fft1[..., 1].mul(fft2[..., 0])
cbp_flat = torch.ifft(torch.cat([
fft_product_real.unsqueeze(-1),
fft_product_imag.unsqueeze(-1)],
dim=-1), 1)[..., 0]
cbp = cbp_flat.view(batch_size, height, width, self.output_dim)
if self.sum_pool:
cbp = cbp.sum(dim=[1, 2])
return cbp
@staticmethod
def generate_sketch_matrix(rand_h, rand_s, output_dim):
"""
Return a sparse matrix used for tensor sketch operation in compact bilinear
pooling
Args:
rand_h: an 1D numpy array containing indices in interval `[0, output_dim)`.
rand_s: an 1D numpy array of 1 and -1, having the same shape as `rand_h`.
output_dim: the output dimensions of compact bilinear pooling.
Returns:
a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
"""
# Generate a sparse matrix for tensor count sketch
rand_h = rand_h.astype(np.int64)
rand_s = rand_s.astype(np.float32)
assert(rand_h.ndim == 1 and rand_s.ndim ==
1 and len(rand_h) == len(rand_s))
assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))
input_dim = len(rand_h)
indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
rand_h[..., np.newaxis]), axis=1)
indices = torch.from_numpy(indices)
rand_s = torch.from_numpy(rand_s)
sparse_sketch_matrix = torch.sparse.FloatTensor(
indices.t(), rand_s, torch.Size([input_dim, output_dim]))
return sparse_sketch_matrix.to_dense()
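# Minimal usage sketch (assumes a CUDA device, matching the cuda=True default):
#   cbp = CompactBilinearPooling(input_dim=2048, output_dim=2048)
#   out = cbp(torch.randn(4, 2048, 7, 7).cuda())  # shape: [4, 2048]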
|
{"hexsha": "88fc178a9ca611cc2bfb1a09757a8a7481f80b7d", "size": 7966, "ext": "py", "lang": "Python", "max_stars_repo_path": "cirtorch/layers/pooling.py", "max_stars_repo_name": "smly/Landmark2019-1st-and-3rd-Place-Solution", "max_stars_repo_head_hexsha": "9839c9cbc6bec15e69e91d1d7c8be144531d5a33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-07-24T09:02:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T08:45:21.000Z", "max_issues_repo_path": "cirtorch/layers/pooling.py", "max_issues_repo_name": "smly/Landmark2019-1st-and-3rd-Place-Solution", "max_issues_repo_head_hexsha": "9839c9cbc6bec15e69e91d1d7c8be144531d5a33", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cirtorch/layers/pooling.py", "max_forks_repo_name": "smly/Landmark2019-1st-and-3rd-Place-Solution", "max_forks_repo_head_hexsha": "9839c9cbc6bec15e69e91d1d7c8be144531d5a33", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-25T19:35:36.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-25T19:35:36.000Z", "avg_line_length": 32.9173553719, "max_line_length": 98, "alphanum_fraction": 0.5804669847, "include": true, "reason": "import numpy", "num_tokens": 2099}
|
#!/usr/bin/env python3
import numpy as np
import sklearn.decomposition as sd
def subtract_mean_vec(vectors):
return vectors - vectors.mean(axis=0)
def subtract_top_components(vectors, d):
    """Subtract the top d PCA components from each row vector."""
pca = sd.PCA().fit(vectors)
for cn in range(d):
component = pca.components_[cn, :]
weights = np.array([component.dot(vectors.T)])
vectors = vectors - weights.T.dot(np.array([component]))
return vectors
def postprocess(vectors, d=None):
"""
Postprocess vectors following:
Jiaqi Mu, Suma Bhat, Pramod Viswanath. 2017.
    All-but-the-Top: Simple and Effective Postprocessing for Word
    Representations. https://arxiv.org/abs/1702.01417
"""
if d is None:
# this is the default recommended in the paper
d = int(vectors.shape[1]/100)
vectors = subtract_mean_vec(vectors)
vectors = subtract_top_components(vectors, d=d)
return vectors
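# Usage sketch (hypothetical 1000 x 300 embedding matrix):
#   vectors = np.random.randn(1000, 300)
#   vectors = postprocess(vectors)  # subtracts the mean, then the top 3 components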
|
{"hexsha": "b7311333b193d4a404f889817367d352623ce26a", "size": 966, "ext": "py", "lang": "Python", "max_stars_repo_path": "snaut-english/utils/space_utils.py", "max_stars_repo_name": "porcelluscavia/vectors-webtool", "max_stars_repo_head_hexsha": "4dfcd0ce72685900ebfc4be4f08fe9fbdb01581e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-04-19T13:16:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T14:41:57.000Z", "max_issues_repo_path": "snaut-english/utils/space_utils.py", "max_issues_repo_name": "porcelluscavia/vectors-webtool", "max_issues_repo_head_hexsha": "4dfcd0ce72685900ebfc4be4f08fe9fbdb01581e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-04-19T13:23:39.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-14T08:57:37.000Z", "max_forks_repo_path": "snaut-english/utils/space_utils.py", "max_forks_repo_name": "porcelluscavia/vectors-webtool", "max_forks_repo_head_hexsha": "4dfcd0ce72685900ebfc4be4f08fe9fbdb01581e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-08-07T03:44:42.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-08T14:41:19.000Z", "avg_line_length": 24.15, "max_line_length": 66, "alphanum_fraction": 0.6739130435, "include": true, "reason": "import numpy", "num_tokens": 242}
|
/-
Copyright (c) 2022 Jujian Zhang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jujian Zhang
! This file was ported from Lean 3 source module algebra.module.graded_module
! leanprover-community/mathlib commit 59cdeb0da2480abbc235b7e611ccd9a7e5603d7c
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.RingTheory.GradedAlgebra.Basic
import Mathbin.Algebra.GradedMulAction
import Mathbin.Algebra.DirectSum.Decomposition
import Mathbin.Algebra.Module.BigOperators
/-!
# Graded Module
Given an `R`-algebra `A` graded by `𝓐`, a graded `A`-module `M` is expressed as
`direct_sum.decomposition 𝓜` and `set_like.has_graded_smul 𝓐 𝓜`.
Then `⨁ i, 𝓜 i` is an `A`-module and is isomorphic to `M`.
## Tags
graded module
-/
section
open DirectSum
variable {ι : Type _} (A : ι → Type _) (M : ι → Type _)
namespace DirectSum
open GradedMonoid
/-- A graded version of `distrib_mul_action`. -/
class GdistribMulAction [AddMonoid ι] [GMonoid A] [∀ i, AddMonoid (M i)] extends
GMulAction A M where
smul_add {i j} (a : A i) (b c : M j) : smul a (b + c) = smul a b + smul a c
smul_zero {i j} (a : A i) : smul a (0 : M j) = 0
#align direct_sum.gdistrib_mul_action DirectSum.GdistribMulAction
/-- A graded version of `module`. -/
class Gmodule [AddMonoid ι] [∀ i, AddMonoid (A i)] [∀ i, AddMonoid (M i)] [GMonoid A] extends
GdistribMulAction A M where
add_smul {i j} (a a' : A i) (b : M j) : smul (a + a') b = smul a b + smul a' b
zero_smul {i j} (b : M j) : smul (0 : A i) b = 0
#align direct_sum.gmodule DirectSum.Gmodule
/-- A graded version of `semiring.to_module`. -/
instance Gsemiring.toGmodule [DecidableEq ι] [AddMonoid ι] [∀ i : ι, AddCommMonoid (A i)]
[Gsemiring A] : Gmodule A A :=
{ GMonoid.toGMulAction A with
smul_add := fun _ _ => Gsemiring.mul_add
smul_zero := fun i j => Gsemiring.mul_zero
add_smul := fun i j => Gsemiring.add_mul
zero_smul := fun i j => Gsemiring.zero_mul }
#align direct_sum.gsemiring.to_gmodule DirectSum.Gsemiring.toGmodule
variable [AddMonoid ι] [∀ i : ι, AddCommMonoid (A i)] [∀ i, AddCommMonoid (M i)]
/-- The piecewise multiplication from the `has_mul` instance, as a bundled homomorphism. -/
@[simps]
def gsmulHom [GMonoid A] [Gmodule A M] {i j} : A i →+ M j →+ M (i + j)
where
toFun a :=
{ toFun := fun b => GSmul.smul a b
map_zero' := GdistribMulAction.smul_zero _
map_add' := GdistribMulAction.smul_add _ }
map_zero' := AddMonoidHom.ext fun a => Gmodule.zero_smul a
map_add' a₁ a₂ := AddMonoidHom.ext fun b => Gmodule.add_smul _ _ _
#align direct_sum.gsmul_hom DirectSum.gsmulHom
namespace Gmodule
/-- For graded monoid `A` and a graded module `M` over `A`. `gmodule.smul_add_monoid_hom` is the
`⨁ᵢ Aᵢ`-scalar multiplication on `⨁ᵢ Mᵢ` induced by `gsmul_hom`. -/
def smulAddMonoidHom [DecidableEq ι] [GMonoid A] [Gmodule A M] :
(⨁ i, A i) →+ (⨁ i, M i) →+ ⨁ i, M i :=
toAddMonoid fun i =>
AddMonoidHom.flip <|
toAddMonoid fun j => AddMonoidHom.flip <| (of M _).compHom.comp <| gsmulHom A M
#align direct_sum.gmodule.smul_add_monoid_hom DirectSum.Gmodule.smulAddMonoidHom
section
open GradedMonoid DirectSum Gmodule
instance [DecidableEq ι] [GMonoid A] [Gmodule A M] : SMul (⨁ i, A i) (⨁ i, M i)
where smul x y := smulAddMonoidHom A M x y
@[simp]
theorem smul_def [DecidableEq ι] [GMonoid A] [Gmodule A M] (x : ⨁ i, A i) (y : ⨁ i, M i) :
x • y = smulAddMonoidHom _ _ x y :=
rfl
#align direct_sum.gmodule.smul_def DirectSum.Gmodule.smul_def
@[simp]
theorem smulAddMonoidHom_apply_of_of [DecidableEq ι] [GMonoid A] [Gmodule A M] {i j} (x : A i)
(y : M j) :
smulAddMonoidHom A M (DirectSum.of A i x) (of M j y) = of M (i + j) (GSmul.smul x y) := by
simp [smul_add_monoid_hom]
#align direct_sum.gmodule.smul_add_monoid_hom_apply_of_of DirectSum.Gmodule.smulAddMonoidHom_apply_of_of
@[simp]
theorem of_smul_of [DecidableEq ι] [GMonoid A] [Gmodule A M] {i j} (x : A i) (y : M j) :
DirectSum.of A i x • of M j y = of M (i + j) (GSmul.smul x y) :=
smulAddMonoidHom_apply_of_of _ _ _ _
#align direct_sum.gmodule.of_smul_of DirectSum.Gmodule.of_smul_of
open AddMonoidHom
-- Almost identical to the proof of `direct_sum.one_mul`
private theorem one_smul [DecidableEq ι] [GMonoid A] [Gmodule A M] (x : ⨁ i, M i) :
(1 : ⨁ i, A i) • x = x :=
by
suffices smulAddMonoidHom A M 1 = AddMonoidHom.id (⨁ i, M i) from AddMonoidHom.congr_fun this x
apply DirectSum.addHom_ext; intro i xi
unfold One.one
rw [smul_add_monoid_hom_apply_of_of]
exact DirectSum.of_eq_of_gradedMonoid_eq (one_smul (GradedMonoid A) <| GradedMonoid.mk i xi)
#align direct_sum.gmodule.one_smul direct_sum.gmodule.one_smul
-- Almost identical to the proof of `direct_sum.mul_assoc`
private theorem mul_smul [DecidableEq ι] [Gsemiring A] [Gmodule A M] (a b : ⨁ i, A i)
(c : ⨁ i, M i) : (a * b) • c = a • b • c :=
by
suffices
(-- `λ a b c, (a * b) • c` as a bundled hom
smulAddMonoidHom
A M).compHom.comp
(DirectSum.mulHom A) =
(AddMonoidHom.compHom AddMonoidHom.flipHom <|
(smulAddMonoidHom A M).flip.compHom.comp <| smulAddMonoidHom A M).flip
from-- `λ a b c, a • (b • c)` as a bundled hom
AddMonoidHom.congr_fun
(AddMonoidHom.congr_fun (AddMonoidHom.congr_fun this a) b) c
ext (ai ax bi bx ci cx) : 6
dsimp only [coe_comp, Function.comp_apply, comp_hom_apply_apply, flip_apply, flip_hom_apply]
rw [smul_add_monoid_hom_apply_of_of, smul_add_monoid_hom_apply_of_of, DirectSum.mulHom_of_of,
smul_add_monoid_hom_apply_of_of]
exact
DirectSum.of_eq_of_gradedMonoid_eq
(mul_smul (GradedMonoid.mk ai ax) (GradedMonoid.mk bi bx) (GradedMonoid.mk ci cx))
#align direct_sum.gmodule.mul_smul direct_sum.gmodule.mul_smul
/-- The `module` derived from `gmodule A M`. -/
instance module [DecidableEq ι] [Gsemiring A] [Gmodule A M] : Module (⨁ i, A i) (⨁ i, M i)
where
smul := (· • ·)
one_smul := one_smul _ _
mul_smul := mul_smul _ _
smul_add r := (smulAddMonoidHom A M r).map_add
smul_zero r := (smulAddMonoidHom A M r).map_zero
add_smul r s x := by simp only [smul_def, map_add, AddMonoidHom.add_apply]
zero_smul x := by simp only [smul_def, map_zero, AddMonoidHom.zero_apply]
#align direct_sum.gmodule.module DirectSum.Gmodule.module
end
end Gmodule
end DirectSum
end
open DirectSum BigOperators
variable {ι R A M σ σ' : Type _}
variable [AddMonoid ι] [CommSemiring R] [Semiring A] [Algebra R A]
variable (𝓐 : ι → σ') [SetLike σ' A]
variable (𝓜 : ι → σ)
namespace SetLike
include σ' A σ M
instance gmulAction [AddMonoid M] [DistribMulAction A M] [SetLike σ M] [SetLike.GradedMonoid 𝓐]
[SetLike.GradedSmul 𝓐 𝓜] : GradedMonoid.GMulAction (fun i => 𝓐 i) fun i => 𝓜 i :=
{
SetLike.toGSmul 𝓐
𝓜 with
one_smul := fun ⟨i, m⟩ => Sigma.subtype_ext (zero_add _) (one_smul _ _)
mul_smul := fun ⟨i, a⟩ ⟨j, a'⟩ ⟨k, b⟩ => Sigma.subtype_ext (add_assoc _ _ _) (mul_smul _ _ _) }
#align set_like.gmul_action SetLike.gmulAction
instance gdistribMulAction [AddMonoid M] [DistribMulAction A M] [SetLike σ M]
[AddSubmonoidClass σ M] [SetLike.GradedMonoid 𝓐] [SetLike.GradedSmul 𝓐 𝓜] :
DirectSum.GdistribMulAction (fun i => 𝓐 i) fun i => 𝓜 i :=
{
SetLike.gmulAction 𝓐
𝓜 with
smul_add := fun i j a b c => Subtype.ext <| smul_add _ _ _
smul_zero := fun i j a => Subtype.ext <| smul_zero _ }
#align set_like.gdistrib_mul_action SetLike.gdistribMulAction
variable [AddCommMonoid M] [Module A M] [SetLike σ M] [AddSubmonoidClass σ' A]
[AddSubmonoidClass σ M] [SetLike.GradedMonoid 𝓐] [SetLike.GradedSmul 𝓐 𝓜]
/-- `[set_like.graded_monoid 𝓐] [set_like.has_graded_smul 𝓐 𝓜]` is the internal version of graded
module, the internal version can be translated into the external version `gmodule`. -/
instance gmodule : DirectSum.Gmodule (fun i => 𝓐 i) fun i => 𝓜 i :=
{
SetLike.gdistribMulAction 𝓐
𝓜 with
smul := fun i j x y => ⟨(x : A) • (y : M), SetLike.GradedSmul.smul_mem x.2 y.2⟩
add_smul := fun i j a a' b => Subtype.ext <| add_smul _ _ _
zero_smul := fun i j b => Subtype.ext <| zero_smul _ _ }
#align set_like.gmodule SetLike.gmodule
end SetLike
namespace GradedModule
include σ' A σ M
variable [AddCommMonoid M] [Module A M] [SetLike σ M] [AddSubmonoidClass σ' A]
[AddSubmonoidClass σ M] [SetLike.GradedMonoid 𝓐] [SetLike.GradedSmul 𝓐 𝓜]
/-- The smul multiplication of `A` on `⨁ i, 𝓜 i` from `(⨁ i, 𝓐 i) →+ (⨁ i, 𝓜 i) →+ ⨁ i, 𝓜 i`
turns `⨁ i, 𝓜 i` into an `A`-module
-/
def isModule [DecidableEq ι] [GradedRing 𝓐] : Module A (⨁ i, 𝓜 i) :=
{ Module.compHom _ (DirectSum.decomposeRingEquiv 𝓐 : A ≃+* ⨁ i, 𝓐 i).toRingHom with
smul := fun a b => DirectSum.decompose 𝓐 a • b }
#align graded_module.is_module GradedModule.isModule
attribute [local instance] GradedModule.isModule
/-- `⨁ i, 𝓜 i` and `M` are isomorphic as `A`-modules.
"The internal version" and "the external version" are isomorphism as `A`-modules.
-/
def linearEquiv [DecidableEq ι] [GradedRing 𝓐] [DirectSum.Decomposition 𝓜] : M ≃ₗ[A] ⨁ i, 𝓜 i :=
{
DirectSum.decomposeAddEquiv
𝓜 with
toFun := DirectSum.decomposeAddEquiv 𝓜
map_smul' := fun x y => by
classical
rw [← DirectSum.sum_support_decompose 𝓐 x, map_sum, Finset.sum_smul, map_sum,
Finset.sum_smul, Finset.sum_congr rfl fun i hi => _]
rw [RingHom.id_apply, ← DirectSum.sum_support_decompose 𝓜 y, map_sum, Finset.smul_sum,
map_sum, Finset.smul_sum, Finset.sum_congr rfl fun j hj => _]
simp only [(· • ·), DirectSum.decomposeAddEquiv_apply, DirectSum.decompose_coe,
DirectSum.Gmodule.smulAddMonoidHom_apply_of_of]
convert DirectSum.decompose_coe 𝓜 _
rfl }
#align graded_module.linear_equiv GradedModule.linearEquiv
end GradedModule
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/Algebra/Module/GradedModule.lean"}
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
from glob import glob
import imghdr
import os
import os.path
import numpy as np
import PIL.Image
from lmnet.datasets.base import Base, StoragePathCustomizable
from lmnet import data_processor
from lmnet.utils.random import shuffle, train_test_split
class ImageFolderBase(StoragePathCustomizable, Base):
"""Abstract class of dataset for loading image files stored in a folder.
    The expected directory structure looks like:
$DATA_DIR/extend_dir/cat/0001.jpg
$DATA_DIR/extend_dir/cat/xxxa.jpeg
$DATA_DIR/extend_dir/cat/yyyb.png
$DATA_DIR/extend_dir/dog/123.jpg
$DATA_DIR/extend_dir/dog/023.jpg
$DATA_DIR/extend_dir/dog/wwww.jpg
    When a child class has `validation_extend_dir`, the `validation` subset is built from those folders.
$DATA_DIR/validation_extend_dir/cat/0001.jpg
$DATA_DIR/validation_extend_dir/cat/xxxa.png
"""
def __init__(
self,
is_shuffle=True,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.is_shuffle = is_shuffle
self.element_counter = 0
@property
@functools.lru_cache(maxsize=None)
def classes(self):
"""Returns the classes list in the data set."""
classes = os.listdir(self.data_dir)
classes = [class_name for class_name in classes if class_name != ".DS_Store"]
classes.sort(key=lambda item: item.lower())
return classes
@property
def num_classes(self):
return len(self.classes)
@property
def num_per_epoch(self):
return len(self.data_files)
def _all_files(self):
all_image_files = []
for image_class in self.classes:
image_dir = os.path.join(self.data_dir, image_class)
for image_path in glob(os.path.join(image_dir, "*")):
if os.path.isfile(image_path) and imghdr.what(image_path) in ["jpeg", "png"]:
all_image_files.append(image_path)
return all_image_files
@property
@functools.lru_cache(maxsize=None)
def data_files(self):
all_image_files = self._all_files()
if self.validation_size > 0:
train_image_files, test_image_files = train_test_split(
all_image_files, test_size=self.validation_size, seed=1)
if self.subset == "train":
files = train_image_files
else:
files = test_image_files
return files
return all_image_files
def get_label(self, filename):
"""Returns label."""
class_name = os.path.basename(os.path.dirname(filename))
label = self.classes.index(class_name)
return label
def get_image(self, filename):
"""Returns numpy array of an image"""
image = PIL.Image.open(filename)
        # sometimes the image data is grayscale, so force a conversion to RGB.
image = image.convert("RGB")
image = np.array(image)
return image
@property
def feed_indices(self):
if not hasattr(self, "_feed_indices"):
if self.subset == "train" and self.is_shuffle:
self._feed_indices = shuffle(range(self.num_per_epoch), seed=self.seed)
else:
self._feed_indices = list(range(self.num_per_epoch))
return self._feed_indices
def _get_index(self, counter):
return self.feed_indices[counter]
def _shuffle(self):
if self.subset == "train" and self.is_shuffle:
self._feed_indices = shuffle(range(self.num_per_epoch), seed=self.seed)
print("Shuffle {} train dataset with random state {}.".format(self.__class__.__name__, self.seed))
self.seed = self.seed + 1
def _element(self):
"""Return an image and label."""
index = self._get_index(self.element_counter)
self.element_counter += 1
if self.element_counter == self.num_per_epoch:
self.element_counter = 0
self._shuffle()
target_file = self.data_files[index]
image = self.get_image(target_file)
label = self.get_label(target_file)
samples = {'image': image}
if callable(self.augmentor) and self.subset == "train":
samples = self.augmentor(**samples)
if callable(self.pre_processor):
samples = self.pre_processor(**samples)
image = samples['image']
return image, label
def feed(self):
"""Returns batch size numpy array of images and binarized labels."""
images, labels = zip(*[self._element() for _ in range(self.batch_size)])
labels = data_processor.binarize(labels, self.num_classes)
images = np.array(images)
if self.data_format == 'NCHW':
images = np.transpose(images, [0, 3, 1, 2])
return images, labels
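# Shape sketch for feed() (a sketch, assuming data_processor.binarize returns
# one-hot labels; with batch_size B and HxW RGB inputs):
#   images: (B, H, W, 3), or (B, 3, H, W) when data_format == 'NCHW'
#   labels: (B, num_classes)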
|
{"hexsha": "163d251242df574beabbdd500762ee3e266eefb1", "size": 5555, "ext": "py", "lang": "Python", "max_stars_repo_path": "lmnet/lmnet/datasets/image_folder.py", "max_stars_repo_name": "toohsk/blueoil", "max_stars_repo_head_hexsha": "596922caa939db9c5ecbac3286fbf6f703865ee6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lmnet/lmnet/datasets/image_folder.py", "max_issues_repo_name": "toohsk/blueoil", "max_issues_repo_head_hexsha": "596922caa939db9c5ecbac3286fbf6f703865ee6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-07T12:20:52.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-08T07:22:48.000Z", "max_forks_repo_path": "lmnet/lmnet/datasets/image_folder.py", "max_forks_repo_name": "toohsk/blueoil", "max_forks_repo_head_hexsha": "596922caa939db9c5ecbac3286fbf6f703865ee6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.384180791, "max_line_length": 110, "alphanum_fraction": 0.6336633663, "include": true, "reason": "import numpy", "num_tokens": 1207}
|
# coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Ruixiong Zhang; Lan Yu;
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" Implementation of GriffinLim vocoder """
import os
import numpy as np
import librosa
from scipy.io.wavfile import write as write_wav
class GriffinLim:
"""python implementation of griffinlim algorithm"""
def __init__(self, data_descriptions):
"""Reference: to paper "Multiband Excitation Vocoder"
"""
assert data_descriptions.audio_featurizer is not None
assert data_descriptions.audio_featurizer.feat is not None
assert data_descriptions.hparams.audio_config is not None
params_func = data_descriptions.audio_featurizer.feat.params
params = params_func(data_descriptions.hparams.audio_config)
self.channels = params.filterbank_channel_count
self.sample_rate = params.sample_rate
self.window_length = int(params.window_length * self.sample_rate)
self.hop_length = int(params.frame_length * self.sample_rate)
self.n_fft = self._get_nfft(self.window_length)
self.lower_frequency_limit = params.lower_frequency_limit
self.upper_frequency_limit = params.upper_frequency_limit
self.window_type = params.window_type
self.EPS = 1e-10
def _get_nfft(self, window_length):
"""n_fft is an exponential power of 2 closest to and larger than win_length"""
nfft = 2
while nfft < window_length:
nfft *= 2
return nfft
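        # e.g. _get_nfft(400) -> 512 and _get_nfft(1024) -> 1024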
def __call__(self, feats, hparams, name=None):
linear_feats = self._logmel_to_linear(feats)
samples = self._griffin_lim(linear_feats, hparams.gl_iters)
samples = samples / 32768
if not os.path.exists(hparams.output_directory):
os.makedirs(hparams.output_directory)
output_path = os.path.join(hparams.output_directory, '%s.wav' % str(name))
write_wav(output_path,
self.sample_rate,
(samples * np.iinfo(np.int16).max).astype(np.int16))
seconds = float(samples.shape[0]) / self.sample_rate
return seconds
def _logmel_to_linear(self, feats):
"""Convert FBANK to linear spectrogram.
Args:
feats: FBANK feats, shape: [length, channels]
Returns:
linear_feats: Linear spectrogram
"""
assert feats.shape[1] == self.channels
linear_feats = np.power(10.0, feats)
linear_basis = librosa.filters.mel(self.sample_rate,
self.n_fft,
self.channels,
self.lower_frequency_limit,
self.upper_frequency_limit)
linear_basis = np.linalg.pinv(linear_basis)
linear_feats = np.maximum(self.EPS, np.dot(linear_basis, linear_feats.T).T)
return linear_feats
def _griffin_lim(self, linear_feats, gl_iters):
"""Convert linear spectrogram into waveform
Args:
linear_feats: linear spectrogram
gl_iters: num of gl iterations
Returns:
waveform: Reconstructed waveform (N,).
"""
assert linear_feats.shape[1] == self.n_fft // 2 + 1
linear_feats = np.abs(linear_feats.T)
samples = librosa.griffinlim(S=linear_feats,
n_iter=gl_iters,
hop_length=self.hop_length,
win_length=self.window_length,
window=self.window_type)
return samples
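# Usage sketch (data_descriptions and hparams come from the surrounding Athena
# pipeline and are not shown here):
#   vocoder = GriffinLim(data_descriptions)
#   seconds = vocoder(feats, hparams, name='utt1')  # writes utt1.wav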
|
{"hexsha": "c3153d84d785d15325d09bfee4ea75152b71114c", "size": 4569, "ext": "py", "lang": "Python", "max_stars_repo_path": "athena/tools/vocoder.py", "max_stars_repo_name": "leixiaoning/Athena-Giga", "max_stars_repo_head_hexsha": "d599cee4027126fc4efd27cefd69ce89b77530e0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "athena/tools/vocoder.py", "max_issues_repo_name": "leixiaoning/Athena-Giga", "max_issues_repo_head_hexsha": "d599cee4027126fc4efd27cefd69ce89b77530e0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "athena/tools/vocoder.py", "max_forks_repo_name": "leixiaoning/Athena-Giga", "max_forks_repo_head_hexsha": "d599cee4027126fc4efd27cefd69ce89b77530e0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-15T08:04:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-15T08:04:15.000Z", "avg_line_length": 43.5142857143, "max_line_length": 105, "alphanum_fraction": 0.6176406216, "include": true, "reason": "import numpy,from scipy", "num_tokens": 930}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
import numpy as np
def f64_to_bytes( value, endianess="<" ):
return bytes( struct.pack( f"{endianess}d", np.float64( value ) ) )
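# e.g. f64_to_bytes(1.0) == b'\x00\x00\x00\x00\x00\x00\xf0?' (little-endian IEEE 754)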
|
{"hexsha": "cfd2739622141433a939cdc29732899440a988ff", "size": 196, "ext": "py", "lang": "Python", "max_stars_repo_path": "converters/helpers.py", "max_stars_repo_name": "martinschwinzerl/sixtracklib_testdata", "max_stars_repo_head_hexsha": "3e74369844fa357d00e422f07d54f460a362e3b9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-30T18:28:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-30T18:28:34.000Z", "max_issues_repo_path": "converters/helpers.py", "max_issues_repo_name": "martinschwinzerl/sixtracklib_testdata", "max_issues_repo_head_hexsha": "3e74369844fa357d00e422f07d54f460a362e3b9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "converters/helpers.py", "max_forks_repo_name": "martinschwinzerl/sixtracklib_testdata", "max_forks_repo_head_hexsha": "3e74369844fa357d00e422f07d54f460a362e3b9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.6, "max_line_length": 71, "alphanum_fraction": 0.6479591837, "include": true, "reason": "import numpy", "num_tokens": 58}
|
[STATEMENT]
lemma box_an_bot:
"|an(x)]bot = n(x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. | an x ] bot = n x
[PROOF STEP]
by (simp add: box_x_bot n_an_def)
|
{"llama_tokens": 83, "file": "Correctness_Algebras_N_Semirings_Modal", "length": 1}
|
[STATEMENT]
lemma timpls_transformable_to_refl:
"timpls_transformable_to TI t t" (is ?A)
"timpls_transformable_to' TI t t" (is ?B)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. timpls_transformable_to TI t t &&& timpls_transformable_to' TI t t
[PROOF STEP]
by (induct t) (auto simp add: list_all2_conv_all_nth)
|
{"llama_tokens": 143, "file": "Automated_Stateful_Protocol_Verification_Term_Implication", "length": 1}
|
# Bond Helpers
type FixedRateBondHelper <: BondHelper
price::Quote
bond::FixedRateBond
end
value(b::FixedRateBondHelper) = b.price.value
maturity_date(b::FixedRateBondHelper) = maturity_date(b.bond)
# bond helper functions
function implied_quote{B <: BondHelper}(bond_h::B, clean::Bool = true)
bond = bond_h.bond
recalculate!(bond)
settlement_value = bond.settlementValue
return clean ? clean_price(bond, settlement_value, settlement_date(bond)) : dirty_price(bond, settlement_value, settlement_date(bond))
end
function clone(fixedRateBondHelper::FixedRateBondHelper, ts::TermStructure)
# first we have to clone the PE
newPE = clone(fixedRateBondHelper.bond.pricingEngine, ts)
# then we have to clone the bond
newBond = clone(fixedRateBondHelper.bond, newPE)
return FixedRateBondHelper(fixedRateBondHelper.price, newBond)
end
update_termstructure(bondHelper::FixedRateBondHelper, ts::TermStructure) = clone(bondHelper, ts)
|
{"hexsha": "df6e7b1c4b704f56d0a31773857df5c7736ce9cb", "size": 949, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/termstructures/yield/bond_helpers.jl", "max_stars_repo_name": "JuliaQuant/QuantLib.jl", "max_stars_repo_head_hexsha": "b1a806daa3b15b1f3705e36f716e66cc24c1dd5f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2016-03-07T07:29:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T09:43:02.000Z", "max_issues_repo_path": "src/termstructures/yield/bond_helpers.jl", "max_issues_repo_name": "JuliaQuant/QuantLib.jl", "max_issues_repo_head_hexsha": "b1a806daa3b15b1f3705e36f716e66cc24c1dd5f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/termstructures/yield/bond_helpers.jl", "max_forks_repo_name": "JuliaQuant/QuantLib.jl", "max_forks_repo_head_hexsha": "b1a806daa3b15b1f3705e36f716e66cc24c1dd5f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2016-03-09T08:33:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T18:21:38.000Z", "avg_line_length": 35.1481481481, "max_line_length": 136, "alphanum_fraction": 0.7913593256, "num_tokens": 253}
|
import os
import argparse
import numpy as np
import scipy
from scipy.sparse import csr_matrix
from scipy.sparse import lil_matrix
import pandas as pd
import matplotlib
from matplotlib import pyplot
from PIL import Image
import cv2
import numba
import deap
from deap import base
from deap import creator
from deap import tools
from module import prim_mst, FitnessEvaluation, reproduction, labeling, save_segment_img
print('Versions:')
print('\tNumPy version: ', np.__version__)
print('\tSciPy version: ', scipy.__version__)
print('\tMatplotlib version: ', matplotlib.__version__)
print('\tcv2 version: ', cv2.__version__)
print('\tNumba version: ', numba.__version__)
print('\tDEAP version:', deap.__version__)
print('')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evolutionary Image Segmentation Based on Multiobjective Clustering')
parser.add_argument('--image', '-i', type=str, default='paprika.png', help='Image file')
parser.add_argument('--color', '-c', type=str, default='RGB', help='Color space (\'RGB\' or \'Lab\')')
args = parser.parse_args()
img_name = args.image
color_space = args.color
out_dir = 'out/'
max_gen = 300
pop_size = 50
offspring_size = 50
mutate_rate = 0.0001
cross_rate = 0.7
min_region_num = 1
max_region_num = 50
min_region_size = 100
# Read image file
img_bgr = cv2.imread(img_name, cv2.IMREAD_COLOR) # height x width x 3 (BGR)
if img_bgr is None:
print('The image file \"{}\" cannot read.'.format(img_name))
exit()
print('Input image name: ', img_name)
print('Input image size: ', img_bgr.shape)
# Color space
if color_space == 'Lab':
print('Color space: L*a*b*')
img_arr = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB).astype(np.float)
# Scaling
img_arr[:, :, 0] = img_arr[:, :, 0] * 100. / 255.
img_arr[:, :, 1] = img_arr[:, :, 1] - 128
img_arr[:, :, 2] = img_arr[:, :, 2] - 128
else:
print('Color space: RGB')
img_arr = img_bgr.astype(np.float)
# Resize
max_len = np.max(img_arr.shape)
if max_len > 128:
img_arr = cv2.resize(img_arr, (img_arr.shape[1] * 128 // max_len, img_arr.shape[0] * 128 // max_len))
print('The image is resized to ', img_arr.shape)
# The number of pixels
N = img_arr.shape[0] * img_arr.shape[1]
# Create graph composed of connections between neighboring pixels
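    # Each edge weight is the Euclidean colour distance between the two
    # neighbouring pixels plus 1; the +1 keeps every MST edge weight strictly
    # positive.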
lil_mat = lil_matrix((N, N))
W = img_arr.shape[1]
H = img_arr.shape[0]
for n in range(N):
x, y = n % W, n // W
if x < W - 1: # right
lil_mat[n, n+1] = np.sqrt(np.sum((img_arr[y, x] - img_arr[y, x+1])**2)) + 1.
if y < H - 1: # down
lil_mat[n, n+W] = np.sqrt(np.sum((img_arr[y, x] - img_arr[y+1, x])**2)) + 1.
if x > 0: # left
lil_mat[n, n-1] = np.sqrt(np.sum((img_arr[y, x] - img_arr[y, x-1])**2)) + 1.
if y > 0: # upper
lil_mat[n, n-W] = np.sqrt(np.sum((img_arr[y, x] - img_arr[y-1, x])**2)) + 1.
print('Creating MST...')
mst = prim_mst(csr_matrix(lil_mat), W, N)
# evolutionary algorithm
evaluate = FitnessEvaluation(img_arr, min_region_num=min_region_num, max_region_num=max_region_num,
min_region_size=min_region_size)
creator.create('FitnessMin', base.Fitness, weights=(-1.0, -1.0))
creator.create('Individual', np.ndarray, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register('attr', (lambda init_gene: init_gene), mst)
toolbox.register('individual', tools.initRepeat, creator.Individual, toolbox.attr, n=1)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('evaluate', evaluate)
# Initialize population
pop = toolbox.population(n=pop_size)
# Fitness evaluation
fits = toolbox.map(toolbox.evaluate, pop)
for fit, ind in zip(fits, pop):
ind.fitness.values = fit
# Evolution loop
gen = 0
print('Start evolution loop...')
while gen < max_gen:
# Generation of offspring
offspring = reproduction(pop, offspring_size, W, toolbox, mutate_rate=mutate_rate, cross_rate=cross_rate)
# Archive truncation
pop = tools.selSPEA2(pop + offspring, pop_size)
gen += 1
if gen % 10 == 0:
print('({}/{}) '.format(gen, max_gen), end="", flush=True)
print('')
dev, edge = np.empty(pop_size), np.empty(pop_size)
for i, p in enumerate(pop):
dev[i], edge[i] = p.fitness.values[0], p.fitness.values[1]
df = pd.DataFrame(index=np.arange(len(pop)), columns=[])
print('Saving data...')
# Sorting and normalization
index = np.argsort(edge)
pop_sort = [pop[i] for i in index]
df['dev'] = dev[index]
df['edge'] = edge[index]
df['dev_norm'] = (dev[index] - np.min(dev)) / (np.max(dev) - np.min(dev))
df['edge_norm'] = (edge[index] - np.min(edge)) / (np.max(edge) - np.min(edge))
# Number of regions
region_num = np.empty(len(pop_sort), dtype=np.int)
for i, p in enumerate(pop_sort):
num, _ = labeling(p[0], W)
region_num[i] = num
df['region_num'] = region_num
# Selection
sel = int(np.argmin(df['dev_norm'] + df['edge_norm']))
selection = np.zeros(len(pop_sort), dtype=np.int)
selection[sel] = 1
df['selection'] = selection
os.makedirs(out_dir, exist_ok=True)
df.to_csv(out_dir + 'solutions.csv')
# Plot Pareto solutions
pyplot.figure(figsize=(5, 5))
pyplot.scatter(df['edge_norm'], df['dev_norm'], color='black')
pyplot.ylabel('Overall Deviation')
pyplot.xlabel('Edge')
pyplot.grid(True)
pyplot.savefig(out_dir + 'solutions.pdf')
# Save the selected segmentation image
array = save_segment_img(pop_sort[sel][0], W, H)
img = Image.fromarray(np.uint8(array))
img.save(out_dir + 'select_sol.png')
for i, p in enumerate(pop_sort):
array = save_segment_img(p[0], W, H)
img = Image.fromarray(np.uint8(array))
img.save(out_dir + '{:03d}.png'.format(i))
|
{"hexsha": "b63192753c617412bf7ac5e40637c233b53e69d2", "size": 6144, "ext": "py", "lang": "Python", "max_stars_repo_path": "mock_segmentation.py", "max_stars_repo_name": "shirakawas/mock-segmentation", "max_stars_repo_head_hexsha": "dd6806687642716698c2fa267c5938f695fae504", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-06T19:15:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-06T19:15:33.000Z", "max_issues_repo_path": "mock_segmentation.py", "max_issues_repo_name": "shirakawas/mock-segmentation", "max_issues_repo_head_hexsha": "dd6806687642716698c2fa267c5938f695fae504", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mock_segmentation.py", "max_forks_repo_name": "shirakawas/mock-segmentation", "max_forks_repo_head_hexsha": "dd6806687642716698c2fa267c5938f695fae504", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8556149733, "max_line_length": 118, "alphanum_fraction": 0.6280924479, "include": true, "reason": "import numpy,import scipy,from scipy,import numba", "num_tokens": 1712}
|
import math
import numpy as np
import matplotlib.pyplot as plt
from nclt2ros.visualizer.plotter import Plotter
from nclt2ros.transformer.coordinate_frame import CoordinateFrame
class GPS_RTK(Plotter):
"""Class to visualize the GPS RTK data as a kml and png file
USAGE:
GPS_RTK(date='2013-01-10', output_file='gps_rtk', plt_show=True)
"""
def __init__(self, date, output_file='gps_rtk', plt_show=True):
if isinstance(output_file, str):
self.output_file = output_file
else:
raise TypeError("'output_file' must be type of string")
self.date = date
self.plt_show = plt_show
# init base class
Plotter.__init__(self, date=self.date)
# transform coordinate frame into 'odom' or 'gt'
if self.date == '2013-01-10':
self.gps_rtk_converter = CoordinateFrame(origin='odom')
else:
self.gps_rtk_converter = CoordinateFrame(origin='gt')
# load data
self.gps_rtk = self.reader.read_gps_rtk_csv(all_in_one=True)
def save_kml_line(self):
"""visualize the gps rtk data as a kml file
"""
lat = self.gps_rtk[:, 3]
lng = self.gps_rtk[:, 4]
gps_rtk_list = list()
for (i_lat, j_lng) in zip(lat, lng):
if not math.isnan(i_lat) and not math.isnan(j_lng):
tup = (np.rad2deg(j_lng), np.rad2deg(i_lat)) # swap and convert lat long to deg
gps_rtk_list.append(tup)
ls = self.kml.newlinestring(name="gps rtk", coords=gps_rtk_list, description="latitude and longitude from gps rtk")
ls.style.linestyle.width = 1
ls.style.linestyle.color = self.red
self.kml.save(self.visualization_kml_dir + self.output_file + '.kml')
def get_gps_rtk_data(self):
"""get gps rtk data for visualization
:return: list for x coordinates, list for y coordinates
"""
lat = self.gps_rtk[:, 3]
lng = self.gps_rtk[:, 4]
x_list = list()
y_list = list()
for (i_lat, j_lng) in zip(lat, lng):
if not math.isnan(i_lat) and not math.isnan(j_lng):
x = self.gps_rtk_converter.get_x(lat=np.rad2deg(i_lat))
y = self.gps_rtk_converter.get_y(lon=np.rad2deg(j_lng))
x_list.append(x)
y_list.append(y)
return x_list, y_list
def save_gps_rtk_png(self):
"""visualize the gps rtk data as a png file
"""
x_list, y_list = self.get_gps_rtk_data()
plt.plot(y_list, x_list, 'r-', label='gps rtk')
plt.title('GPS RTK')
plt.xlabel('x in meter')
plt.ylabel('y in meter')
plt.legend(loc='upper left')
plt.grid()
plt.savefig(self.visualization_png_gps_rtk_dir + 'gps_rtk.png')
if self.plt_show:
plt.show()
def get_png_gps_rtk_dir(self):
"""get the png gps rtk directory
:return: path to png gps rtk directory
"""
return self.visualization_png_gps_rtk_dir
if __name__ == '__main__':
gps = GPS_RTK(date='2012-01-08')
gps.save_gps_rtk_png()
|
{"hexsha": "1a72e843d703c793ac473cbb0fc6ca834940eff7", "size": 3174, "ext": "py", "lang": "Python", "max_stars_repo_path": "nclt2ros/visualizer/gps_rtk.py", "max_stars_repo_name": "bierschi/nclt2ros", "max_stars_repo_head_hexsha": "77b30ca6750d4b0cd82ccb6660f2fd0946581091", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2019-09-09T08:18:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T13:39:56.000Z", "max_issues_repo_path": "nclt2ros/visualizer/gps_rtk.py", "max_issues_repo_name": "bierschi/nclt2rosbag", "max_issues_repo_head_hexsha": "77b30ca6750d4b0cd82ccb6660f2fd0946581091", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nclt2ros/visualizer/gps_rtk.py", "max_forks_repo_name": "bierschi/nclt2rosbag", "max_forks_repo_head_hexsha": "77b30ca6750d4b0cd82ccb6660f2fd0946581091", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-09-09T08:18:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-07T21:04:00.000Z", "avg_line_length": 29.6635514019, "max_line_length": 123, "alphanum_fraction": 0.6061751733, "include": true, "reason": "import numpy", "num_tokens": 813}
|
import sys
import numpy as np
def nn_opt(x0, grd, opt_itrs=1000, step_sched=lambda i: 1. / (i + 1), b1=0.9, b2=0.99, eps=1e-8, verbose=False):
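    """Minimize the objective whose gradient is grd with ADAM-style updates
    (bias-corrected first and second moments), projecting each iterate onto
    the constraint x >= 0."""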
x = x0.copy()
m1 = np.zeros(x.shape[0])
m2 = np.zeros(x.shape[0])
for i in range(opt_itrs):
g = grd(x)
if verbose:
sys.stdout.write('itr ' + str(i + 1) + '/' + str(opt_itrs) + ': ||inactive constraint grads|| = ' + str(
np.sqrt((g[x > 0] ** 2).sum())) + ' \r')
sys.stdout.flush()
m1 = b1 * m1 + (1. - b1) * g
m2 = b2 * m2 + (1. - b2) * g ** 2
upd = step_sched(i) * m1 / (1. - b1 ** (i + 1)) / (eps + np.sqrt(m2 / (1. - b2 ** (i + 1))))
x -= upd
# project onto x>=0
x = np.maximum(x, 0.)
if verbose:
sys.stdout.write('\n')
sys.stdout.flush()
return x
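# Usage sketch (hypothetical quadratic objective with gradient 2 * (x - 1)):
#   x_opt = nn_opt(np.full(5, 10.), lambda x: 2. * (x - 1.), opt_itrs=500)
#   # iterates stay in x >= 0 and should approach the optimum at x = 1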
|
{"hexsha": "f56c20c2f8182da4a1fa39318af3cdb949f47431", "size": 858, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/bayesiancoresets/util/opt.py", "max_stars_repo_name": "DominicBroadbentCompass/bayesian-coresets-optimization", "max_stars_repo_head_hexsha": "3657f2ebfc4f0e6b36f5c651b0651f06d7e3d6b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-05-21T02:34:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T15:17:26.000Z", "max_issues_repo_path": "experiments/bayesiancoresets/util/opt.py", "max_issues_repo_name": "DominicBroadbentCompass/bayesian-coresets-optimization", "max_issues_repo_head_hexsha": "3657f2ebfc4f0e6b36f5c651b0651f06d7e3d6b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-12T04:07:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-15T12:56:05.000Z", "max_forks_repo_path": "experiments/bayesiancoresets/util/opt.py", "max_forks_repo_name": "DominicBroadbentCompass/bayesian-coresets-optimization", "max_forks_repo_head_hexsha": "3657f2ebfc4f0e6b36f5c651b0651f06d7e3d6b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-06-23T04:51:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-04T15:07:41.000Z", "avg_line_length": 29.5862068966, "max_line_length": 116, "alphanum_fraction": 0.4522144522, "include": true, "reason": "import numpy", "num_tokens": 303}
|
module Included
using Hijack, Test
@testset "included testset" for _=1:1
@test true
push!(Hijack.RUN, 2)
# test that `testset=true` is forwarded
include("included_testset2.jl")
end
end
|
{"hexsha": "84fc89c832e4ae0787b8c8cd71f2fb32d701b927", "size": 202, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Hijack/test/included_testset.jl", "max_stars_repo_name": "danielsoutar/ReTest.jl", "max_stars_repo_head_hexsha": "4831b0fc23f2897bbeb999de9bdd14e968653199", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2020-11-04T22:33:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T13:37:31.000Z", "max_issues_repo_path": "test/Hijack/test/included_testset.jl", "max_issues_repo_name": "rfourquet/InlineTest", "max_issues_repo_head_hexsha": "4831b0fc23f2897bbeb999de9bdd14e968653199", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-11-05T20:31:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T16:02:16.000Z", "max_forks_repo_path": "test/Hijack/test/included_testset.jl", "max_forks_repo_name": "rfourquet/InlineTest", "max_forks_repo_head_hexsha": "4831b0fc23f2897bbeb999de9bdd14e968653199", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-04-14T23:41:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T18:48:54.000Z", "avg_line_length": 18.3636363636, "max_line_length": 43, "alphanum_fraction": 0.698019802, "num_tokens": 65}
|
# I need to better understand the general case before designing a nice dispatch system...
"""
An abstract type which dictates the problem to be solved. For example, for all problems with two spatial dimensions, all of which use 2D effective wavenumbers, we have the type TwoDimensions{T} <: PhysicalSetup{T}. In particular, for a planar interface, we then have PlaneWaves2D{T} <: TwoDimensions{T}.
"""
abstract type ProblemSetup{T<:AbstractFloat, Dim<:Integer} end
abstract type PlaneWaves3D{T} <: ProblemSetup{T,3} end
"""Extract the dimension of the space that this physical property lives in"""
dim(p::ProblemSetup{T,Dim}) where {Dim,T} = Dim
"""
A setup with an incident plane wave, from the left, on a plate region filled with particles.
"""
struct Plate2D{T} <: ProblemSetup{T,2}
"""The plate is the region defined by x in the interval [x1, x2]"""
x1::T
x2::T
"""The background medium"""
medium::Medium{T}
"""the species of the particles"""
species::Vector{Specie{T}}
"""maximum order of hankel functions"""
hankel_order::Int
"""minimal particle distance multiplier"""
radius_multiplier::T
"""orientation of wave-vector (k*cos(θin), k*sin(θin))"""
θin::T
end
"""
A setup with an incident plane wave, from the left, on a spherical region filled with particles.
"""
struct Sphere{T} <: ProblemSetup{T,3}
centre::T
radius::T
"""The background medium"""
medium::Medium{T}
"""the species of the particles"""
species::Vector{Specie{T}}
"""maximum order of hankel functions"""
hankel_order::Int
"""minimal particle distance multiplier"""
radius_multiplier::T
"""orientation of wave-vector (k*cos(θin), k*sin(θin))"""
θin::T
end
|
{"hexsha": "eea41c77134565e148adbbd5f18fd78a7472b022", "size": 1723, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/problem_setup.jl", "max_stars_repo_name": "UnofficialJuliaMirror/EffectiveWaves.jl-37e8709b-1ed2-53db-b26a-3571262b3cb4", "max_stars_repo_head_hexsha": "ec9829c42554ae993fe52f93a05c00b4c86105be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/problem_setup.jl", "max_issues_repo_name": "UnofficialJuliaMirror/EffectiveWaves.jl-37e8709b-1ed2-53db-b26a-3571262b3cb4", "max_issues_repo_head_hexsha": "ec9829c42554ae993fe52f93a05c00b4c86105be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/problem_setup.jl", "max_forks_repo_name": "UnofficialJuliaMirror/EffectiveWaves.jl-37e8709b-1ed2-53db-b26a-3571262b3cb4", "max_forks_repo_head_hexsha": "ec9829c42554ae993fe52f93a05c00b4c86105be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.46, "max_line_length": 297, "alphanum_fraction": 0.6894950667, "num_tokens": 443}
|
import os
import numpy as np
import cv2
class VideoRazor:
"""
Slices videos into N sections.
"""
    def __init__(self, input_path, output_path, splits):
        self.input_path = input_path
        if not isinstance(self.input_path, str):
            raise TypeError("Input path must be a string")
        self.output_path = output_path
        if not isinstance(self.output_path, str):
            raise TypeError("Output path must be a string")
        if not isinstance(splits, int):
            raise TypeError("Splits must be an int")
        self.splits = abs(splits)
        if self.splits < 1:
            raise ValueError("Splits must be > 0")
self.fps = None
def get_frames(self):
"""
Extracts each frame of the video to a list.
:return:
-------
frames : list
Frames which make up a video
"""
        # Read video
        cap = cv2.VideoCapture(self.input_path)
        # Get fps
        self.fps = cap.get(cv2.CAP_PROP_FPS)
        frames = list()
        # Make sure video is being read
        if cap.isOpened():
            success, frame = cap.read()
            while success:
                frames.append(frame)
                # Read new frame
                success, frame = cap.read()
        cap.release()
        return frames
    def get_roi_measurements(self):
        """
        Gets the height and width that each output video section will have.
        :return:
        -------
        height, width : float
            height and width of region of interest
        """
        return tuple(x / self.splits for x in self.get_frames()[0].shape[:2])
    def get_roi_frames(self):
        """
        Gets the region of interest for each frame in the list of frames
        and appends it to a new list of frames.
        :return:
        -------
        frames : list
            A list of frames
        """
        # Get h, w of video sections
        roi_h, roi_w = self.get_roi_measurements()
        # Read the source frames once, instead of re-reading the whole video
        # for every grid section
        src_frames = self.get_frames()
        # split frames into sections
        frames = list()
        # For each horizontal section
        for i in range(0, self.splits):
            # For each vertical section
            for j in range(0, self.splits):
                # For each frame of the source video
                for frame in src_frames:
                    # Get roi from frame
                    roi = frame[int(i * roi_h): int(i * roi_h) + int(roi_h),
                                int(j * roi_w): int(j * roi_w) + int(roi_w)]
                    frames.append(roi)
        return frames
def create_output_path(self):
"""
Creates file path, for video, which does not already exist.
:return:
----------
output_path : str
Output directory
"""
filename = self.output_path + '{}.mp4'
counter = 0
while os.path.isfile(filename.format(counter)):
counter += 1
# Apply counter to filename
return filename.format(counter)
def get_num_videos(self):
"""
Calculates how many videos will be output.
:return:
num_of_out_vids : int
Number of videos to be output
"""
return self.splits ** 2
def init_video_writer_list(self):
"""
Initializes a placeholder list of None(s) for VideoWriter
objects.
:return:
"""
return [None] * self.get_num_videos()
def split_frames_list(self):
"""
Splits frame list into sections which are the length of each
video to be output.
:return:
frames : list
List of lists of frames. inner list contains a full video's
frames.
"""
return np.array_split(self.get_roi_frames(), self.get_num_videos())
def create_videos(self, frames_split, out_videos, roi_w, roi_h):
"""
Creates all output videos from frame list.
:param frames_split: list
List of NumPy arrays
:param out_videos: list
List of None(s)
:param roi_w: float
Region of Interest width
:param roi_h: float
Region of Interest height
"""
# For each videos worth of frames
for i in range(len(frames_split)):
# Create output path
output_path = self.create_output_path()
            # Set up fourcc ('MJPG' is kept from the original code; on some
            # OpenCV builds 'mp4v' is needed for .mp4 output)
            four_cc = cv2.VideoWriter_fourcc(*'MJPG')
# Create video writer for each i-th in list
out_videos[i] = cv2.VideoWriter(output_path,
four_cc,
self.fps,
(int(roi_w), int(roi_h)))
# Get inner list
video_frames = frames_split[i]
# Write frames to file
for frame in video_frames:
out_videos[i].write(frame)
# Release video writer
out_videos[i].release()
def slice(self):
"""
Main function.
"""
# Get h, w of video sections
roi_h, roi_w = self.get_roi_measurements()
# Init list of Nones length of list
out_videos = self.init_video_writer_list()
# Split list into list of lists
frames_split = self.split_frames_list()
self.create_videos(frames_split, out_videos, roi_w, roi_h)
if __name__ == '__main__':
input_file = 'tests/test_data/test.mp4'
output = 'tests/output/test_out'
razor = VideoRazor(input_file, output, 3)
razor.slice()
|
{"hexsha": "955bc357ff1491f08efa29611a968ba5c3de48f0", "size": 5576, "ext": "py", "lang": "Python", "max_stars_repo_path": "video_razor/razor.py", "max_stars_repo_name": "5starkarma/video-razor", "max_stars_repo_head_hexsha": "2a3aa4825ba91ad4bd50ca51aae257b74ac2cf7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "video_razor/razor.py", "max_issues_repo_name": "5starkarma/video-razor", "max_issues_repo_head_hexsha": "2a3aa4825ba91ad4bd50ca51aae257b74ac2cf7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "video_razor/razor.py", "max_forks_repo_name": "5starkarma/video-razor", "max_forks_repo_head_hexsha": "2a3aa4825ba91ad4bd50ca51aae257b74ac2cf7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6818181818, "max_line_length": 78, "alphanum_fraction": 0.5385581062, "include": true, "reason": "import numpy", "num_tokens": 1184}
|
struct ConstrainedTimeInvariantLQR{T <: Number}
"""
The predicted system
"""
sys::AbstractStateSpace
"""
The horizon length
"""
N::Integer
"""
The state weight matrix
"""
Q::AbstractMatrix{T}
"""
The Q weighting matrix taking into account the prestabilizing controller
"""
Qₖ::AbstractMatrix{T}
"""
The input weight matrix
"""
R::AbstractMatrix{T}
"""
The terminal state weight matrix
"""
P::AbstractMatrix{T}
"""
The cross-term weight matrix
"""
S::AbstractMatrix{T}
"""
A prestabilizing controller (if any) that forms A-BK
"""
K::AbstractMatrix{T}
"""
The stage state constraint coefficient matrix
"""
E::AbstractMatrix{T}
"""
The stage input constraint coefficient matrix
"""
F::AbstractMatrix{T}
"""
The stage right hand side of the constraints
"""
g::AbstractVector{T}
"""
The lower bounds on the state variables
"""
xₗ::AbstractVector{Union{T, Missing}}
"""
The upper bounds on the state variables
"""
xᵤ::AbstractVector{Union{T, Missing}}
"""
The lower bounds on the input variables
"""
uₗ::AbstractVector{Union{T, Missing}}
"""
The upper bounds on the input variables
"""
uᵤ::AbstractVector{Union{T, Missing}}
end
function ConstrainedTimeInvariantLQR( sys::AbstractStateSpace, N::Integer, Q::AbstractNumOrUniform, R::AbstractNumOrUniform, P::AbstractNumOrUniformOrSymbol = :dare;
S::AbstractNumOrMatrix = 0, K::AbstractNumOrMatrixOrSymbol = 0, E::AbstractNumOrMatrix = 0, F::AbstractNumOrMatrix = 0, g::AbstractNumOrVector = 0,
xₗ::AbstractNumOrVector = -Inf, xᵤ::AbstractNumOrVector = Inf, uₗ::AbstractNumOrVector = -Inf, uᵤ::AbstractNumOrVector = Inf )
T = promote_type( eltype( Q ), eltype( R ), eltype( F ), eltype( E ), eltype( g ), eltype( S ) )
nₓ = nstates( sys )
nᵤ = ninputs( sys )
# Handle the UniformScaling cases
if( typeof( Q ) <: UniformScaling )
Q = to_matrix( T, Q, nₓ )
else
Q = to_matrix( T, Q )
end
if( typeof( R ) <: UniformScaling )
R = to_matrix( T, R, nᵤ )
else
R = to_matrix( T, R )
end
# Verify arguments
if( N < 1 )
        throw( DomainError( N, "Horizon length must be at least 1" ) )
end
if( !isposdef( R ) || !issymmetric( R ) )
throw( DomainError( R, "R must be symmetric positive definite") )
end
if( !issymmetric( Q ) )
throw( DomainError( Q, "Q must be symmetric" ) )
end
if( size( Q, 1 ) != nₓ )
throw( DimensionMismatch( "Q must be square with dimension the same as the number of states" ) )
end
if( size( R, 1 ) != nᵤ )
throw( DimensionMismatch( "R must be square with dimension the same as the number of rows" ) )
end
if( S != 0 )
if( size( S, 1 ) != nₓ || size( S, 2 ) != nᵤ )
throw( DimensionMismatch( "S must have $nₓ rows and $nᵤ columns" ) )
end
S = to_matrix( T, S )
else
S = zeros( T, nₓ, nᵤ )
end
if( typeof( K ) <: Symbol )
if( K == :dlqr )
_, _, K = ared( sys.A, sys.B, R, Q )
else
throw( ArgumentError( "Unknown value $K for K. Allowed values are :dlqr or a $nₓ by $nᵤ matrix." ) )
end
elseif( K != 0 )
if( size( K, 1 ) != nᵤ || size( K, 2 ) != nₓ )
throw( DimensionMismatch( "K must have $nₓ rows and $nᵤ columns" ) )
end
K = to_matrix( T, K )
else
K = zeros( T, nᵤ, nₓ )
end
# Create the controlled state weighting matrix (this is just the normal weighting matrix if there is no prestabilization)
Qₖ = Q + K'*R*K
# Handle the final state weighting matrix
if( typeof( P ) <: Symbol )
if( P == :Q )
P = Qₖ
elseif( P == :dare )
# This computation is done using the original weighting matrices
P, = ared( sys.A, sys.B, R, Q )
elseif( P == :dlyap )
# This computation is done using the original weighting matrices
P = lyapd( sys.A', Q )
else
throw( ArgumentError( "Unknown value $P for P. Allowed values are :Q, :dare, :dlyap or a $nₓ by $nₓ matrix." ) )
end
elseif( typeof( P ) <: UniformScaling )
P = to_matrix( T, P, nₓ )
else
if( size( P, 1 ) != nₓ )
throw( DimensionMismatch( "P must be square with dimension the same as the number of states" ) )
elseif( !issymmetric(P) )
throw( DomainError( P, "P must be symmetric" ) )
end
P = to_matrix( T, P )
end
# Verify the constraints when there are some
if( g != 0 )
( size( E, 1 ) != size( F, 1 ) ) && throw( DimensionMismatch( "E and F must have the same number of rows" ) )
        ( size( g, 1 ) != size( F, 1 ) ) && throw( DimensionMismatch( "g must have $(size( F, 1 )) rows" ) )
( size( E, 2 ) != nₓ ) && throw( DimensionMismatch( "E must have $nₓ columns" ) )
( size( F, 2 ) != nᵤ ) && throw( DimensionMismatch( "F must have $nᵤ columns" ) )
E = to_matrix( T, E )
F = to_matrix( T, F )
else
# Create an empty matrix for the constraints
E = zeros( T, 1, nₓ )
F = zeros( T, 1, nᵤ )
g = zeros( T, 1 )
end
# Create the bounds for the state and inputs
( size( xₗ, 1 ) != 1 ) && ( size( xₗ, 1 ) != nₓ ) && throw( DimensionMismatch( "xₗ must have $nₓ rows" ) )
( size( xᵤ, 1 ) != 1 ) && ( size( xᵤ, 1 ) != nₓ ) && throw( DimensionMismatch( "xᵤ must have $nₓ rows" ) )
( size( uₗ, 1 ) != 1 ) && ( size( uₗ, 1 ) != nᵤ ) && throw( DimensionMismatch( "uₗ must have $nᵤ rows" ) )
( size( uᵤ, 1 ) != 1 ) && ( size( uᵤ, 1 ) != nᵤ ) && throw( DimensionMismatch( "uᵤ must have $nᵤ rows" ) )
xₗ = convertboundstomissing( T, xₗ, nₓ )
xᵤ = convertboundstomissing( T, xᵤ, nₓ )
uₗ = convertboundstomissing( T, uₗ, nᵤ )
uᵤ = convertboundstomissing( T, uᵤ, nᵤ )
ConstrainedTimeInvariantLQR{T}( sys, N, Q, Qₖ, R, P, S, K, E, F, g, xₗ, xᵤ, uₗ, uᵤ )
end
# Helper function to perform the Inf -> missing conversion on the bounds
function convertboundstomissing( T, v, n )
if isa( v, AbstractVector )
# Convert infinite bounds to missing
temp = Vector{Union{T, Missing}}(undef, n)
temp[isinf.( v )] .= missing
temp[.!isinf.( v )] .= v[.!isinf.( v )]
return temp
else
return fill( isinf( v ) ? missing : v, (n, ) )
end
end
"""
getsystem(clqr::ConstrainedTimeInvariantLQR; prestabilized::Bool = true)
Constructs the potentially prestabilized system for a specific constrained LQR controller.
If a prestabilizing controller `K` is given in the LQR controller, that controller will be applied
to the system forming `A - BK` when `prestabilized` is true and the controlled system will be returned. If `prestabilized`
is false, the raw system is returned.
"""
function getsystem( clqr::ConstrainedTimeInvariantLQR; prestabilized::Bool = true )
sys = clqr.sys
if( prestabilized )
return StateSpace( sys.A - sys.B*clqr.K, sys.B, sys.C, sys.D, sys.Ts )
else
return sys
end
end
function isprestabilized( clqr::ConstrainedTimeInvariantLQR )
return !iszero( clqr.K )
end
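# A minimal usage sketch (assumes ControlSystems.jl's `ss` constructor; the
# system matrices and bounds below are illustrative, not from any real plant):
#
#     using ControlSystems, LinearAlgebra
#     sys  = ss([1.0 0.1; 0.0 1.0], [0.005; 0.1], [1.0 0.0], 0.0, 0.1)
#     clqr = ConstrainedTimeInvariantLQR(sys, 10, 1.0I, 1.0I;   # N = 10, Q = R = I
#                                        uₗ = -1.0, uᵤ = 1.0)
#     getsystem(clqr)   # K is zero here, so this equals the raw system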
################################################################################
# Pretty print the CLQR struct
################################################################################
function _string_mat_with_headers(X::Matrix)
p = (io, m) -> Base.print_matrix(io, m)
return replace(sprint(p, X), "\"" => " ")
end
function print_constraints( io, clqr )
rowsE = UnitRange( axes( clqr.E, 1 ) )
colsE = UnitRange( axes( clqr.E, 2 ) )
alignE = Base.alignment( io, clqr.E, rowsE, colsE, typemax( Int ), typemax( Int ), 2 )
rowsF = UnitRange( axes( clqr.F, 1 ) )
colsF = UnitRange( axes( clqr.F, 2 ) )
alignF = Base.alignment( io, clqr.F, rowsF, colsF, typemax( Int ), typemax( Int ), 2 )
rowsg = UnitRange( axes( clqr.g, 1 ) )
colsg = UnitRange( axes( clqr.g, 2 ) )
aligng = Base.alignment( io, clqr.g, rowsg, colsg, typemax( Int ), typemax( Int ), 2 )
numConstraints = length( rowsF )
# Figure out if a space should be added before the first elements
v = view( clqr.E, :, 1 )
spaceE = ( minimum( v ) >= 0 )
v = view( clqr.F, :, 1 )
spaceF = ( minimum( v ) >= 0 )
v = view( clqr.g, :, 1 )
spaceg = ( minimum( v ) >= 0 )
    # rowsF == rowsE - otherwise constraints are invalid
for i in rowsF
if( i == first( rowsF ) )
premat = "⎡"
postmat = " ⎤"
elseif( i == last( rowsF ) )
premat = "⎣"
postmat = " ⎦"
else
premat = "⎢"
postmat = " ⎥"
end
print( io, spaceE ? premat * " " : premat )
Base.print_matrix_row( io, clqr.E, alignE, i, colsE, " " )
print( io, postmat )
if( i == ceil( numConstraints / 2 ) )
print( io, " x + " )
else
print( io, " " )
end
print( io, spaceF ? premat * " " : premat )
Base.print_matrix_row( io, clqr.F, alignF, i, colsF, " " )
print( io, postmat )
if( i == ceil( numConstraints / 2 ) )
print( io, " u ≤ " )
else
print( io, " " )
end
print( io, spaceg ? premat * " " : premat )
Base.print_matrix_row( io, clqr.g, aligng, i, colsg, " " )
print( io, postmat )
println( io )
end
end
function convertboundsfrommissing( T, v, n, neg )
temp = Vector{Union{T, Float64}}( undef, n )
temp[ismissing.( v )] .= ( neg ? -Inf : Inf )
temp[.!ismissing.( v )] .= v[.!ismissing.( v )]
return temp
end
function print_bounds( io, clqr )
nₓ = nstates( getsystem( clqr ) )
nᵤ = ninputs( getsystem( clqr ) )
T = eltype( clqr.Q )
xᵤ = convertboundsfrommissing( T, clqr.xᵤ, nₓ, false )
xₗ = convertboundsfrommissing( T, clqr.xₗ, nₓ, true )
uᵤ = convertboundsfrommissing( T, clqr.uᵤ, nᵤ, false )
uₗ = convertboundsfrommissing( T, clqr.uₗ, nᵤ, true )
rowsxᵤ = UnitRange( axes( xᵤ, 1 ) )
colsxᵤ = UnitRange( axes( xᵤ, 2 ) )
alignxᵤ = Base.alignment( io, xᵤ, rowsxᵤ, colsxᵤ, typemax( Int ), typemax( Int ), 2 )
rowsxₗ = UnitRange( axes( xₗ, 1 ) )
colsxₗ = UnitRange( axes( xₗ, 2 ) )
alignxₗ = Base.alignment( io, xₗ, rowsxₗ, colsxₗ, typemax( Int ), typemax( Int ), 2 )
rowsuᵤ = UnitRange( axes( uᵤ, 1 ) )
colsuᵤ = UnitRange( axes( uᵤ, 2 ) )
alignuᵤ = Base.alignment( io, uᵤ, rowsuᵤ, colsuᵤ, typemax( Int ), typemax( Int ), 2 )
rowsuₗ = UnitRange( axes( uₗ, 1 ) )
colsuₗ = UnitRange( axes( uₗ, 2 ) )
alignuₗ = Base.alignment( io, uₗ, rowsuₗ, colsuₗ, typemax( Int ), typemax( Int ), 2 )
# Figure out if a space should be added before the first elements
spacexᵤ = any( xᵤ .< 0 )
spacexₗ = any( xₗ .< 0 )
spaceuᵤ = any( uᵤ .< 0 )
spaceuₗ = any( uₗ .< 0 )
anyinfxᵤ = any( isinf.( xᵤ ) )
anyinfxₗ = any( isinf.( xₗ ) )
anyinfuᵤ = any( isinf.( uᵤ ) )
anyinfuₗ = any( isinf.( uₗ ) )
havex = !all( ismissing.( clqr.xᵤ ) ) || !all( ismissing.( clqr.xₗ ) )
haveu = !all( ismissing.( clqr.uᵤ ) ) || !all( ismissing.( clqr.uₗ ) )
printx = true
printxₙ = true
printu = true
printuₙ = true
for i in max( rowsxᵤ, rowsuᵤ )
if havex
printx = printxₙ
if ( i == first( rowsxᵤ ) ) && ( i == last( rowsxᵤ ) )
printxₙ = false
prematx = "["
postmatx = " ]"
elseif i == first( rowsxᵤ )
printxₙ = true
prematx = "⎡"
postmatx = " ⎤"
elseif i == last( rowsxᵤ )
printxₙ = false
prematx = "⎣"
postmatx = " ⎦"
elseif printx == true
prematx = "⎢"
postmatx = " ⎥"
else
prematx = " "
postmatx = " "
end
if printx == true
print( io, anyinfxₗ ? ( isinf( xₗ[i] ) ? prematx * " " : prematx ) : prematx * " " )
Base.print_matrix_row( io, xₗ, alignxₗ, i, colsxₗ, " " )
print( io, postmatx )
if( i == ceil( nₓ/ 2 ) )
print( io, " ≤ x ≤ " )
else
print( io, " " )
end
if spacexᵤ
# This case happens if there are any negative numbers in the array
# In that case we need to add 2 spaces before any infinities, and only
# 1 space in front of the negative numbers
print( io, isinf( xᵤ[i] ) ? prematx * " " : prematx * " " )
elseif anyinfxᵤ
# This case happens if there are infinities and only positive numbers.
# In that case we only add the space before the infinities.
print( io, isinf( xᵤ[i] ) ? prematx * " " : prematx )
else
# In the case of all positive numbers, always add a space before every number
print( io, prematx * " " )
end
Base.print_matrix_row( io, xᵤ, alignxᵤ, i, colsxᵤ, " " )
print( io, postmatx )
end
end
if havex && haveu
print( io, " " )
end
if haveu
printu = printuₙ
if ( i == first( rowsuᵤ ) ) && ( i == last( rowsuᵤ ) )
printuₙ = false
prematu = "["
postmatu = " ]"
elseif i == first( rowsuᵤ )
printuₙ = true
prematu = "⎡"
postmatu = " ⎤"
elseif i == last( rowsuᵤ )
printuₙ = false
prematu = "⎣"
postmatu = " ⎦"
elseif printu == true
prematu = "⎢"
postmatu = " ⎥"
else
prematu = " "
postmatu = " "
end
if printu == true
print( io, anyinfuₗ ? ( isinf( uₗ[i] ) ? prematu * " " : prematu ) : prematu * " " )
Base.print_matrix_row( io, uₗ, alignuₗ, i, colsuₗ, " " )
print( io, postmatu )
if( i == ceil( nᵤ/ 2 ) )
print( io, " ≤ u ≤ " )
else
print( io, " " )
end
if spaceuᵤ
# This case happens if there are any negative numbers in the array
# In that case we need to add 2 spaces before any infinities, and only
# 1 space in front of the negative numbers
print( io, isinf( uᵤ[i] ) ? prematu * " " : prematu * " " )
elseif anyinfuᵤ
# This case happens if there are infinities and only positive numbers.
# In that case we only add the space before the infinities.
print( io, isinf( uᵤ[i] ) ? prematu * " " : prematu )
else
# In the case of all positive numbers, always add a space before every number
print( io, prematu * " " )
end
Base.print_matrix_row( io, uᵤ, alignuᵤ, i, colsuᵤ, " " )
print( io, postmatu )
end
end
println( io )
end
end
Base.print(io::IO, clqr::ConstrainedTimeInvariantLQR) = show(io, clqr)
function Base.show(io::IO, clqr::ConstrainedTimeInvariantLQR)
println( io, "Cost function:" )
println( io, "Q = \n", _string_mat_with_headers( clqr.Q ) )
println( io, "R = \n", _string_mat_with_headers( clqr.R ) )
println( io, "P = \n", _string_mat_with_headers( clqr.P ) )
if !iszero( clqr.S )
println( io, "S = \n", _string_mat_with_headers( clqr.S ) )
end
println( io )
println( io, "Predicted system:" )
print( io, clqr.sys )
println( io )
println( io )
if isprestabilized( clqr )
println( io, "Prestabilizing controller:" )
println( io, "K = \n", _string_mat_with_headers( clqr.K ) )
else
println( io, "No prestabilizing controller" )
end
println( io )
if !all( ismissing.( clqr.xᵤ ) ) || !all( ismissing.( clqr.xₗ ) ) || !all( ismissing.( clqr.uᵤ ) ) || !all( ismissing.( clqr.uₗ ) )
println( io, "Bounds:" )
print_bounds( io, clqr )
end
if !iszero( clqr.E ) || ! iszero( clqr.F )
println( io, "Constraints:" )
println( io )
print_constraints( io, clqr )
end
end
|
{"hexsha": "30da13cf2bc53942449505e194f3b495e1ec5cb4", "size": 17025, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/types/clqr.jl", "max_stars_repo_name": "imciner2/PredictiveControl.jl", "max_stars_repo_head_hexsha": "05c12f853e3a2d273003afe1354b6cdb2714b478", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-05T14:39:46.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-05T14:39:46.000Z", "max_issues_repo_path": "src/types/clqr.jl", "max_issues_repo_name": "imciner2/PredictiveControl.jl", "max_issues_repo_head_hexsha": "05c12f853e3a2d273003afe1354b6cdb2714b478", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/types/clqr.jl", "max_forks_repo_name": "imciner2/PredictiveControl.jl", "max_forks_repo_head_hexsha": "05c12f853e3a2d273003afe1354b6cdb2714b478", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7630597015, "max_line_length": 185, "alphanum_fraction": 0.518061674, "num_tokens": 5371}
|
(* Title: HOL/Auth/Guard/List_Msg.thy
Author: Frederic Blanqui, University of Cambridge Computer Laboratory
Copyright 2001 University of Cambridge
*)
section{*Lists of Messages and Lists of Agents*}
theory List_Msg imports Extensions begin
subsection{*Implementation of Lists by Messages*}
subsubsection{*nil is represented by any message which is not a pair*}
abbreviation (input)
cons :: "msg => msg => msg" where
"cons x l == {|x,l|}"
subsubsection{*induction principle*}
lemma lmsg_induct: "[| !!x. not_MPair x ==> P x; !!x l. P l ==> P (cons x l) |]
==> P l"
by (induct l) auto
subsubsection{*head*}
primrec head :: "msg => msg" where
"head (cons x l) = x"
subsubsection{*tail*}
primrec tail :: "msg => msg" where
"tail (cons x l) = l"
subsubsection{*length*}
fun len :: "msg => nat" where
"len (cons x l) = Suc (len l)" |
"len other = 0"
lemma len_not_empty: "n < len l ==> EX x l'. l = cons x l'"
by (cases l) auto
subsubsection{*membership*}
fun isin :: "msg * msg => bool" where
"isin (x, cons y l) = (x=y | isin (x,l))" |
"isin (x, other) = False"
subsubsection{*delete an element*}
fun del :: "msg * msg => msg" where
"del (x, cons y l) = (if x=y then l else cons y (del (x,l)))" |
"del (x, other) = other"
lemma notin_del [simp]: "~ isin (x,l) ==> del (x,l) = l"
by (induct l) auto
lemma isin_del [rule_format]: "isin (y, del (x,l)) --> isin (y,l)"
by (induct l) auto
subsubsection{*concatenation*}
fun app :: "msg * msg => msg" where
"app (cons x l, l') = cons x (app (l,l'))" |
"app (other, l') = l'"
lemma isin_app [iff]: "isin (x, app(l,l')) = (isin (x,l) | isin (x,l'))"
by (induct l) auto
subsubsection{*replacement*}
fun repl :: "msg * nat * msg => msg" where
"repl (cons x l, Suc i, x') = cons x (repl (l,i,x'))" |
"repl (cons x l, 0, x') = cons x' l" |
"repl (other, i, M') = other"
subsubsection{*ith element*}
fun ith :: "msg * nat => msg" where
"ith (cons x l, Suc i) = ith (l,i)" |
"ith (cons x l, 0) = x" |
"ith (other, i) = other"
lemma ith_head: "0 < len l ==> ith (l,0) = head l"
by (cases l) auto
subsubsection{*insertion*}
fun ins :: "msg * nat * msg => msg" where
"ins (cons x l, Suc i, y) = cons x (ins (l,i,y))" |
"ins (l, 0, y) = cons y l"
lemma ins_head [simp]: "ins (l,0,y) = cons y l"
by (cases l) auto
subsubsection{*truncation*}
fun trunc :: "msg * nat => msg" where
"trunc (l,0) = l" |
"trunc (cons x l, Suc i) = trunc (l,i)"
lemma trunc_zero [simp]: "trunc (l,0) = l"
by (cases l) auto
subsection{*Agent Lists*}
subsubsection{*set of well-formed agent-list messages*}
abbreviation
nil :: msg where
"nil == Number 0"
inductive_set agl :: "msg set"
where
Nil[intro]: "nil:agl"
| Cons[intro]: "[| A:agent; I:agl |] ==> cons (Agent A) I :agl"
subsubsection{*basic facts about agent lists*}
lemma del_in_agl [intro]: "I:agl ==> del (a,I):agl"
by (erule agl.induct, auto)
lemma app_in_agl [intro]: "[| I:agl; J:agl |] ==> app (I,J):agl"
by (erule agl.induct, auto)
lemma no_Key_in_agl: "I:agl ==> Key K ~:parts {I}"
by (erule agl.induct, auto)
lemma no_Nonce_in_agl: "I:agl ==> Nonce n ~:parts {I}"
by (erule agl.induct, auto)
lemma no_Key_in_appdel: "[| I:agl; J:agl |] ==>
Key K ~:parts {app (J, del (Agent B, I))}"
by (rule no_Key_in_agl, auto)
lemma no_Nonce_in_appdel: "[| I:agl; J:agl |] ==>
Nonce n ~:parts {app (J, del (Agent B, I))}"
by (rule no_Nonce_in_agl, auto)
lemma no_Crypt_in_agl: "I:agl ==> Crypt K X ~:parts {I}"
by (erule agl.induct, auto)
lemma no_Crypt_in_appdel: "[| I:agl; J:agl |] ==>
Crypt K X ~:parts {app (J, del (Agent B,I))}"
by (rule no_Crypt_in_agl, auto)
end
|
{"author": "Josh-Tilles", "repo": "isabelle", "sha": "990accf749b8a6e037d25012258ecae20d59ca62", "save_path": "github-repos/isabelle/Josh-Tilles-isabelle", "path": "github-repos/isabelle/Josh-Tilles-isabelle/isabelle-990accf749b8a6e037d25012258ecae20d59ca62/src/HOL/Auth/Guard/List_Msg.thy"}
|
import glob
import os
import random
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from utils import load_dicom
class RsnaDataset:
"""
paths: Subject IDs from the dataset
targets: MGMT_values for the respective subjects
channels: The idea is to combine 4 different MRI structures as 4 channels -- FLAIR, T1w, T1wCE, and T2w
"""
def __init__(self, paths, targets, args, transform=None) -> None:
self.paths = paths
self.targets = targets
self.args = args
self.transform = transform
def __len__(self) -> int:
return len(self.paths)
def __getitem__(self, idx):
sub_id = self.paths[idx]
patient_path = os.path.join(
self.args.data_path, self.args.data_dir, str(sub_id).zfill(5)
)
channels = []
for data_type in ("FLAIR", "T1w", "T1wCE", "T2w"):
data_path = os.path.join(patient_path, data_type)
channels.append(self.read_images(data_path))
        # Stack into a single ndarray first to avoid the slow
        # list-of-ndarrays tensor construction path
        X = torch.tensor(np.array(channels)).float()
return {
"X": X.permute(1, 0, 2, 3),
"y": torch.tensor(self.targets[idx], dtype=torch.int),
}
    def add_img_paths(self, path):
        """
        In case the image directory does not contain enough images to fill the
        desired sequence length, this function repeats earlier images by
        appending them at the end until the requirement is met.
        """
        img_paths = sorted([os.path.join(path, dir) for dir in os.listdir(path)])
        new_img_paths = list(img_paths)
        # Strictly-less-than: with `<=` the loop would spin forever once the
        # list reaches exactly sequence_length (remaining == 0 adds nothing)
        while len(new_img_paths) < self.args.sequence_length:
            remaining = self.args.sequence_length - len(new_img_paths)
            new_img_paths += img_paths[: min(remaining, len(img_paths))]
        return new_img_paths
def read_images(self, path):
if len(os.listdir(path)) < self.args.sequence_length:
# In this case, we will need to restart adding from the first image to complete the sequence
img_paths = self.add_img_paths(path)
else:
img_paths = random.sample(
sorted([os.path.join(path, dir) for dir in os.listdir(path)]),
self.args.sequence_length,
)
images = [
cv2.resize(load_dicom(img_path), self.args.img_shape) / 255
for img_path in img_paths
]
return images
# def __getitem__(self, idx):
# sub_id = self.paths[idx]
# patient_path = os.path.join(
# self.args.data_path, self.args.data_dir, str(sub_id).zfill(5)
# )
# channels = []
# for type in ("FLAIR", "T1w", "T1wCE"): # , "T2w"
# # glob.glob is equivalent to os.listdir() function
# dcm_paths = sorted(
# glob.glob(os.path.join(patient_path, type, "*")),
# key=lambda x: int(x[:-4].split("-")[-1]),
# )
# x = len(dcm_paths)
# if x < 10:
# r = range(x)
# else:
# d = x // 10
# r = range(d, x - d, d)
# channel = []
# for i in r:
# channel.append(cv2.resize(load_dicom(dcm_paths[i]), (256, 256)) / 255)
# # channel.append(load_dicom(dcm_paths[i]))
# channel = np.mean(channel, axis=0)
# channels.append(channel)
# X = torch.tensor(channels).float()
# if self.transform:
# X = self.transform(X)
# y = torch.tensor(self.targets[idx], dtype=torch.float)
# return {"X": X, "y": y}
|
{"hexsha": "4f6c4b900c8944559b0c056fabf08728b67cb522", "size": 3722, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/dataset.py", "max_stars_repo_name": "asheeshcric/kaggle-rsna-miccai", "max_stars_repo_head_hexsha": "8afd184572029e60618595ae154a92d13a1dae6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/dataset.py", "max_issues_repo_name": "asheeshcric/kaggle-rsna-miccai", "max_issues_repo_head_hexsha": "8afd184572029e60618595ae154a92d13a1dae6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/dataset.py", "max_forks_repo_name": "asheeshcric/kaggle-rsna-miccai", "max_forks_repo_head_hexsha": "8afd184572029e60618595ae154a92d13a1dae6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.649122807, "max_line_length": 107, "alphanum_fraction": 0.5679742074, "include": true, "reason": "import numpy,from numpy", "num_tokens": 910}
|
"""
Tests of Tax-Calculator utility functions.
"""
# CODING-STYLE CHECKS:
# pycodestyle test_utils.py
# pylint --disable=locally-disabled test_utils.py
#
# pylint: disable=missing-docstring
import os
import math
import random
import numpy as np
import pandas as pd
import pytest
# pylint: disable=import-error
from taxcalc import Policy, Records, Calculator
from taxcalc.utils import (DIST_VARIABLES,
DIST_TABLE_COLUMNS, DIST_TABLE_LABELS,
DIFF_VARIABLES,
DIFF_TABLE_COLUMNS, DIFF_TABLE_LABELS,
SOI_AGI_BINS,
create_difference_table,
weighted_sum, weighted_mean,
wage_weighted, agi_weighted,
expanded_income_weighted,
add_income_table_row_variable,
add_quantile_table_row_variable,
mtr_graph_data, atr_graph_data,
xtr_graph_plot, write_graph_file,
read_egg_csv, read_egg_json, delete_file,
bootstrap_se_ci,
certainty_equivalent,
ce_aftertax_expanded_income)
DATA = [[1.0, 2, 'a'],
[-1.0, 4, 'a'],
[3.0, 6, 'a'],
[2.0, 4, 'b'],
[3.0, 6, 'b']]
WEIGHT_DATA = [[1.0, 2.0, 10.0],
[2.0, 4.0, 20.0],
[3.0, 6.0, 30.0]]
DATA_FLOAT = [[1.0, 2, 'a'],
[-1.0, 4, 'a'],
[0.0000000001, 3, 'a'],
[-0.0000000001, 1, 'a'],
[3.0, 6, 'a'],
[2.0, 4, 'b'],
[0.0000000001, 3, 'b'],
[-0.0000000001, 1, 'b'],
[3.0, 6, 'b']]
def test_validity_of_name_lists():
assert len(DIST_TABLE_COLUMNS) == len(DIST_TABLE_LABELS)
records_varinfo = Records(data=None)
assert set(DIST_VARIABLES).issubset(records_varinfo.CALCULATED_VARS |
{'s006', 'XTOT'})
extra_vars_set = set(['count',
'count_StandardDed',
'count_ItemDed',
'count_AMT'])
assert (set(DIST_TABLE_COLUMNS) - set(DIST_VARIABLES)) == extra_vars_set
def test_create_tables(cps_subsample):
# pylint: disable=too-many-statements,too-many-branches
# create a current-law Policy object and Calculator object calc1
rec = Records.cps_constructor(data=cps_subsample)
pol = Policy()
calc1 = Calculator(policy=pol, records=rec)
calc1.calc_all()
# create a policy-reform Policy object and Calculator object calc2
reform = {'II_rt1': {2013: 0.15}}
pol.implement_reform(reform)
calc2 = Calculator(policy=pol, records=rec)
calc2.calc_all()
test_failure = False
# test creating various difference tables
diff = create_difference_table(calc1.dataframe(DIFF_VARIABLES),
calc2.dataframe(DIFF_VARIABLES),
'standard_income_bins', 'combined')
assert isinstance(diff, pd.DataFrame)
tabcol = 'pc_aftertaxinc'
expected = [np.nan,
np.nan,
-0.2,
-0.8,
-0.8,
-0.5,
-0.8,
-0.7,
-0.7,
-0.7,
-0.3,
-0.1,
-0.1,
-0.6]
if not np.allclose(diff[tabcol].values, expected,
atol=0.1, rtol=0.0, equal_nan=True):
test_failure = True
print('diff xbin', tabcol)
for val in diff[tabcol].values:
print('{:.1f},'.format(val))
diff = create_difference_table(calc1.dataframe(DIFF_VARIABLES),
calc2.dataframe(DIFF_VARIABLES),
'weighted_deciles', 'combined')
assert isinstance(diff, pd.DataFrame)
tabcol = 'tot_change'
expected = [0.0,
0.0,
0.3,
2.5,
2.8,
2.5,
4.4,
5.2,
6.3,
8.1,
10.6,
10.3,
53.0,
6.2,
3.5,
0.6]
if not np.allclose(diff[tabcol].values, expected,
atol=0.1, rtol=0.0):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.1f},'.format(val))
tabcol = 'share_of_change'
expected = [0.0,
0.0,
0.5,
4.8,
5.3,
4.8,
8.3,
9.9,
11.8,
15.2,
19.9,
19.4,
100.0,
11.7,
6.6,
1.1]
if not np.allclose(diff[tabcol].values, expected,
atol=0.1, rtol=0.0):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.1f},'.format(val))
tabcol = 'pc_aftertaxinc'
expected = [np.nan,
np.nan,
-0.3,
-1.0,
-0.7,
-0.5,
-0.7,
-0.7,
-0.7,
-0.7,
-0.7,
-0.3,
-0.6,
-0.6,
-0.3,
-0.1]
if not np.allclose(diff[tabcol].values, expected,
atol=0.1, rtol=0.0, equal_nan=True):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.1f},'.format(val))
# test creating various distribution tables
dist, _ = calc2.distribution_tables(None, 'weighted_deciles')
assert isinstance(dist, pd.DataFrame)
tabcol = 'iitax'
expected = [0.0,
0.0,
-1.8,
2.0,
5.0,
6.7,
18.0,
26.1,
35.2,
82.7,
148.8,
734.1,
1056.9,
152.4,
234.7,
347.1]
if not np.allclose(dist[tabcol].values, expected,
atol=0.1, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.1f},'.format(val))
tabcol = 'count_ItemDed'
expected = [0.0,
0.0,
0.4,
1.5,
2.5,
2.6,
4.5,
5.3,
6.3,
7.7,
11.0,
13.1,
54.9,
6.2,
5.5,
1.4]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.1, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.1f},'.format(val))
tabcol = 'expanded_income'
expected = [0.0,
0.0,
105.9,
294.0,
417.1,
528.4,
658.9,
818.2,
1037.8,
1324.7,
1788.1,
4047.6,
11020.7,
1286.6,
1511.9,
1249.2]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.1, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.1f},'.format(val))
tabcol = 'aftertax_income'
expected = [0.0,
0.0,
97.7,
261.9,
378.1,
485.6,
586.2,
723.2,
919.6,
1130.7,
1474.0,
2994.5,
9051.5,
1002.5,
1147.6,
844.4]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.1, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.1f},'.format(val))
dist, _ = calc2.distribution_tables(None, 'standard_income_bins')
assert isinstance(dist, pd.DataFrame)
tabcol = 'iitax'
expected = [0.0,
0.0,
-0.5,
-0.1,
3.4,
9.3,
18.5,
54.6,
89.3,
298.0,
290.2,
99.5,
194.9,
1056.9]
if not np.allclose(dist[tabcol], expected,
atol=0.1, rtol=0.0):
test_failure = True
print('dist xbin', tabcol)
for val in dist[tabcol].values:
print('{:.1f},'.format(val))
tabcol = 'count_ItemDed'
expected = [0.0,
0.0,
0.1,
1.1,
2.4,
3.5,
4.8,
10.1,
8.8,
17.0,
6.2,
0.5,
0.3,
54.9]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.1, rtol=0.0):
test_failure = True
print('dist xbin', tabcol)
for val in dist[tabcol].values:
print('{:.1f},'.format(val))
    assert not test_failure
def test_diff_count_precision():
"""
Estimate bootstrap standard error and confidence interval for count
statistics ('tax_cut' and 'tax_inc') in difference table generated
using puf.csv input data taking no account of tbi privacy fuzzing and
assuming all filing units in each bin have the same weight. These
assumptions imply that the estimates produced here are likely to
over-estimate the precision of the count statistics.
Background information on unweighted number of filing units by bin:
DECILE BINS:
0 16268
1 14897
2 13620
3 15760
4 16426
5 18070
6 18348
7 19352
8 21051
9 61733 <--- largest unweighted bin count
A 215525
STANDARD BINS:
0 7081 <--- negative income bin was dropped in TaxBrain display
1 19355
2 22722
3 20098
4 17088
5 14515
6 24760
7 15875
8 25225
9 15123
10 10570 <--- smallest unweighted bin count
11 23113 <--- second largest unweighted WEBAPP bin count
A 215525
Background information on Trump2017.json reform used in TaxBrain run 16649:
STANDARD bin 10 ($500-1000 thousand) has weighted count of 1179 thousand;
weighted count of units with tax increase is 32 thousand.
So, the mean weight for all units in STANDARD bin 10 is 111.5421 and the
unweighted number with a tax increase is 287 assuming all units in that
bin have the same weight. (Note that 287 * 111.5421 is about 32,012.58,
which rounds to the 32 thousand shown in the TaxBrain difference table.)
STANDARD bin 11 ($1000+ thousand) has weighted count of 636 thousand;
weighted count of units with tax increase is 27 thousand.
So, the mean weight for all units in STANDARD bin 11 is about 27.517 and
the unweighted number with a tax increase is 981 assuming all units in
that bin have the same weight. (Note that 981 * 27.517 is about 26,994.18,
which rounds to the 27 thousand shown in the TaxBrain difference table.)
"""
dump = False # setting to True implies results printed and test fails
seed = 123456789
bs_samples = 1000
alpha = 0.025 # implies 95% confidence interval
# compute stderr and confidence interval for STANDARD bin 10 increase count
data_list = [111.5421] * 287 + [0.0] * (10570 - 287)
assert len(data_list) == 10570
data = np.array(data_list)
assert (data > 0).sum() == 287
data_estimate = np.sum(data) * 1e-3
assert abs((data_estimate / 32) - 1) < 0.0005
bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)
stderr = bsd['se'] * 1e-3
cilo = bsd['cilo'] * 1e-3
cihi = bsd['cihi'] * 1e-3
if dump:
res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
print(
res.format('STANDARD-BIN10: ',
data_estimate, bs_samples, alpha, stderr, cilo, cihi)
)
assert abs((stderr / 1.90) - 1) < 0.0008
# NOTE: a se of 1.90 thousand implies that when comparing the difference
# in the weighted number of filing units in STANDARD bin 10 with a
# tax increase, the difference statistic has a bigger se (because
# the variance of the difference is the sum of the variances of the
# two point estimates). So, in STANDARD bin 10 if the point
# estimates both had se = 1.90, then the difference in the point
# estimates has has a se = 2.687. This means that the difference
# would have to be over 5 thousand in order for there to be high
# confidence that the difference was different from zero in a
# statistically significant manner.
# Or put a different way, a difference of 1 thousand cannot be
# accurately detected while a difference of 10 thousand can be
# accurately detected.
assert abs((cilo / 28.33) - 1) < 0.0012
assert abs((cihi / 35.81) - 1) < 0.0012
# compute stderr and confidence interval for STANDARD bin 11 increase count
data_list = [27.517] * 981 + [0.0] * (23113 - 981)
assert len(data_list) == 23113
data = np.array(data_list)
assert (data > 0).sum() == 981
data_estimate = np.sum(data) * 1e-3
assert abs((data_estimate / 27) - 1) < 0.0005
bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)
stderr = bsd['se'] * 1e-3
cilo = bsd['cilo'] * 1e-3
cihi = bsd['cihi'] * 1e-3
if dump:
res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
print(
res.format('STANDARD-BIN11: ',
data_estimate, bs_samples, alpha, stderr, cilo, cihi)
)
assert abs((stderr / 0.85) - 1) < 0.0040
# NOTE: a se of 0.85 thousand implies that when comparing the difference
# in the weighted number of filing units in STANDARD bin 11 with a
# tax increase, the difference statistic has a bigger se (because
# the variance of the difference is the sum of the variances of the
# two point estimates). So, in STANDARD bin 11 if point estimates
# both had se = 0.85, then the difference in the point estimates has
# has a se = 1.20. This means that the difference would have to be
# over 2.5 thousand in order for there to be high confidence that the
# difference was different from zero in a statistically significant
# manner.
# Or put a different way, a difference of 1 thousand cannot be
# accurately detected while a difference of 10 thousand can be
# accurately detected.
assert abs((cilo / 25.37) - 1) < 0.0012
assert abs((cihi / 28.65) - 1) < 0.0012
# fail if doing dump
assert not dump
def test_weighted_mean():
dfx = pd.DataFrame(data=DATA, columns=['tax_diff', 's006', 'label'])
grouped = dfx.groupby('label')
diffs = grouped.apply(weighted_mean, 'tax_diff')
exp = pd.Series(data=[16.0 / 12.0, 26.0 / 10.0], index=['a', 'b'])
exp.index.name = 'label'
pd.util.testing.assert_series_equal(exp, diffs)
def test_wage_weighted():
dfx = pd.DataFrame(data=WEIGHT_DATA, columns=['var', 's006', 'e00200'])
wvar = wage_weighted(dfx, 'var')
assert round(wvar, 4) == 2.5714
def test_agi_weighted():
dfx = pd.DataFrame(data=WEIGHT_DATA, columns=['var', 's006', 'c00100'])
wvar = agi_weighted(dfx, 'var')
assert round(wvar, 4) == 2.5714
def test_expanded_income_weighted():
dfx = pd.DataFrame(data=WEIGHT_DATA,
columns=['var', 's006', 'expanded_income'])
wvar = expanded_income_weighted(dfx, 'var')
assert round(wvar, 4) == 2.5714
def test_weighted_sum():
dfx = pd.DataFrame(data=DATA, columns=['tax_diff', 's006', 'label'])
grouped = dfx.groupby('label')
diffs = grouped.apply(weighted_sum, 'tax_diff')
exp = pd.Series(data=[16.0, 26.0], index=['a', 'b'])
exp.index.name = 'label'
pd.util.testing.assert_series_equal(exp, diffs)
EPSILON = 1e-5
def test_add_income_trow_var():
dta = np.arange(1, 1e6, 5000)
vdf = pd.DataFrame(data=dta, columns=['expanded_income'])
vdf = add_income_table_row_variable(vdf, 'expanded_income', SOI_AGI_BINS)
gdf = vdf.groupby('table_row')
idx = 1
for name, _ in gdf:
assert name.closed == 'left'
assert abs(name.right - SOI_AGI_BINS[idx]) < EPSILON
idx += 1
def test_add_quantile_trow_var():
dfx = pd.DataFrame(data=DATA, columns=['expanded_income', 's006', 'label'])
dfb = add_quantile_table_row_variable(dfx, 'expanded_income',
100, decile_details=False,
weight_by_income_measure=False)
bin_labels = dfb['table_row'].unique()
default_labels = set(range(1, 101))
for lab in bin_labels:
assert lab in default_labels
dfb = add_quantile_table_row_variable(dfx, 'expanded_income',
100, decile_details=False)
assert 'table_row' in dfb
with pytest.raises(ValueError):
dfb = add_quantile_table_row_variable(dfx, 'expanded_income',
100, decile_details=True)
def test_dist_table_sum_row(cps_subsample):
rec = Records.cps_constructor(data=cps_subsample)
calc = Calculator(policy=Policy(), records=rec)
calc.calc_all()
# create three distribution tables and compare the ALL row contents
tb1, _ = calc.distribution_tables(None, 'standard_income_bins')
tb2, _ = calc.distribution_tables(None, 'soi_agi_bins')
tb3, _ = calc.distribution_tables(None, 'weighted_deciles')
tb4, _ = calc.distribution_tables(None, 'weighted_deciles',
pop_quantiles=True)
assert np.allclose(tb1.loc['ALL'], tb2.loc['ALL'])
assert np.allclose(tb1.loc['ALL'], tb3.loc['ALL'])
# make sure population count is larger than filing-unit count
assert tb4.at['ALL', 'count'] > tb1.at['ALL', 'count']
# make sure population table has same ALL row values as filing-unit table
for col in ['count', 'count_StandardDed', 'count_ItemDed', 'count_AMT']:
tb4.at['ALL', col] = tb1.at['ALL', col]
assert np.allclose(tb1.loc['ALL'], tb4.loc['ALL'])
# make sure population table has same ALL tax liabilities as diagnostic tbl
dgt = calc.diagnostic_table(1)
assert np.allclose([tb4.at['ALL', 'iitax'],
tb4.at['ALL', 'payrolltax']],
[dgt.at['Ind Income Tax ($b)', calc.current_year],
dgt.at['Payroll Taxes ($b)', calc.current_year]])
def test_diff_table_sum_row(cps_subsample):
rec = Records.cps_constructor(data=cps_subsample)
# create a current-law Policy object and Calculator calc1
pol = Policy()
calc1 = Calculator(policy=pol, records=rec)
calc1.calc_all()
# create a policy-reform Policy object and Calculator calc2
reform = {'II_rt4': {2013: 0.56}}
pol.implement_reform(reform)
calc2 = Calculator(policy=pol, records=rec)
calc2.calc_all()
# create three difference tables and compare their content
dv1 = calc1.dataframe(DIFF_VARIABLES)
dv2 = calc2.dataframe(DIFF_VARIABLES)
dt1 = create_difference_table(dv1, dv2, 'standard_income_bins', 'iitax')
dt2 = create_difference_table(dv1, dv2, 'soi_agi_bins', 'iitax')
dt3 = create_difference_table(dv1, dv2, 'weighted_deciles', 'iitax',
pop_quantiles=False)
dt4 = create_difference_table(dv1, dv2, 'weighted_deciles', 'iitax',
pop_quantiles=True)
assert np.allclose(dt1.loc['ALL'], dt2.loc['ALL'])
assert np.allclose(dt1.loc['ALL'], dt3.loc['ALL'])
# make sure population count is larger than filing-unit count
assert dt4.at['ALL', 'count'] > dt1.at['ALL', 'count']
def test_mtr_graph_data(cps_subsample):
recs = Records.cps_constructor(data=cps_subsample)
calc = Calculator(policy=Policy(), records=recs)
year = calc.current_year
with pytest.raises(ValueError):
mtr_graph_data(None, year, mars='bad',
income_measure='agi',
dollar_weighting=True)
with pytest.raises(ValueError):
mtr_graph_data(None, year, mars=0,
income_measure='expanded_income',
dollar_weighting=True)
with pytest.raises(ValueError):
mtr_graph_data(None, year, mars=list())
with pytest.raises(ValueError):
mtr_graph_data(None, year, mars='ALL', mtr_variable='e00200s')
with pytest.raises(ValueError):
mtr_graph_data(None, year, mtr_measure='badtax')
with pytest.raises(ValueError):
mtr_graph_data(None, year, income_measure='badincome')
mtr = 0.20 * np.ones_like(cps_subsample['e00200'])
vdf = calc.dataframe(['s006', 'MARS', 'e00200'])
vdf['mtr1'] = mtr
vdf['mtr2'] = mtr
vdf = vdf[vdf['MARS'] == 1]
gdata = mtr_graph_data(vdf, year, mars=1,
mtr_wrt_full_compen=True,
income_measure='wages',
dollar_weighting=True)
assert isinstance(gdata, dict)
def test_atr_graph_data(cps_subsample):
pol = Policy()
rec = Records.cps_constructor(data=cps_subsample)
calc = Calculator(policy=pol, records=rec)
year = calc.current_year
with pytest.raises(ValueError):
atr_graph_data(None, year, mars='bad')
with pytest.raises(ValueError):
atr_graph_data(None, year, mars=0)
with pytest.raises(ValueError):
atr_graph_data(None, year, mars=list())
with pytest.raises(ValueError):
atr_graph_data(None, year, atr_measure='badtax')
calc.calc_all()
vdf = calc.dataframe(['s006', 'MARS', 'expanded_income'])
tax = 0.20 * np.ones_like(vdf['expanded_income'])
vdf['tax1'] = tax
vdf['tax2'] = tax
gdata = atr_graph_data(vdf, year, mars=1, atr_measure='combined')
gdata = atr_graph_data(vdf, year, atr_measure='itax')
gdata = atr_graph_data(vdf, year, atr_measure='ptax')
assert isinstance(gdata, dict)
def test_xtr_graph_plot(cps_subsample):
recs = Records.cps_constructor(data=cps_subsample)
calc = Calculator(policy=Policy(), records=recs)
mtr = 0.20 * np.ones_like(cps_subsample['e00200'])
vdf = calc.dataframe(['s006', 'MARS', 'c00100'])
vdf['mtr1'] = mtr
vdf['mtr2'] = mtr
gdata = mtr_graph_data(vdf, calc.current_year, mtr_measure='ptax',
income_measure='agi',
dollar_weighting=False)
gplot = xtr_graph_plot(gdata)
assert gplot
vdf = calc.dataframe(['s006', 'expanded_income'])
vdf['mtr1'] = mtr
vdf['mtr2'] = mtr
gdata = mtr_graph_data(vdf, calc.current_year, mtr_measure='itax',
alt_e00200p_text='Taxpayer Earnings',
income_measure='expanded_income',
dollar_weighting=False)
assert isinstance(gdata, dict)
def temporary_filename(suffix=''):
# Return string containing the temporary filename.
return 'tmp{}{}'.format(random.randint(10000000, 99999999), suffix)
def test_write_graph_file(cps_subsample):
recs = Records.cps_constructor(data=cps_subsample)
calc = Calculator(policy=Policy(), records=recs)
mtr = 0.20 * np.ones_like(cps_subsample['e00200'])
vdf = calc.dataframe(['s006', 'e00200', 'c00100'])
vdf['mtr1'] = mtr
vdf['mtr2'] = mtr
gdata = mtr_graph_data(vdf, calc.current_year, mtr_measure='ptax',
alt_e00200p_text='Taxpayer Earnings',
income_measure='agi',
dollar_weighting=False)
gplot = xtr_graph_plot(gdata)
assert gplot
htmlfname = temporary_filename(suffix='.html')
try:
write_graph_file(gplot, htmlfname, 'title')
except Exception: # pylint: disable=broad-except
if os.path.isfile(htmlfname):
try:
os.remove(htmlfname)
except OSError:
pass # sometimes we can't remove a generated temporary file
assert 'write_graph_file()_ok' == 'no'
# if try was successful, try to remove the file
if os.path.isfile(htmlfname):
try:
os.remove(htmlfname)
except OSError:
pass # sometimes we can't remove a generated temporary file
def test_ce_aftertax_income(cps_subsample):
# test certainty_equivalent() function with con>cmin
con = 5000
cmin = 1000
assert con == round(certainty_equivalent(con, 0, cmin), 6)
assert con > round(certainty_equivalent((math.log(con) - 0.1), 1, cmin), 6)
# test certainty_equivalent() function with con<cmin
con = 500
cmin = 1000
assert con == round(certainty_equivalent(con, 0, cmin), 6)
# test with require_no_agg_tax_change equal to False
rec = Records.cps_constructor(data=cps_subsample)
cyr = 2020
# specify calc1 and calc_all() for cyr
pol = Policy()
calc1 = Calculator(policy=pol, records=rec)
calc1.advance_to_year(cyr)
calc1.calc_all()
# specify calc2 and calc_all() for cyr
reform = {'II_em': {2019: 1000}}
pol.implement_reform(reform)
calc2 = Calculator(policy=pol, records=rec)
calc2.advance_to_year(cyr)
calc2.calc_all()
df1 = calc1.dataframe(['s006', 'combined', 'expanded_income'])
df2 = calc2.dataframe(['s006', 'combined', 'expanded_income'])
cedict = ce_aftertax_expanded_income(df1, df2,
require_no_agg_tax_change=False)
assert isinstance(cedict, dict)
    assert np.allclose(cedict['ceeu1'], [55641, 27167, 5726, 2229, 1565],
                       atol=0.5, rtol=0.0)
    assert np.allclose(cedict['ceeu2'], [54629, 26698, 5710, 2229, 1565],
                       atol=0.5, rtol=0.0)
# test with require_no_agg_tax_change equal to True
with pytest.raises(ValueError):
ce_aftertax_expanded_income(df1, df2, require_no_agg_tax_change=True)
# test with require_no_agg_tax_change equal to False and custom_params
params = {'crra_list': [0, 2], 'cmin_value': 2000}
with pytest.raises(ValueError):
ce_aftertax_expanded_income(df1, df2, require_no_agg_tax_change=True,
custom_params=params)
def test_read_egg_csv():
with pytest.raises(ValueError):
read_egg_csv('bad_filename')
def test_read_egg_json():
with pytest.raises(ValueError):
read_egg_json('bad_filename')
def test_create_delete_temp_file():
# test temporary_filename() and delete_file() functions
fname = temporary_filename()
with open(fname, 'w') as tmpfile:
tmpfile.write('any content will do')
assert os.path.isfile(fname) is True
delete_file(fname)
assert os.path.isfile(fname) is False
def test_bootstrap_se_ci():
# Use treated mouse data from Table 2.1 and
# results from Table 2.2 and Table 13.1 in
# Bradley Efron and Robert Tibshirani,
# "An Introduction to the Bootstrap"
# (Chapman & Hall, 1993).
data = np.array([94, 197, 16, 38, 99, 141, 23], dtype=np.float64)
assert abs(np.mean(data) - 86.86) < 0.005 # this is just rounding error
bsd = bootstrap_se_ci(data, 123456789, 1000, np.mean, alpha=0.025)
# following comparisons are less precise because of r.n. stream differences
assert abs(bsd['se'] / 23.02 - 1) < 0.02
assert abs(bsd['cilo'] / 45.9 - 1) < 0.02
assert abs(bsd['cihi'] / 135.4 - 1) < 0.03
def test_table_columns_labels():
# check that length of two lists are the same
assert len(DIST_TABLE_COLUMNS) == len(DIST_TABLE_LABELS)
assert len(DIFF_TABLE_COLUMNS) == len(DIFF_TABLE_LABELS)
|
{"hexsha": "f4227bf8e35e45b23943bfd7f0c7f1976154f5d2", "size": 28679, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tax-Calculator-2.9.0/taxcalc/tests/test_utils.py", "max_stars_repo_name": "grantseiter/Biden-Tax-Proposals", "max_stars_repo_head_hexsha": "c215ff845264f3fce9281c7fbb343ed10758a4b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tax-Calculator-2.9.0/taxcalc/tests/test_utils.py", "max_issues_repo_name": "grantseiter/Biden-Tax-Proposals", "max_issues_repo_head_hexsha": "c215ff845264f3fce9281c7fbb343ed10758a4b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tax-Calculator-2.9.0/taxcalc/tests/test_utils.py", "max_forks_repo_name": "grantseiter/Biden-Tax-Proposals", "max_forks_repo_head_hexsha": "c215ff845264f3fce9281c7fbb343ed10758a4b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4409148666, "max_line_length": 79, "alphanum_fraction": 0.5620140172, "include": true, "reason": "import numpy", "num_tokens": 7807}
|
""" KL-Divergence estimation through K-Nearest Neighbours
This module provides four implementations of the K-NN divergence estimator of
Qing Wang, Sanjeev R. Kulkarni, and Sergio Verdú.
"Divergence estimation for multidimensional densities via
k-nearest-neighbor distances." Information Theory, IEEE Transactions on
55.5 (2009): 2392-2405.
The implementations are through:
numpy (naive_estimator)
scipy (scipy_estimator)
    scikit-learn (skl_estimator, skl_estimator_efficient)
No guarantees are made w.r.t. the efficiency of these implementations.
"""
import warnings
import numpy as np
from scipy.spatial import KDTree
from sklearn.neighbors import NearestNeighbors
def knn_distance(point, sample, k):
""" Euclidean distance from `point` to it's `k`-Nearest
Neighbour in `sample` """
norms = np.linalg.norm(sample-point, axis=1)
return np.sort(norms)[k]
def verify_sample_shapes(s1, s2, k):
# Expects [N, D]
assert(len(s1.shape) == len(s2.shape) == 2)
# Check dimensionality of sample is identical
assert(s1.shape[1] == s2.shape[1])
def naive_estimator(s1, s2, k=1):
""" KL-Divergence estimator using brute-force (numpy) k-NN
s1: (N_1,D) Sample drawn from distribution P
s2: (N_2,D) Sample drawn from distribution Q
k: Number of neighbours considered (default 1)
return: estimated D(P|Q)
"""
verify_sample_shapes(s1, s2, k)
n, m = len(s1), len(s2)
D = np.log(m / (n - 1))
d = float(s1.shape[1])
for p1 in s1:
nu = knn_distance(p1, s2, k-1) # -1 because 'p1' is not in 's2'
rho = knn_distance(p1, s1, k)
D += (d/n)*np.log(nu/rho)
return D
def scipy_estimator(s1, s2, k=1):
""" KL-Divergence estimator using scipy's KDTree
s1: (N_1,D) Sample drawn from distribution P
s2: (N_2,D) Sample drawn from distribution Q
k: Number of neighbours considered (default 1)
return: estimated D(P|Q)
"""
verify_sample_shapes(s1, s2, k)
n, m = len(s1), len(s2)
d = float(s1.shape[1])
D = np.log(m / (n - 1))
nu_d, nu_i = KDTree(s2).query(s1, k)
    rho_d, rho_i = KDTree(s1).query(s1, k+1)
    # KDTree.query returns a different shape for k == 1 vs k > 1
if k > 1:
D += (d/n)*np.sum(np.log(nu_d[::, -1]/rho_d[::, -1]))
else:
D += (d/n)*np.sum(np.log(nu_d/rho_d[::, -1]))
return D
def skl_estimator(s1, s2, k=1):
""" KL-Divergence estimator using scikit-learn's NearestNeighbours
s1: (N_1,D) Sample drawn from distribution P
s2: (N_2,D) Sample drawn from distribution Q
k: Number of neighbours considered (default 1)
return: estimated D(P|Q)
"""
verify_sample_shapes(s1, s2, k)
n, m = len(s1), len(s2)
d = float(s1.shape[1])
D = np.log(m / (n - 1))
    # Use keyword arguments: positional NearestNeighbors arguments are
    # rejected by recent scikit-learn versions (the old `10` was the unused
    # radius parameter)
    s1_neighbourhood = NearestNeighbors(n_neighbors=k+1).fit(s1)
    s2_neighbourhood = NearestNeighbors(n_neighbors=k).fit(s2)
for p1 in s1:
s1_distances, indices = s1_neighbourhood.kneighbors([p1], k+1)
s2_distances, indices = s2_neighbourhood.kneighbors([p1], k)
rho = s1_distances[0][-1]
nu = s2_distances[0][-1]
D += (d/n)*np.log(nu/rho)
return D
def skl_estimator_efficient(s1, s2, k=1):
""" KL-Divergence estimator using scikit-learn's NearestNeighbours
s1: (N_1,D) Sample drawn from distribution P
s2: (N_2,D) Sample drawn from distribution Q
k: Number of neighbours considered (default 1)
return: estimated D(P|Q)
"""
verify_sample_shapes(s1, s2, k)
n, m = len(s1), len(s2)
d = float(s1.shape[1])
    # The radius parameter is not used by kneighbors() queries, so it is
    # omitted here
    s1_neighbourhood = NearestNeighbors(n_neighbors=k + 1, algorithm='kd_tree').fit(s1)
    s2_neighbourhood = NearestNeighbors(n_neighbors=k, algorithm='kd_tree').fit(s2)
s1_distances, indices = s1_neighbourhood.kneighbors(s1, k + 1)
s2_distances, indices = s2_neighbourhood.kneighbors(s1, k)
rho = s1_distances[:, -1]
nu = s2_distances[:, -1]
if np.any(rho == 0):
warnings.warn(
f"The distance between an element of the first dataset and its {k}-th NN in the same dataset "
f"is 0; this causes divergences in the code, and it is due to elements which are repeated "
f"{k + 1} times in the first dataset. Increasing the value of k usually solves this.",
RuntimeWarning)
D = np.sum(np.log(nu / rho))
return (d / n) * D + np.log(m / (n - 1))  # the log(m / (n - 1)) term makes the estimator valid for m != n
# List of estimators (skl_estimator_efficient is not included)
Estimators = [naive_estimator, scipy_estimator, skl_estimator]
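# Minimal usage sketch, added for illustration and not part of the original
# module. KL(N(0,1) || N(1,1)) = 0.5 analytically, so each estimate should
# approach 0.5 as the sample size grows.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    s1 = rng.normal(0.0, 1.0, size=(1000, 1))  # sample from P = N(0, 1)
    s2 = rng.normal(1.0, 1.0, size=(1000, 1))  # sample from Q = N(1, 1)
    for estimator in Estimators:
        print(estimator.__name__, estimator(s1, s2, k=5))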
|
{"hexsha": "f3136cae290fc3bec8634e6311ca3b24b12870d6", "size": 4798, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/knn_divergence.py", "max_stars_repo_name": "LoryPack/KL-divergence-estimators", "max_stars_repo_head_hexsha": "051415977d324d3d5d735a7a25be8099ad3781c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2018-09-04T09:17:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T19:25:44.000Z", "max_issues_repo_path": "src/knn_divergence.py", "max_issues_repo_name": "LoryPack/KL-divergence-estimators", "max_issues_repo_head_hexsha": "051415977d324d3d5d735a7a25be8099ad3781c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-06T15:58:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-06T15:58:22.000Z", "max_forks_repo_path": "src/knn_divergence.py", "max_forks_repo_name": "LoryPack/KL-divergence-estimators", "max_forks_repo_head_hexsha": "051415977d324d3d5d735a7a25be8099ad3781c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-07-07T10:28:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T09:55:38.000Z", "avg_line_length": 34.2714285714, "max_line_length": 117, "alphanum_fraction": 0.6215089621, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1417}
|
#!/usr/bin/env python
import argparse
import numpy as np
import tensorflow as tf
import os.path as osp
import models
import dataset
def display_results(image_paths, probs):
'''Displays the classification results given the class probability for each image'''
# Get a list of ImageNet class labels
with open('imagenet-classes.txt') as infile:
class_labels = [line.strip() for line in infile]
# Pick the class with the highest confidence for each image
class_indices = np.argmax(probs, axis=1)
# Display the results
print('\n{:20} {:30} {}'.format('Image', 'Classified As', 'Confidence'))
print('-' * 70)
for img_idx, image_path in enumerate(image_paths):
img_name = osp.basename(image_path)
class_name = class_labels[class_indices[img_idx]]
confidence = round(probs[img_idx, class_indices[img_idx]] * 100, 2)
print('{:20} {:30} {} %'.format(img_name, class_name, confidence))
def classify(model_data_path, image_paths):
'''Classify the given images using GoogleNet.'''
# Get the data specifications for the GoogleNet model
spec = models.get_data_spec(model_class=models.GoogleNet)
# Create a placeholder for the input image
input_node = tf.placeholder(tf.float32,
shape=(None, spec.crop_size, spec.crop_size, spec.channels))
# Construct the network
net = models.GoogleNet({'data': input_node})
# Create an image producer (loads and processes images in parallel)
image_producer = dataset.ImageProducer(image_paths=image_paths, data_spec=spec)
with tf.Session() as sesh:
# Start the image processing workers
coordinator = tf.train.Coordinator()
threads = image_producer.start(session=sesh, coordinator=coordinator)
# Load the converted parameters
print('Loading the model')
net.load(model_data_path, sesh)
# Load the input image
print('Loading the images')
indices, input_images = image_producer.get(sesh)
# Perform a forward pass through the network to get the class probabilities
print('Classifying')
probs = sesh.run(net.get_output(), feed_dict={input_node: input_images})
display_results([image_paths[i] for i in indices], probs)
# Stop the worker threads
coordinator.request_stop()
coordinator.join(threads, stop_grace_period_secs=2)
def main():
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('model_path', help='Converted parameters for the GoogleNet model')
parser.add_argument('image_paths', nargs='+', help='One or more images to classify')
args = parser.parse_args()
# Classify the image
classify(args.model_path, args.image_paths)
if __name__ == '__main__':
main()
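# Example invocation (file names below are hypothetical, for illustration only):
#   python classify.py googlenet.npy cat.jpg dog.jpg
# The script prints one line per image with the predicted ImageNet class and
# its confidence, in the format produced by display_results above.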
|
{"hexsha": "fa4b030300badbe29a5436afb3d4244930c987dd", "size": 2831, "ext": "py", "lang": "Python", "max_stars_repo_path": "caffe-tensorflow/examples/imagenet/classify.py", "max_stars_repo_name": "petercheng00/PSPNet-Keras-tensorflow", "max_stars_repo_head_hexsha": "d50583786a3e8782dd1b735d268e57392cd8c646", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3209, "max_stars_repo_stars_event_min_datetime": "2015-11-10T06:52:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T05:17:28.000Z", "max_issues_repo_path": "caffe-tensorflow/examples/imagenet/classify.py", "max_issues_repo_name": "petercheng00/PSPNet-Keras-tensorflow", "max_issues_repo_head_hexsha": "d50583786a3e8782dd1b735d268e57392cd8c646", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-11-10T21:32:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-09T15:51:30.000Z", "max_forks_repo_path": "caffe-tensorflow/examples/imagenet/classify.py", "max_forks_repo_name": "petercheng00/PSPNet-Keras-tensorflow", "max_forks_repo_head_hexsha": "d50583786a3e8782dd1b735d268e57392cd8c646", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1218, "max_forks_repo_forks_event_min_datetime": "2015-11-10T23:55:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T07:36:57.000Z", "avg_line_length": 35.835443038, "max_line_length": 92, "alphanum_fraction": 0.6884493112, "include": true, "reason": "import numpy", "num_tokens": 624}
|
import pyaudio
import os
import struct
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
import time
from tkinter import TclError
# constants
CHUNK = 1024 * 2 # samples per frame
FORMAT = pyaudio.paInt16 # audio format (bytes per sample?)
CHANNELS = 1 # single channel for microphone
RATE = 44100 # samples per second
# create matplotlib figure and axes
fig, ax2 = plt.subplots(1, figsize=(15, 7))
# pyaudio class instance
p = pyaudio.PyAudio()
# stream object to get data from microphone
stream = p.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK
)
# variable for plotting
# x = np.arange(0, 2 * CHUNK, 2) # samples (waveform)
xf = np.linspace(0, RATE, CHUNK) # frequencies (spectrum)
# create a line object with random data
# line, = ax1.plot(x, np.random.rand(CHUNK), '-', lw=2)
# create semilogx line for spectrum
line_fft, = ax2.semilogx(xf, np.random.rand(CHUNK), '-', lw=2)
# format spectrum axes
ax2.set_xlim(20, RATE / 2)
ax2.set_title('Spectral analysis')
ax2.set_xlabel('Frequency (Hz)')
ax2.set_ylabel('Amplitude')
fig.show()
print('stream started')
while True:
# binary data
data = stream.read(CHUNK)
# convert data to integers, make np array, then offset it by 128
data_int = struct.unpack(str(2*CHUNK) + 'B', data)
# data_int = np.frombuffer(data, dtype='h')
# data_np = np.array(data_int, dtype='h')/140 + 255
# create np array and offset by 128
data_np = np.array(data_int, dtype='b')[::2] + 128
#line.set_ydata(data_np)
# compute FFT and update line
yf = fft(data_int)
ydata=np.abs(yf[0:CHUNK]) / (128 * CHUNK)
line_fft.set_ydata(ydata)
for i, j in zip(xf, ydata):
if 600 < i < 1300 and j > 0.3:
print('ambulance')  # siren energy detected in the 600-1300 Hz band
# update figure canvas
try:
fig.canvas.draw()
fig.canvas.flush_events()
except TclError:
# calculate average frame rate
print('stream stopped')
break
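# Note: a fuller teardown would also release the audio resources once the
# plot window closes, e.g.:
# stream.stop_stream()
# stream.close()
# p.terminate()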
|
{"hexsha": "9ab1ef5a0ba6461666f8b4f554f11b5f68f2ad7e", "size": 2239, "ext": "py", "lang": "Python", "max_stars_repo_path": "audio_detectionPlot.py", "max_stars_repo_name": "kubs0ne/Emergency-Vehicle-Detector", "max_stars_repo_head_hexsha": "6dcdc574614ed268ed02ceef971b6abbcb71d59d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "audio_detectionPlot.py", "max_issues_repo_name": "kubs0ne/Emergency-Vehicle-Detector", "max_issues_repo_head_hexsha": "6dcdc574614ed268ed02ceef971b6abbcb71d59d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "audio_detectionPlot.py", "max_forks_repo_name": "kubs0ne/Emergency-Vehicle-Detector", "max_forks_repo_head_hexsha": "6dcdc574614ed268ed02ceef971b6abbcb71d59d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4431818182, "max_line_length": 69, "alphanum_fraction": 0.6096471639, "include": true, "reason": "import numpy,from scipy", "num_tokens": 604}
|
"""
File: deep-fus/src/models.py
Author: Tommaso Di Ianni (todiian@stanford.edu)
Copyright 2021 Tommaso Di Ianni
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import tensorflow as tf
# from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input, Conv2D, Conv3D, Activation, MaxPooling2D, Dropout, Concatenate, Conv2DTranspose, Reshape, Add
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.initializers import he_uniform
def UNet5(input_shape = (96, 96, 125), do_rate=0.5):
"""
Implementation of a 5-layer U-Net model for the reconstruction of power Doppler images
from sparse compound data.
Convolutional blocks are made of Conv2D + ReLU activations.
Downsampling is implemented with MaxPooling2D.
Upsampling is implemented with Conv2DTranspose.
Dropout used on all convolutional blocks.
Arguments:
input_shape -- dimensions of compounded dataset
do_rate -- dropout factor
Returns:
model -- a Model() instance in Keras
"""
def conv_block(X_in, nf, k, dr):
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X_in)
X = Activation('relu')(X)
X = Dropout(dr)(X)
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = Activation('relu')(X)
X = Dropout(dr)(X)
return X
# Filter kernel size used for convolutional layers
filt_k = 3
# Number of filters for respective layers
F1 = 32
F2 = 64
F3 = 128
F4 = 256
F5 = 512
# Input layer
X_input = Input(input_shape, dtype=tf.float32, name="input")
# Encoder L1
X = conv_block(X_input, F1, filt_k, do_rate)
X_skip1 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L2
X = conv_block(X, F2, filt_k, do_rate)
X_skip2 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L3
X = conv_block(X, F3, filt_k, do_rate)
X_skip3 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L4
X = conv_block(X, F4, filt_k, do_rate)
X_skip4 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L5
X = conv_block(X, F5, filt_k, do_rate)
X = Conv2DTranspose(filters=F4, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L4
X = Concatenate(axis=3)([X,X_skip4])
X = conv_block(X, F4, filt_k, do_rate)
X = Conv2DTranspose(filters=F3, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L3
X = Concatenate(axis=3)([X,X_skip3])
X = conv_block(X, F3, filt_k, do_rate)
X = Conv2DTranspose(filters=F2, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L2
X = Concatenate(axis=3)([X,X_skip2])
X = conv_block(X, F2, filt_k, do_rate)
X = Conv2DTranspose(filters=F1, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L1
X = Concatenate(axis=3)([X,X_skip1])
X = conv_block(X, F1, filt_k, do_rate)
# Output layer
X = Conv2D(filters=1, kernel_size=(1,1), strides=1, padding='same')(X)
# Reshape output
X = Reshape((input_shape[0],input_shape[1]))(X)
# Create model
model = Model(inputs = X_input, outputs = X, name='UNet5')
return model
def UNet53D(input_shape = (96, 96, 125), do_rate=0.5, filt0_k12=3, filt0_k3=8):
"""
Implementation of a 5-layer U-Net model for the reconstruction of power Doppler images
from sparse compound data. This model includes an initial Conv3D block that extracts
spatiotemporal features.
Convolutional blocks are made of Conv2D + ReLU activations.
Downsampling is implemented with MaxPooling2D.
Upsampling is implemented with Conv2DTranspose.
Dropout used on all convolutional blocks.
Arguments:
input_shape -- dimensions of compound dataset
do_rate -- dropout factor
filt0_k12 -- first and second kernel dimensions for first Conv3D layer
filt0_k3 -- third kernel dimension of first Conv3D layer
Returns:
model -- a Model() instance in Keras
"""
def conv_block(X_in, nf, k, dr):
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X_in)
X = Activation('relu')(X)
X = Dropout(dr)(X)
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = Activation('relu')(X)
X = Dropout(dr)(X)
return X
# Filter kernel size used for convolutional layers
filt_k = 3
# Number of filters for respective layers
F0 = 4
F1 = 32
F2 = 64
F3 = 128
F4 = 256
F5 = 512
# Input layer
X_input = Input(input_shape, dtype=tf.float32, name="input")
# Reshape input to 4-D
X = Reshape((input_shape[0],input_shape[1],input_shape[2],1))(X_input)
# Conv3D layer
X = Conv3D(filters=F0, kernel_size=(filt0_k12,filt0_k12,filt0_k3), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = Activation('relu')(X)
# Reshape input to 3-D
X = Reshape((input_shape[0],input_shape[1],input_shape[2]*F0))(X)
# Encoder L1
X = conv_block(X, F1, filt_k, do_rate)
X_skip1 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L2
X = conv_block(X, F2, filt_k, do_rate)
X_skip2 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L3
X = conv_block(X, F3, filt_k, do_rate)
X_skip3 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L4
X = conv_block(X, F4, filt_k, do_rate)
X_skip4 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L5
X = conv_block(X, F5, filt_k, do_rate)
X = Conv2DTranspose(filters=F4, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L4
X = Concatenate(axis=3)([X,X_skip4])
X = conv_block(X, F4, filt_k, do_rate)
X = Conv2DTranspose(filters=F3, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L3
X = Concatenate(axis=3)([X,X_skip3])
X = conv_block(X, F3, filt_k, do_rate)
X = Conv2DTranspose(filters=F2, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L2
X = Concatenate(axis=3)([X,X_skip2])
X = conv_block(X, F2, filt_k, do_rate)
X = Conv2DTranspose(filters=F1, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L1
X = Concatenate(axis=3)([X,X_skip1])
X = conv_block(X, F1, filt_k, do_rate)
# Output layer
X = Conv2D(filters=1, kernel_size=(1,1), strides=1, padding='same')(X)
# Reshape output
X = Reshape((input_shape[0],input_shape[1]))(X)
# Create model
model = Model(inputs = X_input, outputs = X, name='UNet53D')
return model
def ResUNet53D(input_shape = (96, 96, 125), do_rate=0.5, filt0_k12=3, filt0_k3=8):
"""
Implementation of a 5-layer U-Net model with residual blocks for the reconstruction of power
Doppler images from sparse compound data. This model includes an initial Conv3D block that
extracts spatiotemporal features.
Residual blocks are made of Conv2D + ReLU activations.
Downsampling is implemented with MaxPooling2D.
Upsampling is implemented with Conv2DTranspose.
Dropout used on all residual blocks.
Arguments:
input_shape -- dimensions of compound dataset
do_rate -- dropout factor
filt0_k12 -- first and second kernel dimensions for first Conv3D layer
filt0_k3 -- third kernel dimension of first Conv3D layer
Returns:
model -- a Model() instance in Keras
"""
def res_block(X_in, nf, k, dr):
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X_in)
X = Activation('relu')(X)
X = Dropout(dr)(X)
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = Activation('relu')(X)
X = Dropout(dr)(X)
X_out = Add()([X, X_in])
return X_out
# Filter kernel size used for convolutional layers
filt_k = 3
# Number of filters for respective layers
F0 = 4
F1 = 32
F2 = 64
F3 = 128
F4 = 256
F5 = 512
# Input layer
X_input = Input(input_shape, dtype=tf.float32, name="input")
# Reshape input to 4-D
X = Reshape((input_shape[0],input_shape[1],input_shape[2],1))(X_input)
# Conv3D layer
X = Conv3D(filters=F0, kernel_size=(filt0_k12,filt0_k12,filt0_k3), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = Activation('relu')(X)
# Reshape input to 3-D
X = Reshape((input_shape[0],input_shape[1],input_shape[2]*F0))(X)
# Encoder L1
X = Conv2D(filters=F1, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F1, filt_k, do_rate)
X_skip1 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L2
X = Conv2D(filters=F2, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F2, filt_k, do_rate)
X_skip2 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L3
X = Conv2D(filters=F3, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F3, filt_k, do_rate)
X_skip3 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L4
X = Conv2D(filters=F4, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F4, filt_k, do_rate)
X_skip4 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L5
X = Conv2D(filters=F5, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F5, filt_k, do_rate)
X = Conv2DTranspose(filters=F4, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L4
X = Concatenate(axis=3)([X,X_skip4])
X = Conv2D(filters=F4, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F4, filt_k, do_rate)
X = Conv2DTranspose(filters=F3, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L3
X = Concatenate(axis=3)([X,X_skip3])
X = Conv2D(filters=F3, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F3, filt_k, do_rate)
X = Conv2DTranspose(filters=F2, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L2
X = Concatenate(axis=3)([X,X_skip2])
X = Conv2D(filters=F2, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F2, filt_k, do_rate)
X = Conv2DTranspose(filters=F1, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L1
X = Concatenate(axis=3)([X,X_skip1])
X = Conv2D(filters=F1, kernel_size=(1,1), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = res_block(X, F1, filt_k, do_rate)
# Output layer
X = Conv2D(filters=1, kernel_size=(1,1), strides=1, padding='same')(X)
# Reshape output
X = Reshape((input_shape[0],input_shape[1]))(X)
# Create model
model = Model(inputs = X_input, outputs = X, name='ResUNet53D')
return model
def UNet5_postproc(input_shape = (96, 96), do_rate=0.5):
"""
Implementation of a 5-layer U-Net model for the post-processing of power Doppler images
reconstructed using sparse compound data.
Convolutional blocks are made of Conv2D + ReLU activations.
Downsampling is implemented with MaxPooling2D.
Upsampling is implemented with Conv2DTranspose.
Dropout used on all convolutional blocks.
Arguments:
input_shape -- dimensions of compounded dataset
do_rate -- dropout factor
Returns:
model -- a Model() instance in Keras
"""
def conv_block(X_in, nf, k, dr):
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X_in)
X = Activation('relu')(X)
X = Dropout(dr)(X)
X = Conv2D(filters=nf, kernel_size=(k,k), strides=1, padding='same', kernel_initializer=he_uniform(seed=0))(X)
X = Activation('relu')(X)
X = Dropout(dr)(X)
return X
# Filter kernel size used for convolutional layers
filt_k = 3
# Number of filters for respective layers
F1 = 32
F2 = 64
F3 = 128
F4 = 256
F5 = 512
# Input layer
X_input = Input(input_shape, dtype=tf.float32, name="input")
# Reshape input to 4-D
X = Reshape((input_shape[0],input_shape[1],1))(X_input)
# Encoder L1
X = conv_block(X, F1, filt_k, do_rate)
X_skip1 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L2
X = conv_block(X, F2, filt_k, do_rate)
X_skip2 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L3
X = conv_block(X, F3, filt_k, do_rate)
X_skip3 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L4
X = conv_block(X, F4, filt_k, do_rate)
X_skip4 = X
X = MaxPooling2D(pool_size=(2,2), strides=2, padding='same')(X)
# Encoder L5
X = conv_block(X, F5, filt_k, do_rate)
X = Conv2DTranspose(filters=F4, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L4
X = Concatenate(axis=3)([X,X_skip4])
X = conv_block(X, F4, filt_k, do_rate)
X = Conv2DTranspose(filters=F3, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L3
X = Concatenate(axis=3)([X,X_skip3])
X = conv_block(X, F3, filt_k, do_rate)
X = Conv2DTranspose(filters=F2, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L2
X = Concatenate(axis=3)([X,X_skip2])
X = conv_block(X, F2, filt_k, do_rate)
X = Conv2DTranspose(filters=F1, kernel_size=(filt_k,filt_k), strides=(2,2), padding='same')(X)
X = Activation('relu')(X)
# Decoder L1
X = Concatenate(axis=3)([X,X_skip1])
X = conv_block(X, F1, filt_k, do_rate)
# Output layer
X = Conv2D(filters=1, kernel_size=(1,1), strides=1, padding='same')(X)
# Reshape output
X = Reshape((input_shape[0],input_shape[1]))(X)
# Create model
model = Model(inputs = X_input, outputs = X, name='UNet5_postproc')
return model
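# Minimal usage sketch, added for illustration; the optimizer and loss are
# assumptions, not taken from the original repository.
if __name__ == '__main__':
    model = UNet5(input_shape=(96, 96, 125), do_rate=0.5)
    model.compile(optimizer='adam', loss='mse')  # loss choice is an assumption
    model.summary()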
|
{"hexsha": "2ecaad7820a61b92379be65155cd5b6726f339f8", "size": 16682, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models.py", "max_stars_repo_name": "todiian/deep-fus", "max_stars_repo_head_hexsha": "c403cf306ef70640ff2fb9376362b1b614806f30", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-12-08T13:00:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T07:29:30.000Z", "max_issues_repo_path": "src/models.py", "max_issues_repo_name": "todiian/deep-fus", "max_issues_repo_head_hexsha": "c403cf306ef70640ff2fb9376362b1b614806f30", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models.py", "max_forks_repo_name": "todiian/deep-fus", "max_forks_repo_head_hexsha": "c403cf306ef70640ff2fb9376362b1b614806f30", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8960573477, "max_line_length": 139, "alphanum_fraction": 0.6224673301, "include": true, "reason": "import numpy", "num_tokens": 5061}
|
function main(r::Robot)
path = go_to_west_south_corner_and_return_path!(r; go_around_barriers = true)
for i ∈ (North, East, South, West)
go_to_border_and_return_path!(r, i; markers = true)
end
go_by_path!(r, path)
end
|
{"hexsha": "d4b5b56898237ce595dd92c873887471bd2410cb", "size": 248, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "15.jl", "max_stars_repo_name": "Savko-fokin/mirea-progs", "max_stars_repo_head_hexsha": "796428c41bd106a5364d2092f7af9b1e0af09d93", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "15.jl", "max_issues_repo_name": "Savko-fokin/mirea-progs", "max_issues_repo_head_hexsha": "796428c41bd106a5364d2092f7af9b1e0af09d93", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "15.jl", "max_forks_repo_name": "Savko-fokin/mirea-progs", "max_forks_repo_head_hexsha": "796428c41bd106a5364d2092f7af9b1e0af09d93", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8, "max_line_length": 81, "alphanum_fraction": 0.6774193548, "num_tokens": 74}
|
# Derivation of the State Equation
```julia
using Symbolics
using Latexify
```
```julia
@variables t M m l g D_θ D_x u
@variables x(t) θ(t)
Dt = Differential(t)
v = Dt(x)
ω = Dt(θ)
Dx = Differential(x)
Dv = Differential(v)
Dθ = Differential(θ)
Dω = Differential(ω)
```
(::Differential) (generic function with 2 methods)
## Energy
The kinetic energy, potential energy, and dissipation energy are given below.
<br>
Define the kinetic energy:
```julia
K = 2//3*m*l^2*ω^2 + m*l*v*ω*cos(θ) + 1//2*(M+m)*v^2
```
\begin{equation}
\left( \frac{dx(t)}{dt} \right)^{2} \left( \frac{1}{2} M + \frac{1}{2} m \right) + \frac{2}{3} \left( \frac{d\theta(t)}{dt} \right)^{2} l^{2} m + l m \frac{dx(t)}{dt} \frac{d\theta(t)}{dt} \cos\left( \theta\left( t \right) \right)
\end{equation}
Define the potential energy:
```julia
U = m*g*l*cos(θ)
```
\begin{equation}
g l m \cos\left( \theta\left( t \right) \right)
\end{equation}
Define the dissipation energy:
```julia
D = 1//2*D_θ*ω^2 + 1//2*D_x*v^2
```
\begin{equation}
\frac{1}{2} \left( \frac{dx(t)}{dt} \right)^{2} D_{x} + \frac{1}{2} \left( \frac{d\theta(t)}{dt} \right)^{2} D_{\theta}
\end{equation}
## Substituting into Lagrange's Equations of Motion
Lagrange's equation of motion with dissipation, for a generalized coordinate $q$ with input $u_q$, is the following (reconstructed here from the terms used in the code below):
\begin{equation}
\frac{d}{dt}\left( \frac{\partial K}{\partial \dot{q}} \right) - \frac{\partial K}{\partial q} + \frac{\partial U}{\partial q} + \frac{\partial D}{\partial \dot{q}} = u_q
\end{equation}
### x direction
Time derivative of the partial derivative of the kinetic energy with respect to velocity:
```julia
dKdv_dot = K |> (Dv * Dt) |> expand_derivatives
```
\begin{equation}
2 \mathrm{\frac{d}{d t}}\left( \frac{dx(t)}{dt} \right) \left( \frac{1}{2} M + \frac{1}{2} m \right) + l m \cos\left( \theta\left( t \right) \right) \mathrm{\frac{d}{d t}}\left( \frac{d\theta(t)}{dt} \right) - \left( \frac{d\theta(t)}{dt} \right)^{2} l m \sin\left( \theta\left( t \right) \right)
\end{equation}
Partial derivative of the kinetic energy with respect to position:
```julia
dKdx = K |> Dx |> expand_derivatives
```
\begin{equation}
0
\end{equation}
Partial derivative of the potential energy with respect to position:
```julia
dUdx = U |> Dx |> expand_derivatives
```
\begin{equation}
0
\end{equation}
Partial derivative of the dissipation energy with respect to velocity:
```julia
dDdv = D |> Dv |> expand_derivatives
```
\begin{equation}
D_{x} \frac{dx(t)}{dt}
\end{equation}
Input in the x direction:
```julia
u_x = u
```
\begin{equation}
u
\end{equation}
### theta direction
Time derivative of the partial derivative of the kinetic energy with respect to angular velocity:
```julia
dKdω_dot = K |> (Dω * Dt) |> expand_derivatives
```
\begin{equation}
l m \cos\left( \theta\left( t \right) \right) \mathrm{\frac{d}{d t}}\left( \frac{dx(t)}{dt} \right) + \frac{4}{3} l^{2} m \mathrm{\frac{d}{d t}}\left( \frac{d\theta(t)}{dt} \right) - 2 l m \frac{dx(t)}{dt} \frac{d\theta(t)}{dt} \sin\left( \theta\left( t \right) \right)
\end{equation}
Partial derivative of the kinetic energy with respect to the angle:
```julia
dKdθ = K |> Dθ |> expand_derivatives
```
\begin{equation}
- l m \frac{dx(t)}{dt} \frac{d\theta(t)}{dt} \sin\left( \theta\left( t \right) \right)
\end{equation}
Partial derivative of the potential energy with respect to the angle:
```julia
dUdθ = U |> Dθ |> expand_derivatives
```
\begin{equation}
- g l m \sin\left( \theta\left( t \right) \right)
\end{equation}
Partial derivative of the dissipation energy with respect to angular velocity:
```julia
dDdω = D |> Dω |> expand_derivatives
```
\begin{equation}
D_{\theta} \frac{d\theta(t)}{dt}
\end{equation}
Input in the rotational direction:
```julia
u_θ = 0
```
0
## Solving for the Accelerations
The computations above yield the equations of motion.
Equation of motion in the x direction:
```julia
f_x = dKdv_dot - dKdx + dUdx + dDdv - u_x
```
\begin{equation}
D_{x} \frac{dx(t)}{dt} - u + 2 \mathrm{\frac{d}{d t}}\left( \frac{dx(t)}{dt} \right) \left( \frac{1}{2} M + \frac{1}{2} m \right) + l m \cos\left( \theta\left( t \right) \right) \mathrm{\frac{d}{d t}}\left( \frac{d\theta(t)}{dt} \right) - \left( \frac{d\theta(t)}{dt} \right)^{2} l m \sin\left( \theta\left( t \right) \right)
\end{equation}
Equation of motion in the theta direction:
```julia
f_θ = dKdω_dot - dKdθ + dUdθ + dDdω - u_θ
```
\begin{equation}
D_{\theta} \frac{d\theta(t)}{dt} + l m \cos\left( \theta\left( t \right) \right) \mathrm{\frac{d}{d t}}\left( \frac{dx(t)}{dt} \right) + \frac{4}{3} l^{2} m \mathrm{\frac{d}{d t}}\left( \frac{d\theta(t)}{dt} \right) - g l m \sin\left( \theta\left( t \right) \right) - l m \frac{dx(t)}{dt} \frac{d\theta(t)}{dt} \sin\left( \theta\left( t \right) \right)
\end{equation}
To obtain the state equation, solve for the accelerations.
Since the equations of motion form a simultaneous linear system in the acceleration and the angular acceleration, solve it with `Symbolics.solve_for`.
Along the way, substitute plain symbols to simplify the expressions.
```julia
@variables x_dot, θ_dot, x_ddot, θ_ddot, h, f
f_x = substitute(
f_x,
Dict([
expand_derivatives(Dt(v)) => x_ddot,
expand_derivatives(Dt(ω)) => θ_ddot,
expand_derivatives(Dt(x)) => x_dot,
expand_derivatives(Dt(θ)) => θ_dot,
x => h,
θ => f,
]))
f_θ = substitute(
f_θ,
Dict([
expand_derivatives(Dt(v)) => x_ddot,
expand_derivatives(Dt(ω)) => θ_ddot,
expand_derivatives(Dt(x)) => x_dot,
expand_derivatives(Dt(θ)) => θ_dot,
x => h,
θ => f,
]));
```
```julia
f_x
```
\begin{equation}
D_{x} x_{dot} - u + 2 x_{ddot} \left( \frac{1}{2} M + \frac{1}{2} m \right) + l m \theta_{ddot} \cos\left( f \right) - \theta_{dot}^{2} l m \sin\left( f \right)
\end{equation}
```julia
f_θ
```
\begin{equation}
D_{\theta} \theta_{dot} + l m x_{ddot} \cos\left( f \right) + \frac{4}{3} l^{2} m \theta_{ddot} - g l m \sin\left( f \right) - l m x_{dot} \theta_{dot} \sin\left( f \right)
\end{equation}
```julia
Symbolics.solve_for([f_x~0, f_θ~0], [x_ddot, θ_ddot])
```
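Since `f_x` and `f_θ` are linear in `x_ddot` and `θ_ddot`, `Symbolics.solve_for` is inverting a 2-by-2 linear system. Rearranged by hand as a sanity check, that system reads:
\begin{equation}
\begin{pmatrix} M + m & l m \cos\left( f \right) \\ l m \cos\left( f \right) & \frac{4}{3} l^{2} m \end{pmatrix}
\begin{pmatrix} x_{ddot} \\ \theta_{ddot} \end{pmatrix}
=
\begin{pmatrix} u - D_{x} x_{dot} + \theta_{dot}^{2} l m \sin\left( f \right) \\ g l m \sin\left( f \right) + l m x_{dot} \theta_{dot} \sin\left( f \right) - D_{\theta} \theta_{dot} \end{pmatrix}
\end{equation}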
|
{"hexsha": "e5033f0ce6419c88f5f8a49e43fb6b85a6916b1d", "size": 26209, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "excercise/julia_src/derivation.ipynb", "max_stars_repo_name": "YoshimitsuMatsutaIe/abc_2022", "max_stars_repo_head_hexsha": "9c6fb487c7ec22fdc57cc1eb0abec4c9786ad995", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "excercise/julia_src/derivation.ipynb", "max_issues_repo_name": "YoshimitsuMatsutaIe/abc_2022", "max_issues_repo_head_hexsha": "9c6fb487c7ec22fdc57cc1eb0abec4c9786ad995", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "excercise/julia_src/derivation.ipynb", "max_forks_repo_name": "YoshimitsuMatsutaIe/abc_2022", "max_forks_repo_head_hexsha": "9c6fb487c7ec22fdc57cc1eb0abec4c9786ad995", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0856760375, "max_line_length": 694, "alphanum_fraction": 0.5421420123, "converted": true, "num_tokens": 2382}
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "assert.h"
#include "chainparams.h"
#include "main.h"
#include "util.h"
#include <boost/assign/list_of.hpp>
using namespace boost::assign;
unsigned int pnSeed[] =
{
0x7b0272a7, 0xada272a7
};
struct SeedSpec6 {
uint8_t addr[16];
uint16_t port;
};
#include "chainparamsseeds.h"
//
// Main network
//
// Convert the pnSeeds6 array into usable address objects.
static void convertSeed6(std::vector<CAddress> &vSeedsOut, const SeedSpec6 *data, unsigned int count)
{
// It'll only connect to one or two seed nodes because once it connects,
// it'll get a pile of addresses with newer timestamps.
// Seed nodes are given a random 'last seen time' of between one and two
// weeks ago.
const int64_t nOneWeek = 7*24*60*60;
for (unsigned int i = 0; i < count; i++)
{
struct in6_addr ip;
memcpy(&ip, data[i].addr, sizeof(ip));
CAddress addr(CService(ip, data[i].port));
addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
vSeedsOut.push_back(addr);
}
}
class CMainParams : public CChainParams {
public:
CMainParams() {
// The message start string is designed to be unlikely to occur in normal data.
// The characters are rarely used upper ASCII, not valid as UTF-8, and produce
// a large 4-byte int at any alignment.
pchMessageStart[0] = 0xa2;
pchMessageStart[1] = 0x7a;
pchMessageStart[2] = 0xc1;
pchMessageStart[3] = 0x7c;
vAlertPubKey = ParseHex("046d4a82462704df06381163858c5998e85277d995967632348ecaa24668d48b7910bae9c427f6ae929907fc5a33b5d731dfb0e53a057a8200bc11eab87d693ef7");
nDefaultPort = 20302;
nRPCPort = 20304;
bnProofOfWorkLimit = CBigNum(~uint256(0) >> 20);
const char* pszTimestamp = "2015 DraftCoin Start!";
std::vector<CTxIn> vin;
vin.resize(1);
vin[0].scriptSig = CScript() << 0 << CBigNum(42) << vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
std::vector<CTxOut> vout;
vout.resize(1);
vout[0].SetEmpty();
CTransaction txNew(1, 1432374838, vin, vout, 0);
genesis.vtx.push_back(txNew);
genesis.hashPrevBlock = 0;
genesis.hashMerkleRoot = genesis.BuildMerkleTree();
genesis.nVersion = 1;
genesis.nTime = 1432374838;
genesis.nBits = bnProofOfWorkLimit.GetCompact();
genesis.nNonce = 330954;
hashGenesisBlock = genesis.GetHash();
/*
while (hashGenesisBlock > bnProofOfWorkLimit.getuint256()){
if (++genesis.nNonce==0) break;
hashGenesisBlock = genesis.GetHash();
}
printf("%s\n MAIN ", hashGenesisBlock.ToString().c_str());
printf("%s\n MAIN ", genesis.hashMerkleRoot.ToString().c_str());
printf("%x\n MAIN ", bnProofOfWorkLimit.GetCompact());
printf("%d\n MAIN ", genesis.nNonce);
*/
/*
Updated to support DNS masternode entries
*/
assert(hashGenesisBlock == uint256("0x00000626d583465e39cc5eec2020f639792843efcfb4182d508e6734d4cfdab6"));
assert(genesis.hashMerkleRoot == uint256("0x96b2046ee922b5724d19ade2ca7f44bc828e8b850f6e5416a98cf4fbea18ba87"));
vSeeds.push_back(CDNSSeedData("node1.btcdraft.com", "node1.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node2.btcdraft.com", "node2.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node3.btcdraft.com", "node3.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node4.btcdraft.com", "node4.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node5.btcdraft.com", "node5.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node6.btcdraft.com", "node6.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node7.btcdraft.com", "node7.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node8.btcdraft.com", "node8.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node9.btcdraft.com", "node9.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node10.btcdraft.com", "node10.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node11.btcdraft.com", "node11.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node12.btcdraft.com", "node12.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node13.btcdraft.com", "node13.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node14.btcdraft.com", "node14.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node15.btcdraft.com", "node15.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node16.btcdraft.com", "node16.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node17.btcdraft.com", "node17.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node18.btcdraft.com", "node18.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node19.btcdraft.com", "node19.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node20.btcdraft.com", "node20.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node21.btcdraft.com", "node21.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node22.btcdraft.com", "node22.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node23.btcdraft.com", "node23.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node24.btcdraft.com", "node24.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node25.btcdraft.com", "node25.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node26.btcdraft.com", "node26.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node27.btcdraft.com", "node27.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node28.btcdraft.com", "node28.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node29.btcdraft.com", "node29.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node30.btcdraft.com", "node30.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node31.btcdraft.com", "node31.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node32.btcdraft.com", "node32.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node33.btcdraft.com", "node33.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node34.btcdraft.com", "node34.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node35.btcdraft.com", "node35.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node36.btcdraft.com", "node36.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node37.btcdraft.com", "node37.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node38.btcdraft.com", "node38.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node39.btcdraft.com", "node39.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node40.btcdraft.com", "node40.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node41.btcdraft.com", "node41.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node42.btcdraft.com", "node42.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node43.btcdraft.com", "node43.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node44.btcdraft.com", "node44.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node45.btcdraft.com", "node45.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node46.btcdraft.com", "node46.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node47.btcdraft.com", "node47.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node48.btcdraft.com", "node48.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node49.btcdraft.com", "node49.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node50.btcdraft.com", "node50.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node51.btcdraft.com", "node51.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node52.btcdraft.com", "node52.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node53.btcdraft.com", "node53.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node54.btcdraft.com", "node54.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node55.btcdraft.com", "node55.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node56.btcdraft.com", "node56.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node57.btcdraft.com", "node57.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node58.btcdraft.com", "node58.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node59.btcdraft.com", "node59.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node60.btcdraft.com", "node60.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node61.btcdraft.com", "node61.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node62.btcdraft.com", "node62.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node63.btcdraft.com", "node63.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node64.btcdraft.com", "node64.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node65.btcdraft.com", "node65.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node66.btcdraft.com", "node66.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node67.btcdraft.com", "node67.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node68.btcdraft.com", "node68.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node69.btcdraft.com", "node69.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node70.btcdraft.com", "node70.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node71.btcdraft.com", "node71.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node72.btcdraft.com", "node72.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node73.btcdraft.com", "node73.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node74.btcdraft.com", "node74.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node75.btcdraft.com", "node75.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node76.btcdraft.com", "node76.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node77.btcdraft.com", "node77.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node78.btcdraft.com", "node78.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node79.btcdraft.com", "node79.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node80.btcdraft.com", "node80.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node81.btcdraft.com", "node81.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node82.btcdraft.com", "node82.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node83.btcdraft.com", "node83.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node84.btcdraft.com", "node84.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node85.btcdraft.com", "node85.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node86.btcdraft.com", "node86.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node87.btcdraft.com", "node87.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node88.btcdraft.com", "node88.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node89.btcdraft.com", "node89.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node90.btcdraft.com", "node90.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node91.btcdraft.com", "node91.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node92.btcdraft.com", "node92.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node93.btcdraft.com", "node93.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node94.btcdraft.com", "node94.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node95.btcdraft.com", "node95.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node96.btcdraft.com", "node96.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node97.btcdraft.com", "node97.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node98.btcdraft.com", "node98.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node99.btcdraft.com", "node99.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node100.btcdraft.com", "node100.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node101.btcdraft.com", "node101.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node102.btcdraft.com", "node102.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node103.btcdraft.com", "node103.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node104.btcdraft.com", "node104.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node105.btcdraft.com", "node105.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node106.btcdraft.com", "node106.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node107.btcdraft.com", "node107.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node108.btcdraft.com", "node108.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node109.btcdraft.com", "node109.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node110.btcdraft.com", "node110.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node111.btcdraft.com", "node111.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node112.btcdraft.com", "node112.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node113.btcdraft.com", "node113.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node114.btcdraft.com", "node114.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node115.btcdraft.com", "node115.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node116.btcdraft.com", "node116.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node117.btcdraft.com", "node117.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node118.btcdraft.com", "node118.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node119.btcdraft.com", "node119.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node120.btcdraft.com", "node120.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node121.btcdraft.com", "node121.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node122.btcdraft.com", "node122.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node123.btcdraft.com", "node123.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node124.btcdraft.com", "node124.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("node125.btcdraft.com", "node125.btcdraft.com"));
vSeeds.push_back(CDNSSeedData("dns.btcdraft.ca", "dns.btcdraft.ca"));
vSeeds.push_back(CDNSSeedData("204.188.161.57", "204.188.161.57"));
vSeeds.push_back(CDNSSeedData("149.56.101.219", "149.56.101.219"));
vSeeds.push_back(CDNSSeedData("158.69.199.130", "158.69.199.130"));
vSeeds.push_back(CDNSSeedData("204.188.161.61", "204.188.161.61"));
vSeeds.push_back(CDNSSeedData("99.248.230.242", "99.248.230.242"));
vSeeds.push_back(CDNSSeedData("68.206.29.192", "68.206.29.192"));
vSeeds.push_back(CDNSSeedData("209.195.104.6", "209.195.104.6"));
/*
Change to support Boost 1.6
base58Prefixes[PUBKEY_ADDRESS] = list_of(30);
base58Prefixes[SCRIPT_ADDRESS] = list_of(91);
base58Prefixes[SECRET_KEY] = list_of(117);
base58Prefixes[EXT_PUBLIC_KEY] = list_of(0x04)(0x73)(0x77)(0xEE);
base58Prefixes[EXT_SECRET_KEY] = list_of(0x04)(0x73)(0xDD)(0x55);
*/
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,30);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,91);
base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,117);
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x73)(0x77)(0xEE).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x73)(0xDD)(0x55).convert_to_container<std::vector<unsigned char> >();
/*
Change to support Boost 1.6
base58Prefixes[EXT_PUBLIC_KEY] = list_of(0x04)(0x73)(0x77)(0xEE);
base58Prefixes[EXT_SECRET_KEY] = list_of(0x04)(0x73)(0xDD)(0x55);
*/
convertSeed6(vFixedSeeds, pnSeed6_main, ARRAYLEN(pnSeed6_main));
nLastPOWBlock = 15000;
// Convert the pnSeeds array into usable address objects.
for (unsigned int i = 0; i < ARRAYLEN(pnSeed); i++)
{
// It'll only connect to one or two seed nodes because once it connects,
// it'll get a pile of addresses with newer timestamps.
// Seed nodes are given a random 'last seen time' of between one and two
// weeks ago.
const int64_t nOneWeek = 7*24*60*60;
struct in_addr ip;
memcpy(&ip, &pnSeed[i], sizeof(ip));
CAddress addr(CService(ip, GetDefaultPort()));
addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
vFixedSeeds.push_back(addr);
}
}
virtual const CBlock& GenesisBlock() const { return genesis; }
virtual Network NetworkID() const { return CChainParams::MAIN; }
virtual const vector<CAddress>& FixedSeeds() const {
return vFixedSeeds;
}
protected:
CBlock genesis;
vector<CAddress> vFixedSeeds;
};
static CMainParams mainParams;
//
// Testnet
//
class CTestNetParams : public CMainParams {
public:
CTestNetParams() {
// The message start string is designed to be unlikely to occur in normal data.
// The characters are rarely used upper ASCII, not valid as UTF-8, and produce
// a large 4-byte int at any alignment.
pchMessageStart[0] = 0xaa;
pchMessageStart[1] = 0x33;
pchMessageStart[2] = 0xff;
pchMessageStart[3] = 0x77;
bnProofOfWorkLimit = CBigNum(~uint256(0) >> 16);
vAlertPubKey = ParseHex("0433ec1132f34b5d4cafe6e9b3d848b710d75d8ec7a363d24554eac5256e70e5850b33a716492f2570f6f0cc5b2d3c01c2dfb25bf0a9327e634dee8a3b2d1039bb");
nDefaultPort = 12345;
nRPCPort = 12346;
strDataDir = "testnet";
// Modify the testnet genesis block so the timestamp is valid for a later start.
genesis.nBits = bnProofOfWorkLimit.GetCompact();
genesis.nNonce = 81778;
hashGenesisBlock = genesis.GetHash();
/*
while (hashGenesisBlock > bnProofOfWorkLimit.getuint256()){
if (++genesis.nNonce==0) break;
hashGenesisBlock = genesis.GetHash();
}
printf("%s\n TESTNET ", hashGenesisBlock.ToString().c_str());
printf("%s\n TESTNET ", genesis.hashMerkleRoot.ToString().c_str());
printf("%x\n TESTNET ", bnProofOfWorkLimit.GetCompact());
printf("%d\n TESTNET ", genesis.nNonce);
*/
assert(hashGenesisBlock == uint256("0x00007fe66ae67c7d800791c56234ef019ca1f85f9b059fcccc49447d33c22481"));
vFixedSeeds.clear();
vSeeds.clear();
/*
base58Prefixes[PUBKEY_ADDRESS] = list_of(110);
base58Prefixes[SCRIPT_ADDRESS] = list_of(196);
base58Prefixes[SECRET_KEY] = list_of(239);
base58Prefixes[EXT_PUBLIC_KEY] = list_of(0x04)(0x35)(0x17)(0xFF);
base58Prefixes[EXT_SECRET_KEY] = list_of(0x04)(0x35)(0x43)(0x99);
*/
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,110);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196);
base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,239);
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x17)(0xFF).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x43)(0x99).convert_to_container<std::vector<unsigned char> >();
convertSeed6(vFixedSeeds, pnSeed6_test, ARRAYLEN(pnSeed6_test));
nLastPOWBlock = 0x7fffffff;
}
virtual Network NetworkID() const { return CChainParams::TESTNET; }
};
static CTestNetParams testNetParams;
//
// Regression test
//
class CRegTestParams : public CTestNetParams {
public:
CRegTestParams() {
pchMessageStart[0] = 0xaf;
pchMessageStart[1] = 0xbb;
pchMessageStart[2] = 0x55;
pchMessageStart[3] = 0xdd;
bnProofOfWorkLimit = CBigNum(~uint256(0) >> 1);
genesis.nTime = 1412222222;
genesis.nBits = bnProofOfWorkLimit.GetCompact();
genesis.nNonce = 8;
hashGenesisBlock = genesis.GetHash();
nDefaultPort = 47992;
strDataDir = "regtest";
/*
while (hashGenesisBlock > bnProofOfWorkLimit.getuint256()){
if (++genesis.nNonce==0) break;
hashGenesisBlock = genesis.GetHash();
}
printf("%s\n REGTEST ", hashGenesisBlock.ToString().c_str());
printf("%s\n REGTEST ", genesis.hashMerkleRoot.ToString().c_str());
printf("%x\n REGTEST ", bnProofOfWorkLimit.GetCompact());
printf("%d\n REGTEST ", genesis.nNonce);
*/
assert(hashGenesisBlock == uint256("0x21a8f72f07cec34eb88c6088f382eeb868c9813adfb9edffbe40a5e076c14cb8"));
vSeeds.clear(); // Regtest mode doesn't have any DNS seeds.
}
virtual bool RequireRPCPassword() const { return false; }
virtual Network NetworkID() const { return CChainParams::REGTEST; }
};
static CRegTestParams regTestParams;
static CChainParams *pCurrentParams = &mainParams;
const CChainParams &Params() {
return *pCurrentParams;
}
void SelectParams(CChainParams::Network network) {
switch (network) {
case CChainParams::MAIN:
pCurrentParams = &mainParams;
break;
case CChainParams::TESTNET:
pCurrentParams = &testNetParams;
break;
case CChainParams::REGTEST:
pCurrentParams = ®TestParams;
break;
default:
assert(false && "Unimplemented network");
return;
}
}
bool SelectParamsFromCommandLine() {
bool fRegTest = GetBoolArg("-regtest", false);
bool fTestNet = GetBoolArg("-testnet", false);
if (fTestNet && fRegTest) {
return false;
}
if (fRegTest) {
SelectParams(CChainParams::REGTEST);
} else if (fTestNet) {
SelectParams(CChainParams::TESTNET);
} else {
SelectParams(CChainParams::MAIN);
}
return true;
}
|
{"hexsha": "9594a996f09f0d84c6c2d15e86ba6e72d75e6b99", "size": 21261, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/chainparams.cpp", "max_stars_repo_name": "btcdraft/draftcoin", "max_stars_repo_head_hexsha": "31cb050d3706b57c1340da38a0652d4b098bdddc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2018-01-01T23:34:50.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-06T05:05:38.000Z", "max_issues_repo_path": "src/chainparams.cpp", "max_issues_repo_name": "Tillkoeln/draftcoin", "max_issues_repo_head_hexsha": "52ef7c6fb4b5cc7191aa5a328f2366c5310cd19a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2018-01-31T19:02:36.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-12T18:19:47.000Z", "max_forks_repo_path": "src/chainparams.cpp", "max_forks_repo_name": "Tillkoeln/draftcoin", "max_forks_repo_head_hexsha": "52ef7c6fb4b5cc7191aa5a328f2366c5310cd19a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12.0, "max_forks_repo_forks_event_min_datetime": "2015-07-19T21:03:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-21T22:20:26.000Z", "avg_line_length": 49.7915690867, "max_line_length": 177, "alphanum_fraction": 0.708668454, "num_tokens": 6642}
|
@testset "BSpline" begin
include("constant.jl")
include("linear.jl")
include("quadratic.jl")
include("cubic.jl")
include("mixed.jl")
include("multivalued.jl")
include("non1.jl")
include("regularization.jl")
@test eltype(@inferred(interpolate(rand(Float16, 3, 3), BSpline(Linear())))) == Float16 # issue #308
end
|
{"hexsha": "f7c8a3ff9609ac8937d59049312fdaddd0900115", "size": 349, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/b-splines/runtests.jl", "max_stars_repo_name": "tiagopereira/Interpolations.jl", "max_stars_repo_head_hexsha": "7c7e11c1204694f4d57cd0d1a30c353f563af461", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 311, "max_stars_repo_stars_event_min_datetime": "2017-04-25T06:34:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T18:58:47.000Z", "max_issues_repo_path": "test/b-splines/runtests.jl", "max_issues_repo_name": "tiagopereira/Interpolations.jl", "max_issues_repo_head_hexsha": "7c7e11c1204694f4d57cd0d1a30c353f563af461", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 328, "max_issues_repo_issues_event_min_datetime": "2017-03-15T08:35:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T04:53:16.000Z", "max_forks_repo_path": "test/b-splines/runtests.jl", "max_forks_repo_name": "tiagopereira/Interpolations.jl", "max_forks_repo_head_hexsha": "7c7e11c1204694f4d57cd0d1a30c353f563af461", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 101, "max_forks_repo_forks_event_min_datetime": "2017-03-28T15:02:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T15:27:26.000Z", "avg_line_length": 26.8461538462, "max_line_length": 104, "alphanum_fraction": 0.6475644699, "num_tokens": 106}
|
from mpi4py import MPI
import numpy
arquivo = open("etapa4-2.txt","a")
def mpiPI(nroProcesso, rank):  # function that computes the approximate value of pi
N = 840
i = int(1 + (N/nroProcesso)*rank)
k = int((N/nroProcesso)*(rank+1))
somatorio = 0
for j in range(i,k+1):
somatorio += 1/(1+((j-0.5)/N)**2)
# print(i, k)  # interval bounds for this rank
# print((somatorio/N)*4)  # partial sum of this rank's interval
return (somatorio/N)*4
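# Note: the loop above is the midpoint rule applied to
# pi = the integral of 4/(1 + x^2) over [0, 1], with N subintervals;
# this rank evaluates grid points i through k of that partition.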
if __name__ == "__main__": #main -- Quarta versão
comm = MPI.COMM_WORLD
numDeProcessos = comm.Get_size()
rank = comm.Get_rank()  # rank of the current process
processoPI = numpy.zeros(1)  # initialize processoPI with 0
total = numpy.zeros(1)  # initialize total with 0
if(840 % numDeProcessos != 0):  # report an error if 840 is not divisible by the number of processes
if rank == 0:
print("ERROR!\nUse a number of processes that divides 840 evenly!")
else:  # 840 divides evenly, so split the work among the processes
idmaquina = MPI.Get_processor_name()  # hostname of this machine
# every rank computes its partial sum between the barriers below
comm.Barrier()
tinicial = MPI.Wtime()
processoPI[0]= mpiPI(numDeProcessos,rank)
#print("Resposta do processo [" + str(rank) + "] = " + str(processoPI[0]) + " ID Máquina = "+str(idmaquina))
comm.Reduce(processoPI,total,op = MPI.SUM,root = 0)
comm.Barrier()
tfinal=MPI.Wtime()
comm.send(tfinal - tinicial, dest=0)  # send this rank's elapsed time to rank 0
if rank == 0:
#print("Soma de todos processos:",total[0])
bufferAux = []
for i in range(0,numDeProcessos):
bufferAux.append(comm.recv(source = i))
#print("Tempo de execução:",max(bufferAux))#exibe o tempo do processo que demorou mai
arquivo.write(str(max(bufferAux))+"\n")
arquivo.close()
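# Example run (the process count is hypothetical; it must divide 840 evenly):
#   mpiexec -n 4 python MPI4-2.py
# Each run appends the slowest rank's elapsed time to etapa4-2.txt.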
|
{"hexsha": "d8d44cfe9ce9856b57d2396ea4f226c60961d8ca", "size": 1965, "ext": "py", "lang": "Python", "max_stars_repo_path": "cloud/MPI4-2.py", "max_stars_repo_name": "joaomota59/messagePassingInterface-MPI", "max_stars_repo_head_hexsha": "8e1515dbbc96d28248ac61e9c7b390c1cbded4b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-14T13:42:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-14T13:42:39.000Z", "max_issues_repo_path": "cloud/MPI4-2.py", "max_issues_repo_name": "joaomota59/messagePassingInterface-MPI", "max_issues_repo_head_hexsha": "8e1515dbbc96d28248ac61e9c7b390c1cbded4b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cloud/MPI4-2.py", "max_forks_repo_name": "joaomota59/messagePassingInterface-MPI", "max_forks_repo_head_hexsha": "8e1515dbbc96d28248ac61e9c7b390c1cbded4b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-29T20:39:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-29T20:39:27.000Z", "avg_line_length": 43.6666666667, "max_line_length": 117, "alphanum_fraction": 0.6132315522, "include": true, "reason": "import numpy", "num_tokens": 564}
|
import numpy as np
#######################################
# AND, OR, NAND, XOR using PERCEPTRON #
#######################################
def step_function(x):
    y = x > 0
    return y.astype(int)  # np.int is deprecated in recent NumPy; use the builtin int
def AND(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def NAND(x1, x2):
x = np.array([x1, x2])
w = np.array([-0.5, -0.5])
b = 0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def OR(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.2
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def XOR(x1, x2):
x = np.array([x1, x2])
w1 = np.array([[0.5, 0.5], [-0.5, -0.5]])
b1 = np.array([-0.2, 0.7])
w2 = np.array([0.5, 0.5])
b2 = -0.7
    s = np.dot(w1, x) + b1  # rows of w1 hold the OR and NAND weights, so multiply as w1 . x
s_activated = step_function(s)
y = np.sum(w2 * s_activated) + b2
if y <= 0:
return 0
else:
return 1
def test_perceptrons(x):
print('#####################')
print('TESTING PERCEPTRONS')
print('#####################')
for data in x:
print("AND(%d,%d): %d" % (data[0], data[1], AND(data[0], data[1])))
for data in x:
print("NAND(%d,%d): %d" % (data[0], data[1], NAND(data[0], data[1])))
for data in x:
print("OR(%d,%d): %d" % (data[0], data[1], OR(data[0], data[1])))
for data in x:
print("XOR(%d,%d): %d" % (data[0], data[1], XOR(data[0], data[1])))
print('#####################')
if __name__=='__main__':
test_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
test_perceptrons(test_data)
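# For reference, the truth tables printed by test_perceptrons:
#   AND : 0 0 0 1
#   NAND: 1 1 1 0
#   OR  : 0 1 1 1
#   XOR : 0 1 1 0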
|
{"hexsha": "512d46351a8d3a2708e17988af5997083f644cda", "size": 1732, "ext": "py", "lang": "Python", "max_stars_repo_path": "week1/JY/perceptron.py", "max_stars_repo_name": "maybedy/MLDLStudy", "max_stars_repo_head_hexsha": "abe121bc73c1958f1cd2d30fd30384137140187b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "week1/JY/perceptron.py", "max_issues_repo_name": "maybedy/MLDLStudy", "max_issues_repo_head_hexsha": "abe121bc73c1958f1cd2d30fd30384137140187b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "week1/JY/perceptron.py", "max_forks_repo_name": "maybedy/MLDLStudy", "max_forks_repo_head_hexsha": "abe121bc73c1958f1cd2d30fd30384137140187b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.619047619, "max_line_length": 77, "alphanum_fraction": 0.4301385681, "include": true, "reason": "import numpy", "num_tokens": 651}
|
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package band_graph
# Band diagram graph
#
import os
import io
import sys
from numpy import *
#matplotlib
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import zipfile
from dat_file import dat_file
from plot_io import get_plot_file_info
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QActionGroup,QTabWidget,QMenu,QApplication
from PyQt5.QtGui import QIcon,QPixmap,QImage, QScreen
from PyQt5.QtGui import QPainter,QFont,QColor,QPen,QFontMetrics,QPainterPath
#calpath
from icon_lib import icon_get
from open_save_dlg import save_as_filter
from cal_path import get_sim_path
from cal_path import get_materials_path
from cal_path import get_default_material_path
from lock import get_lock
from epitaxy import get_epi
from PyQt5.QtCore import pyqtSignal
from json_material_db_item import json_material_db_item
from dat_file_math import dat_file_max_min
from gpvdm_graph import gpvdm_graph
class band():
def __init__(self):
self.Eg=0.0
self.Xi=0.0
self.dx=0.0
self.E_min=0.0
self.E_max=0.0
self.r=1
self.g=0
self.b=0
self.alpha=0
self.material=""
self.name=""
def cal_min_max(self):
self.E_min=self.Xi-self.Eg
self.E_max=self.Xi
if self.material=="metal":
self.E_min=self.Xi-1.0
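# Note on the band data above: Xi is drawn as the upper band edge (labelled
# LUMO in draw_overlay) and Xi - Eg as the lower edge (HOMO); metals are given
# a fixed 1 eV band purely for display purposes.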
class band_graph2(gpvdm_graph):
def __init__(self):
gpvdm_graph.__init__(self)
self.use_epi_for_x=True
self.bands=[]
self.build_bands()
self.setFocusPolicy( Qt.ClickFocus )
self.setFocus()
self.menu_show_energies=self.menu.addAction(_("Show energies"))
self.menu_show_energies.triggered.connect(self.menu_toggle)
self.menu_show_energies.setCheckable(True)
self.menu_show_energies.setChecked(True)
self.epi=get_epi()
def draw_graph(self):
self.load_data(self.optical_mode_file)
self.repaint()
def build_bands(self):
x_pos=0.0
self.epi=get_epi()
epi_len=self.epi.ylen()
self.bands=[]
for layer in self.epi.layers:
mat_file=os.path.join(get_materials_path(),layer.optical_material,'data.json')
mat_db_item=json_material_db_item()
mat_db_item.load(mat_file)
if self.normalize_x_axis==False:
dy=layer.dy
else:
dy=epi_len/len(self.epi.layers)
read_from_db=True
if layer.layer_type=="active":
if self.active_layer_from_dos==True:
read_from_db=False
if read_from_db==True:
if mat_db_item.electrical_constants.material_blend==False:
b=band()
b.Xi=mat_db_item.electrical_constants.Xi0
b.Eg=mat_db_item.electrical_constants.Eg0
b.dx=dy
b.r=layer.color_r*255
b.g=layer.color_g*255
b.b=layer.color_b*255
b.alpha=layer.color_alpha*255
b.material=mat_db_item.material_type
b.name=layer.shape_name
b.cal_min_max()
self.bands.append(b)
else:
b=band()
b.Xi=mat_db_item.electrical_constants.Xi0
b.Eg=mat_db_item.electrical_constants.Eg0
b.dx=dy/2.0
b.r=layer.color_r*255
b.g=layer.color_g*255
b.b=layer.color_b*255
b.alpha=layer.color_alpha*255
b.material=mat_db_item.material_type
b.name=layer.shape_name
b.cal_min_max()
self.bands.append(b)
b=band()
b.Xi=mat_db_item.electrical_constants.Xi1
b.Eg=mat_db_item.electrical_constants.Eg1
b.dx=dy/2.0
b.r=layer.color_b*255
b.g=layer.color_r*255
b.b=layer.color_g*255
b.alpha=layer.color_alpha*255
b.material=mat_db_item.material_type
b.name=layer.shape_name
b.cal_min_max()
self.bands.append(b)
else:
b=band()
b.Xi=-float(layer.shape_dos.Xi)
b.Eg=float(layer.shape_dos.Eg)
b.dx=dy
b.r=layer.color_r*255
b.g=layer.color_g*255
b.b=layer.color_b*255
b.alpha=layer.color_alpha*255
b.material=mat_db_item.material_type
b.name=layer.shape_name
b.cal_min_max()
self.bands.append(b)
if len(self.bands)>0:
self.E_max=self.bands[0].E_max
self.E_min=self.bands[0].E_min
for b in self.bands:
if b.E_max>self.E_max:
self.E_max=b.E_max
if b.E_min<self.E_min:
self.E_min=b.E_min
self.E_min=self.E_min-1.0
self.E_max=self.E_max+0.5
def draw_overlay(self,qp,svg=False):
x=0.0
for b in self.bands:
if b.material!="metal":
x0=self.to_screen_x(x)
dx=self.to_screen_x(x+b.dx)-x0
y0=self.to_screen_y2(b.Xi)
self.drawRect(qp,x0, y0, dx,12,b.r,b.g,b.b,b.alpha,svg=svg) #LUMO
if self.show_energies==True:
self.drawText(qp, x0+dx/2, y0-10 , "{:.1f}".format(b.Xi)+" eV",20,20,20,svg=svg)
x0=self.to_screen_x(x)
dx=self.to_screen_x(x+b.dx)-x0
y0=self.to_screen_y2(b.Xi-b.Eg) #HOMO
self.drawRect(qp,x0, y0, dx,12,b.r,b.g,b.b,b.alpha,svg=svg)
if self.show_energies==True:
self.drawText(qp, x0+dx/2, y0+30 , "{:.1f}".format(b.Xi-b.Eg)+" eV",20,20,20,svg=svg)
if self.show_labels==True:
self.drawText(qp, x0+dx/2, self.to_screen_y2(b.Xi-b.Eg/2.0) , b.name,20,20,20,svg=svg)
else:
x0=self.to_screen_x(x)
dx=self.to_screen_x(x+b.dx)-x0
y0=self.to_screen_y2(b.Xi)
self.drawRect(qp,x0, y0, dx,5,b.r,b.g,b.b,b.alpha,svg=svg)
if self.show_energies==True:
self.drawText(qp, x0+dx/2, y0-10 , "{:.1f}".format(b.Xi)+" eV",20,20,20,svg=svg)
if self.show_labels==True:
self.drawText(qp, x0+dx/2, self.to_screen_y2(b.Xi-1.0) , b.name,20,20,20,svg=svg)
x=x+b.dx
|
{"hexsha": "a092eeaa645af2411eea9221ac675e228a5235f8", "size": 6443, "ext": "py", "lang": "Python", "max_stars_repo_path": "gpvdm_gui/gui/band_graph2.py", "max_stars_repo_name": "roderickmackenzie/gpvdm", "max_stars_repo_head_hexsha": "914fd2ee93e7202339853acaec1d61d59b789987", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2016-09-13T08:58:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T07:04:52.000Z", "max_issues_repo_path": "gpvdm_gui/gui/band_graph2.py", "max_issues_repo_name": "roderickmackenzie/gpvdm", "max_issues_repo_head_hexsha": "914fd2ee93e7202339853acaec1d61d59b789987", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-11-11T12:33:02.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-08T00:48:08.000Z", "max_forks_repo_path": "gpvdm_gui/gui/band_graph2.py", "max_forks_repo_name": "roderickmackenzie/gpvdm", "max_forks_repo_head_hexsha": "914fd2ee93e7202339853acaec1d61d59b789987", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-01-03T06:17:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-01T15:59:00.000Z", "avg_line_length": 27.4170212766, "max_line_length": 119, "alphanum_fraction": 0.7147291634, "include": true, "reason": "from numpy", "num_tokens": 2013}
|
Describe Users/BeachBabe here.
20090212 13:04:54 Howdy! It finally looks like it's been toned down enough that the advert flag can be taken off. The use of phrases like "best in merchandise and prices" and so on sounded like it was written by the owner. I put the other flag, for a photo request, back up, since it's much preferred to have a picture of the actual store over a logo. For more info about how things are generally done around here, I would advise that you read the Welcome to the Wiki. Users/JoePomidor
20090212 13:08:25 Gotcha. Maybe next time I'm in... I'll let the staff know to put up some pics! :) Thanks for your help. Take care Users/BeachBabe
|
{"hexsha": "e0bed39f4c643b860bda8322389036d58f5ea6ee", "size": 670, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/BeachBabe.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/BeachBabe.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/BeachBabe.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 95.7142857143, "max_line_length": 487, "alphanum_fraction": 0.7805970149, "num_tokens": 171}
|
#pragma once
#include <boost/filesystem.hpp>
namespace configure
{
class TemporaryDirectory
{
private:
boost::filesystem::path _dir;
public:
TemporaryDirectory();
~TemporaryDirectory();
public:
boost::filesystem::path const& path() const;
};
}
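// Usage sketch (assumption: the directory is created in the constructor and
// removed in the destructor, in the usual RAII style):
//   {
//       configure::TemporaryDirectory tmp;
//       auto scratch = tmp.path() / "scratch.txt";
//       // ... write to scratch ...
//   } // tmp (and, presumably, its contents) is cleaned up here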
|
{"hexsha": "98b9669bdfd7c2a0e270a364429114f68cef9017", "size": 262, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/configure/TemporaryDirectory.hpp", "max_stars_repo_name": "hotgloupi/configure", "max_stars_repo_head_hexsha": "888cf725c93df5a1cf01794cc0a581586a82855c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-11-13T10:37:35.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-13T10:37:35.000Z", "max_issues_repo_path": "src/configure/TemporaryDirectory.hpp", "max_issues_repo_name": "hotgloupi/configure", "max_issues_repo_head_hexsha": "888cf725c93df5a1cf01794cc0a581586a82855c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19.0, "max_issues_repo_issues_event_min_datetime": "2015-02-10T17:18:58.000Z", "max_issues_repo_issues_event_max_datetime": "2015-07-11T11:31:08.000Z", "max_forks_repo_path": "src/configure/TemporaryDirectory.hpp", "max_forks_repo_name": "hotgloupi/configure", "max_forks_repo_head_hexsha": "888cf725c93df5a1cf01794cc0a581586a82855c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.1, "max_line_length": 46, "alphanum_fraction": 0.713740458, "num_tokens": 61}
|
\documentclass{ut-thesis}
\usepackage{amsmath}
\usepackage[mathletters]{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{array}
\usepackage[normalem]{ulem}
\newcommand{\textsubscr}[1]{\ensuremath{_{\scriptsize\textrm{#1}}}}
\usepackage[breaklinks=true,linktocpage,colorlinks]{hyperref}
\usepackage{url}
\usepackage{graphicx}
$if(numbersections)$
$else$
\setcounter{section}{1}
\setcounter{secnumdepth}{0}
\setcounter{tocdepth}{2}
$endif$
%\VerbatimFootnotes % allows verbatim text in footnotes
$for(header-includes)$
$header-includes$
$endfor$
$if(date)$
\date{$date$}
$endif$
\degree{PhD}
\department{Stuff}
\gradyear{2021}
\author{Your Name}
\title{The Grand Unified Theory of Stuff}
\begin{document}
%% *** NOTE ***
%% You should put all of your `\newcommand', `\newenvironment', and
%% `\newtheorem's (in other words, all the global definitions that
%% you will need throughout your thesis) in a separate file and use
%% "\input{filename}" to input it here.
%% This sets the page style and numbering for preliminary sections.
\begin{preliminary}
%% This generates the title page from the information given above.
\maketitle
%% There should be NOTHING between the title page and abstract.
%% This generates the abstract page, with the line spacing adjusted
%% according to SGS guidelines.
\begin{abstract}
%% *** Put your Abstract here. ***
%% (At most 150 words for M.Sc. or 350 words for Ph.D.)
This is the abstract.
\end{abstract}
%% Anything placed between the abstract and table of contents will
%% appear on a separate page since the abstract ends with \newpage
%% and the table of contents starts with \clearpage.
%% This generates a "dedication" section, if needed.
%% (uncomment to have it appear in the document)
begin{dedication}
To \emph{academia}.
\end{dedication}
%% The `dedication' and `acknowledgements' sections do not create new
%% pages so if you want the two sections to appear on separate pages,
%% you should put an explicit \newpage between them.
%% This generates an "acknowledgements" section, if needed.
%% (comment out to remove it from the document)
\begin{acknowledgements}
Thank you, the reader.
\end{acknowledgements}
%% This generates the Table of Contents (on a separate page).
\tableofcontents
%% This generates the List of Tables (on a separate page), if needed.
%% (comment out to remove it from the document)
\listoftables
%% This generates the List of Figures (on a separate page), if needed.
%% (comment out to remove it from the document)
\listoffigures
%% End of the preliminary sections: reset page style and numbering.
\end{preliminary}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Put your Chapters here; the easiest way to do this is to keep %%
%% each chapter in a separate file and `\include' all the files %%
%% right here. Note that each chapter file should start with the %%
%% line "\chapter{ChapterName}". Note that using `\include' %%
%% instead of `\input' makes each chapter start on a new page. %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% *** Include chapter files here. ***
$body$
%% This adds a line for the Bibliography in the Table of Contents.
%\addcontentsline{toc}{chapter}{Bibliography}
%% *** Set the bibliography style. ***
%% (change according to your preference)
% \bibliographystyle{plain}
%% *** Set the bibliography file. ***
%% ("thesis.bib" by default; change if needed)
% \bibliography{thesis}
%% *** NOTE ***
%% If you don't use bibliography files, comment out the previous line
%% and use \begin{thebibliography}...\end{thebibliography}. (In that
%% case, you should probably put the bibliography in a separate file
%% and `\include' or `\input' it here).
\end{document}
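%% Usage note: this file doubles as a pandoc template -- the $if(...)$,
%% $for(...)$ and $body$ markers are substituted by pandoc, e.g. (illustrative):
%%   pandoc chapters.md --template=default.tex -o thesis.tex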
|
{"hexsha": "a12a1d9bf08470d1d978f2b3a0d0a0f2a503a8bc", "size": 3786, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "report/default.tex", "max_stars_repo_name": "blorente/Starcraft-II-Replay-Analysis", "max_stars_repo_head_hexsha": "6fc195aae83ee89c1e6c7732782d6c2fb1905e6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "report/default.tex", "max_issues_repo_name": "blorente/Starcraft-II-Replay-Analysis", "max_issues_repo_head_hexsha": "6fc195aae83ee89c1e6c7732782d6c2fb1905e6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "report/default.tex", "max_forks_repo_name": "blorente/Starcraft-II-Replay-Analysis", "max_forks_repo_head_hexsha": "6fc195aae83ee89c1e6c7732782d6c2fb1905e6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2892561983, "max_line_length": 70, "alphanum_fraction": 0.6975699947, "num_tokens": 955}
|
[STATEMENT]
lemma transitionE:
fixes P :: pi
and \<alpha> :: freeRes
and P' :: pi
and P'' :: pi
and a :: name
and u :: name
and x :: name
shows "P \<Longrightarrow>\<^sub>l\<alpha> \<prec> P' \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P'' \<and> P'' \<longmapsto>\<alpha> \<prec> P''' \<and> P''' \<Longrightarrow>\<^sub>\<tau> P'" (is "_ \<Longrightarrow> ?thesis1")
and "\<lbrakk>P \<Longrightarrow>\<^sub>la<\<nu>x> \<prec> P'; x \<sharp> P\<rbrakk> \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto>a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'"
and "\<lbrakk>P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P'\<rbrakk> \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto>a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (P \<Longrightarrow>\<^sub>l \<alpha> \<prec> P' \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P'' \<and> P'' \<longmapsto> \<alpha> \<prec> P''' \<and> P''' \<Longrightarrow>\<^sub>\<tau> P') &&& (\<lbrakk>P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'; x \<sharp> P\<rbrakk> \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P') &&& (P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P')
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. P \<Longrightarrow>\<^sub>l \<alpha> \<prec> P' \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P'' \<and> P'' \<longmapsto> \<alpha> \<prec> P''' \<and> P''' \<Longrightarrow>\<^sub>\<tau> P'
2. \<lbrakk>P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'; x \<sharp> P\<rbrakk> \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
3. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
assume "P \<Longrightarrow>\<^sub>l\<alpha> \<prec> P'"
[PROOF STATE]
proof (state)
this:
P \<Longrightarrow>\<^sub>l \<alpha> \<prec> P'
goal (3 subgoals):
1. P \<Longrightarrow>\<^sub>l \<alpha> \<prec> P' \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P'' \<and> P'' \<longmapsto> \<alpha> \<prec> P''' \<and> P''' \<Longrightarrow>\<^sub>\<tau> P'
2. \<lbrakk>P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'; x \<sharp> P\<rbrakk> \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
3. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
thus ?thesis1
[PROOF STATE]
proof (prove)
using this:
P \<Longrightarrow>\<^sub>l \<alpha> \<prec> P'
goal (1 subgoal):
1. \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P'' \<and> P'' \<longmapsto> \<alpha> \<prec> P''' \<and> P''' \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
by(auto simp add: transition_def residual.inject)
[PROOF STATE]
proof (state)
this:
\<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P'' \<and> P'' \<longmapsto> \<alpha> \<prec> P''' \<and> P''' \<Longrightarrow>\<^sub>\<tau> P'
goal (2 subgoals):
1. \<lbrakk>P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'; x \<sharp> P\<rbrakk> \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
2. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'; x \<sharp> P\<rbrakk> \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
2. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
assume "P \<Longrightarrow>\<^sub>la<\<nu>x> \<prec> P'" and "x \<sharp> P"
[PROOF STATE]
proof (state)
this:
P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'
x \<sharp> P
goal (2 subgoals):
1. \<lbrakk>P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'; x \<sharp> P\<rbrakk> \<Longrightarrow> \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
2. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
thus "\<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto>a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'"
[PROOF STATE]
proof (prove)
using this:
P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'
x \<sharp> P
goal (1 subgoal):
1. \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
using [[hypsubst_thin = true]]
[PROOF STATE]
proof (prove)
using this:
P \<Longrightarrow>\<^sub>l a<\<nu>x> \<prec> P'
x \<sharp> P
goal (1 subgoal):
1. \<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
apply(auto simp add: transition_def residualInject name_abs_eq)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> \<exists>P'' P'''a. (P, P'''a) \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>* \<and> P'''a \<longmapsto> a<\<nu>x> \<prec> P'' \<and> (P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
apply(rule_tac x="[(x, y)] \<bullet> P''" in exI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> \<exists>P'''a. (P, P'''a) \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>* \<and> P'''a \<longmapsto> a<\<nu>x> \<prec> [(x, y)] \<bullet> P'' \<and> ([(x, y)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
apply(rule_tac x=P' in exI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>* \<and> P' \<longmapsto> a<\<nu>x> \<prec> [(x, y)] \<bullet> P'' \<and> ([(x, y)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
apply(clarsimp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> P' \<longmapsto> a<\<nu>x> \<prec> [(y, x)] \<bullet> P'' \<and> ([(y, x)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
apply(auto)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> P' \<longmapsto> a<\<nu>x> \<prec> [(y, x)] \<bullet> P''
2. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> ([(y, x)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
apply(subgoal_tac "x \<sharp> P''")
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''; x \<sharp> P''\<rbrakk> \<Longrightarrow> P' \<longmapsto> a<\<nu>x> \<prec> [(y, x)] \<bullet> P''
2. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> x \<sharp> P''
3. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> ([(y, x)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
apply(simp add: alphaBoundResidual name_swap)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> x \<sharp> P''
2. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> ([(y, x)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
using freshChain
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?P \<Longrightarrow>\<^sub>\<tau> ?P'; ?x \<sharp> ?P\<rbrakk> \<Longrightarrow> ?x \<sharp> ?P'
goal (2 subgoals):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> x \<sharp> P''
2. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> ([(y, x)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
apply(force dest: freshBoundDerivative)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> ([(y, x)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
using eqvtChainI
[PROOF STATE]
proof (prove)
using this:
?P \<Longrightarrow>\<^sub>\<tau> ?P' \<Longrightarrow> ?perm \<bullet> ?P \<Longrightarrow>\<^sub>\<tau> ?perm \<bullet> ?P'
goal (1 subgoal):
1. \<And>P' y P'' P'''. \<lbrakk>x \<sharp> P; (P, P') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; P' \<longmapsto> a<\<nu>y> \<prec> P''; (P'', P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*; x \<noteq> y; x \<sharp> P'''\<rbrakk> \<Longrightarrow> ([(y, x)] \<bullet> P'', [(y, x)] \<bullet> P''') \<in> {(P, P'). P \<longmapsto> \<tau> \<prec> P'}\<^sup>*
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<exists>P'' P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<\<nu>x> \<prec> P'' \<and> P'' \<Longrightarrow>\<^sub>\<tau> P'
goal (1 subgoal):
1. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
assume PTrans: "P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P'"
[PROOF STATE]
proof (state)
this:
P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P'
goal (1 subgoal):
1. P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P' \<Longrightarrow> \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
thus "\<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'"
[PROOF STATE]
proof (prove)
using this:
P \<Longrightarrow>\<^sub>lu in P''\<rightarrow>a<x> \<prec> P'
goal (1 subgoal):
1. \<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
[PROOF STEP]
by(auto simp add: inputTransition_def)
[PROOF STATE]
proof (state)
this:
\<exists>P'''. P \<Longrightarrow>\<^sub>\<tau> P''' \<and> P''' \<longmapsto> a<x> \<prec> P'' \<and> P''[x::=u] \<Longrightarrow>\<^sub>\<tau> P'
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6432, "file": "Pi_Calculus_Weak_Late_Step_Semantics", "length": 24}
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#include <boost/test/unit_test.hpp>
#include "Common/boost-taef.h"
#include "RouterTestHelper.h"
using namespace Transport;
using namespace Common;
using namespace Federation;
using namespace std;
namespace Naming
{
namespace TestHelper
{
RouteAsyncOperation::RouteAsyncOperation(
MessageUPtr && request,
__in INamingStoreServiceRouter & messageProcessor,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & root)
: AsyncOperation(callback, root)
, request_(std::move(request))
, messageProcessor_(messageProcessor)
, timeout_(timeout)
{
messageId_ = request_->MessageId;
}
ErrorCode RouteAsyncOperation::End(AsyncOperationSPtr const & asyncOperation, MessageUPtr & result)
{
auto casted = AsyncOperation::End<RouteAsyncOperation>(asyncOperation);
swap(casted->reply_, result);
return casted->Error;
}
void RouteAsyncOperation::OnStart(AsyncOperationSPtr const & thisSPtr)
{
messageProcessor_.BeginProcessRequest(
std::move(request_),
timeout_,
[&](AsyncOperationSPtr const & operation) { OnProcessRequestComplete(operation); },
thisSPtr);
}
void RouteAsyncOperation::OnProcessRequestComplete(AsyncOperationSPtr const & operation)
{
auto error = messageProcessor_.EndProcessRequest(operation, reply_);
this->TryComplete(operation->Parent, error);
}
//
// *** RouterTestHelper
//
RouterTestHelper::RouterTestHelper()
: lock_()
, pendingRequests_()
, serviceNodes_()
, blocking_()
, blockAll_(false)
, closed_(false)
, pendingOperationsEmptyEvent_()
{
}
void RouterTestHelper::Initialize(__in INamingStoreServiceRouter & messageProcessor, NodeId predNodeId, NodeId succNodeId)
{
NodeId thisNodeId = messageProcessor.Id;
NodeIdRange range(
(thisNodeId == predNodeId && thisNodeId == succNodeId)
? NodeIdRange::Full
: NodeIdRange(thisNodeId.GetPredMidPoint(predNodeId), thisNodeId.GetSuccMidPoint(succNodeId)));
Trace.WriteNoise(Constants::TestSource, "{0} owns range {1}", thisNodeId, range);
AcquireExclusiveLock lock(lock_);
auto iter = find_if(serviceNodes_.begin(), serviceNodes_.end(),
[&](ServiceNodeEntry const & entry) -> bool { return entry.MessageProcessor.Id == thisNodeId; });
CODING_ERROR_ASSERT(iter == serviceNodes_.end());
serviceNodes_.push_back(ServiceNodeEntry(range, messageProcessor));
}
ErrorCode RouterTestHelper::Close()
{
// Mark that the object is closed, so no more operations can be started
{
AcquireExclusiveLock lock(lock_);
closed_ = true;
if (pendingRequests_.empty())
{
return ErrorCode(ErrorCodeValue::Success);
}
}
// Wait for all operations to finish
bool operationsFinished = pendingOperationsEmptyEvent_.WaitOne(10000);
if (operationsFinished)
{
return ErrorCode(ErrorCodeValue::Success);
}
else
{
return ErrorCode(ErrorCodeValue::Timeout);
}
}
void RouterTestHelper::BlockNextRequest(NodeId const receiver)
{
AcquireExclusiveLock lock(lock_);
blocking_.push_back(receiver);
}
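        // Note: blocking is one-shot -- the first request routed to `receiver`
        // completes with Timeout and the entry is removed again in
        // OnBeginRouteRequest; use BlockAllRequests for persistent blocking.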
AsyncOperationSPtr RouterTestHelper::OnBeginRoute(
MessageUPtr && message,
NodeId nodeId,
uint64 instance,
std::wstring const & ringName,
bool useExactRouting,
TimeSpan retryTimeout,
TimeSpan timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return BeginRouteRequest(std::move(message), nodeId, instance, ringName, useExactRouting, retryTimeout, timeout, callback, parent);
}
ErrorCode RouterTestHelper::EndRoute(AsyncOperationSPtr const & operation)
{
MessageUPtr blankReply;
return EndRouteRequest(operation, blankReply);
}
void RouterTestHelper::BlockAllRequests()
{
AcquireExclusiveLock lock(lock_);
blockAll_ = true;
}
void RouterTestHelper::UnblockAllRequests()
{
AcquireExclusiveLock lock(lock_);
blockAll_ = false;
}
AsyncOperationSPtr RouterTestHelper::OnBeginRouteRequest(
MessageUPtr && request,
NodeId nodeId,
uint64,
wstring const &,
bool,
TimeSpan,
TimeSpan timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
// Routing destination is retrieved based on node ID coverage
INamingStoreServiceRouter * messageProcessor = NULL;
bool shouldRoute = false;
AsyncOperationSPtr routeOperation;
{
AcquireExclusiveLock lock(lock_);
if (closed_)
{
Trace.WriteNoise(Constants::TestSource, "Blocking request from {0}: object closed", nodeId);
routeOperation = make_shared<CompletedAsyncOperation>(
ErrorCode(ErrorCodeValue::ObjectClosed),
callback,
parent);
}
else
{
auto iter = find_if(serviceNodes_.begin(), serviceNodes_.end(),
[&](ServiceNodeEntry const & entry) -> bool { return entry.Range.Contains(nodeId); });
VERIFY_IS_TRUE(iter != serviceNodes_.end());
messageProcessor = &(iter->MessageProcessor);
if (find(blocking_.begin(), blocking_.end(), nodeId) == blocking_.end())
{
shouldRoute = true;
}
else
{
blocking_.erase(find(blocking_.begin(), blocking_.end(), nodeId));
shouldRoute = false;
}
if (shouldRoute && !blockAll_)
{
request->Headers.Add(MessageIdHeader());
request->Headers.Add(ExpectsReplyHeader(true));
auto messageId = request->MessageId;
Trace.WriteNoise(Constants::TestSource, "Routing request {0} for {1} to {2}", *request, nodeId, messageProcessor->Id);
routeOperation = make_shared<RouteAsyncOperation>(
std::move(request),
*messageProcessor,
timeout,
callback,
parent);
pendingRequests_[messageId] = routeOperation;
}
else
{
Trace.WriteNoise(Constants::TestSource, "Blocking request for {0} to {1}", nodeId, messageProcessor->Id);
routeOperation = make_shared<CompletedAsyncOperation>(
ErrorCode(ErrorCodeValue::Timeout),
callback,
parent);
}
}
}
routeOperation->Start(routeOperation);
return routeOperation;
}
ErrorCode RouterTestHelper::EndRouteRequest(AsyncOperationSPtr const & asyncOperation, MessageUPtr & reply) const
{
ErrorCode error;
if (asyncOperation->Error.IsError(ErrorCodeValue::Timeout) || asyncOperation->Error.IsError(ErrorCodeValue::ObjectClosed))
{
// operation was blocked by RouterTestHelper
error = CompletedAsyncOperation::End(asyncOperation);
}
else
{
error = RouteAsyncOperation::End(asyncOperation, reply);
{
AcquireExclusiveLock lock(lock_);
auto iter = pendingRequests_.find(AsyncOperation::Get<RouteAsyncOperation>(asyncOperation)->MessageId);
VERIFY_IS_TRUE(iter != pendingRequests_.end());
pendingRequests_.erase(iter);
if (closed_ && pendingRequests_.empty())
{
pendingOperationsEmptyEvent_.Set();
}
}
}
return error;
}
}
}
|
{"hexsha": "e340bfefd03dc2b3241193f9a2a1ba13252f7a96", "size": 9563, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/prod/src/Naming/RouterTestHelper.cpp", "max_stars_repo_name": "AnthonyM/service-fabric", "max_stars_repo_head_hexsha": "c396ea918714ea52eab9c94fd62e018cc2e09a68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2542.0, "max_stars_repo_stars_event_min_datetime": "2018-03-14T21:56:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-06T01:18:20.000Z", "max_issues_repo_path": "src/prod/src/Naming/RouterTestHelper.cpp", "max_issues_repo_name": "AnthonyM/service-fabric", "max_issues_repo_head_hexsha": "c396ea918714ea52eab9c94fd62e018cc2e09a68", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 994.0, "max_issues_repo_issues_event_min_datetime": "2019-05-07T02:39:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:23:04.000Z", "max_forks_repo_path": "src/prod/src/Naming/RouterTestHelper.cpp", "max_forks_repo_name": "AnthonyM/service-fabric", "max_forks_repo_head_hexsha": "c396ea918714ea52eab9c94fd62e018cc2e09a68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 300.0, "max_forks_repo_forks_event_min_datetime": "2018-03-14T21:57:17.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-06T20:07:00.000Z", "avg_line_length": 36.2234848485, "max_line_length": 143, "alphanum_fraction": 0.5265084179, "num_tokens": 1662}
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import re
import string
import random
import time
from datetime import datetime
from pathlib import Path
from typing import Optional
import sagemaker
from time import perf_counter
from contextlib import contextmanager
from syne_tune.constants import SYNE_TUNE_FOLDER
class RegularCallback:
def __init__(self, callback, call_seconds_frequency: float):
"""
Allows to call the callback function at most once every `call_seconds_frequency` seconds.
:param callback:
:param call_seconds_frequency:
"""
self.start = datetime.now()
self.frequency = call_seconds_frequency
self.callback = callback
def __call__(self, *args, **kwargs):
seconds_since_last_call = (datetime.now() - self.start).seconds
if seconds_since_last_call > self.frequency:
self.start = datetime.now()
self.callback(*args, **kwargs)
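# Usage sketch (hypothetical callback): throttle a status printer so that it
# fires at most once every 30 seconds, however often it is invoked:
#   log_status = RegularCallback(lambda: print("still tuning"), call_seconds_frequency=30)
#   for _ in range(10000):
#       log_status()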
def experiment_path(
tuner_name: Optional[str] = None, local_path: Optional[str] = None
) -> Path:
f"""
:param tuner_name: name of a tuning experiment
:param local_path: local path where results should be saved when running
locally outside of Sagemaker, if not specified, then
`~/{SYNE_TUNE_FOLDER}/` is used.
:return: path where to write logs and results for Syne Tune tuner.
On Sagemaker, results are written under "/opt/ml/checkpoints/" so that files are persisted
continuously by Sagemaker.
"""
is_sagemaker = "SM_MODEL_DIR" in os.environ
if is_sagemaker:
# if SM_MODEL_DIR is present in the environment variable, this means that we are running on Sagemaker
# we use this path to store results as it is persisted by Sagemaker.
sagemaker_path = Path("/opt/ml/checkpoints")
if tuner_name is not None:
sagemaker_path = sagemaker_path / tuner_name
return sagemaker_path
else:
# means we are running on a local machine, we store results in a local path
if local_path is None:
local_path = Path(f"~/{SYNE_TUNE_FOLDER}").expanduser()
else:
local_path = Path(local_path)
if tuner_name is not None:
local_path = local_path / tuner_name
return local_path
def s3_experiment_path(
s3_bucket: Optional[str] = None,
experiment_name: Optional[str] = None,
tuner_name: Optional[str] = None,
) -> str:
"""
Returns S3 path for storing results and checkpoints.
    :param s3_bucket: If not given, the default bucket for the SageMaker
session is used
:param experiment_name: If given, this is used as first directory
:param tuner_name: If given, this is used as second directory
:return: S3 path
"""
if s3_bucket is None:
s3_bucket = sagemaker.Session().default_bucket()
s3_path = f"s3://{s3_bucket}/{SYNE_TUNE_FOLDER}"
for part in (experiment_name, tuner_name):
if part is not None:
s3_path += "/" + part
return s3_path
def check_valid_sagemaker_name(name: str):
    assert re.compile("^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$").match(
        name
    ), f"{name} should consist of alphanumeric characters possibly separated by '-'"
def name_from_base(base: Optional[str], default: str, max_length: int = 63) -> str:
"""Append a timestamp to the provided string.
This function assures that the total length of the resulting string is
not longer than the specified max length, trimming the input parameter if
necessary.
Args:
base (str): String used as prefix to generate the unique name.
default (str): String used in case base is None.
max_length (int): Maximum length for the resulting string (default: 63).
Returns:
str: Input parameter with appended timestamp.
"""
if base is None:
check_valid_sagemaker_name(default)
base = default
else:
check_valid_sagemaker_name(base)
moment = time.time()
moment_ms = repr(moment).split(".")[1][:3]
timestamp = time.strftime(
"%Y-%m-%d-%H-%M-%S-{}".format(moment_ms), time.gmtime(moment)
)
trimmed_base = base[: max_length - len(timestamp) - 1]
return "{}-{}".format(trimmed_base, timestamp)
def random_string(length: int) -> str:
pool = string.ascii_letters + string.digits
return "".join(random.choice(pool) for _ in range(length))
def repository_root_path() -> Path:
"""
:return: Returns path including `syne_tune`, `examples`,
`benchmarking`
"""
return Path(__file__).parent.parent
def script_checkpoint_example_path() -> Path:
"""
Util to get easily the name of an example file
:return:
"""
path = (
repository_root_path()
/ "examples"
/ "training_scripts"
/ "checkpoint_example"
/ "checkpoint_example.py"
)
assert path.exists()
return path
def script_height_example_path():
"""
Util to get easily the name of an example file
:return:
"""
path = (
repository_root_path()
/ "examples"
/ "training_scripts"
/ "height_example"
/ "train_height.py"
)
assert path.exists()
return path
@contextmanager
def catchtime(name: str) -> float:
start = perf_counter()
try:
print(f"start: {name}")
yield lambda: perf_counter() - start
finally:
print(f"Time for {name}: {perf_counter() - start:.4f} secs")
|
{"hexsha": "ab18d75b69dfde159b21206510e6a6d12e78d1b7", "size": 6047, "ext": "py", "lang": "Python", "max_stars_repo_path": "syne_tune/util.py", "max_stars_repo_name": "awslabs/syne-tune", "max_stars_repo_head_hexsha": "1dd8e157477b86db01047a9a7821780ea04389bc", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 97, "max_stars_repo_stars_event_min_datetime": "2021-11-18T17:14:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:33:12.000Z", "max_issues_repo_path": "syne_tune/util.py", "max_issues_repo_name": "awslabs/syne-tune", "max_issues_repo_head_hexsha": "1dd8e157477b86db01047a9a7821780ea04389bc", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2021-11-18T17:14:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T08:11:48.000Z", "max_forks_repo_path": "syne_tune/util.py", "max_forks_repo_name": "awslabs/syne-tune", "max_forks_repo_head_hexsha": "1dd8e157477b86db01047a9a7821780ea04389bc", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-11-29T11:47:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T15:28:11.000Z", "avg_line_length": 31.6596858639, "max_line_length": 109, "alphanum_fraction": 0.6646270878, "include": true, "reason": "import sage", "num_tokens": 1427}
|
subroutine zero2
!! ~ ~ ~ PURPOSE ~ ~ ~
!! this subroutine zeros all array values
use hru_module, only : clayld, &
hru,lagyld,ndeat,ovrlnd,par,sagyld,sanyld, &
sedyld,silyld,smx,snotmp,surf_bs,twash,wrt
implicit none
real :: cklsp ! |
real :: zdb !mm |division term from net pesticide equation
cklsp = 0.
ovrlnd = 0.
sedyld = 0.
sanyld = 0.
silyld = 0.
clayld = 0.
sagyld = 0.
lagyld = 0.
smx = 0.
snotmp = 0.
surf_bs = 0.
twash = 0.
wrt = 0.
zdb = 0.
return
end
|
{"hexsha": "097518925b0af9f962c25247b186194968eda2ad", "size": 666, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/zero2.f90", "max_stars_repo_name": "mikiec84/delphi", "max_stars_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-03-03T11:57:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T21:19:54.000Z", "max_issues_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/zero2.f90", "max_issues_repo_name": "mikiec84/delphi", "max_issues_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 385, "max_issues_repo_issues_event_min_datetime": "2018-02-21T16:52:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T07:44:56.000Z", "max_forks_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/zero2.f90", "max_forks_repo_name": "mikiec84/delphi", "max_forks_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-03-20T01:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T01:04:49.000Z", "avg_line_length": 19.5882352941, "max_line_length": 96, "alphanum_fraction": 0.4684684685, "num_tokens": 225}
|
# for python3
# qiML (quantum-inspired Machine Learning)
import numpy as np
def center_scale(A):
    # NOTE: this operates in place -- A_nrm aliases A, so the input array
    # itself ends up centered and scaled.
    A_mean = np.mean(A)
    A_std = np.std(A)
    A_nrm = A
    A_nrm -= A_mean
    A_nrm /= A_std
    return A_nrm, A_mean, A_std
'''
--------------------------
qiSVD
--------------------------
'''
def vec2strctData(vector):
# Computing prob
prob = vector**2
prob = prob/np.sum(prob)
# Computing depth(d)
dim = len(vector)
d = np.log2(dim)
d = int(np.ceil(d))
# Computing cumProb
cumProb=np.ones(2**d)
cumProb[0:len(prob)]=np.cumsum(prob)
output=dict()
output={'prob': prob,'cumProb':cumProb}
return output
def mat2strctData(A):
prob = A**2
prob = np.sum(prob, axis=1)
prob = prob/np.sum(prob)
# Computing depth(d)
dim = len(prob)
d = np.log2(dim)
d = int(np.ceil(d))
# Computing cumProb
cumProb=np.ones(2**d)
cumProb[0:len(prob)] = np.cumsum(prob)
nRow = A.shape[0]
rowInfo = []
for rowID in range(0,nRow):
tmpInfo = vec2strctData(A[rowID,:])
rowInfo.append(tmpInfo)
output = dict()
output = {'rawMat':A, 'prob':prob, 'cumProb':cumProb, 'infoOnEachRow':rowInfo}
return output
def samplingFromTree(cumProb,z):
# Computing d (depth)
dim = len(cumProb)
d = np.log2(dim)
d = int(np.ceil(d))
'''
Explore end node corresponding to current_z
---------------------------------------------------
I (threshold ID)
temporal_dimID (obtained end-node ID)
dimID (end-node IDs corresponding to z)
---------------------------------------------------
'''
dimID=list()
for current_z in z:
I = (2** (d-1)) - 1
temporal_dimID = 0
for index_d in range(d):
threshold=cumProb[I]
if current_z<threshold:
I = I - 2** (d-(index_d+2))
else:
temporal_dimID = temporal_dimID + 2** (d-index_d-1)
I = I + 2** (d-(index_d+2))
dimID.append(temporal_dimID)
return dimID
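# Worked example (illustrative): for prob = [0.1, 0.2, 0.3, 0.4] we get d = 2
# and cumProb = [0.1, 0.3, 0.6, 1.0]; a draw z = 0.35 first goes right
# (0.35 >= cumProb[1] = 0.3) and then left (0.35 < cumProb[2] = 0.6), so
# samplingFromTree returns dimID 2, i.e. the bin (0.3, 0.6].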
def delete_small_singlar_values(s):
minS = (10** (-10))
new_k_IDs = s[s > minS] # using only components of [S > minS]
new_k = len(new_k_IDs)
s = s[0:new_k]
return s, new_k
def qiSVD(A, k, p, compute_uv=True, returnParams=False, normalizeData=False):
'''
Parameters
----------
A : (M, N) array_like. Set Matrix to decompose.
k : int. The number of components.
p : int. The number of tree-sampling.
compute_uv : bool (default is True).
If True, U and Vh are computed in addition to s.
returnParam: bool (default is False).
If True, params (dimID and U) are returned
normalizeData : bool (default is False).
If True, input A is normalized.
Returns
-------
U : (M, K) ndarray. Matrix of the left singular vectors.
s : (1, K) ndarray. The singular values that are sorted in non-increasing order.
Vh: (K, N) ndarray. Matrix of the right singular vectors.
'''
    # Normalizing A if normalizeData is True
    if normalizeData is True:
        # center_scale centers and scales A in place, so no further mean
        # subtraction is needed here
        A, _, _ = center_scale(A)
# Setting structure data of A
data = mat2strctData(A)
# Sampling for rowIDs
treeData = data['cumProb']
z = np.random.rand(p)
rowIDs = samplingFromTree(treeData, z)
# Sampling for columnIDs
rowInfo = data['infoOnEachRow']
columnIDs = []
for pi in range(0, p):
tmpID = np.random.randint(0, p)
tmpRowID = rowIDs[tmpID]
        tmpRowData = rowInfo[tmpRowID]  # this may be slow; could it be combined with the line below?
tmpTreeData = tmpRowData['cumProb']
tmpZ = np.random.rand(1)
tmpColumnID = samplingFromTree(tmpTreeData, tmpZ)
columnIDs.append(tmpColumnID[0])
# Setting p x p matrix
ppMt = []
rowProb = data['prob']#This may be slow.
probMt = []
for ri in rowIDs:
tmpRowX = data['rawMat'][ri, columnIDs]
tmpRowProb = rowInfo[ri]['prob'][columnIDs]
# Set matrices
ppMt.append(tmpRowX)
probMt.append(tmpRowProb)
# list to np.array
ppMt = np.array(ppMt)
probMt = np.array(probMt)
# Normalize ppMt
sqrtD = np.sqrt(rowProb[rowIDs])*np.sqrt(p)
sqrtF = np.sqrt(np.mean(probMt, axis=0))*np.sqrt(p)#In the code before 20190509, sqrt(p) is not multiplied.
ppMt = ppMt.transpose()/sqrtD
ppMt = ppMt.transpose()/sqrtF
# Computing conventional SVD
print('SVD ppMt')
u, s, _ = np.linalg.svd(ppMt)
# Error catch : k < p
if ppMt.shape[1] < k:
print('k should be less than p.')
# Obtaining the first k components
u = u[:,0:k]
s = s[0:k]
# Deleting the singular vectors corresponding to small singular values.
s, new_k = delete_small_singlar_values(s)
u = u[:, 0:new_k]
# Normalizing u
norm_u = u/s
norm_u = norm_u.transpose()/np.sqrt(p* rowProb[rowIDs])
norm_u = norm_u.transpose()
#scale = np.sqrt(p* rowProb[rowIDs])
# Orthonormalization: using Schmidt's
    print('Orthonormalization')
A = data['rawMat'][rowIDs, :]#This may be slow.
AA=np.dot(A, A.transpose())
U = np.zeros_like(norm_u)
u0 = norm_u[:,0]
errThres = 10**(-5)
U[:,0] = u0/np.sqrt(np.dot(np.dot(u0.T, AA), u0))
for index_k in range(1, new_k):
coef = np.dot(np.dot(U[:,range(index_k)].T, AA), norm_u[:,index_k])
U[:, index_k]=norm_u[:, index_k] - np.dot(U[:, range(index_k)], coef)
U[:, index_k]=U[:, index_k]/np.sqrt(np.dot(np.dot(U[:,index_k].T, AA), U[:,index_k]))
vvMat = np.dot(np.dot(U[:, range(index_k+1)].T, AA), U[:, range(index_k+1)])
eyeMat = np.eye(index_k+1)
err = np.sum((vvMat - eyeMat)**2)
# Error catch : error < errThres
if err > errThres:
U[:, index_k] = np.nan
    # Removing the columns that were marked nan above
us = np.sum(U, axis=0)
rmUinds = np.where(np.isnan(us), 1, 0)
U = U[:, rmUinds==0]
# Computing Vh
Vh = np.dot(U.transpose(), A)
if returnParams:
svdParams = dict()
svdParams = {'dimID':rowIDs, 'U':U}
return svdParams
return U, s, Vh
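# Usage sketch (illustrative sizes; choose k < p):
#   A = np.random.randn(512, 256)
#   U, s, Vh = qiSVD(A, k=20, p=100)
#   # rows of Vh approximate the top right singular vectors of A; left
#   # singular vectors can then be estimated as np.dot(A, Vh.T) / s.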
'''
--------------------------
qiCCA
--------------------------
'''
def calcResultCCAfromV(X, Y, Vx, Vy, k, compute_wv=True):
C = np.dot(Vx, Vy.transpose())
U, _, V = np.linalg.svd(C) # U: nCSample(=k) x nCK(=k), V: nCK(=k) x nCDim(=k)
U = U[:, 0:k]
V = V[0:k, :]
if compute_wv is True:
# Estimating U, S
UxSx = np.dot(X[:, :], Vx.transpose()) # nDim x k_x
Sx = np.sqrt(np.sum(UxSx**2, axis=0)) # k_x
Ux = UxSx/Sx.transpose() # nDim x k_x
UySy = np.dot(Y[:, :], Vy.transpose())
Sy = np.sqrt(np.sum(UySy**2, axis=0))
Uy = UySy/Sy.transpose()
A = np.dot(Ux/Sx, U) #v* s-1*U
B = np.dot(Uy/Sy, V.transpose())
# Computing canonCompX,canonCompY using weight matrices (A,B)
canonCompX_usingA = np.dot(X[:, :].transpose(), A) # nSample x k
canonCompY_usingB = np.dot(Y[:, :].transpose(), B) # nSample x k
ccMat_usingAB = np.corrcoef(canonCompX_usingA.transpose(), canonCompY_usingB.transpose()) # k x k
# Computing canonCompX,canonCompY using only Vx, Vy
canonCompX = np.dot(Vx.transpose(), U) # nSample x k
canonCompY = np.dot(Vy.transpose(), V.transpose()) # nSample x k
if compute_wv is True:
out = {'canonCompX':canonCompX, 'canonCompY':canonCompY, 'A':A, 'B':B, 'U':U, 'V':V}
else:
out = {'canonCompX':canonCompX, 'canonCompY':canonCompY}
return out
def corrcoef(x_scores, y_scores):
'''
Parameters
----------
x_scores : (N, k) ndarray. Canonical component of training vectors.
y_scores : (N, k) ndarray. Canonical component of target vectors.
Returns
----------
rs : (k, ) ndarray. Correlation coefficient between x_scores and y_scores for each component.
'''
rMat = np.corrcoef(x_scores.T, y_scores.T)
k = int(rMat.shape[0]/ 2)
rs = np.diag(rMat[0:k, k:2*k])
return rs
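# Example (added sketch): identical score matrices give correlation 1 per component.
#   a = np.random.randn(100, 3)
#   corrcoef(a, a)   # -> array([1., 1., 1.])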
class qiCCA():
def __init__(self, k_x=1000, k_y=1000, k=100, p=120, compute_wv=True, normalizeData=False):
self.k_x = k_x
self.k_y = k_y
self.k = k
self.p = p
self.compute_wv = compute_wv
self.normalizeData = normalizeData
def fit(self, X, Y):
'''
Parameters
----------
X : (N, D1) ndarray. Training vectors. D1 denotes the number of features.
Y : (N, D2) ndarray. Target vectors. D2 denotes the number of targets.
        compute_wv : bool (set in the constructor; default is True).
            If True, weight vectors for X and Y are computed and stored.
        normalizeData : bool (set in the constructor; default is False).
            If True, input X and Y are normalized.
Returns
----------
x_scores : (N, k) ndarray. Canonical components for X.
y_scores : (N, k) ndarray. Canonical components for Y.
        x_weights: (p, k) ndarray. Weight vectors mapping X[dimID_x, :] into the shared space.
        y_weights: (p, k) ndarray. Weight vectors mapping Y[dimID_y, :] into the shared space.
self.dimID_x : (p, ) list. Sampled indices for X.
self.dimID_y : (p, ) list. Sampled indices for Y.
self.corrcoefs: (k, ) ndarray. Correlation coefficients between x_scores and y_scores.
'''
k_x = self.k_x
k_y = self.k_y
k = self.k
p = self.p
# Transpose X, Y
X = X.T
Y = Y.T
        # Normalizing X, Y
if self.normalizeData is True:
X, self.x_mean_, self.x_std_ = center_scale(X)
Y, self.y_mean_, self.y_std_ = center_scale(Y)
# Setting structure data of X, Y
data_x = mat2strctData(X)
data_y = mat2strctData(Y)
# Computing qiSVD
print('Performing qiSVD on X')
svdParams_x = qiSVD(X, k_x, p, returnParams=True)
print('Performing qiSVD on Y')
svdParams_y = qiSVD(Y, k_y, p, returnParams=True)
# Setting data
dimID_x = svdParams_x['dimID']
dimID_y = svdParams_y['dimID']
        descriptionUx = svdParams_x['U']
        descriptionUy = svdParams_y['U']
        X = data_x['rawMat']
        Y = data_y['rawMat']
        # Computing Vx, Vy
        Vx = np.dot(descriptionUx.transpose(), X[dimID_x, :]) # k_x x nSample
        Vy = np.dot(descriptionUy.transpose(), Y[dimID_y, :]) # k_y x nSample
# Computing canonCompX,canonCompY using Vx, Vy
out = calcResultCCAfromV(X[dimID_x, :], Y[dimID_y, :], Vx, Vy, k, self.compute_wv)
        if self.compute_wv is True:
            self.x_weights = np.dot(descriptionUx, out['U'])
            self.y_weights = np.dot(descriptionUy, out['V'].transpose())
# Results matrices
self.x_scores = out['canonCompX']
self.y_scores = out['canonCompY']
# Setting dimID_x, _y
self.dimID_x = dimID_x
self.dimID_y = dimID_y
# Computing corrcoef
self.corrcoefs = corrcoef(self.x_scores, self.y_scores)
return self
def transform(self, X, Y=None, copy=True):
'''
Parameters
----------
X : (N, D1) ndarray. Test vectors. D1 denotes the number of features.
        Y : (N, D2) ndarray, optional. Target vectors. D2 denotes the number of targets.
Returns
----------
x_scores : (N, k) ndarray. Canonical components for X.
        y_scores : (N, k) ndarray. Canonical components for Y (returned only when Y is given).
'''
        # Transpose X (and Y, when given)
        X = X.T
        if Y is not None:
            Y = Y.T
# Normalizing X if self.normalizeData is True
if self.normalizeData is True:
X -= self.x_mean_
X /= self.x_std_
# Computing x_scores (and y_scores)
x_scores = np.dot(X[self.dimID_x, :].T, self.x_weights)
if Y is not None:
# Normalizing Y if self.normalizeData is True
if self.normalizeData is True:
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y[self.dimID_y, :].T, self.y_weights)
return x_scores, y_scores
        return x_scores
def fit_transform(self, X, Y):
'''
Parameters
----------
X : (N, D1) ndarray. Training vectors. D1 denotes the number of features.
Y : (N, D2) ndarray. Target vectors. D2 denotes the number of targets.
Returns
----------
x_scores : (N, k) ndarray. Canonical components for X.
y_scores : (N, k) ndarray. Canonical components for Y.
'''
return self.fit(X, Y).transform(X, Y)
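if __name__ == '__main__':
    # Minimal smoke test (added sketch). It assumes the helpers used by fit()
    # (qiSVD, mat2strctData, center_scale, ...) are defined earlier in this
    # file; every size below is illustrative only.
    rng = np.random.RandomState(0)
    shared = rng.randn(400, 3)                                    # latent signal shared by X and Y
    X = shared.dot(rng.randn(3, 128)) + 0.1 * rng.randn(400, 128)
    Y = shared.dot(rng.randn(3, 96)) + 0.1 * rng.randn(400, 96)
    cca = qiCCA(k_x=10, k_y=10, k=3, p=30, normalizeData=True)
    x_s, y_s = cca.fit_transform(X, Y)
    print(cca.corrcoefs)   # the leading canonical correlations should be close to 1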
|
{"hexsha": "810dd713bae3a541f308689c700fa4a39e41b567", "size": 13116, "ext": "py", "lang": "Python", "max_stars_repo_path": "qiML.py", "max_stars_repo_name": "nkmjm/qiML", "max_stars_repo_head_hexsha": "677811b0a877e66e8edbcd3fa99d9a9cf164b6f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-07-25T03:51:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-25T19:49:23.000Z", "max_issues_repo_path": "qiML.py", "max_issues_repo_name": "nkmjm/qiML", "max_issues_repo_head_hexsha": "677811b0a877e66e8edbcd3fa99d9a9cf164b6f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qiML.py", "max_forks_repo_name": "nkmjm/qiML", "max_forks_repo_head_hexsha": "677811b0a877e66e8edbcd3fa99d9a9cf164b6f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-03-25T08:11:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T07:34:09.000Z", "avg_line_length": 29.4080717489, "max_line_length": 112, "alphanum_fraction": 0.5567246112, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3761}
|
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0,".")
import unittest
import neuroml
import neuroml.writers as writers
import PyOpenWorm
from PyOpenWorm import *
import networkx
import rdflib
import rdflib as R
import pint as Q
import os
import subprocess as SP
import subprocess
import tempfile
import doctest
from glob import glob
USE_BINARY_DB = False
BINARY_DB = "OpenWormData/worm.db"
TEST_CONFIG = "tests/default_test.conf"
try:
import bsddb
has_bsddb = True
except ImportError:
has_bsddb = False
try:
import numpy
has_numpy = True
except ImportError:
has_numpy = False
namespaces = { "rdf" : "http://www.w3.org/1999/02/22-rdf-syntax-ns#" }
def clear_graph(graph):
graph.update("CLEAR ALL")
def make_graph(size=100):
""" Make an rdflib graph """
g = R.Graph()
for i in range(size):
s = rdflib.URIRef("http://somehost.com/s"+str(i))
p = rdflib.URIRef("http://somehost.com/p"+str(i))
o = rdflib.URIRef("http://somehost.com/o"+str(i))
g.add((s,p,o))
return g
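# Example (added): make_graph(2) returns a graph holding exactly the triples
#   (<http://somehost.com/s0>, <http://somehost.com/p0>, <http://somehost.com/o0>)
#   (<http://somehost.com/s1>, <http://somehost.com/p1>, <http://somehost.com/o1>)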
def delete_zodb_data_store(path):
os.unlink(path)
os.unlink(path + '.index')
os.unlink(path + '.tmp')
os.unlink(path + '.lock')
# Tests for the Configure class, which provides functionality to modules to
# allow outside objects to parameterize their behavior
from ConfigureTest import ConfigureTest
# Integration tests that read from the database and ensure that basic queries
# have expected answers, as a way to keep data quality high.
from DataIntegrityTest import DataIntegrityTest
# Integration tests that ensure basic functioning of the database backend and
# connection
from DatabaseBackendTest import DatabaseBackendTest
# Runs the examples to make sure we didn't break the API for them.
from ExampleRunnerTest import ExampleRunnerTest
# Tests our Quantity class, which is used for defining things with measurement
# units
from QuantityTest import QuantityTest
# Tests RDFLib, our backend library that interfaces with the database as an
# RDF graph.
from RDFLibTest import RDFLibTest
class _DataTest(unittest.TestCase):
def delete_dir(self):
self.path = self.TestConfig['rdf.store_conf']
try:
if self.TestConfig['rdf.source'] == "Sleepycat":
subprocess.call("rm -rf "+self.path, shell=True)
elif self.TestConfig['rdf.source'] == "ZODB":
delete_zodb_data_store(self.path)
        except OSError as e:
            if e.errno == 2:
                # The file may not exist and that's fine
                pass
            else:
                raise
def setUp(self):
# Set do_logging to True if you like walls of text
self.TestConfig = Configure.open(TEST_CONFIG)
td = '__tempdir__'
z = self.TestConfig['rdf.store_conf']
if z.startswith(td):
x = z[len(td):]
h=tempfile.mkdtemp()
self.TestConfig['rdf.store_conf'] = h + x
self.delete_dir()
PyOpenWorm.connect(conf=self.TestConfig, do_logging=False)
def tearDown(self):
PyOpenWorm.disconnect()
self.delete_dir()
@property
def config(self):
return PyOpenWorm.config()
class WormTest(_DataTest):
"""Test for Worm."""
def setUp(self):
_DataTest.setUp(self)
ns = self.config['rdf.namespace']
self.trips = [(ns['64'], ns['356'], ns['184']),
(ns['john'], R.RDF['type'], ns['Connection']),
(ns['john'], ns['Connection/pre'], ns['64']),
(ns['64'], R.RDFS['label'], R.Literal("PVCR")),
(ns['john'], ns['Connection/syntype'], ns['356']),
(ns['john'], ns['Connection/number'], R.Literal('1', datatype=R.XSD.integer)),
(ns['184'], R.RDFS['label'], R.Literal("AVAL")),
(ns['john'], ns['Connection/post'], ns['184']),
(ns['65'], ns['356'], ns['185']),
(ns['luke'], R.RDF['type'], ns['Connection']),
(ns['luke'], ns['Connection/pre'], ns['65']),
(ns['65'], R.RDFS['label'], R.Literal("PVCL")),
(ns['luke'], ns['Connection/syntype'], ns['356']),
(ns['luke'], ns['Connection/number'], R.Literal('1', datatype=R.XSD.integer)),
(ns['185'], R.RDFS['label'], R.Literal("AVAR")),
(ns['luke'], ns['Connection/post'], ns['185'])]
def test_get_network(self):
w = Worm()
w.neuron_network(Network())
w.save()
self.assertIsInstance(Worm().get_neuron_network(), Network)
def test_muscles1(self):
w = Worm()
w.muscle(Muscle(name='MDL08'))
w.muscle(Muscle(name='MDL15'))
w.save()
self.assertIn(Muscle(name='MDL08'), list(Worm().muscles()))
self.assertIn(Muscle(name='MDL15'), list(Worm().muscles()))
def test_get_semantic_net(self):
g0 = Worm().get_semantic_net()
self.assertTrue(isinstance(g0, rdflib.ConjunctiveGraph))
class CellTest(_DataTest):
def test_DataUser(self):
do = Cell('',conf=self.config)
self.assertTrue(isinstance(do,DataUser))
def test_lineageName(self):
""" Test that we can retrieve the lineage name """
c = Cell(name="ADAL",conf=self.config)
c.lineageName("AB plapaaaapp")
c.save()
self.assertEqual("AB plapaaaapp", Cell(name="ADAL").lineageName())
def test_same_name_same_id(self):
"""
Test that two Cell objects with the same name have the same identifier()
Saves us from having too many inserts of the same object.
"""
c = Cell(name="boots")
c1 = Cell(name="boots")
self.assertEqual(c.identifier(),c1.identifier())
def test_blast_space(self):
"""
Test that setting the lineage name gives the blast cell.
"""
c = Cell(name="carrots")
c.lineageName("a tahsuetoahusenoatu")
self.assertEqual(c.blast(), "a")
def test_blast_dot(self):
"""
Test that setting the lineage name gives the blast cell.
"""
c = Cell(name="peas")
c.lineageName("ab.tahsuetoahusenoatu")
self.assertEqual(c.blast(), "ab")
def test_parentOf(self):
"""
Test that we can get the children of a cell
Tests for anterior, posterior, left, right, ventral, dorsal divisions
"""
p = Cell(name="peas")
base = 'ab.tahsuetoahusenoat'
p.lineageName(base)
p.save()
c = ["carrots",
"jam",
"peanuts",
"celery",
"tuna",
"chicken"]
division_directions = "alvpdr"
for x,l in zip(c, division_directions):
ln = base + l
Cell(name=x,lineageName=ln).save()
names = set(str(x.name()) for x in p.parentOf())
self.assertEqual(set(c), names)
def test_daughterOf(self):
"""
Test that we can get the parent of a cell
"""
base = "ab.tahsuetoahusenoat"
child = base + "u"
p = Cell(name="peas")
p.lineageName(base)
p.save()
c = Cell(name="carrots")
c.lineageName(child)
c.save()
parent_p = c.daughterOf().name()
self.assertEqual("peas", parent_p)
@unittest.skip('Long runner')
def test_morphology_is_NeuroML_morphology(self):
""" Check that the morphology is the kind made by neuroml """
c = Cell(name="ADAR",conf=self.config)
# get the morph
m = c.morphology()
self.assertIsInstance(m, neuroml.Morphology)
@unittest.skip('Long runner')
def test_morphology_validates(self):
""" Check that we can generate a cell's file and have it validate """
# Load in raw morphology for ADAL
self.config['rdf.graph'].parse("tests/test_data/PVDR.nml.rdf.xml",format='trig')
n = Neuron(name='PVDR', conf=self.config)
doc = PyOpenWorm.NeuroML.generate(n,1)
writers.NeuroMLWriter.write(doc, "temp.nml")
from neuroml.utils import validate_neuroml2
f = sys.stdout
try:
sys.stdout = open(os.devnull, 'w')
except:
sys.stdout = f
try:
validate_neuroml2("temp.nml")
        except Exception as e:
            print(e)
self.fail("Should validate")
sys.stdout = f
class DataObjectTest(_DataTest):
def test_DataUser(self):
do = DataObject()
self.assertTrue(isinstance(do,PyOpenWorm.DataUser))
def test_identifier(self):
""" Test that we can set and return an identifier """
do = DataObject(ident="http://example.org")
self.assertEqual(do.identifier(), R.URIRef("http://example.org"))
@unittest.skip("Should be tracked by version control")
def test_uploader(self):
""" Make sure that we're marking a statement with it's uploader """
g = make_graph(20)
r = DataObject(triples=g,conf=self.config)
r.save()
u = r.uploader()
self.assertEqual(self.config['user.email'], u)
def test_object_from_id(self):
do = DataObject(ident="http://example.org")
g = do.object_from_id('http://openworm.org/entities/Neuron')
self.assertIsInstance(g,Neuron)
g = do.object_from_id('http://openworm.org/entities/Connection')
self.assertIsInstance(g,Connection)
@unittest.skip("Should be tracked by version control")
def test_upload_date(self):
""" Make sure that we're marking a statement with it's upload date """
g = make_graph(20)
r = DataObject(triples=g)
r.save()
u = r.upload_date()
self.assertIsNotNone(u)
class DataUserTest(_DataTest):
def test_init_no_config(self):
""" Should fail to initialize since it's lacking basic configuration """
c = Configureable.conf
Configureable.conf = False
with self.assertRaises(BadConf):
DataUser()
Configureable.conf = c
def test_init_no_config_with_default(self):
""" Should suceed if the default configuration is a Data object """
DataUser()
def test_init_False_with_default(self):
""" Should suceed if the default configuration is a Data object """
DataUser(conf=False)
def test_init_config_no_Data(self):
""" Should fail if given a non-Data configuration """
# XXX: This test touches some machinery in
# PyOpenWorm/__init__.py. Feel like it's a bad test
tmp = Configureable.conf
Configureable.conf = Configure()
with self.assertRaises(BadConf):
DataUser()
Configureable.conf = tmp
@unittest.skip("Should be tracked by version control")
def test_add_statements_has_uploader(self):
""" Assert that each statement has an uploader annotation """
g = R.Graph()
# Make a statement (triple)
s = rdflib.URIRef("http://somehost.com/s")
p = rdflib.URIRef("http://somehost.com/p")
o = rdflib.URIRef("http://somehost.com/o")
# Add it to an RDF graph
g.add((s,p,o))
# Make a datauser
du = DataUser(self.config)
try:
# Add all of the statements in the graph
du.add_statements(g)
        except Exception as e:
self.fail("Should be able to add statements in the first place: "+str(e))
g0 = du.conf['rdf.graph']
# These are the properties that we should find
uploader_n3_uri = du.conf['rdf.namespace']['uploader'].n3()
upload_date_n3_uri = du.conf['rdf.namespace']['upload_date'].n3()
uploader_email = du.conf['user.email']
# This is the query to get uploader information
q = """
Select ?u ?t where
{
GRAPH ?g
{
<http://somehost.com/s>
<http://somehost.com/p>
<http://somehost.com/o> .
}
?g """+uploader_n3_uri+""" ?u.
?g """+upload_date_n3_uri+""" ?t.
} LIMIT 1
"""
for x in g0.query(q):
self.assertEqual(du.conf['user.email'],str(x['u']))
def test_add_statements_completes(self):
""" Test that we can upload lots of triples.
This is to address the problem from issue #31 on https://github.com/openworm/PyOpenWorm/issues
"""
g = rdflib.Graph()
for i in range(9000):
s = rdflib.URIRef("http://somehost.com/s%d" % i)
p = rdflib.URIRef("http://somehost.com/p%d" % i)
o = rdflib.URIRef("http://somehost.com/o%d" % i)
g.add((s,p,o))
du = DataUser(conf=self.config)
du.add_statements(g)
class NeuronTest(_DataTest):
def setUp(self):
_DataTest.setUp(self)
self.neur = lambda x : Neuron(name=x)
def test_Cell(self):
do = self.neur('BDUL')
self.assertTrue(isinstance(do,Cell))
def test_receptors(self):
n = self.neur('AVAL')
n.receptor('GLR-2')
n.save()
self.assertIn('GLR-2', list(self.neur('AVAL').receptors()))
def test_same_name_same_id(self):
"""
Test that two Neuron objects with the same name have the same identifier()
Saves us from having too many inserts of the same object.
"""
c = Neuron(name="boots")
c1 = Neuron(name="boots")
self.assertEqual(c.identifier(query=True),c1.identifier(query=True))
def test_type(self):
n = self.neur('AVAL')
n.type('interneuron')
n.save()
self.assertEqual('interneuron', self.neur('AVAL').type.one())
def test_name(self):
""" Test that the name property is set when the neuron is initialized with it """
self.assertEqual('AVAL', self.neur('AVAL').name())
self.assertEqual('AVAR', self.neur('AVAR').name())
def test_neighbor(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'))
neighbors = list(n.neighbor())
self.assertIn(self.neur('PVCL'), neighbors)
n.save()
self.assertIn(self.neur('PVCL'), list(self.neur('AVAL').neighbor()))
def test_init_from_lineage_name(self):
c = Neuron(lineageName="AB plapaaaap",name="ADAL")
c.save()
c = Neuron(lineageName="AB plapaaaap")
self.assertEqual(c.name(), 'ADAL')
class NetworkTest(_DataTest):
    def setUp(self):
        _DataTest.setUp(self)
        self.net = Network(conf=self.config)
def test_aneuron(self):
self.assertTrue(isinstance(self.net.aneuron('AVAL'),PyOpenWorm.Neuron))
def test_neurons(self):
self.net.neuron(Neuron(name='AVAL'))
self.net.neuron(Neuron(name='DD5'))
self.assertTrue('AVAL' in self.net.neurons())
self.assertTrue('DD5' in self.net.neurons())
def test_synapses_rdf(self):
""" Check that synapses() returns connection objects """
for x in self.net.synapse():
self.assertIsInstance(x,Connection)
break
def test_as_networkx(self):
self.assertTrue(isinstance(self.net.as_networkx(),networkx.DiGraph))
class EvidenceTest(_DataTest):
@unittest.skip("Post alpha")
def test_bibtex_init(self):
bibtex = u"""@ARTICLE{Cesar2013,
author = {Jean César},
title = {An amazing title},
year = {2013},
month = jan,
volume = {12},
pages = {12--23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines...},
comments = {A comment},
keywords = {keyword1, keyword2},
}
"""
self.assertEqual(u"Jean César", next(Evidence(bibtex=bibtex).author()))
def test_pubmed_init1(self):
"""
A pubmed uri
"""
uri = "http://www.ncbi.nlm.nih.gov/pubmed/24098140?dopt=abstract"
self.assertIn(u"Frédéric MY", list(Evidence(pmid=uri).author()))
def test_pubmed_init2(self):
"""
A pubmed id
"""
pmid = "24098140"
self.assertIn(u"Frédéric MY", list(Evidence(pmid=pmid).author()))
def test_pubmed_multiple_authors_list(self):
"""
When multiple authors are on a paper, all of their names should be returned in an iterator. Publication order not necessarily preserved
"""
pmid = "24098140"
alist = [u"Frédéric MY","Lundin VF","Whiteside MD","Cueva JG","Tu DK","Kang SY","Singh H","Baillie DL","Hutter H","Goodman MB","Brinkman FS","Leroux MR"]
self.assertEqual(set(alist), set(Evidence(pmid=pmid).author()))
@unittest.skip("Fix later")
def test_doi_init_fail_on_request_prefix(self):
"""
Requesting only the prefix
"""
with self.assertRaises(EvidenceError):
Evidence(doi='http://dx.doi.org/10.1126')
@unittest.skip("Fix later")
def test_doi_init_fail_on_request_suffix(self):
"""
Requesting only the prefix
"""
with self.assertRaises(EvidenceError):
Evidence(doi='http://dx.doi.org/s00454-010-9273-0')
def test_wormbase_init(self):
""" Initialize with wormbase source """
# Wormbase lacks anything beyond the author,date format for a lot of papers
self.assertIn(u'Frederic et al., 2013', list(Evidence(wormbase="WBPaper00044287").author()))
def test_wormbase_year(self):
""" Just make sure we can extract something without crashing """
for i in range(600,610):
wbid = 'WBPaper00044' + str(i)
e = Evidence(wormbase=wbid)
e.year()
def test_asserts(self):
"""
Asserting something should allow us to get it back.
"""
e=Evidence(wormbase='WBPaper00044600')
g = make_graph(20)
r = Relationship(graph=g)
e.asserts(r)
r.identifier = lambda **args : r.make_identifier("test")
e.save()
l = list(e.asserts())
self.assertIn(r,l)
def test_asserts_query(self):
""" Show that we can store the evidence on an object and later retrieve it """
e = Evidence(author='tom@cn.com')
r = Relationship(make_graph(10))
e.asserts(r)
e.save()
e0 = Evidence()
e0.asserts(r)
s = list(e0.load())
author = s[0].author.one()
self.assertIn('tom@cn.com', author)
def test_asserts_query_multiple(self):
""" Show that setting the evidence with distinct objects yields
distinct results """
e = Evidence(author='tom@cn.com')
r = Relationship(make_graph(10))
e.asserts(r)
e.save()
e1 = Evidence(year=1999)
e1.asserts(r)
e1.save()
e0 = Evidence()
e0.asserts(r)
for x in e0.load():
a = x.author.one()
y = x.year()
            # Each loaded result should carry exactly one of the two asserted
            # facts: either the author is tom@cn.com or the year is 1999
            self.assertTrue(a == 'tom@cn.com' or (y is not None and int(y) == 1999))
def test_asserts_query_multiple_author_matches(self):
""" Show that setting the evidence with distinct objects yields
distinct results even if there are matching values """
e = Evidence(author='tom@cn.com')
r = Relationship(make_graph(10))
e.asserts(r)
e.save()
e1 = Evidence(author='tom@cn.com')
e1.asserts(r)
e1.save()
e0 = Evidence()
e0.asserts(r)
self.assertTrue(len(list(e0.load())) == 2)
class ConnectionTest(_DataTest):
def setUp(self):
_DataTest.setUp(self)
ns = self.config['rdf.namespace']
self.trips = [(ns['64'], ns['356'], ns['184']),
(ns['john'], R.RDF['type'], ns['Connection']),
(ns['john'], ns['Connection/pre'], ns['64']),
(ns['64'], R.RDFS['label'], R.Literal("PVCR")),
(ns['john'], ns['Connection/syntype'], ns['356']),
(ns['john'], ns['Connection/number'], R.Literal('1', datatype=R.XSD.integer)),
(ns['184'], R.RDFS['label'], R.Literal("AVAL")),
(ns['john'], ns['Connection/post'], ns['184']),
(ns['65'], ns['356'], ns['185']),
(ns['luke'], R.RDF['type'], ns['Connection']),
(ns['luke'], ns['Connection/pre'], ns['65']),
(ns['65'], R.RDFS['label'], R.Literal("PVCL")),
(ns['luke'], ns['Connection/syntype'], ns['356']),
(ns['luke'], ns['Connection/number'], R.Literal('1', datatype=R.XSD.integer)),
(ns['185'], R.RDFS['label'], R.Literal("AVAR")),
(ns['luke'], ns['Connection/post'], ns['185'])]
def test_init(self):
"""Initialization with positional parameters"""
c = Connection('AVAL','ADAR',3,'send','Serotonin')
self.assertIsInstance(c.pre_cell(), Neuron)
self.assertIsInstance(c.post_cell(), Neuron)
self.assertEqual(c.number(), 3)
self.assertEqual(c.syntype(), 'send')
self.assertEqual(c.synclass(), 'Serotonin')
def test_init_number_is_a_number(self):
with self.assertRaises(Exception):
Connection(1,2,"gazillion",4,5)
def test_init_with_neuron_objects(self):
n1 = Neuron(name="AVAL")
n2 = Neuron(name="PVCR")
try:
Connection(n1,n2)
except:
self.fail("Shouldn't fail on Connection init")
def test_load1(self):
""" Put the appropriate triples in. Try to load them """
g = R.Graph()
self.config['rdf.graph'] = g
for t in self.trips:
g.add(t)
c = Connection(conf=self.config)
for x in c.load():
self.assertIsInstance(x,Connection)
def test_load_with_filter(self):
# Put the appropriate triples in. Try to load them
g = R.Graph()
self.config['rdf.graph'] = g
for t in self.trips:
g.add(t)
c = Connection(pre_cell="PVCR", conf=self.config)
r = c.load()
for x in r:
self.assertIsInstance(x,Connection)
class MuscleTest(_DataTest):
def test_muscle(self):
self.assertTrue(isinstance(Muscle(name='MDL08'), Muscle))
def test_innervatedBy(self):
m = Muscle('MDL08')
n = Neuron('some neuron')
m.innervatedBy(n)
m.save()
v = Muscle(name='MDL08')
self.assertIn(Neuron('some neuron'), list(v.innervatedBy()))
def test_muscle_neurons(self):
""" Should be the same as innervatedBy """
m = Muscle(name='MDL08')
neu = Neuron(name="tnnetenba")
m.neurons(neu)
m.save()
m = Muscle(name='MDL08')
self.assertIn(Neuron('tnnetenba'), list(m.neurons()))
class PropertyTest(_DataTest):
def test_one(self):
""" `one` should return None if there isn't a value or just the value if there is one """
class T(Property):
def __init__(self):
Property.__init__(self)
self.b = False
def get(self):
if self.b:
yield "12"
t = T()
self.assertIsNone(t.one())
t.b=True
self.assertEqual('12', t.one())
class SimplePropertyTest(_DataTest):
def __init__(self,*args,**kwargs):
_DataTest.__init__(self,*args,**kwargs)
id_tests = []
# XXX: auto generate some of these tests...
def test_same_value_same_id_empty(self):
"""
Test that two SimpleProperty objects with the same name have the same identifier()
"""
do = DataObject(ident=R.URIRef("http://example.org"))
do1 = DataObject(ident=R.URIRef("http://example.org"))
c = DataObject.DatatypeProperty("boots", do)
c1 = DataObject.DatatypeProperty("boots", do1)
self.assertEqual(c.identifier(),c1.identifier())
def test_same_value_same_id_not_empty(self):
"""
Test that two SimpleProperty with the same name have the same identifier()
"""
do = DataObject(ident=R.URIRef("http://example.org"))
do1 = DataObject(ident=R.URIRef("http://example.org"))
c = DataObject.DatatypeProperty("boots", do)
c1 = DataObject.DatatypeProperty("boots", do1)
do.boots('partition')
do1.boots('partition')
self.assertEqual(c.identifier(),c1.identifier())
def test_same_value_same_id_not_empty_object_property(self):
"""
Test that two SimpleProperty with the same name have the same identifier()
"""
do = DataObject(ident=R.URIRef("http://example.org"))
do1 = DataObject(ident=R.URIRef("http://example.org"))
dz = DataObject(ident=R.URIRef("http://example.org/vip"))
dz1 = DataObject(ident=R.URIRef("http://example.org/vip"))
c = DataObject.ObjectProperty("boots", do)
c1 = DataObject.ObjectProperty("boots", do1)
do.boots(dz)
do1.boots(dz1)
self.assertEqual(c.identifier(),c1.identifier())
def test_diff_value_diff_id_not_empty(self):
"""
Test that two SimpleProperty with the same name have the same identifier()
"""
do = DataObject(ident=R.URIRef("http://example.org"))
do1 = DataObject(ident=R.URIRef("http://example.org"))
c = DataObject.DatatypeProperty("boots", do)
c1 = DataObject.DatatypeProperty("boots", do1)
do.boots('join')
do1.boots('partition')
self.assertNotEqual(c.identifier(),c1.identifier())
def test_diff_prop_same_name_same_object_same_value_same_id(self):
"""
Test that two SimpleProperty with the same name have the same identifier()
"""
# why would you ever do this?
do = DataObject(ident=R.URIRef("http://example.org"))
c = DataObject.DatatypeProperty("boots", do)
c1 = DataObject.DatatypeProperty("boots", do)
c('join')
c1('join')
self.assertEqual(c.identifier(),c1.identifier())
def test_diff_prop_same_name_same_object_diff_value_same_id(self):
"""
Test that two SimpleProperty with the same name have the same identifier()
"""
# why would you ever do this?
do = DataObject(ident=R.URIRef("http://example.org"))
c = DataObject.DatatypeProperty("boots", do)
c1 = DataObject.DatatypeProperty("boots", do)
c('partition')
c1('join')
self.assertNotEqual(c.identifier(),c1.identifier())
def test_diff_value_insert_order_same_id(self):
"""
Test that two SimpleProperty with the same name have the same identifier()
"""
do = DataObject(ident=R.URIRef("http://example.org"))
do1 = DataObject(ident=R.URIRef("http://example.org"))
c = DataObject.DatatypeProperty("boots", do)
c1 = DataObject.DatatypeProperty("boots", do1)
do.boots('join')
do.boots('simile')
do.boots('partition')
do1.boots('partition')
do1.boots('join')
do1.boots('simile')
self.assertEqual(c.identifier(),c1.identifier())
def test_diff_value_insert_order_same_id_object_property(self):
"""
Test that two SimpleProperty with the same name have the same identifier()
"""
do = DataObject(ident=R.URIRef("http://example.org"))
do1 = DataObject(ident=R.URIRef("http://example.org"))
oa = DataObject(ident=R.URIRef("http://example.org/a"))
ob = DataObject(ident=R.URIRef("http://example.org/b"))
oc = DataObject(ident=R.URIRef("http://example.org/c"))
c = DataObject.ObjectProperty("boots", do)
c1 = DataObject.ObjectProperty("boots", do1)
do.boots(oa)
do.boots(ob)
do.boots(oc)
do1.boots(oc)
do1.boots(oa)
do1.boots(ob)
self.assertEqual(c.identifier(),c1.identifier())
def test_triples_with_no_value(self):
""" Test that when there is no value set for a property, it still yields triples """
do = DataObject(ident=R.URIRef("http://example.org"))
class T(SimpleProperty):
property_type = 'DatatypeProperty'
linkName = 'test'
owner_type = DataObject
sp = T(owner=do)
self.assertEqual(len(list(sp.triples())), 0)
self.assertEqual(len(list(sp.triples(query=True))), 0)
class NeuroMLTest(_DataTest):
pass
# Tests from README.md
class DocumentationTest(unittest.TestCase):
def test_readme(self):
[failure_count, return_count] = doctest.testfile("../README.md")
self.assertEqual(failure_count, 0)
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-b", "--use-binary-database", dest="binary_db",
action="store_true", default=False,
help="Use the binary database for data integrity tests")
(options, args) = parser.parse_args()
USE_BINARY_DB = options.binary_db
def getTests(testCase):
return unittest.TestLoader().loadTestsFromTestCase(testCase)
def runTests(suite):
return unittest.TextTestRunner().run(suite)
all_tests = []
configs = glob("tests/test_*.conf")
if not has_bsddb:
configs = [x for x in configs if 'Sleepycat' not in x]
print "Testing with configs:",configs
for x in configs:
TEST_CONFIG = x
suite = unittest.TestSuite()
suite.addTests(getTests(x) for x in _DataTest.__subclasses__())
all_tests.append(suite)
suite = unittest.TestSuite()
classes = filter(lambda x : isinstance(x, type), globals().values())
non_DataTestTests = (x for x in classes if (issubclass(x, unittest.TestCase) and not issubclass(x, _DataTest)))
suite.addTests(getTests(x) for x in non_DataTestTests)
all_tests.append(suite)
all_tests_flattened = []
for x in all_tests:
for y in x:
for z in y:
all_tests_flattened.append(z)
suite = unittest.TestSuite()
if len(args) == 1:
suite.addTests(filter(lambda x: x.id().startswith("__main__."+args[0]), all_tests_flattened))
else:
suite.addTests(all_tests)
res = runTests(suite)
sys.exit(len(res.failures + res.errors)>0)
|
{"hexsha": "b7858693f4c61650069d977fd0f56a5df9abeb64", "size": 30480, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test.py", "max_stars_repo_name": "nheffelman/pyopdata", "max_stars_repo_head_hexsha": "5cc3042b004f167dbf18acc119474ea48b47810d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test.py", "max_issues_repo_name": "nheffelman/pyopdata", "max_issues_repo_head_hexsha": "5cc3042b004f167dbf18acc119474ea48b47810d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test.py", "max_forks_repo_name": "nheffelman/pyopdata", "max_forks_repo_head_hexsha": "5cc3042b004f167dbf18acc119474ea48b47810d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6363636364, "max_line_length": 161, "alphanum_fraction": 0.5934711286, "include": true, "reason": "import numpy,import networkx", "num_tokens": 7376}
|
/**
* @file run_set_visitor.hpp
* @author Saksham Bansal
*
* This file provides an abstraction for the Run() function for
* different layers and automatically directs any parameter to the right layer
* type.
*
* mlpack is free software; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with mlpack. If not, see
* http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/
#ifndef MLPACK_METHODS_ANN_VISITOR_RUN_SET_VISITOR_HPP
#define MLPACK_METHODS_ANN_VISITOR_RUN_SET_VISITOR_HPP
#include <mlpack/methods/ann/layer/layer_traits.hpp>
#include <boost/variant.hpp>
namespace mlpack {
namespace ann {
/**
 * RunSetVisitor sets the run parameter of a visited layer to the
 * given run value.
*/
class RunSetVisitor : public boost::static_visitor<void>
{
public:
//! Set the run parameter given the current run value.
RunSetVisitor(const bool run = true);
//! Set the run parameter.
template<typename LayerType>
void operator()(LayerType* layer) const;
void operator()(MoreTypes layer) const;
private:
//! The run parameter.
const bool run;
//! Set the run parameter if the module implements the
//! Run() and Model() function.
template<typename T>
typename std::enable_if<
HasRunCheck<T, bool&(T::*)(void)>::value &&
HasModelCheck<T>::value, void>::type
LayerRun(T* layer) const;
//! Set the run parameter if the module implements the
//! Model() function.
template<typename T>
typename std::enable_if<
!HasRunCheck<T, bool&(T::*)(void)>::value &&
HasModelCheck<T>::value, void>::type
LayerRun(T* layer) const;
//! Set the run parameter if the module implements the
//! Run() function.
template<typename T>
typename std::enable_if<
HasRunCheck<T, bool&(T::*)(void)>::value &&
!HasModelCheck<T>::value, void>::type
LayerRun(T* layer) const;
//! Do not set the run parameter if the module doesn't implement the
//! Run() or Model() function.
template<typename T>
typename std::enable_if<
!HasRunCheck<T, bool&(T::*)(void)>::value &&
!HasModelCheck<T>::value, void>::type
LayerRun(T* layer) const;
};
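/*
 * Usage sketch (added): the visitor is applied to mlpack's layer variant via
 * boost::apply_visitor. `layer` below stands for any LayerTypes-style variant
 * holding a module pointer; the exact variant type is an assumption here,
 * not something this header defines.
 *
 *   RunSetVisitor visitor(false);        // request run = false
 *   boost::apply_visitor(visitor, layer);
 */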
} // namespace ann
} // namespace mlpack
// Include implementation.
#include "run_set_visitor_impl.hpp"
#endif
|
{"hexsha": "a7aaa065af9e2a2536a981ae476d1406af3e2b2d", "size": 2339, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/mlpack/methods/ann/visitor/run_set_visitor.hpp", "max_stars_repo_name": "tomjpsun/mlpack", "max_stars_repo_head_hexsha": "39b9a852c58b648ddb9b87a3d87aa3db2bacbf0a", "max_stars_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-11-07T14:34:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-07T14:34:37.000Z", "max_issues_repo_path": "src/mlpack/methods/ann/visitor/run_set_visitor.hpp", "max_issues_repo_name": "tomjpsun/mlpack", "max_issues_repo_head_hexsha": "39b9a852c58b648ddb9b87a3d87aa3db2bacbf0a", "max_issues_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-04-10T17:39:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-11T14:56:25.000Z", "max_forks_repo_path": "src/mlpack/methods/ann/visitor/run_set_visitor.hpp", "max_forks_repo_name": "tomjpsun/mlpack", "max_forks_repo_head_hexsha": "39b9a852c58b648ddb9b87a3d87aa3db2bacbf0a", "max_forks_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8452380952, "max_line_length": 78, "alphanum_fraction": 0.6998717401, "num_tokens": 591}
|
"""Time-lagged independent component analysis-based CV"""
__all__ = ["TICA_CV"]
import numpy as np
import pandas as pd
import torch
from .tica import TICA
from ..models import LinearCV
from ..utils.data import find_time_lagged_configurations
class TICA_CV(LinearCV):
""" Linear TICA CV.
Attributes
----------
tica : mlcvs.tica.TICA
TICA-object.
"""
def __init__(self, n_features, **kwargs):
"""Create a Linear TICA CV
Parameters
----------
n_features : int
Number of input features
"""
super().__init__(n_features=n_features, **kwargs)
self.name_ = "tica_cv"
self.tica = TICA()
def fit(self, X, t = None, lag = 10, logweights = None):
"""Fit TICA given time-lagged data (and optional weights).
Parameters
----------
X : numpy array, pandas dataframe or torch.Tensor
Input data
t : numpy array, pandas dataframe or torch.Tensor, optional
Time array, by default None -> np.arange(0,len(X))
lag : int, optional
lag-time, by default 10
logweights: array, optional
logarithm of the weights of the configurations
See Also
--------
fit_predict : train and project along TICA components
"""
# if DataFrame save feature names
if type(X) == pd.DataFrame:
if 'time' in X.columns:
t = X['time'].values
X = X.drop(columns='time')
self.feature_names = X.columns.values
X = X.values #torch.Tensor(X.values).to(self.device_)
#elif type(X) != torch.Tensor:
# X = torch.Tensor(X).to(self.device_)
        # default time array
        if t is None:
            t = np.arange(0,len(X))
        # convert a DataFrame time column to a plain array
        if type(t) == pd.DataFrame:
            t = t.values
if len(X) != len(t):
raise ValueError(f'length of X is {len(X)} while length of t is {len(t)}')
# if weights are given, rescale the time before finding time lagged configurations
if logweights is not None:
# compute time increment in simulation time t
dt = np.round(t[1]-t[0],3)
            # sanitize logweights: normalize in log-space so that
            # sum_i exp(logweights_i) = 1 (subtract logsumexp; dividing
            # log-weights by it would rescale rather than normalize them)
            logweights = torch.Tensor(logweights)
            logweights -= torch.max(logweights)
            lognorm = torch.logsumexp(logweights,0)
            logweights -= lognorm
# compute instantaneus time increment in rescaled time t'
d_tprime = torch.exp(logweights)*dt
# calculate cumulative time t'
tprime = torch.cumsum(d_tprime,0)
else:
tprime = t
# find time-lagged configurations
x_t, x_lag, w_t, w_lag = find_time_lagged_configurations(X,tprime,lag)
# compute mean-free variables
ave = self.tica.compute_average(x_t,w_t)
x_t.sub_(ave)
x_lag.sub_(ave)
# perform TICA
_, eigvecs = self.tica.compute_TICA(data = [x_t,x_lag],
weights = [w_t,w_lag],
save_params=True)
# save parameters for estimator
self.set_average(ave)
self.w = eigvecs
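    # Note on the reweighting above (added comment): with log-weights from a
    # biased simulation, time is rescaled as t'(t) = sum_{s<=t} exp(logw_s)*dt,
    # i.e. the cumulative sum of per-frame rescaled increments, and the lag is
    # then taken in t' instead of the simulation time t.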
    def fit_predict(self, X, t = None, lag = 10, logweights = None):
"""Train TICA CV and project data
Parameters
----------
X : numpy array, pandas dataframe or torch.Tensor
Input data
t : numpy array, pandas dataframe or torch.Tensor, optional
Time array, by default None -> np.arange(0,len(X))
lag : int, optional
lag-time, by default 10
logweights: array, optional
logarithm of the weights of the configurations
Returns
-------
torch.Tensor
projection of input data along TICA components
See Also
--------
fit : train TICA estimator
"""
        self.fit(X, t, lag, logweights)
return self.forward(X)
def set_average(self, Mean, Range=None):
"""Save averages for computing mean-free inputs
Parameters
----------
Mean : torch.Tensor
Input means
Range : torch.Tensor, optional
Range of inputs, by default None
"""
if Range is None:
Range = torch.ones_like(Mean)
if hasattr(self,"MeanIn"):
self.MeanIn = Mean
self.RangeIn = Range
else:
self.register_buffer("MeanIn", Mean)
self.register_buffer("RangeIn", Range)
self.normIn = True
def set_regularization(self, cholesky_reg):
"""
Set regularization for cholesky decomposition.
Parameters
----------
cholesky_reg : float
Regularization value.
"""
self.tica.reg_cholesky = cholesky_reg
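if __name__ == '__main__':
    # Minimal smoke test (added sketch); it assumes the package imports above
    # resolve. The synthetic trajectory and the lag are illustrative only.
    X = np.cumsum(np.random.randn(500, 3), axis=0)   # random-walk "trajectory"
    cv = TICA_CV(n_features=3)
    cv.fit(X, lag=5)
    print(cv.w)   # TICA eigenvectors, one column per component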
|
{"hexsha": "0bde1a5d5bb40f1bf4399da0a2066be5a0a1e490", "size": 4926, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlcvs/tica/linear_tica.py", "max_stars_repo_name": "luigibonati/mlcvs", "max_stars_repo_head_hexsha": "6567fb0774dc354f9cf3472dc356fdcf10aba6f2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-14T10:06:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T10:06:42.000Z", "max_issues_repo_path": "mlcvs/tica/linear_tica.py", "max_issues_repo_name": "luigibonati/mlcvs", "max_issues_repo_head_hexsha": "6567fb0774dc354f9cf3472dc356fdcf10aba6f2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-10-31T09:28:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T15:13:21.000Z", "max_forks_repo_path": "mlcvs/tica/linear_tica.py", "max_forks_repo_name": "luigibonati/mlcvs", "max_forks_repo_head_hexsha": "6567fb0774dc354f9cf3472dc356fdcf10aba6f2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3214285714, "max_line_length": 90, "alphanum_fraction": 0.5466910272, "include": true, "reason": "import numpy", "num_tokens": 1101}
|
{-# OPTIONS --without-K --safe #-}
module Categories.NaturalTransformation where
-- all the important stuff about NaturalTransformation are defined in .Core
open import Categories.NaturalTransformation.Core public
|
{"hexsha": "be992cc5eb48eb6dff59088d5d59effe7eb59ad5", "size": 216, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/Categories/NaturalTransformation.agda", "max_stars_repo_name": "Trebor-Huang/agda-categories", "max_stars_repo_head_hexsha": "d9e4f578b126313058d105c61707d8c8ae987fa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 279, "max_stars_repo_stars_event_min_datetime": "2019-06-01T14:36:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T00:40:14.000Z", "max_issues_repo_path": "src/Categories/NaturalTransformation.agda", "max_issues_repo_name": "Code-distancing/agda-categories", "max_issues_repo_head_hexsha": "d9e4f578b126313058d105c61707d8c8ae987fa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 236, "max_issues_repo_issues_event_min_datetime": "2019-06-01T14:53:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T14:31:43.000Z", "max_forks_repo_path": "src/Categories/NaturalTransformation.agda", "max_forks_repo_name": "Code-distancing/agda-categories", "max_forks_repo_head_hexsha": "d9e4f578b126313058d105c61707d8c8ae987fa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 64, "max_forks_repo_forks_event_min_datetime": "2019-06-02T16:58:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T02:00:59.000Z", "avg_line_length": 30.8571428571, "max_line_length": 75, "alphanum_fraction": 0.8055555556, "num_tokens": 38}
|
[STATEMENT]
lemma Raise_Subst':
assumes "t \<noteq> \<^bold>\<sharp>"
shows "\<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t) = Subst (p + n) v (Raise k p t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t) = Subst (p + n) v (Raise k p t)
[PROOF STEP]
using assms raise_plus
[PROOF STATE]
proof (prove)
using this:
t \<noteq> \<^bold>\<sharp>
?d \<le> ?n \<Longrightarrow> Raise 0 (?m + ?n) ?t = Raise ?d ?m (Raise 0 ?n ?t)
goal (1 subgoal):
1. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t) = Subst (p + n) v (Raise k p t)
[PROOF STEP]
apply (induct t arbitrary: v k n p, auto)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>ta v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; ta \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v ta) = Subst (p + n) v (Raise k p ta); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise (Suc k) p (Subst (Suc n) v ta) = Subst (Suc (p + n)) v (Raise (Suc k) p ta)
2. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1)
3. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
4. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise (Suc k) p (Subst (Suc n) v t1) = Subst (Suc (p + n)) v (Raise (Suc k) p t1)
5. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
[PROOF STEP]
apply (metis Raise.simps(1) Subst_Nil Suc_le_mono add_Suc_right)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1)
2. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
3. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise (Suc k) p (Subst (Suc n) v t1) = Subst (Suc (p + n)) v (Raise (Suc k) p t1)
4. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
2. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise (Suc k) p (Subst (Suc n) v t1) = Subst (Suc (p + n)) v (Raise (Suc k) p t1)
3. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise (Suc k) p (Subst (Suc n) v t1) = Subst (Suc (p + n)) v (Raise (Suc k) p t1)
2. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
[PROOF STEP]
apply (metis Raise.simps(1) Subst_Nil Suc_le_mono add_Suc_right)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>t1 t2 v k n p. \<lbrakk>\<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t1 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t1) = Subst (p + n) v (Raise k p t1); \<And>v k n p. \<lbrakk>v \<noteq> \<^bold>\<sharp>; k \<le> n; t2 \<noteq> \<^bold>\<sharp>\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2); v \<noteq> \<^bold>\<sharp>; k \<le> n; t \<noteq> \<^bold>\<sharp>; \<And>d n m t. d \<le> n \<Longrightarrow> Raise 0 (m + n) t = Raise d m (Raise 0 n t)\<rbrakk> \<Longrightarrow> Raise k p (Subst n v t2) = Subst (p + n) v (Raise k p t2)
[PROOF STEP]
by fastforce
|
{"llama_tokens": 4811, "file": "ResiduatedTransitionSystem_LambdaCalculus", "length": 7}
|
import skrf
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import numpy as np
import CircuitFig
from PIL import ImageTk, Image, ImageDraw
import io
import MatchCal
l2z = lambda l: l[0] + 1j * l[1]
s4cmp = lambda sf: 'nH' if sf == 'l' else 'pF'
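# Examples (added): l2z([50, -10]) -> (50-10j); s4cmp('l') -> 'nH', s4cmp('c') -> 'pF'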
def ld4img2gui(label: tk.Label,
color: str, stage: int, sh_se: bool,
cmp_l: list, cmp_v: list, z_val: str = '50+0j',
valid: bool = True):
cr_cfg = CircuitFig.CircuitFig(color, stage, sh_se, cmp_l, cmp_v, z_val)
    image = Image.open(io.BytesIO(cr_cfg.image_data)).resize((300, 180), Image.LANCZOS)  # LANCZOS replaces the deprecated ANTIALIAS alias
im = Image.new('RGBA', (300, 180), (255, 255, 255, 255))
draw = ImageDraw.Draw(im)
im.paste(image, (0, 0))
if not valid:
draw.line((0, 0, 300, 180), fill=(255, 0, 0, 255), width=5)
draw.line((0, 180, 300, 0), fill=(255, 0, 0, 255), width=5)
label.image = ImageTk.PhotoImage(im)
label.configure(image=label.image)
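# Usage sketch (added): render a single-stage shunt network into an existing
# label; the component values shown are illustrative only.
#   ld4img2gui(label, 'b', 1, False, ['c', 'l', 'c'], ['3.30 pF', 'SHORT', ''],
#              z_val='50+0j', valid=True)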
class TkGui:
def __init__(self, master):
self.master = master
self.top_frame = tk.Frame(self.master)
self.top_frame.pack(side=tk.LEFT)
self.right_frame = tk.Frame(self.master)
self.right_frame.pack(side=tk.LEFT, fill=tk.BOTH)
self.upper_sch_f = tk.Frame(self.right_frame)
self.upper_sch_f.grid(row=0, padx=(0, 5), pady=(5, 0), sticky="nsew")
self.lower_ety_f = tk.Frame(self.right_frame)
self.lower_ety_f.grid(row=1, padx=(0, 5), pady=(0, 5), sticky="nsew")
self.fig = Figure(figsize=(5, 6), dpi=100)
self.fig_cvs = FigureCanvasTkAgg(self.fig, master=self.top_frame)
        self.ax = self.fig.gca()  # matplotlib Axes (gca() returns an Axes, not a Figure)
self.fig_cvs.get_tk_widget().pack(side=tk.LEFT, padx=5, pady=5)
try:
with open('ring slot.s1p', 'r'):
pass
except IOError:
with open('ring slot.s1p', 'a+') as wf:
wf.write("""!Created with skrf (http://scikit-rf.org).
# GHz S RI R 50.0
!freq ReS11 ImS11
75.0 -0.503723180993 0.457844804761""")
self.my_slot = skrf.Network('ring slot.s1p')
self.to_match_z = [50, 0]
self.ser_match_z = [50, 0]
self.shu_match_z = [50, 0]
self.shu_ser_match_z_a = [50, 0]
self.shu_ser_match_z_b = [50, 0]
self.ser_shu_match_z_a = [50, 0]
self.ser_shu_match_z_b = [50, 0]
self.plt_z0 = 50 + 0j
self.plt_freq = 2.45e9
self.up2chart()
        self.lb1 = tk.Label(self.upper_sch_f, relief="ridge")
        self.lb1_tit = tk.Label(self.upper_sch_f, text='Shunt Matching', relief="raised")
        self.lb1_tit.grid(row=0, column=0, sticky="nsew")
        self.lb1.grid(row=1, column=0)
        self.lb2 = tk.Label(self.upper_sch_f, relief="ridge")
        self.lb2_tit = tk.Label(self.upper_sch_f, text='Series Matching', relief="raised")
        self.lb2_tit.grid(row=0, column=1, sticky="nsew")
        self.lb2.grid(row=1, column=1)
        self.lb3 = tk.Label(self.upper_sch_f, relief="ridge")
        self.lb3_tit = tk.Label(self.upper_sch_f, text='Shunt-Series Matching', relief="raised")
        self.lb3_tit.grid(row=2, column=0, sticky="nsew")
        self.lb3.grid(row=3, column=0)
        self.lb4 = tk.Label(self.upper_sch_f, relief="ridge")
        self.lb4_tit = tk.Label(self.upper_sch_f, text='Shunt-Series Matching', relief="raised")
        self.lb4_tit.grid(row=2, column=1, sticky="nsew")
        self.lb4.grid(row=3, column=1)
        self.lb5 = tk.Label(self.upper_sch_f, relief="ridge")
        self.lb5_tit = tk.Label(self.upper_sch_f, text='Series-Shunt Matching', relief="raised")
        self.lb5_tit.grid(row=4, column=0, sticky="nsew")
        self.lb5.grid(row=5, column=0)
        self.lb6 = tk.Label(self.upper_sch_f, relief="ridge")
        self.lb6_tit = tk.Label(self.upper_sch_f, text='Series-Shunt Matching', relief="raised")
        self.lb6_tit.grid(row=4, column=1, sticky="nsew")
        self.lb6.grid(row=5, column=1)
ld4img2gui(self.lb1, 'b', 1, False, ['c', 'l', 'c'], ['NC', 'SHORT', ''])
ld4img2gui(self.lb2, 'y', 1, True, ['c', 'l', 'c'], ['', 'SHORT', ''])
ld4img2gui(self.lb3, 'g', 2, False, ['c', 'l', 'c'], ['NC', 'SHORT', ''])
ld4img2gui(self.lb4, 'purple', 2, False, ['c', 'l', 'c'], ['NC', 'SHORT', ''])
ld4img2gui(self.lb5, 'orange', 2, True, ['c', 'l', 'c'], ['', 'SHORT', 'NC'])
ld4img2gui(self.lb6, 'brown', 2, True, ['c', 'l', 'c'], ['', 'SHORT', 'NC'])
###################################################################
self.to_match_r = tk.StringVar(value=str(self.to_match_z[0]))
self.to_match_i = tk.StringVar(value=str(self.to_match_z[1]))
self.ety_lb1 = tk.Label(self.lower_ety_f, text='To Match Complex Value')
self.ety_lb1.pack(side=tk.TOP)
self.ety_lb1b = tk.Label(self.lower_ety_f, text='Z = ')
self.ety_lb1b.pack(side=tk.LEFT)
self.ety1_r = tk.Entry(self.lower_ety_f, textvariable=self.to_match_r)
self.ety1_r.pack(side=tk.LEFT)
self.ety_lb1c = tk.Label(self.lower_ety_f, text=' + ')
self.ety_lb1c.pack(side=tk.LEFT)
self.ety1_i = tk.Entry(self.lower_ety_f, textvariable=self.to_match_i)
self.ety1_i.pack(side=tk.LEFT)
        self.ety_lb1d = tk.Label(self.lower_ety_f, text='j')
        self.ety_lb1d.pack(side=tk.LEFT)
self.enter = tk.Button(self.lower_ety_f, text="Start Auto Solver",
command=self.ld2chart)
self.enter.pack(side=tk.LEFT)
def ld2chart(self):
self.to_match_z = [float(self.ety1_r.get()), float(self.ety1_i.get())]
tmp_cal = MatchCal.MatchCal()
tmp_cal.tar_freq = self.plt_freq
to_mat = float(self.ety1_r.get()) + 1j * float(self.ety1_i.get())
tmp_cal.shu_0_sol(to_mat)
disp_str = f'{tmp_cal.shu:.2f} {s4cmp(tmp_cal.shu_t)}' if tmp_cal.shu else 'NC'
ld4img2gui(self.lb1, 'b', 1, False, [tmp_cal.shu_t, 'l', 'c'],
[disp_str, 'SHORT', ''],
f'{int(tmp_cal.tmp_z.real)}+{int(tmp_cal.tmp_z.imag)}j',
tmp_cal.sol_valid)
self.ser_match_z = [tmp_cal.tmp_z.real, tmp_cal.tmp_z.imag]
tmp_cal.ser_0_sol(to_mat)
disp_str = f'{tmp_cal.ser:.2f} {s4cmp(tmp_cal.ser_t)}' if tmp_cal.ser else 'SHORT'
ld4img2gui(self.lb2, 'y', 1, True, ['c', tmp_cal.ser_t, 'c'],
['', disp_str, ''],
f'{int(tmp_cal.tmp_z.real)}+{int(tmp_cal.tmp_z.imag)}j',
tmp_cal.sol_valid)
self.shu_match_z = [tmp_cal.tmp_z.real, tmp_cal.tmp_z.imag]
tmp_cal.sol_2stage(to_mat, True)
disp_str1 = f'{tmp_cal.ser:.2f} {s4cmp(tmp_cal.ser_t)}' if tmp_cal.ser else 'SHORT'
disp_str2 = f'{tmp_cal.shu:.2f} {s4cmp(tmp_cal.shu_t)}' if tmp_cal.shu else 'NC'
ld4img2gui(self.lb3, 'g', 2, False, [tmp_cal.shu_t, tmp_cal.ser_t, 'c'],
[disp_str2, disp_str1, ''],
f'{int(tmp_cal.tmp_z.real)}+{int(tmp_cal.tmp_z.imag)}j',
tmp_cal.sol_valid)
self.shu_ser_match_z_a = [tmp_cal.tmp_z.real, tmp_cal.tmp_z.imag]
tmp_cal.sol_2stage(to_mat, True, True)
disp_str1 = f'{tmp_cal.ser:.2f} {s4cmp(tmp_cal.ser_t)}' if tmp_cal.ser else 'SHORT'
disp_str2 = f'{tmp_cal.shu:.2f} {s4cmp(tmp_cal.shu_t)}' if tmp_cal.shu else 'NC'
ld4img2gui(self.lb4, 'purple', 2, False, [tmp_cal.shu_t, tmp_cal.ser_t, 'c'],
[disp_str2, disp_str1, ''],
f'{int(tmp_cal.tmp_z.real)}+{int(tmp_cal.tmp_z.imag)}j',
tmp_cal.sol_valid)
self.shu_ser_match_z_b = [tmp_cal.tmp_z.real, tmp_cal.tmp_z.imag]
tmp_cal.sol_2stage(to_mat)
disp_str1 = f'{tmp_cal.ser:.2f} {s4cmp(tmp_cal.ser_t)}' if tmp_cal.ser else 'SHORT'
disp_str2 = f'{tmp_cal.shu:.2f} {s4cmp(tmp_cal.shu_t)}' if tmp_cal.shu else 'NC'
ld4img2gui(self.lb5, 'orange', 2, True, ['c', tmp_cal.ser_t, tmp_cal.shu_t],
['', disp_str1, disp_str2],
f'{int(tmp_cal.tmp_z.real)}+{int(tmp_cal.tmp_z.imag)}j',
tmp_cal.sol_valid)
self.ser_shu_match_z_a = [tmp_cal.tmp_z.real, tmp_cal.tmp_z.imag]
tmp_cal.sol_2stage(to_mat, ans_sel=True)
disp_str1 = f'{tmp_cal.ser:.2f} {s4cmp(tmp_cal.ser_t)}' if tmp_cal.ser else 'SHORT'
disp_str2 = f'{tmp_cal.shu:.2f} {s4cmp(tmp_cal.shu_t)}' if tmp_cal.shu else 'NC'
ld4img2gui(self.lb6, 'brown', 2, True, ['c', tmp_cal.ser_t, tmp_cal.shu_t],
['', disp_str1, disp_str2],
f'{int(tmp_cal.tmp_z.real)}+{int(tmp_cal.tmp_z.imag)}j',
tmp_cal.sol_valid)
self.ser_shu_match_z_b = [tmp_cal.tmp_z.real, tmp_cal.tmp_z.imag]
self.up2chart()
def up2chart(self):
self.ax.clear()
self.fig2gui(np.array([[[l2z(self.to_match_z)]]]), 'To Match', 'r', 's')
self.fig2gui(np.array([[[l2z(self.ser_match_z)]]]), 'After Match', 'b', 'o')
self.fig2gui(np.array([[[l2z(self.shu_match_z)]]]), 'After Match', 'y', 'o')
self.fig2gui(np.array([[[l2z(self.shu_ser_match_z_a)]]]), 'After Match', 'g', 'o')
self.fig2gui(np.array([[[l2z(self.shu_ser_match_z_b)]]]), 'After Match', 'purple', 'o')
self.fig2gui(np.array([[[l2z(self.ser_shu_match_z_a)]]]), 'After Match', 'orange', 'o')
self.fig2gui(np.array([[[l2z(self.ser_shu_match_z_b)]]]), 'After Match', 'brown', 'o')
    def fig2gui(self, plt_data: np.ndarray,
label: str = '', color: str = 'r', mark: str = 's',
plt_sel: bool = False) -> None:
self.my_slot.frequency = self.plt_freq
self.my_slot.z0 = self.plt_z0
self.my_slot.z = plt_data
if plt_sel:
self.my_slot.plot_s_db(ax=self.ax)
else:
self.my_slot.plot_s_smith(ax=self.ax, draw_labels=True, show_legend=False,
label=label, color=color, chart_type='zy', marker=mark)
self.ax.legend(bbox_to_anchor=(0.5, 1.05), loc='lower center', ncol=3,
fancybox=True, shadow=True)
self.fig_cvs.draw()
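
# Usage sketch (assumption: this module's imports above provide tk, skrf,
# Figure, FigureCanvasTkAgg, MatchCal and the helpers ld4img2gui, s4cmp, l2z;
# none of those names are defined in this class itself).
if __name__ == '__main__':
    root = tk.Tk()
    root.title('Smith Chart Matching')
    gui = TkGui(root)
    root.mainloop()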
|
{"hexsha": "7792dcc7dbc4922ec1444b4df1c835939f82135a", "size": 10328, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/TkGui.py", "max_stars_repo_name": "briansune/python-smith-chart-antenna-matching", "max_stars_repo_head_hexsha": "e21dccfb4fbddb7a4fabdd89854dbaf1bd93ea31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-30T20:37:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T20:37:39.000Z", "max_issues_repo_path": "code/TkGui.py", "max_issues_repo_name": "briansune/python-smith-chart-antenna-matching", "max_issues_repo_head_hexsha": "e21dccfb4fbddb7a4fabdd89854dbaf1bd93ea31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/TkGui.py", "max_forks_repo_name": "briansune/python-smith-chart-antenna-matching", "max_forks_repo_head_hexsha": "e21dccfb4fbddb7a4fabdd89854dbaf1bd93ea31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.4977973568, "max_line_length": 95, "alphanum_fraction": 0.5807513555, "include": true, "reason": "import numpy", "num_tokens": 3144}
|
const ASSET_FINGERPRINT = "8d9151df5a4a5fafb268"
|
{"hexsha": "0a160538e62cde72eba61b47dc7ce880229f319b", "size": 48, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "config/initializers/fingerprint.jl", "max_stars_repo_name": "essenciary/chirper", "max_stars_repo_head_hexsha": "5e809216b3bbe517c65156328717d8c110a8d934", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "config/initializers/fingerprint.jl", "max_issues_repo_name": "essenciary/chirper", "max_issues_repo_head_hexsha": "5e809216b3bbe517c65156328717d8c110a8d934", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "config/initializers/fingerprint.jl", "max_forks_repo_name": "essenciary/chirper", "max_forks_repo_head_hexsha": "5e809216b3bbe517c65156328717d8c110a8d934", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-22T07:21:11.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-22T07:21:11.000Z", "avg_line_length": 48.0, "max_line_length": 48, "alphanum_fraction": 0.875, "num_tokens": 22}
|
/*
* Copyright 2021 Oleg Zharkov
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <exception>
#include <iostream>
#include <sstream>
#include <string>
#include <list>
#include <vector>
#include <memory>
#include <sstream>
#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/iostreams/copy.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <iostream>
#include "updates.h"
#define SOCKET_BUFFER_SIZE 2048
namespace bpt = boost::property_tree;
int Updates::GetConfig() {
update_status = 1;
return update_status;
}
int Updates::Open(int mode, pid_t pid) {
bool amq_conn = false;
int conn_attempts = 0;
altprobe_mode = mode;
p_pid = pid;
do {
try {
if (connection == NULL) {
activemq::library::ActiveMQCPP::initializeLibrary();
if (ssl_broker) {
decaf::lang::System::setProperty( "decaf.net.ssl.trustStore", cert );
if (ssl_verify) {
decaf::lang::System::setProperty("decaf.net.ssl.disablePeerVerification", "false");
} else {
decaf::lang::System::setProperty("decaf.net.ssl.disablePeerVerification", "true");
}
}
if (ssl_client) {
decaf::lang::System::setProperty("decaf.net.ssl.keyStore", key);
decaf::lang::System::setProperty("decaf.net.ssl.keyStorePassword", key_pwd);
}
// Create a ConnectionFactory
string strUrl(url);
unique_ptr<ConnectionFactory> connectionFactory(
ConnectionFactory::createCMSConnectionFactory(strUrl));
// Create a Connection
if (user_pwd) {
connection = connectionFactory->createConnection(user,pwd);
} else {
connection = connectionFactory->createConnection();
}
connection->start();
}
if (session == NULL) {
// Create a Session
if (this->sessionTransacted) {
session = connection->createSession(Session::SESSION_TRANSACTED);
} else {
session = connection->createSession(Session::AUTO_ACKNOWLEDGE);
}
}
// Create the MessageConsumer
string strConsumer("jms/altprobe/" + fs.filter.ref_id + "/" + node_id + "/" + probe_id + "/sensors");
Destination* consumerCommand = session->createQueue(strConsumer);
// Create a MessageConsumer from the Session to the Topic or Queue
consumer = session->createConsumer(consumerCommand);
consumer->setMessageListener(this);
mq_counter++;
amq_conn = true;
            string log = "listening on sensors bus";
SysLog((char*) log.c_str());
} catch (CMSException& e) {
if (conn_attempts > 10) {
SysLog("activeMQ operation error");
string str = e.getMessage();
const char * c = str.c_str();
SysLog((char*) c);
return 0;
}
sleep(3);
conn_attempts++;
}
} while (!amq_conn);
return 1;
}
int Updates::Go(void) {
sleep(1);
return 1;
}
// Called from the consumer since this class is a registered MessageListener.
void Updates::onMessage(const Message* message) {
try {
string corr_id = message->getCMSCorrelationID();
string headerJson = "{ \"request_id\": \"" + corr_id + "\", ";
string bodyJson = "\"status\": 400 }";
if (dynamic_cast<const BytesMessage*> (message)) {
bodyJson = onBytesMessage(message);
} else {
if (dynamic_cast<const TextMessage*> (message)) {
bodyJson = onTextMessage(message);
}
}
string responseJson = headerJson + bodyJson;
// Create a MessageProducer from the Session to Queue
const Destination* tmpDest = message->getCMSReplyTo();
MessageProducer* tmpProd = session->createProducer(tmpDest);
auto_ptr<TextMessage> response(session->createTextMessage(responseJson));
tmpProd->send(response.get());
delete tmpProd;
tmpProd = NULL;
} catch (CMSException& e) {
SysLog("ActiveMQ CMS Exception occurred: update module");
CheckStatus();
return;
}
if (this->sessionTransacted) {
session->commit();
}
}
// If something bad happens you see it here, as this class has also been
// registered as an ExceptionListener with the connection.
void Updates::onException(const CMSException& ex AMQCPP_UNUSED) {
SysLog("ActiveMQ CMS Exception occurred: update module");
CheckStatus();
}
void Updates::Close() {
// Destroy resources.
try {
if (consumer) {
delete consumer;
consumer = NULL;
}
m_controller.lock();
mq_counter--;
m_controller.unlock();
if (mq_counter == 0) {
delete session;
session = NULL;
delete connection;
connection = NULL;
}
} catch (CMSException& e) {
SysLog("activeMQ operation error: destroy resources");
}
}
string Updates::onBytesMessage(const Message* message) {
string ref_id = message->getStringProperty("ref_id");
if(ref_id.compare(fs.filter.ref_id)) return "\"status\": 400 }";
const BytesMessage* bytesMessage = dynamic_cast<const BytesMessage*> (message);
const unsigned char* comp = bytesMessage->getBodyBytes();
int comp_size = bytesMessage->getBodyLength();
stringstream ss, decomp;
ss.write(reinterpret_cast<const char*>(&comp[0]),comp_size);
boost::iostreams::filtering_streambuf<boost::iostreams::input> inbuf;
inbuf.push(boost::iostreams::gzip_decompressor());
inbuf.push(ss);
boost::iostreams::copy(inbuf, decomp);
boost::iostreams::close(inbuf);
ofstream ostream;
string cmd;
string content_type = message->getStringProperty("content_type");
if (!content_type.compare("filters") && ruStatus) {
fs.ParsFiltersConfig(decomp.str());
try {
ostream.open(FILTERS_FILE, ios_base::trunc);
ostream << decomp.str();
ostream.close();
SysLog("filters have been updated");
        } catch (const std::ostream::failure& e) {
SysLog("Exception for local filters file.");
return "\"status\": 400 }";
}
return "\"status\": 200 }";
}
if (!content_type.compare("rules") && ruStatus) {
try {
int rules_type = message->getIntProperty("rules_type");
string rule_name = message->getStringProperty("rule_name");
bool rule_reload = message->getBooleanProperty("rule_reload");
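            /* rules_type mapping (from the cases below):
               0 = Falco, 1 = ModSecurity, 2 = Suricata,
               3 = Wazuh rules, 4 = Wazuh decoders */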
switch (rules_type) {
case 0 : {
string rules_path(falco_local);
string file_path = rules_path + rule_name;
ostream.open(file_path, ios_base::trunc);
cmd = "/etc/altprobe/scripts/rulesup-falco.sh";
}
break;
case 1 : {
string rules_path(modsec_local);
string file_path = rules_path + rule_name;
ostream.open(file_path, ios_base::trunc);
cmd = "/etc/altprobe/scripts/rulesup-modsec.sh";
}
break;
case 2 : {
string rules_path(suri_local);
string file_path = rules_path + rule_name;
ostream.open(file_path, ios_base::trunc);
cmd = "/etc/altprobe/scripts/rulesup-suri.sh";
}
break;
case 3 : {
string dir_path(wazuh_local);
string rules_path(WAZUH_RULES);
string file_path = dir_path + rules_path + rule_name;
ostream.open(file_path, ios_base::trunc);
cmd = "/etc/altprobe/scripts/rulesup-wazuh.sh";
}
break;
case 4 : {
string dir_path(wazuh_local);
string rules_path(WAZUH_DECODERS);
string file_path = dir_path + rules_path + rule_name;
ostream.open(file_path, ios_base::trunc);
cmd = "/etc/altprobe/scripts/rulesup-wazuh.sh";
}
break;
default:
return "\"status\": 400 }";
}
ostream << decomp.str();
ostream.close();
if (rcStatus && rule_reload) system(cmd.c_str());
        } catch (const std::ostream::failure& e) {
            SysLog("Exception for local rules file.");
return "\"status\": 400 }";
}
return "\"status\": 200 }";
}
return "\"status\": 400 }";
}
string Updates::onTextMessage(const Message* message) {
const TextMessage* textMessage = dynamic_cast<const TextMessage*> (message);
string c2json = textMessage->getText();
//************************************************************************************************************************
SysLog((char*) c2json.c_str());
stringstream c2json_ss(c2json);
bpt::ptree pt;
bpt::read_json(c2json_ss, pt);
string ref_id = pt.get<string>("actuator.x-alertflex.tenant","indef");
if(ref_id.compare(fs.filter.ref_id) || !ref_id.compare("indef") || !rcStatus) {
return "\"status\": 400, \"status_text\": \"wrong tenant\" }";
}
string action = pt.get<string>("action","indef");
string actuator_profile = pt.get<string>("actuator.x-alertflex.profile","indef");
if(!actuator_profile.compare("indef") || !action.compare("indef")) {
return "\"status\": 400, \"status_text\": \"wrong actuator or action\" }";
}
if(!actuator_profile.compare("suricata_command") && !action.compare("deny")) {
// char test1[] = "{\"command\": \"add-hostbit\", \"arguments\": {\"ipaddress\": \"192.168.1.2\",
// \"hostbit\": \"alertflex_ar\", \"expire\": 360}}";
string ip = pt.get<string>("target.ipv4_net","indef");
string rule = pt.get<string>("args.x-alertflex:suricata_command.rule_name","indef");
int duration = pt.get<int>("args.duration",0);
stringstream int_ss;
int_ss << duration;
string ph1 = "{\"command\": \"add-hostbit\", \"arguments\": {\"ipaddress\": \"";
string ph2 = "\", \"hostbit\": \"" + rule + "\", \"expire\": ";
string suri_cmd = ph1 + ip + ph2 + int_ss.str() + "}}";
string res = SendArToSuricata(suri_cmd);
if (res.compare("ok")) {
return "\"status\": 400, \"status_text\": \"" + res + "\" }";
}
return "\"status\": 200 }";
}
if(!actuator_profile.compare("wazuh_agent")) {
if(!wazuh_token.compare("indef")) {
return "\"status\": 400, \"status_text\": \"problem with Wazuh api\" }";
}
if(!action.compare("create")) {
try {
string ip = pt.get<string>("target.device.ipv4_net","indef");
string agent = pt.get<string>("args.x-alertflex:wazuh_agent.name","indef");
if(!ip.compare("indef") || !agent.compare("indef")) {
return "\"status\": 400, \"status_text\": \"wrong ip or agent name for create\" }";
}
string jsonCreateAgent = "{ \"name\": \"" + agent + "\", \"ip\": \"" + ip + "\" }";
string res = CreateAgentWazuh(jsonCreateAgent);
return res;
} catch (const std::exception & ex) {
return "\"status\": 400, \"status_text\": \"wrong args\" }";
}
}
if(!action.compare("delete")) {
try {
string agent = pt.get<string>("args.x-alertflex:wazuh_agent.id","indef");
if(!agent.compare("indef")) {
return "\"status\": 400, \"status_text\": \"agent id is missing\" }";
}
string res = DeleteAgentWazuh(agent);
return res;
} catch (const std::exception & ex) {
return "\"status\": 400, \"status_text\": \"wrong args\" }";
}
}
if (!action.compare("start")) {
string agent = pt.get<string>("target.device.device_id","indef");
try {
bpt::ptree pt_args = pt.get_child("args.x-alertflex:wazuh_command");
std::ostringstream oss;
write_json(oss, pt_args);
string args = oss.str();
if(!agent.compare("indef") || args.empty()) {
return "\"status\": 400, \"status_text\": \"wrong agent or args\" }";
}
string res = SendArToWazuh(agent, args);
return res;
} catch (const std::exception & ex) {
return "\"status\": 400, \"status_text\": \"wrong args\" }";
}
}
}
if(!actuator_profile.compare("docker_command") && !action.empty()) {
string id = pt.get<string>("target.device.device_id","indef");
if(!id.compare("indef")) {
return "\"status\": 400, \"status_text\": \"wrong id for stop\" }";
}
string res = DockerContainer(id, action);
if (res.compare("ok")) {
return "\"status\": 400, \"status_text\": \"" + res + "\" }";
}
return "\"status\": 200 }";
}
return "\"status\": 400, \"status_text\": \"wrong actuator or action\" }";
}
string Updates::SendArToWazuh(string agent, string json) {
try {
boost::asio::io_service io_service;
string hostAddress;
string ip(wazuh_host);
stringstream ss;
ss << wazuh_port;
string port = ss.str();
if (wazuh_port != 80) {
hostAddress = ip + ":" + port;
} else {
hostAddress = ip;
}
// Get a list of endpoints corresponding to the server name.
tcp::resolver resolver(io_service);
tcp::resolver::query query(ip, port);
tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
tcp::socket socket(io_service);
boost::asio::connect(socket, endpoint_iterator);
string queryStr = "/active-response/?agents_list=" + agent;
boost::asio::streambuf request;
ostream request_stream(&request);
request_stream << "PUT " << queryStr << " HTTP/1.1\r\n"; // note that you can change it if you wish to HTTP/1.0
request_stream << "Host: " << hostAddress << "\r\n";
request_stream << "Authorization: Bearer "<< wazuh_token << "\r\n";
request_stream << "Accept: */*\r\n";
request_stream << "Content-Type:application/json\r\n";
request_stream << "Content-Length: " << json.length() << "\r\n";
request_stream << "Connection: close\r\n\r\n"; //NOTE THE Double line feed
request_stream << json;
// Send the request.
boost::asio::write(socket, request);
// Read the response status line. The response streambuf will automatically
// grow to accommodate the entire line. The growth may be limited by passing
// a maximum size to the streambuf constructor.
boost::asio::streambuf response;
boost::asio::read_until(socket, response, "\r\n");
// Check that response is OK.
istream response_stream(&response);
string http_version;
response_stream >> http_version;
unsigned int status_code;
response_stream >> status_code;
string status_message;
getline(response_stream, status_message);
        if (!response_stream || http_version.substr(0, 5) != "HTTP/")
            return "\"status\": 400, \"status_text\": \"invalid HTTP response\" }";
if (status_code != 200) return "\"status\": 400, \"status_text\": \"error start command\" }";
// Read the response headers, which are terminated by a blank line.
boost::asio::read_until(socket, response, "\r\n\r\n");
// Process the response headers.
string header;
while (getline(response_stream, header) && header != "\r") { }
stringstream payload;
// Write whatever content we already have to output.
if (response.size() > 0) {
payload << &response;
}
// Read until EOF, writing data to output as we go.
boost::system::error_code error;
while (boost::asio::read(socket, response,boost::asio::transfer_at_least(1), error)) {
payload << &response;
}
if (error != boost::asio::error::eof) {
throw boost::system::system_error(error);
}
return "\"status\": 200 }";
}
catch (std::exception& e) {
}
return "\"status\": 400, \"status_text\": \"error start command\" }";
}
string Updates::CreateAgentWazuh(string json) {
try {
boost::asio::io_service io_service;
string hostAddress;
string ip(wazuh_host);
stringstream ss;
ss << wazuh_port;
string port = ss.str();
if (wazuh_port != 80) {
hostAddress = ip + ":" + port;
} else {
hostAddress = ip;
}
// Get a list of endpoints corresponding to the server name.
tcp::resolver resolver(io_service);
tcp::resolver::query query(ip, port);
tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
tcp::socket socket(io_service);
boost::asio::connect(socket, endpoint_iterator);
boost::asio::streambuf request;
ostream request_stream(&request);
request_stream << "POST /agents HTTP/1.1\r\n";
request_stream << "Host: " << hostAddress << "\r\n";
request_stream << "Authorization: Bearer "<< wazuh_token << "\r\n";
request_stream << "Accept: */*\r\n";
request_stream << "Content-Type:application/json\r\n";
request_stream << "Content-Length: " << json.length() << "\r\n\r\n";
request_stream << json;
boost::asio::write(socket, request);
// Read the response status line. The response streambuf will automatically
// grow to accommodate the entire line. The growth may be limited by passing
// a maximum size to the streambuf constructor.
boost::asio::streambuf response;
boost::asio::read_until(socket, response, "\r\n");
// Check that response is OK.
istream response_stream(&response);
string http_version;
response_stream >> http_version;
unsigned int status_code;
response_stream >> status_code;
string status_message;
getline(response_stream, status_message);
if (!response_stream || http_version.substr(0, 5) != "HTTP/") {
return "\"status\": 400, \"status_text\": \"error response from wazuh api\" }";
}
if (status_code != 200) {
return "\"status\": 400, \"status_text\": \"error response from wazuh api\" }";
}
// Read the response headers, which are terminated by a blank line.
boost::asio::read_until(socket, response, "\r\n\r\n");
// Process the response headers.
string header;
while (getline(response_stream, header) && header != "\r") { }
stringstream payload;
// Write whatever content we already have to output.
if (response.size() > 0) {
payload << &response;
}
boost::asio::read_until(socket, response, boost::regex("}.*}"));
payload << &response;
stringstream response_ss(payload.str());
bpt::ptree pt;
bpt::read_json(response_ss, pt);
string id = pt.get<string>("data.id","indef");
string key = pt.get<string>("data.key","indef");
if(!id.compare("indef") || !key.compare("indef")) {
return "\"status\": 400, \"status_text\": \"error response from wazuh - key or id\" }";
}
return "\"status\": 200, \"result\": { \"x-alertflex:wazuh_agent\": { \"id\": \"" + id + "\", \"key\": \"" + key + "\"} } }";
}
catch (std::exception& e) {
}
return "\"status\": 400, \"status_text\": \"wazuh api exception\" }";
}
string Updates::DeleteAgentWazuh(string agent) {
try {
boost::asio::io_service io_service;
string hostAddress;
string ip(wazuh_host);
stringstream ss;
ss << wazuh_port;
string port = ss.str();
if (wazuh_port != 80) {
hostAddress = ip + ":" + port;
} else {
hostAddress = ip;
}
// Get a list of endpoints corresponding to the server name.
tcp::resolver resolver(io_service);
tcp::resolver::query query(ip, port);
tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
tcp::socket socket(io_service);
boost::asio::connect(socket, endpoint_iterator);
boost::asio::streambuf request;
ostream request_stream(&request);
request_stream << "DELETE /agents?status=all&purge=true&older_than=10s&agents_list=" + agent + " HTTP/1.1\r\n";
request_stream << "Host: " << hostAddress << "\r\n";
request_stream << "Authorization: Bearer "<< wazuh_token << "\r\n";
request_stream << "Accept: */*\r\n";
request_stream << "Connection: close\r\n\r\n";
boost::asio::write(socket, request);
// Read the response status line. The response streambuf will automatically
// grow to accommodate the entire line. The growth may be limited by passing
// a maximum size to the streambuf constructor.
boost::asio::streambuf response;
boost::asio::read_until(socket, response, "\r\n");
// Check that response is OK.
istream response_stream(&response);
string http_version;
response_stream >> http_version;
unsigned int status_code;
response_stream >> status_code;
string status_message;
getline(response_stream, status_message);
if (!response_stream || http_version.substr(0, 5) != "HTTP/") {
return "\"status\": 400, \"status_text\": \"error response from wazuh api\" }";
}
if (status_code != 200) {
return "\"status\": 400, \"status_text\": \"error response from wazuh api\" }";
}
// Read the response headers, which are terminated by a blank line.
boost::asio::read_until(socket, response, "\r\n\r\n");
// Process the response headers.
string header;
while (getline(response_stream, header) && header != "\r") { }
stringstream payload;
// Write whatever content we already have to output.
if (response.size() > 0) {
payload << &response;
}
boost::asio::read_until(socket, response, boost::regex("}.*}"));
payload << &response;
stringstream response_ss(payload.str());
bpt::ptree pt;
bpt::read_json(response_ss, pt);
int total_affected_items = pt.get<int>("data.total_affected_items",0);
int total_failed_items = pt.get<int>("data.total_failed_items",0);
if(total_affected_items != 1 || total_failed_items != 0) {
return "\"status\": 400, \"status_text\": \"error response from wazuh - key or id\" }";
}
return "\"status\": 200 }";
}
catch (std::exception& e) {
}
return "\"status\": 400, \"status_text\": \"wazuh api exception\" }";
}
string Updates::SendArToSuricata(string json) {
int sck;
struct sockaddr_un addr;
char buffer[SOCKET_BUFFER_SIZE];
int ret;
if (suriSocketStatus) {
try
{
/* create socket */
sck = socket(AF_UNIX, SOCK_STREAM, 0);
if (sck == -1) {
close (sck);
return "suricata_unixsocket: can not create socket";
}
/* set address */
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, suri_socket, sizeof(addr.sun_path));
addr.sun_path[sizeof(addr.sun_path) - 1] = 0;
/* Connect to unix socket */
ret = connect(sck, (struct sockaddr *) &addr, sizeof(addr));
if (ret == -1) {
close (sck);
return "suricata_unixsocket: can not connect to socket";
}
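            /* Suricata's unix socket requires a version handshake --
               send {"version": "0.1"} and read the acknowledgement --
               before it accepts commands. */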
char test[] = "{\"version\": \"0.1\"}";
int siz = strlen(test);
ret = send(sck, test, siz, 0);
if (ret == -1) {
close (sck);
return "suricata_unixsocket: can not send version";
} else if (ret < siz) {
close (sck);
return "suricata_unixsocket: unable to send all string";
}
memset(buffer, 0, sizeof(buffer));
ret = read(sck, buffer, sizeof(buffer));
if (ret == -1) {
close (sck);
return "suricata_unixsocket: can not read answer (version)";
}
siz = strlen(json.c_str());
ret = send(sck, json.c_str(), siz, 0);
if (ret == -1) {
close (sck);
return "suricata_unixsocket: can not send parameters";
}
memset(buffer, 0, SOCKET_BUFFER_SIZE);
ret = read(sck, buffer, SOCKET_BUFFER_SIZE);
if (ret == -1) {
close (sck);
return "suricata_unixsocket: can not read answer";
}
close (sck);
return "ok";
}
catch (std::exception& e) {
close (sck);
return "suricata_unixsocket: exception";
}
}
return "suricata_unixsocket: error";
}
string Updates::DockerContainer(string id, string cmd) {
int sck;
struct sockaddr_un addr;
int ret;
if (dockerSocketStatus) {
try {
/* create socket */
sck = socket(AF_UNIX, SOCK_STREAM, 0);
if (sck == -1) {
close (sck);
return "can not create socket";
}
/* set address */
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, docker_socket, sizeof(addr.sun_path));
addr.sun_path[sizeof(addr.sun_path) - 1] = 0;
/* Connect to unix socket */
ret = connect(sck, (struct sockaddr *) &addr, sizeof(addr));
if (ret == -1) {
close (sck);
return "can not connect to socket";
}
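            /* Compose a Docker Engine API request over the unix socket:
               POST /v1.40/containers/<id>/<cmd>, e.g. cmd = "stop". */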
std::string req = "POST /v1.40/containers/";
req += id;
req += "/";
req += cmd;
req += " HTTP/1.1\r\n";
req += "Host: localhost\r\n";
req += "Accept: */*\r\n\r\n";
int siz = req.size();
ret = send(sck, req.c_str(), siz, 0);
if (ret == -1) {
close (sck);
return "can not send request";
} else if (ret < siz) {
close (sck);
return "unable send all size of message";
}
char buffer[SOCKET_BUFFER_SIZE];
memset(buffer, 0, sizeof(buffer));
ret = read(sck, buffer, SOCKET_BUFFER_SIZE);
if (ret == -1) {
close (sck);
return "can not read answer";
}
close (sck);
return "ok";
} catch (std::exception& e) {
close (sck);
return "docker_unixsocket: exception";
}
}
return "docker_unixsocket: error";
}
int Updates::IsHomeNetwork(string ip) {
if (ip.compare("") == 0) return 0;
if (fs.filter.home_nets.size() != 0) {
std::vector<Network*>::iterator i, end;
for (i = fs.filter.home_nets.begin(), end = fs.filter.home_nets.end(); i != end; ++i) {
string net = (*i)->network;
string mask = (*i)->netmask;
if(IsIPInRange(ip, net, mask)) return 1;
}
}
return 0;
}
|
{"hexsha": "ed30a8de2f9e6e67ea571e6e95d08b8a3febcfc5", "size": 30939, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/updates.cpp", "max_stars_repo_name": "olegzhr/altprobe", "max_stars_repo_head_hexsha": "da9597efcf0463f31ea38bf715ed8d5453dfc0e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-04-02T19:13:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-22T20:05:08.000Z", "max_issues_repo_path": "src/updates.cpp", "max_issues_repo_name": "olegzhr/altprobe", "max_issues_repo_head_hexsha": "da9597efcf0463f31ea38bf715ed8d5453dfc0e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/updates.cpp", "max_forks_repo_name": "olegzhr/altprobe", "max_forks_repo_head_hexsha": "da9597efcf0463f31ea38bf715ed8d5453dfc0e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-10-09T02:15:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-09T02:15:56.000Z", "avg_line_length": 32.5331230284, "max_line_length": 133, "alphanum_fraction": 0.5078056821, "num_tokens": 6713}
|
[STATEMENT]
lemma in_MPLS_leq_2_pow_n:
fixes PROB :: "'a problem" and x
assumes "finite PROB" "(x \<in> MPLS PROB)"
shows "(x \<le> 2 ^ card (prob_dom PROB) - 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
let ?mpls = "MPLS PROB"
\<comment> \<open>NOTE obtain p = (s, as) where 'x = Inf (PLS s as)' from premise.\<close>
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
have "?mpls =
(\<lambda> (s, as). Inf (PLS s as)) `
{(s, as). (s \<in> valid_states PROB) \<and> (as \<in> valid_plans PROB)}
"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. MPLS PROB = (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
[PROOF STEP]
using MPLS_def
[PROOF STATE]
proof (prove)
using this:
MPLS ?PROB \<equiv> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states ?PROB \<and> as \<in> valid_plans ?PROB}
goal (1 subgoal):
1. MPLS PROB = (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
MPLS PROB = (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
MPLS PROB = (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
[PROOF STEP]
obtain s :: "('a, bool) fmap" and as :: "(('a, bool) fmap \<times> ('a, bool) fmap) list"
where obtain_s_as: "x \<in>
((\<lambda> (s, as). Inf (PLS s as)) `
{(s, as). (s \<in> valid_states PROB) \<and> (as \<in> valid_plans PROB)})
"
[PROOF STATE]
proof (prove)
using this:
MPLS PROB = (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
goal (1 subgoal):
1. (x \<in> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB} \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
MPLS PROB = (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
x \<in> MPLS PROB
goal (1 subgoal):
1. (x \<in> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB} \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
[PROOF STEP]
have
"x \<in> {Inf (PLS (fst p) (snd p)) | p. (fst p \<in> valid_states PROB) \<and> (snd p \<in> valid_plans PROB)}"
[PROOF STATE]
proof (prove)
using this:
x \<in> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
goal (1 subgoal):
1. x \<in> {Inf (PLS (fst p) (snd p)) |p. fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB}
[PROOF STEP]
using assms(1) obtain_s_as
[PROOF STATE]
proof (prove)
using this:
x \<in> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
finite PROB
x \<in> (\<lambda>(s, as). Inf (PLS s as)) ` {(s, as). s \<in> valid_states PROB \<and> as \<in> valid_plans PROB}
goal (1 subgoal):
1. x \<in> {Inf (PLS (fst p) (snd p)) |p. fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> {Inf (PLS (fst p) (snd p)) |p. fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB}
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> {Inf (PLS (fst p) (snd p)) |p. fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB}
[PROOF STEP]
have
"\<exists> p. x = Inf (PLS (fst p) (snd p)) \<and> (fst p \<in> valid_states PROB) \<and> (snd p \<in> valid_plans PROB)"
[PROOF STATE]
proof (prove)
using this:
x \<in> {Inf (PLS (fst p) (snd p)) |p. fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB}
goal (1 subgoal):
1. \<exists>p. x = Inf (PLS (fst p) (snd p)) \<and> fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>p. x = Inf (PLS (fst p) (snd p)) \<and> fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>p. x = Inf (PLS (fst p) (snd p)) \<and> fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB
[PROOF STEP]
obtain p :: "('a, bool) fmap \<times> (('a, bool) fmap \<times> ('a, bool) fmap) list" where obtain_p:
"x = Inf (PLS (fst p) (snd p))" "(fst p \<in> valid_states PROB)" "(snd p \<in> valid_plans PROB)"
[PROOF STATE]
proof (prove)
using this:
\<exists>p. x = Inf (PLS (fst p) (snd p)) \<and> fst p \<in> valid_states PROB \<and> snd p \<in> valid_plans PROB
goal (1 subgoal):
1. (\<And>p. \<lbrakk>x = Inf (PLS (fst p) (snd p)); fst p \<in> valid_states PROB; snd p \<in> valid_plans PROB\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x = Inf (PLS (fst p) (snd p))
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x = Inf (PLS (fst p) (snd p))
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
[PROOF STEP]
have "fst p \<in> valid_states PROB" "snd p \<in> valid_plans PROB"
[PROOF STATE]
proof (prove)
using this:
x = Inf (PLS (fst p) (snd p))
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
goal (1 subgoal):
1. fst p \<in> valid_states PROB &&& snd p \<in> valid_plans PROB
[PROOF STEP]
using obtain_p
[PROOF STATE]
proof (prove)
using this:
x = Inf (PLS (fst p) (snd p))
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
x = Inf (PLS (fst p) (snd p))
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
goal (1 subgoal):
1. fst p \<in> valid_states PROB &&& snd p \<in> valid_plans PROB
[PROOF STEP]
by blast+
[PROOF STATE]
proof (state)
this:
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
[PROOF STEP]
obtain x' :: nat where obtain_x':
"x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1"
[PROOF STATE]
proof (prove)
using this:
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
goal (1 subgoal):
1. (\<And>x'. x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(1) in_PLS_leq_2_pow_n[where s = "fst p" and as = "snd p"]
[PROOF STATE]
proof (prove)
using this:
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
finite PROB
\<lbrakk>finite ?PROB; fst p \<in> valid_states ?PROB; snd p \<in> valid_plans ?PROB\<rbrakk> \<Longrightarrow> \<exists>x. x \<in> PLS (fst p) (snd p) \<and> x \<le> 2 ^ card (prob_dom ?PROB) - 1
goal (1 subgoal):
1. (\<And>x'. x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
have 1: "x' \<le> 2 ^ card (prob_dom PROB) - 1" "x' \<in> PLS (fst p) (snd p)"
"x = Inf (PLS (fst p) (snd p))" "finite (PLS (fst p) (snd p))"
[PROOF STATE]
proof (prove)
using this:
x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1
goal (1 subgoal):
1. (x' \<le> 2 ^ card (prob_dom PROB) - 1 &&& x' \<in> PLS (fst p) (snd p)) &&& x = Inf (PLS (fst p) (snd p)) &&& finite (PLS (fst p) (snd p))
[PROOF STEP]
using obtain_x' obtain_p finite_PLS
[PROOF STATE]
proof (prove)
using this:
x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1
x' \<in> PLS (fst p) (snd p) \<and> x' \<le> 2 ^ card (prob_dom PROB) - 1
x = Inf (PLS (fst p) (snd p))
fst p \<in> valid_states PROB
snd p \<in> valid_plans PROB
finite (PLS ?s ?as)
goal (1 subgoal):
1. (x' \<le> 2 ^ card (prob_dom PROB) - 1 &&& x' \<in> PLS (fst p) (snd p)) &&& x = Inf (PLS (fst p) (snd p)) &&& finite (PLS (fst p) (snd p))
[PROOF STEP]
by blast+
[PROOF STATE]
proof (state)
this:
x' \<le> 2 ^ card (prob_dom PROB) - 1
x' \<in> PLS (fst p) (snd p)
x = Inf (PLS (fst p) (snd p))
finite (PLS (fst p) (snd p))
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
x' \<le> 2 ^ card (prob_dom PROB) - 1
x' \<in> PLS (fst p) (snd p)
x = Inf (PLS (fst p) (snd p))
finite (PLS (fst p) (snd p))
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
have "x \<le> x'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<le> x'
[PROOF STEP]
using 1(2, 4) obtain_p(1) cInf_le_finite
[PROOF STATE]
proof (prove)
using this:
x' \<in> PLS (fst p) (snd p)
finite (PLS (fst p) (snd p))
x = Inf (PLS (fst p) (snd p))
\<lbrakk>finite ?X; ?x \<in> ?X\<rbrakk> \<Longrightarrow> Inf ?X \<le> ?x
goal (1 subgoal):
1. x \<le> x'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<le> x'
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
x' \<le> 2 ^ card (prob_dom PROB) - 1
x' \<in> PLS (fst p) (snd p)
x = Inf (PLS (fst p) (snd p))
finite (PLS (fst p) (snd p))
x \<le> x'
[PROOF STEP]
show "(x \<le> 2 ^ card (prob_dom PROB) - 1)"
[PROOF STATE]
proof (prove)
using this:
x' \<le> 2 ^ card (prob_dom PROB) - 1
x' \<in> PLS (fst p) (snd p)
x = Inf (PLS (fst p) (snd p))
finite (PLS (fst p) (snd p))
x \<le> x'
goal (1 subgoal):
1. x \<le> 2 ^ card (prob_dom PROB) - 1
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
x \<le> 2 ^ card (prob_dom PROB) - 1
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5047, "file": "Factored_Transition_System_Bounding_TopologicalProps", "length": 39}
|
# example of combination image augmentation
from numpy import expand_dims
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot
# import matplotlib
import os, shutil
deck = 'dobble_deck07_cards_55'
dataPath = 'dobble_dataset/' + deck + '/'
augmentedPath = 'dobble_dataset/' + deck + '-augmented/'
if os.path.exists(augmentedPath):
shutil.rmtree(augmentedPath)
os.mkdir(augmentedPath)
total_images_to_augment = 100
image_size = 300
for folder in os.listdir(dataPath):
print("[INFO] generating images in folder " + folder)
for file in os.listdir(dataPath + '/' + folder):
# load each image
img = load_img(dataPath + '/' + folder + '/' + file)
# convert to numpy array
data = img_to_array(img)
# expand dimension to one sample
samples = expand_dims(data, 0)
# create image data augmentation generator
datagen = ImageDataGenerator(
width_shift_range=0.3,
height_shift_range=0.3,
brightness_range=[0.3,1.0],
zoom_range=[0.7,1.5]
)
# prepare iterator
it = datagen.flow(samples, batch_size=1)
outputPath = augmentedPath + folder + '/'
if os.path.exists(outputPath):
shutil.rmtree(outputPath)
os.mkdir(outputPath)
for i in range(1, total_images_to_augment + 1):
# generate batch of images
            batch = next(it)
# convert to unsigned integers for viewing
image = batch[0].astype('uint8')
fig = pyplot.figure(frameon=False)
#fig.set_size_inches(w,h)
ax = pyplot.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# plot raw pixel data
ax.imshow(image)
fig.savefig(fname = outputPath + "card" + folder + "_{:03d}.tif".format(i))
# show the figure
#pyplot.show()
            # the figure will remain open, using memory, unless explicitly closed with the following (you'll get a warning if you don't include it):
pyplot.close('all')
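
# Sanity check (sketch): count the augmented images written per class folder;
# each folder should end up with exactly total_images_to_augment files.
for folder in sorted(os.listdir(augmentedPath)):
    n_files = len(os.listdir(os.path.join(augmentedPath, folder)))
    print("{}: {} images (expected {})".format(folder, n_files, total_images_to_augment))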
|
{"hexsha": "2a8c535e30c743cb70de0b775a58367dadda3c7e", "size": 1975, "ext": "py", "lang": "Python", "max_stars_repo_path": "save_augmented_images.py", "max_stars_repo_name": "maxpark/dobble_buddy", "max_stars_repo_head_hexsha": "52109de1275f96af79fb77f1a5f5fb8fe00e96d2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-03T10:08:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T10:08:05.000Z", "max_issues_repo_path": "save_augmented_images.py", "max_issues_repo_name": "maxpark/dobble_buddy", "max_issues_repo_head_hexsha": "52109de1275f96af79fb77f1a5f5fb8fe00e96d2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "save_augmented_images.py", "max_forks_repo_name": "maxpark/dobble_buddy", "max_forks_repo_head_hexsha": "52109de1275f96af79fb77f1a5f5fb8fe00e96d2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-11T19:31:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T00:33:02.000Z", "avg_line_length": 27.0547945205, "max_line_length": 144, "alphanum_fraction": 0.7098734177, "include": true, "reason": "from numpy", "num_tokens": 546}
|
export Car
mutable struct Car <: Robot
btCollisionObject
end
Car() = Car(BulletCollision.convex_hull([zeros(3)]))
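# NOTE: the default constructor gives the car a degenerate collision shape:
# the convex hull of a single point at the origin.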
|
{"hexsha": "61bf2953177c5632d13c271587181d42d32ce49a", "size": 118, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/robot/car.jl", "max_stars_repo_name": "schoelst/GuSTO.jl", "max_stars_repo_head_hexsha": "b5753959c2e232c4e91be3e73ec4a81470c703b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2019-02-04T21:44:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T02:22:46.000Z", "max_issues_repo_path": "src/robot/car.jl", "max_issues_repo_name": "schoelst/GuSTO.jl", "max_issues_repo_head_hexsha": "b5753959c2e232c4e91be3e73ec4a81470c703b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-07-13T01:04:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-27T23:30:56.000Z", "max_forks_repo_path": "src/robot/car.jl", "max_forks_repo_name": "schoelst/GuSTO.jl", "max_forks_repo_head_hexsha": "b5753959c2e232c4e91be3e73ec4a81470c703b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-05-07T21:08:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T10:17:09.000Z", "avg_line_length": 16.8571428571, "max_line_length": 52, "alphanum_fraction": 0.7457627119, "num_tokens": 33}
|
"""
IGRound
Abstract base type for dispatched InstaRound rounds.
"""
abstract type IGRound end
units = [
"K", "M", "B", "t", "q", "Q", "s", "S", "o",
"n", "d", "U", "D", "T", "Qt", "Qd", "Sd", "St",
"O", "N", "v", "c"
]
unit_names = [
"Thousand",
"Million",
"Billion",
"Trillion",
"Quadrillion",
"Quintillion",
"Sextillion",
"Septillion",
"Octillion",
"Nonillion",
"Decillion",
"Undecillion",
"Duodecillion",
"Tredecillion",
"Quattuordecillion",
"Quindecillion",
"Sexdecillion",
"Septendecillion",
"Octodecillion",
"Novemdecillion",
"Vigintillion",
"Unvigintillion",
]
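
# NOTE: units[i] is the abbreviation for unit_names[i]; the two vectors are
# parallel, with 22 entries covering 10^3 (Thousand) up to 10^66
# (Unvigintillion, short scale).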
|
{"hexsha": "804ae37504819d6a05a4d8cd1237981ce4cadfdf", "size": 677, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/custom.jl", "max_stars_repo_name": "PyDataBlog/InstaRound.jl", "max_stars_repo_head_hexsha": "3eb3f6de229a82819415856b0f851fb8899492b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-08-03T22:33:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-08T06:37:37.000Z", "max_issues_repo_path": "src/custom.jl", "max_issues_repo_name": "PyDataBlog/InstaRound.jl", "max_issues_repo_head_hexsha": "3eb3f6de229a82819415856b0f851fb8899492b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-07-09T22:22:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-18T08:59:18.000Z", "max_forks_repo_path": "src/custom.jl", "max_forks_repo_name": "PyDataBlog/InstaRound.jl", "max_forks_repo_head_hexsha": "3eb3f6de229a82819415856b0f851fb8899492b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.358974359, "max_line_length": 52, "alphanum_fraction": 0.5302806499, "num_tokens": 238}
|
module animal_herd_module
implicit none
type animals
character(len=16) :: name ! |animal name (cattle, sheep, goats, etc)
real :: phyp = 0. ! |
real :: pthd = 0. ! |
real :: pthu = 0. ! |
real :: gzlm = 0. !t/ha |
real :: gzin = 0. ! |
real :: gzwi = 0. !kg/hd |
real :: gzwm = 0. !kg/hd |
real :: pmlk = 0. !kg/hd |
real :: antq = 0. ! |
integer :: igzd = 0 ! |
integer :: impl = 0 ! |
integer :: icvb = 0 ! |
integer :: icvf = 0 ! |
integer :: icwd = 0 ! |
end type animals
end module animal_herd_module
|
{"hexsha": "cf12c99517d3f86f5f9dc76ca76fd5fd68aa16fb", "size": 926, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source_codes_60.5/animal_herd_module.f90", "max_stars_repo_name": "ankwasa/wetlands_swatplus", "max_stars_repo_head_hexsha": "3cdf83cc6a4dc68ce4f53ce1d0ebacd7695b54cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source_codes_60.5/animal_herd_module.f90", "max_issues_repo_name": "ankwasa/wetlands_swatplus", "max_issues_repo_head_hexsha": "3cdf83cc6a4dc68ce4f53ce1d0ebacd7695b54cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source_codes_60.5/animal_herd_module.f90", "max_forks_repo_name": "ankwasa/wetlands_swatplus", "max_forks_repo_head_hexsha": "3cdf83cc6a4dc68ce4f53ce1d0ebacd7695b54cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2608695652, "max_line_length": 88, "alphanum_fraction": 0.3142548596, "num_tokens": 243}
|
Sunflowers can be found in both Town Flora personal gardens and farmers' fields on the Outskirts just outside of town. They are grown locally for seed production. The seeds harvested will be planted throughout the world for the confection, oil, or ornamental markets. Sunflower seeds are one of Yolo County's top-grossing agricultural commodities, with over $11,000,000 worth of seeds harvested in 2010.
Two varieties are planted for cross-pollination, and European honeybees are used to supplement the native bees that pollinate the flowers.
|
{"hexsha": "051e08349660b1054b2c687778ea3a1f9a3c29f0", "size": 532, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Sunflowers.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Sunflowers.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Sunflowers.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 106.4, "max_line_length": 391, "alphanum_fraction": 0.8214285714, "num_tokens": 112}
|
"""
Created by Hamid Eghbal-zadeh at 22.03.21
Johannes Kepler University of Linz
"""
import torch
from torch import optim
from tqdm import tqdm
import numpy as np
import os
from datetime import datetime
import argparse
import pickle
import matplotlib.pyplot as plt
import json
from datasets.utils import get_disentangled_loaders
from carla.architectures.utils import get_model
from general_utils.io_utils import check_dir
from sklearn import linear_model
from sklearn.metrics import classification_report
from carla.env.wrapper import *
msg_template = 'ep {} loss: ({:.5f}, {:.5f}) acc: ({:.2f}, {:.2f}) lr: {}'
def load_and_set_vae_weights(vae_model_path):
model_path = os.path.join(vae_model_path, 'model.pt')
args_path = os.path.join(vae_model_path, 'args.pkl')
with open(args_path, 'rb') as handle:
args_dict = pickle.load(handle)
context_encoder = get_model(**args_dict)
context_encoder.load_state_dict(torch.load(model_path))
context_encoder = freez_vae(context_encoder)
context_encoder = set_eval_vae(context_encoder)
return context_encoder
def freez_vae(context_encoder):
for param in context_encoder.parameters():
param.requires_grad = False
return context_encoder
def set_eval_vae(context_encoder):
context_encoder.eval()
return context_encoder
def forward(model, encoder, X, clf_type, use_posterior):
if clf_type == 'linear' or clf_type == 'mlp':
with torch.no_grad():
emb_mu, emb_var = encoder.encode(X)
if use_posterior:
emb = encoder.reparameterize(emb_mu, emb_var)
else:
emb = emb_mu
y_hat = model(emb)
    elif 'cnn' in clf_type or 'vgg' in clf_type or 'resnet' in clf_type:
        y_hat = model(X)
    else:
        raise ValueError('unknown clf_type: {}'.format(clf_type))
    return y_hat
def _prune_dims(variances, threshold=0.):
scale_z = np.sqrt(variances)
return scale_z >= threshold
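
# Example (sketch): _prune_dims(np.array([0.0, 0.04, 1.0]), threshold=0.1)
# computes std-devs [0.0, 0.2, 1.0] and returns [False, True, True],
# i.e. the first latent dimension would be pruned.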
def eval(img_size=64, batch_size=128, lr=0.005, weight_decay=0.0, scheduler_gamma=0.95, num_epochs=100,
ds_name='celeba', n_workers=8, crop_size=148,
latent_dim=256, save_dir='', milestones=[25, 75], schedule_type='exp', aug=True,
dec_out_nonlin='tanh', init='he', vae_model_path='', use_posterior=False, alt_img_count=1,
env=None, seed=None, fully_obs=None, tile_size=None, context_config=None, reward=None, grid_size=None,
n_objects=None, vae_uid=None,
args_dict=None
):
dateTimeObj = datetime.now()
uid = '{}-{}-{}_{}-{}-{}.{}'.format(dateTimeObj.year, dateTimeObj.month, dateTimeObj.day, dateTimeObj.hour,
dateTimeObj.minute, dateTimeObj.second, dateTimeObj.microsecond)
path = os.path.join(save_dir, ds_name, uid)
check_dir(path)
with open(os.path.join(path, 'args.pkl'), 'wb') as handle:
pickle.dump(args_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(path, 'args.txt'), 'w') as f:
json.dump(args_dict, f, indent=2)
device = torch.device('cuda')
vae_model_path = os.path.join(vae_model_path, vae_uid)
encoder = load_and_set_vae_weights(vae_model_path)
encoder.to(device)
trn_loader, num_train_imgs = get_disentangled_loaders(ds_name, crop_size, img_size, batch_size,
n_workers, aug, dec_out_nonlin, alt_img_count)
log_dict = {'trn': {'loss': [], 'LR': []},
'val': {'loss': [], 'LR': []}}
random_state = np.random.RandomState(0)
model = linear_model.LogisticRegression(random_state=random_state)
features, labels, embeddings, gts = [], [], [], []
for data in tqdm(trn_loader):
X1, X2s, gt1, gt2, y = data
X1, y = X1.to(device), y.to(device)
with torch.no_grad():
emb1_mu, emb1_var = encoder.encode(X1)
            if use_posterior:
                emb1 = encoder.reparameterize(emb1_mu, emb1_var).detach().cpu().numpy()
            else:
                emb1 = emb1_mu.detach().cpu().numpy()
feat_mean = []
for X2 in X2s:
X2 = X2.to(device)
if (X1 - X2).abs().sum() < 0.00001:
print('found near-duplicates!')
for b_i in range(X1.shape[0]):
plt.subplot(1, 2, 1)
plt.imshow(X1[b_i].cpu().permute(1, 2, 0))
plt.subplot(1, 2, 2)
plt.imshow(X2[b_i].cpu().permute(1, 2, 0))
plt.title('batch index {} class {}'.format(b_i, y))
plt.pause(2)
plt.close(0)
continue
with torch.no_grad():
emb2_mu, emb2_var = encoder.encode(X2)
                if use_posterior:
                    emb2 = encoder.reparameterize(emb2_mu, emb2_var).detach().cpu().numpy()
                else:
                    emb2 = emb2_mu.detach().cpu().numpy()
# feat = np.mean(np.abs(emb1 - emb2), axis=0)
feat = np.abs(emb1 - emb2)
feat_mean.append(feat)
# if sum(feat) <= 0.0001:
# continue
features.append(np.mean(feat_mean, 0))
labels.append(y.detach().cpu().numpy())
features = np.vstack(features)
labels = np.hstack(labels)
n_class = len(np.unique(labels))
num_ds = features.shape[0]
valid_size = 0.2
indices = list(range(num_ds))
num_val_imgs = int(np.floor(valid_size * num_ds))
num_train_imgs = num_ds - num_val_imgs
np.random.shuffle(indices)
train_idx, val_idx = indices[num_val_imgs:], indices[:num_val_imgs]
train_features, train_labels = features[train_idx], labels[train_idx]
val_features, val_labels = features[val_idx], labels[val_idx]
model.fit(train_features, train_labels)
train_preds = model.predict(train_features)
print(classification_report(train_labels, train_preds))
val_preds = model.predict(val_features)
print(classification_report(val_labels, val_preds))
print('done')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-latent-dim', type=int, default=100)
parser.add_argument('-dec-out-nonlin', choices=['tanh', 'sig'], default="sig")
parser.add_argument('-init', choices=['kaiming', 'xavier', 'none'], default="none")
parser.add_argument('-lr', type=float, default=.0003)
parser.add_argument('-weight-decay', type=float, default=0) # 5e-4
parser.add_argument('-schedule-type', choices=['cos', 'exp', 'step', 'none'], default="none")
parser.add_argument('-scheduler-gamma', type=float, default=0.5)
parser.add_argument('-milestones', nargs='+', default=[50, 75], type=int)
parser.add_argument('-img-size', type=int, default=84)
parser.add_argument('-crop-size', type=int, default=84)
parser.add_argument('-num-epochs', type=int, default=50)
parser.add_argument('-batch-size', type=int, default=128)
parser.add_argument('-ds-name', type=str,
default="carla4fully48pxdisentangle")
parser.add_argument('-alt_img_count', type=int, default=1)
parser.add_argument('-n-workers', type=int, default=0)
parser.add_argument('-aug', nargs='+', default=[''], type=str)
parser.add_argument('-save-dir', type=str, required=True)
parser.add_argument('-vae-model-path', type=str, default='')
parser.add_argument('-use-posterior', default=False, action='store_true')
parser.add_argument('--env', type=str, default='MiniGrid-Context-Dynamic-Obstacles-8x8-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--fully_obs', default=False, action='store_true')
parser.add_argument('--random_start', default=False, action='store_true')
parser.add_argument('--no_goodies', default=False, action='store_true')
parser.add_argument('--norm_obs', default=False, action='store_true')
parser.add_argument("--context_config", help="which context configuration to load", default='color_contexts.yaml')
parser.add_argument('--tile_size', type=int, default=12)
parser.add_argument('--random_goal', default=False, action='store_true')
parser.add_argument("--reward", help="choose reward configuration", default='default.yaml')
parser.add_argument('--grid_size', type=int, default=8)
parser.add_argument('--n_objects', type=int, default=4)
parser.add_argument('-vae-uid', type=str, default='')
args = parser.parse_args()
eval(args.img_size, args.batch_size, args.lr, args.weight_decay, args.scheduler_gamma, args.num_epochs,
args.ds_name, args.n_workers, args.crop_size, args.latent_dim, args.save_dir, args.milestones,
args.schedule_type, args.aug, args.dec_out_nonlin, args.init, args.vae_model_path, args.use_posterior,
args.alt_img_count,
args.env, args.seed, args.fully_obs, args.tile_size, args.context_config, args.reward, args.grid_size,
args.n_objects, args.vae_uid,
args_dict=args.__dict__)
|
{"hexsha": "48e5cb08457ba86b31c0b311e39d9a118e4c6fb8", "size": 8989, "ext": "py", "lang": "Python", "max_stars_repo_path": "VAE/disentangle_eval.py", "max_stars_repo_name": "sebzap/CarlaRL", "max_stars_repo_head_hexsha": "5283d15dee9e8dc5e728314d56875b4fbca3acb2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-07-14T09:19:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T07:59:20.000Z", "max_issues_repo_path": "VAE/disentangle_eval.py", "max_issues_repo_name": "sebzap/CarlaRL", "max_issues_repo_head_hexsha": "5283d15dee9e8dc5e728314d56875b4fbca3acb2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VAE/disentangle_eval.py", "max_forks_repo_name": "sebzap/CarlaRL", "max_forks_repo_head_hexsha": "5283d15dee9e8dc5e728314d56875b4fbca3acb2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-14T15:07:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T15:07:33.000Z", "avg_line_length": 39.0826086957, "max_line_length": 118, "alphanum_fraction": 0.6494604517, "include": true, "reason": "import numpy", "num_tokens": 2206}
|
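The script above is a diagnostic probe: it encodes image pairs that differ in one generative factor, takes |z1 - z2| of their latent codes, and fits a logistic regression to predict which factor changed. A minimal self-contained sketch of that idea, with synthetic stand-ins for the VAE embeddings (all shapes and the block-structured signal are assumptions for illustration):

import numpy as np
from sklearn import linear_model
from sklearn.metrics import classification_report

rng = np.random.RandomState(0)
latent_dim, n_pairs, n_factors = 16, 1000, 4

# Synthetic stand-in for |z1 - z2|: each factor perturbs one block of dims.
labels = rng.randint(0, n_factors, size=n_pairs)
features = rng.rand(n_pairs, latent_dim) * 0.05          # background noise
for i, y in enumerate(labels):
    features[i, y * 4:(y + 1) * 4] += rng.rand(4)        # factor-specific signal

split = int(0.8 * n_pairs)                               # 80/20 split, as above
clf = linear_model.LogisticRegression(max_iter=1000, random_state=0)
clf.fit(features[:split], labels[:split])
print(classification_report(labels[split:], clf.predict(features[split:])))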
#include "run_test_file.hpp"
#include <signal.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <cstdint>
#include <sstream>
#include <boost/iostreams/device/file_descriptor.hpp>
#include <boost/iostreams/stream.hpp>
#include <mettle/driver/exit_code.hpp>
#include <mettle/driver/posix/scoped_pipe.hpp>
#include "../../err_string.hpp"
// XXX: Use std::source_location instead when we're able.
#define PARENT_FAILED() parent_failed(__FILE__, __LINE__)
namespace mettle::posix {
namespace {
file_result parent_failed(const char *file, std::size_t line) {
std::ostringstream ss;
ss << "Fatal error at " << file << ":" << line << "\n"
<< err_string(errno);
return {false, ss.str()};
}
[[noreturn]] void
child_failed(int fd, const std::string &file) {
auto err = err_string(errno);
try {
namespace io = boost::iostreams;
io::stream<io::file_descriptor_sink> stream(
fd, io::never_close_handle
);
bencode::encode(stream, bencode::dict_view{
{"event", "failed_file"},
{"file", file},
{"message", err}
});
stream.flush();
_exit(exit_code::success);
} catch(...) {
_exit(exit_code::fatal);
}
}
std::unique_ptr<char *[]>
make_argv(const std::vector<std::string> &argv) {
auto real_argv = std::make_unique<char *[]>(argv.size() + 1);
for(std::size_t i = 0; i != argv.size(); i++)
real_argv[i] = const_cast<char*>(argv[i].c_str());
return real_argv;
}
}
file_result run_test_file(std::vector<std::string> args, log::pipe &logger) {
posix::scoped_pipe message_pipe;
if(message_pipe.open() < 0)
return PARENT_FAILED();
rlimit lim;
if(getrlimit(RLIMIT_NOFILE, &lim) < 0)
return PARENT_FAILED();
int max_fd = lim.rlim_cur - 1;
args.insert(args.end(), { "--output-fd", std::to_string(max_fd) });
auto argv = make_argv(args);
pid_t pid;
if((pid = fork()) < 0)
return PARENT_FAILED();
if(pid == 0) {
if(message_pipe.close_read() < 0)
child_failed(message_pipe.write_fd, args[0]);
if(message_pipe.write_fd != max_fd) {
if(dup2(message_pipe.write_fd, max_fd) < 0)
child_failed(message_pipe.write_fd, args[0]);
if(message_pipe.close_write() < 0)
child_failed(max_fd, args[0]);
}
execvp(argv[0], argv.get());
child_failed(max_fd, args[0]);
} else {
if(message_pipe.close_write() < 0) {
kill(pid, SIGKILL);
return PARENT_FAILED();
}
std::exception_ptr except;
try {
namespace io = boost::iostreams;
io::stream<io::file_descriptor_source> fds(
message_pipe.read_fd, io::never_close_handle
);
while(fds.peek() != EOF)
logger(fds);
} catch(...) {
except = std::current_exception();
}
int status;
if(waitpid(pid, &status, 0) < 0) {
kill(pid, SIGKILL);
return PARENT_FAILED();
}
if(WIFEXITED(status)) {
int exit_status = WEXITSTATUS(status);
if(exit_status != exit_code::success) {
std::ostringstream ss;
ss << "Exited with status " << exit_status;
return {false, ss.str()};
} else if(except) {
try {
std::rethrow_exception(except);
} catch(const std::exception &e) {
return {false, e.what()};
}
} else {
return {true, ""};
}
} else { // WIFSIGNALED
return {false, strsignal(WTERMSIG(status))};
}
}
}
} // namespace mettle::posix
|
{"hexsha": "11d5f8e6aace2c3b412fe305696b2fbe405fc13c", "size": 3703, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mettle/posix/run_test_file.cpp", "max_stars_repo_name": "jimporter/mettle", "max_stars_repo_head_hexsha": "c65aa75b04a08b550b3572f4c080c68e26ad86fa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 82.0, "max_stars_repo_stars_event_min_datetime": "2015-01-05T10:06:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T01:41:28.000Z", "max_issues_repo_path": "src/mettle/posix/run_test_file.cpp", "max_issues_repo_name": "JohnGalbraith/mettle", "max_issues_repo_head_hexsha": "38b70fe1dc0f30e98b768a37108196328182b5f4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 44.0, "max_issues_repo_issues_event_min_datetime": "2015-01-08T08:40:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-29T23:28:56.000Z", "max_forks_repo_path": "src/mettle/posix/run_test_file.cpp", "max_forks_repo_name": "jimporter/mettle", "max_forks_repo_head_hexsha": "c65aa75b04a08b550b3572f4c080c68e26ad86fa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2015-06-23T07:41:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-14T15:35:07.000Z", "avg_line_length": 26.45, "max_line_length": 79, "alphanum_fraction": 0.5716986227, "num_tokens": 933}
|
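The parent/child handshake above hinges on handing the child a pipe on a known descriptor and telling it the number via --output-fd. This is not the mettle API, but a minimal Python sketch of the same descriptor-passing pattern (the child command is hypothetical; mettle's real child is the test binary itself):

import os
import subprocess

read_fd, write_fd = os.pipe()
proc = subprocess.Popen(
    ["./test_binary", "--output-fd", str(write_fd)],  # hypothetical child
    pass_fds=(write_fd,),           # keep the write end open across exec
)
os.close(write_fd)                  # parent keeps only the read end
with os.fdopen(read_fd) as stream:  # EOF arrives once the child exits
    for line in stream:             # mirrors the logger(fds) loop above
        print("event:", line.rstrip())
print("exit:", proc.wait())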
"""
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
import numpy as np
import pytest
from numpy.lib.recfunctions import structured_to_unstructured
from pde import ScalarField
from pde.grids import (
CartesianGrid,
CylindricalSymGrid,
PolarSymGrid,
SphericalSymGrid,
UnitGrid,
)
from droplets import image_analysis
from droplets.droplets import DiffuseDroplet, PerturbedDroplet2D, PerturbedDroplet3D
from droplets.emulsions import Emulsion
@pytest.mark.parametrize("size", [16, 17])
@pytest.mark.parametrize("periodic", [True, False])
def test_localization_sym_unit(size, periodic):
"""tests simple droplets localization in 2d"""
pos = np.random.random(2) * size
radius = np.random.uniform(2, 5)
width = np.random.uniform(1, 2)
d1 = DiffuseDroplet(pos, radius, interface_width=width)
grid = UnitGrid((size, size), periodic=periodic)
field = d1.get_phase_field(grid)
emulsion = image_analysis.locate_droplets(field, refine=True)
assert len(emulsion) == 1
d2 = emulsion[0]
np.testing.assert_almost_equal(d1.position, d2.position)
assert d1.radius == pytest.approx(d2.radius, rel=1e-4)
assert d1.interface_width == pytest.approx(d2.interface_width)
emulsion = image_analysis.locate_droplets(field, minimal_radius=size)
assert len(emulsion) == 0
emulsion = image_analysis.locate_droplets(ScalarField(grid))
assert len(emulsion) == 0
@pytest.mark.parametrize("periodic", [True, False])
def test_localization_sym_rect(periodic):
"""tests simple droplets localization in 2d with a rectangular grid"""
size = 16
pos = np.random.uniform(-4, 4, size=2)
radius = np.random.uniform(2, 5)
width = np.random.uniform(0.5, 1.5)
d1 = DiffuseDroplet(pos, radius, interface_width=width)
a = np.random.random(2) - size / 2
b = np.random.random(2) + size / 2
grid = CartesianGrid(np.c_[a, b], 3 * size, periodic=periodic)
field = d1.get_phase_field(grid)
emulsion = image_analysis.locate_droplets(field, refine=True)
assert len(emulsion) == 1
d2 = emulsion[0]
np.testing.assert_almost_equal(d1.position, d2.position)
assert d1.radius == pytest.approx(d2.radius, rel=1e-5)
assert d1.interface_width == pytest.approx(d2.interface_width)
emulsion = image_analysis.locate_droplets(ScalarField(grid))
assert len(emulsion) == 0
@pytest.mark.parametrize("periodic", [True, False])
def test_localization_perturbed_2d(periodic):
"""tests localization of perturbed 2d droplets"""
size = 16
pos = np.random.uniform(-4, 4, size=2)
radius = np.random.uniform(2, 5)
width = np.random.uniform(0.5, 1.5)
ampls = np.random.uniform(-0.01, 0.01, size=4)
d1 = PerturbedDroplet2D(pos, radius, interface_width=width, amplitudes=ampls)
a = np.random.random(2) - size / 2
b = np.random.random(2) + size / 2
grid = CartesianGrid(np.c_[a, b], 2 * size, periodic=periodic)
field = d1.get_phase_field(grid)
emulsion = image_analysis.locate_droplets(field, refine=True, modes=d1.modes)
assert len(emulsion) == 1
d2 = emulsion[0]
msg = "size=%d, periodic=%s, %s != %s" % (size, periodic, d1, d2)
np.testing.assert_almost_equal(d1.position, d2.position, decimal=1, err_msg=msg)
assert d1.radius == pytest.approx(d2.radius, rel=1e-5)
assert d1.interface_width == pytest.approx(d2.interface_width, rel=1e-3)
np.testing.assert_allclose(d1.amplitudes[1:], d2.amplitudes[1:], rtol=0.5)
@pytest.mark.parametrize("periodic", [True, False])
def test_localization_perturbed_3d(periodic):
"""tests localization of perturbed 3d droplets"""
size = 8
pos = np.random.uniform(-2, 2, size=3)
radius = np.random.uniform(2, 3)
width = np.random.uniform(0.5, 1.5)
ampls = np.random.uniform(-0.01, 0.01, size=3)
d1 = PerturbedDroplet3D(pos, radius, interface_width=width, amplitudes=ampls)
a = np.random.random(3) - size / 2
b = np.random.random(3) + size / 2
grid = CartesianGrid(np.c_[a, b], 2 * size, periodic=periodic)
assert grid.dim == 3
field = d1.get_phase_field(grid)
emulsion = image_analysis.locate_droplets(field, refine=True, modes=d1.modes)
assert len(emulsion) == 1
d2 = emulsion[0]
msg = "size=%d, periodic=%s, %s != %s" % (size, periodic, d1, d2)
np.testing.assert_almost_equal(d1.position, d2.position, decimal=1, err_msg=msg)
assert d1.radius == pytest.approx(d2.radius, rel=1e-4)
assert d1.interface_width == pytest.approx(d2.interface_width, rel=1e-3)
np.testing.assert_allclose(
d1.amplitudes[3:], d2.amplitudes[3:], rtol=0.5, err_msg=msg
)
def test_localization_polar():
"""tests simple droplets localization in polar grid"""
radius = np.random.uniform(2, 3)
width = np.random.uniform(0.5, 1.5)
d1 = DiffuseDroplet((0, 0), radius, interface_width=width)
grid_radius = 6 + 2 * np.random.random()
grid = PolarSymGrid(grid_radius, 16)
field = d1.get_phase_field(grid)
emulsion = image_analysis.locate_droplets(field, refine=True)
assert len(emulsion) == 1
d2 = emulsion[0]
np.testing.assert_almost_equal(d1.position, d2.position, decimal=5)
assert d1.radius == pytest.approx(d2.radius, rel=1e-5)
assert d1.interface_width == pytest.approx(d2.interface_width, rel=1e-5)
emulsion = image_analysis.locate_droplets(ScalarField(grid))
assert len(emulsion) == 0
def test_localization_spherical():
"""tests simple droplets localization in spherical grid"""
radius = np.random.uniform(2, 3)
width = np.random.uniform(0.5, 1.5)
d1 = DiffuseDroplet((0, 0, 0), radius, interface_width=width)
grid_radius = 6 + 2 * np.random.random()
grid = SphericalSymGrid(grid_radius, 16)
field = d1.get_phase_field(grid)
emulsion = image_analysis.locate_droplets(field, refine=True)
assert len(emulsion) == 1
d2 = emulsion[0]
np.testing.assert_almost_equal(d1.position, d2.position, decimal=5)
assert d1.radius == pytest.approx(d2.radius, rel=1e-5)
assert d1.interface_width == pytest.approx(d2.interface_width, rel=1e-5)
emulsion = image_analysis.locate_droplets(ScalarField(grid))
assert len(emulsion) == 0
@pytest.mark.parametrize("periodic", [True, False])
def test_localization_cylindrical(periodic):
"""tests simple droplets localization in cylindrical grid"""
pos = (0, 0, np.random.uniform(-4, 4))
radius = np.random.uniform(2, 3)
width = np.random.uniform(0.5, 1.5)
d1 = DiffuseDroplet(pos, radius, interface_width=width)
grid_radius = 6 + 2 * np.random.random()
bounds_z = np.random.uniform(1, 2, size=2) * np.array([-4, 4])
grid = CylindricalSymGrid(grid_radius, bounds_z, (16, 32), periodic_z=periodic)
field = d1.get_phase_field(grid)
emulsion = image_analysis.locate_droplets(field, refine=True)
assert len(emulsion) == 1
d2 = emulsion[0]
np.testing.assert_almost_equal(d1.position, d2.position, decimal=5)
assert d1.radius == pytest.approx(d2.radius, rel=1e-5)
assert d1.interface_width == pytest.approx(d2.interface_width)
emulsion = image_analysis.locate_droplets(ScalarField(grid))
assert len(emulsion) == 0
def test_localization_threshold():
"""tests different localization thresholds"""
pos = np.random.random(2) * 16
radius = np.random.uniform(2, 5)
width = np.random.uniform(1, 2)
d1 = DiffuseDroplet(pos, radius, interface_width=width)
grid = UnitGrid((16, 16), periodic=False)
field = d1.get_phase_field(grid)
for threshold in [0.25, 0.75, "auto"]:
emulsion = image_analysis.locate_droplets(
field, threshold=threshold, refine=True
)
assert len(emulsion) == 1
d2 = emulsion[0]
np.testing.assert_almost_equal(d1.position, d2.position)
assert d1.radius == pytest.approx(d2.radius, rel=1e-4)
assert d1.interface_width == pytest.approx(d2.interface_width)
def test_get_length_scale():
"""test determining the length scale"""
grid = CartesianGrid([[0, 8 * np.pi]], 64, periodic=True)
c = ScalarField(grid, np.sin(grid.axes_coords[0]))
for method in ["structure_factor_mean", "structure_factor_maximum"]:
s = image_analysis.get_length_scale(c, method=method)
assert s == pytest.approx(2 * np.pi, rel=0.1)
def test_emulsion_processing():
"""test identifying emulsions in phase fields"""
grid = UnitGrid([32, 32], periodic=True)
e1 = Emulsion(
[
DiffuseDroplet(position=[5, 6], radius=9, interface_width=1),
DiffuseDroplet(position=[20, 19], radius=8, interface_width=1),
],
grid=grid,
)
field = e1.get_phasefield()
e2 = image_analysis.locate_droplets(field, refine=True)
np.testing.assert_allclose(
structured_to_unstructured(e1.data),
structured_to_unstructured(e2.data),
rtol=0.02,
)
def test_structure_factor_random():
"""test the structure factor function for random input"""
g1 = CartesianGrid([[0, 10]] * 2, 64, periodic=True)
f1 = ScalarField.random_colored(g1, -2)
# test invariance with re-meshing
g2 = CartesianGrid([[0, 10]] * 2, [128, 64], periodic=True)
f2 = f1.interpolate_to_grid(g2)
ks = np.linspace(0.2, 3)
k1, s1 = image_analysis.get_structure_factor(f1, wave_numbers=ks)
k2, s2 = image_analysis.get_structure_factor(f2, wave_numbers=ks)
np.testing.assert_equal(ks, k1)
np.testing.assert_equal(ks, k2)
np.testing.assert_allclose(s1, s2, atol=0.05)
# test invariance with respect to scaling
k2, s2 = image_analysis.get_structure_factor(100 * f1, wave_numbers=ks)
np.testing.assert_equal(ks, k2)
np.testing.assert_allclose(s1, s2, atol=0.05)
|
{"hexsha": "55a623ceeb97b341de83d6de2b1f91352fe09f0f", "size": 9791, "ext": "py", "lang": "Python", "max_stars_repo_path": "droplets/tests/test_image_analysis.py", "max_stars_repo_name": "tefavidal/py-droplets", "max_stars_repo_head_hexsha": "633f0cff75eecd9d1a375cfaebfad326cb9a7bf0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-01T19:50:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-26T13:04:09.000Z", "max_issues_repo_path": "droplets/tests/test_image_analysis.py", "max_issues_repo_name": "tefavidal/py-droplets", "max_issues_repo_head_hexsha": "633f0cff75eecd9d1a375cfaebfad326cb9a7bf0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-05-17T19:03:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T16:31:20.000Z", "max_forks_repo_path": "droplets/tests/test_image_analysis.py", "max_forks_repo_name": "tefavidal/py-droplets", "max_forks_repo_head_hexsha": "633f0cff75eecd9d1a375cfaebfad326cb9a7bf0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-05-04T07:45:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T09:12:39.000Z", "avg_line_length": 35.3465703971, "max_line_length": 84, "alphanum_fraction": 0.6886936983, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2856}
|
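The expectation in test_get_length_scale above (a length scale of 2*pi for sin(x) on [0, 8*pi], sampled at 64 points) can be cross-checked with numpy alone, independent of the droplets package; a sketch:

import numpy as np

x = np.linspace(0, 8 * np.pi, 64, endpoint=False)
c = np.sin(x)
k = np.fft.rfftfreq(x.size, d=x[1] - x[0]) * 2 * np.pi  # angular wave numbers
spectrum = np.abs(np.fft.rfft(c)) ** 2
k_peak = k[np.argmax(spectrum[1:]) + 1]                 # skip the k=0 mode
print(2 * np.pi / k_peak)   # ~6.283 = 2*pi, the length scale the test expects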
from __future__ import print_function
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.optimizers import SGD
import numpy as np
def createModel(input_shape,hid_size,num_outputs):
model = Sequential()
model.add(Dense(hid_size, activation='relu', input_shape=input_shape))
model.add(Dense(num_outputs,activation='sigmoid'))
return model
def train(model,x_train,y_train,steps,epochs,x_test,y_test,modelname):
model.compile(optimizer = SGD(lr=0.7,momentum=0.3),loss = 'mean_squared_error',metrics = ['accuracy'])
save_callback = ModelCheckpoint(filepath=modelname)
early_stop = EarlyStopping(monitor = 'loss',min_delta=0.01,patience=20)
model.fit(x = x_train,y = y_train,steps_per_epoch=steps,epochs=epochs,shuffle=True,callbacks=[save_callback,early_stop])
print(model.evaluate(x = x_test,y = y_test))
def predict(model,x):
print(model.predict(x))
def main():
x = [(0.,0.),(1.,0.),(0.,1.),(1.,1.)]
y = [0.,1.,1.,0.]
x_train = np.asarray(x)
y_train = np.asarray(y)
print(x_train)
print(y_train)
model = createModel((2,),8,1)
train(model,x_train,y_train,16,1000,x_train,y_train,'xor.hd5')
predict(model,x_train)
if __name__ == '__main__':
main()
|
{"hexsha": "aca3345377d394b07f67b60943b96122576ce1eb", "size": 1431, "ext": "py", "lang": "Python", "max_stars_repo_path": "EX_1.py", "max_stars_repo_name": "tszdanger/NUS_ALL", "max_stars_repo_head_hexsha": "2b38cce6c0aeebed4bbd211e3e29565c66084cf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-14T15:58:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-14T15:58:44.000Z", "max_issues_repo_path": "EX_1.py", "max_issues_repo_name": "tszdanger/NUS_ALL", "max_issues_repo_head_hexsha": "2b38cce6c0aeebed4bbd211e3e29565c66084cf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EX_1.py", "max_forks_repo_name": "tszdanger/NUS_ALL", "max_forks_repo_head_hexsha": "2b38cce6c0aeebed4bbd211e3e29565c66084cf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9024390244, "max_line_length": 125, "alphanum_fraction": 0.7037037037, "include": true, "reason": "import numpy", "num_tokens": 354}
|
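XOR is the canonical example of a problem no single linear layer can solve, which is why the script above needs the hidden ReLU layer. A hand-constructed two-unit solution, checked in plain numpy (weights picked by hand for illustration, not learned):

import numpy as np

x = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
# Hidden units: h1 = relu(x1 + x2), h2 = relu(x1 + x2 - 1)
h = np.maximum(x @ np.ones((2, 2)) + np.array([0., -1.]), 0.)
# Output y = h1 - 2*h2 reproduces XOR exactly
y = h @ np.array([1., -2.])
print(y)  # -> [0. 1. 1. 0.]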
[STATEMENT]
lemma cindexP_lineE_changes:
fixes p::"complex poly" and a b ::complex
assumes "p\<noteq>0" "a\<noteq>b"
shows "cindexP_lineE p a b =
(let p1 = pcompose p [:a, b-a:];
pR1 = map_poly Re p1;
pI1 = map_poly Im p1;
gc1 = gcd pR1 pI1
in
real_of_int (changes_alt_itv_smods 0 1
(pR1 div gc1) (pI1 div gc1)) / 2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
define p1 pR1 pI1 gc1 where "p1 = pcompose p [:a, b-a:]"
and "pR1 = map_poly Re p1" and "pI1 = map_poly Im p1"
and "gc1 = gcd pR1 pI1"
[PROOF STATE]
proof (state)
this:
p1 = p \<circ>\<^sub>p [:a, b - a:]
pR1 = map_poly Re p1
pI1 = map_poly Im p1
gc1 = gcd pR1 pI1
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
have "gc1 \<noteq>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. gc1 \<noteq> 0
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> gc1 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
assume "\<not> gc1 \<noteq> 0"
[PROOF STATE]
proof (state)
this:
\<not> gc1 \<noteq> 0
goal (1 subgoal):
1. \<not> gc1 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> gc1 \<noteq> 0
[PROOF STEP]
have "pI1 = 0" "pR1 = 0"
[PROOF STATE]
proof (prove)
using this:
\<not> gc1 \<noteq> 0
goal (1 subgoal):
1. pI1 = 0 &&& pR1 = 0
[PROOF STEP]
unfolding gc1_def
[PROOF STATE]
proof (prove)
using this:
\<not> gcd pR1 pI1 \<noteq> 0
goal (1 subgoal):
1. pI1 = 0 &&& pR1 = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
pI1 = 0
pR1 = 0
goal (1 subgoal):
1. \<not> gc1 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
pI1 = 0
pR1 = 0
[PROOF STEP]
have "p1 = 0"
[PROOF STATE]
proof (prove)
using this:
pI1 = 0
pR1 = 0
goal (1 subgoal):
1. p1 = 0
[PROOF STEP]
unfolding pI1_def pR1_def
[PROOF STATE]
proof (prove)
using this:
map_poly Im p1 = 0
map_poly Re p1 = 0
goal (1 subgoal):
1. p1 = 0
[PROOF STEP]
by (metis cpoly_of_decompose map_poly_0)
[PROOF STATE]
proof (state)
this:
p1 = 0
goal (1 subgoal):
1. \<not> gc1 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p1 = 0
[PROOF STEP]
have "p=0"
[PROOF STATE]
proof (prove)
using this:
p1 = 0
goal (1 subgoal):
1. p = 0
[PROOF STEP]
unfolding p1_def
[PROOF STATE]
proof (prove)
using this:
p \<circ>\<^sub>p [:a, b - a:] = 0
goal (1 subgoal):
1. p = 0
[PROOF STEP]
apply (subst (asm) pcompose_eq_0)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. degree [:a, b - a:] \<noteq> 0
2. p = 0 \<Longrightarrow> p = 0
[PROOF STEP]
using \<open>a\<noteq>b\<close>
[PROOF STATE]
proof (prove)
using this:
a \<noteq> b
goal (2 subgoals):
1. degree [:a, b - a:] \<noteq> 0
2. p = 0 \<Longrightarrow> p = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
p = 0
goal (1 subgoal):
1. \<not> gc1 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p = 0
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
p = 0
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>p\<noteq>0\<close>
[PROOF STATE]
proof (prove)
using this:
p = 0
p \<noteq> 0
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
gc1 \<noteq> 0
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
have "cindexP_lineE p a b =
cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t))
/ Re (poly p (linepath a b t)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexP_lineE p a b = cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t)) / Re (poly p (linepath a b t)))
[PROOF STEP]
unfolding cindexP_lineE_def cindex_pathE_def cindexP_pathE_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexE 0 1 (\<lambda>t. Im ((poly p \<circ> linepath a b) t - 0) / Re ((poly p \<circ> linepath a b) t - 0)) = cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t)) / Re (poly p (linepath a b t)))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cindexP_lineE p a b = cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t)) / Re (poly p (linepath a b t)))
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cindexP_lineE p a b = cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t)) / Re (poly p (linepath a b t)))
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
have "... = cindexE 0 1 (\<lambda>t. poly pI1 t / poly pR1 t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t)) / Re (poly p (linepath a b t))) = cindexE 0 1 (\<lambda>t. poly pI1 t / poly pR1 t)
[PROOF STEP]
unfolding pI1_def pR1_def p1_def poly_linepath_comp'
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexE 0 1 (\<lambda>t. Im (poly (p \<circ>\<^sub>p [:a, b - a:]) (complex_of_real t)) / Re (poly (p \<circ>\<^sub>p [:a, b - a:]) (complex_of_real t))) = cindexE 0 1 (\<lambda>t. poly (map_poly Im (p \<circ>\<^sub>p [:a, b - a:])) t / poly (map_poly Re (p \<circ>\<^sub>p [:a, b - a:])) t)
[PROOF STEP]
by (simp add:Im_poly_of_real Re_poly_of_real)
[PROOF STATE]
proof (state)
this:
cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t)) / Re (poly p (linepath a b t))) = cindexE 0 1 (\<lambda>t. poly pI1 t / poly pR1 t)
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cindexE 0 1 (\<lambda>t. Im (poly p (linepath a b t)) / Re (poly p (linepath a b t))) = cindexE 0 1 (\<lambda>t. poly pI1 t / poly pR1 t)
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
have "... = cindex_polyE 0 1 pI1 pR1 "
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexE 0 1 (\<lambda>t. poly pI1 t / poly pR1 t) = cindex_polyE 0 1 pI1 pR1
[PROOF STEP]
by (simp add: cindexE_eq_cindex_polyE)
[PROOF STATE]
proof (state)
this:
cindexE 0 1 (\<lambda>t. poly pI1 t / poly pR1 t) = cindex_polyE 0 1 pI1 pR1
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cindexE 0 1 (\<lambda>t. poly pI1 t / poly pR1 t) = cindex_polyE 0 1 pI1 pR1
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
have "... = cindex_polyE 0 1 (pI1 div gc1) (pR1 div gc1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_polyE 0 1 pI1 pR1 = cindex_polyE 0 1 (pI1 div gc1) (pR1 div gc1)
[PROOF STEP]
using \<open>gc1\<noteq>0\<close>
[PROOF STATE]
proof (prove)
using this:
gc1 \<noteq> 0
goal (1 subgoal):
1. cindex_polyE 0 1 pI1 pR1 = cindex_polyE 0 1 (pI1 div gc1) (pR1 div gc1)
[PROOF STEP]
apply (subst (2) cindex_polyE_mult_cancel[of gc1,symmetric])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. gc1 \<noteq> 0 \<Longrightarrow> gc1 \<noteq> 0
2. gc1 \<noteq> 0 \<Longrightarrow> cindex_polyE 0 1 pI1 pR1 = cindex_polyE 0 1 (gc1 * (pI1 div gc1)) (gc1 * (pR1 div gc1))
[PROOF STEP]
by (simp_all add: gc1_def)
[PROOF STATE]
proof (state)
this:
cindex_polyE 0 1 pI1 pR1 = cindex_polyE 0 1 (pI1 div gc1) (pR1 div gc1)
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cindex_polyE 0 1 pI1 pR1 = cindex_polyE 0 1 (pI1 div gc1) (pR1 div gc1)
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
have "... = real_of_int (changes_alt_itv_smods 0 1
(pR1 div gc1) (pI1 div gc1)) / 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_polyE 0 1 (pI1 div gc1) (pR1 div gc1) = real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2
[PROOF STEP]
apply (rule cindex_polyE_changes_alt_itv_mods)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. 0 < 1
2. coprime (pR1 div gc1) (pI1 div gc1)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. coprime (pR1 div gc1) (pI1 div gc1)
[PROOF STEP]
by (metis \<open>gc1 \<noteq> 0\<close> div_gcd_coprime gc1_def gcd_eq_0_iff)
[PROOF STATE]
proof (state)
this:
cindex_polyE 0 1 (pI1 div gc1) (pR1 div gc1) = real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
cindexP_lineE p a b = real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cindexP_lineE p a b = real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2
goal (1 subgoal):
1. cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
[PROOF STEP]
by (metis gc1_def p1_def pI1_def pR1_def)
[PROOF STATE]
proof (state)
this:
cindexP_lineE p a b = (let p1 = p \<circ>\<^sub>p [:a, b - a:]; pR1 = map_poly Re p1; pI1 = map_poly Im p1; gc1 = gcd pR1 pI1 in real_of_int (changes_alt_itv_smods 0 1 (pR1 div gc1) (pI1 div gc1)) / 2)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5745, "file": "Count_Complex_Roots_Extended_Sturm", "length": 48}
|
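Stated in conventional notation (a paraphrase of the Isabelle goal above, not verified text), the lemma reduces the Cauchy index of p along the segment from a to b to a purely algebraic sign-variation count:

\mathrm{cindexP\_lineE}(p, a, b)
  \;=\; \tfrac{1}{2}\,\mathrm{changes\_alt\_itv\_smods}\Bigl(0,\, 1,\; \frac{\Re p_1}{g},\; \frac{\Im p_1}{g}\Bigr),
\qquad
p_1(t) = p\bigl(a + (b - a)\,t\bigr), \quad g = \gcd(\Re p_1, \Im p_1),

valid for p \neq 0 and a \neq b. The proof divides out g so that the two real polynomials are coprime, as required by cindex_polyE_changes_alt_itv_mods.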
"""
UNet architecture in Keras TensorFlow
"""
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
class Unet:
def __init__(self, input_size=256):
self.input_size = input_size
def build_model(self):
def conv_block(x, n_filter, pool=True):
x = Conv2D(n_filter, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(n_filter, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
c = x
if pool == True:
x = MaxPooling2D((2, 2), (2, 2))(x)
return c, x
else:
return c
n_filters = [16, 32, 64, 128, 256]
inputs = Input((self.input_size, self.input_size, 3))
c0 = inputs
## Encoder
c1, p1 = conv_block(c0, n_filters[0])
c2, p2 = conv_block(p1, n_filters[1])
c3, p3 = conv_block(p2, n_filters[2])
c4, p4 = conv_block(p3, n_filters[3])
## Bridge
b1 = conv_block(p4, n_filters[4], pool=False)
b2 = conv_block(b1, n_filters[4], pool=False)
## Decoder
d1 = Conv2DTranspose(n_filters[3], (3, 3), padding="same", strides=(2, 2))(b2)
d1 = Concatenate()([d1, c4])
d1 = conv_block(d1, n_filters[3], pool=False)
# transposed-conv filter counts taper to mirror the encoder
d2 = Conv2DTranspose(n_filters[2], (3, 3), padding="same", strides=(2, 2))(d1)
d2 = Concatenate()([d2, c3])
d2 = conv_block(d2, n_filters[2], pool=False)
d3 = Conv2DTranspose(n_filters[1], (3, 3), padding="same", strides=(2, 2))(d2)
d3 = Concatenate()([d3, c2])
d3 = conv_block(d3, n_filters[1], pool=False)
d4 = Conv2DTranspose(n_filters[0], (3, 3), padding="same", strides=(2, 2))(d3)
d4 = Concatenate()([d4, c1])
d4 = conv_block(d4, n_filters[0], pool=False)
## output
outputs = Conv2D(1, (1, 1), padding="same")(d4)
outputs = BatchNormalization()(outputs)
outputs = Activation("sigmoid")(outputs)
## Model
model = Model(inputs, outputs)
return model
|
{"hexsha": "006c51fc073b43d13f8a06dd0e2f5525ef14f086", "size": 2247, "ext": "py", "lang": "Python", "max_stars_repo_path": "Classes/ResUNetPlusPlus/unet.py", "max_stars_repo_name": "Nitin-Mane/ALL-PyTorch-Segmentation-2021", "max_stars_repo_head_hexsha": "0f3c7b129629cc2863c502898bcfa3c45077af85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Classes/ResUNetPlusPlus/unet.py", "max_issues_repo_name": "Nitin-Mane/ALL-PyTorch-Segmentation-2021", "max_issues_repo_head_hexsha": "0f3c7b129629cc2863c502898bcfa3c45077af85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Classes/ResUNetPlusPlus/unet.py", "max_forks_repo_name": "Nitin-Mane/ALL-PyTorch-Segmentation-2021", "max_forks_repo_head_hexsha": "0f3c7b129629cc2863c502898bcfa3c45077af85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2083333333, "max_line_length": 86, "alphanum_fraction": 0.551846907, "include": true, "reason": "import numpy", "num_tokens": 673}
|
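A usage sketch for the class above (the import path is an assumption; shapes only, no training):

from unet import Unet  # hypothetical import path

model = Unet(input_size=256).build_model()
model.summary()
# Input : (None, 256, 256, 3) RGB image
# Output: (None, 256, 256, 1) sigmoid mask -- spatial size is preserved
# because four 2x max-pools are undone by four stride-2 transposed convs.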
import numpy as np
import pandas as pd
import os, glob, pickle
from pathlib import Path
from os.path import join, exists, dirname, abspath, isdir
import random
from sklearn.neighbors import KDTree
from tqdm import tqdm
import logging
from .utils import DataProcessing, get_min_bbox, BEVBox3D
from .base_dataset import BaseDataset, BaseDatasetSplit
from ..utils import make_dir, DATASET
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s - %(asctime)s - %(module)s - %(message)s',
)
log = logging.getLogger(__name__)
class S3DIS(BaseDataset):
"""This class is used to create a dataset based on the S3DIS (Stanford
Large-Scale 3D Indoor Spaces) dataset, and used in visualizer, training, or
testing.
The S3DIS dataset is best used to train models for building indoors.
"""
def __init__(self,
dataset_path,
name='S3DIS',
task='segmentation',
cache_dir='./logs/cache',
use_cache=False,
class_weights=[
3370714, 2856755, 4919229, 318158, 375640, 478001, 974733,
650464, 791496, 88727, 1284130, 229758, 2272837
],
num_points=40960,
test_area_idx=3,
ignored_label_inds=[],
ignored_objects=[
'wall', 'floor', 'ceiling', 'beam', 'column', 'clutter'
],
test_result_folder='./test',
**kwargs):
"""Initialize the function by passing the dataset and other details.
Args:
dataset_path: The path to the dataset to use.
name: The name of the dataset (S3DIS in this case).
task: One of {segmentation, detection} for semantic segmentation and object detection.
cache_dir: The directory where the cache is stored.
use_cache: Indicates if the dataset should be cached.
class_weights: The class weights to use in the dataset.
num_points: The maximum number of points to use when splitting the dataset.
test_area_idx: The area to use for testing. The valid values are 1 through 6.
ignored_label_inds: A list of labels that should be ignored in the dataset.
ignored_objects: Ignored objects
test_result_folder: The folder where the test results should be stored.
"""
super().__init__(dataset_path=dataset_path,
name=name,
task=task,
cache_dir=cache_dir,
use_cache=use_cache,
class_weights=class_weights,
test_result_folder=test_result_folder,
num_points=num_points,
test_area_idx=test_area_idx,
ignored_label_inds=ignored_label_inds,
ignored_objects=ignored_objects,
**kwargs)
cfg = self.cfg
assert isdir(dataset_path), f"Invalid dataset path {dataset_path}"
self.label_to_names = self.get_label_to_names()
self.num_classes = len(self.label_to_names)
self.label_values = np.sort([k for k, v in self.label_to_names.items()])
self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
self.ignored_labels = np.array([])
self.test_split = 'Area_' + str(cfg.test_area_idx)
self.pc_path = join(self.cfg.dataset_path, 'original_pkl')
if not exists(self.pc_path):
print("creating dataset")
self.create_ply_files(self.cfg.dataset_path, self.label_to_names)
# TODO : if num of ply files < 272, then create.
self.all_files = glob.glob(
str(Path(self.cfg.dataset_path) / 'original_pkl' / '*.pkl'))
@staticmethod
def get_label_to_names():
"""Returns a label to names dictonary object.
Returns:
A dict where keys are label numbers and
values are the corresponding names.
"""
label_to_names = {
0: 'ceiling',
1: 'floor',
2: 'wall',
3: 'beam',
4: 'column',
5: 'window',
6: 'door',
7: 'table',
8: 'chair',
9: 'sofa',
10: 'bookcase',
11: 'board',
12: 'clutter'
}
return label_to_names
def get_split(self, split):
"""Returns a dataset split.
Args:
split: A string identifying the dataset split that is usually one of
'training', 'test', 'validation', or 'all'.
Returns:
A dataset split object providing the requested subset of the data.
"""
return S3DISSplit(self, split=split)
def get_split_list(self, split):
cfg = self.cfg
dataset_path = cfg.dataset_path
file_list = []
if split in ['test', 'testing', 'val', 'validation']:
file_list = [
f for f in self.all_files
if 'Area_' + str(cfg.test_area_idx) in f
]
elif split in ['train', 'training']:
file_list = [
f for f in self.all_files
if 'Area_' + str(cfg.test_area_idx) not in f
]
elif split in ['all']:
file_list = self.all_files
else:
raise ValueError("Invalid split {}".format(split))
return file_list
def is_tested(self, attr):
cfg = self.cfg
name = attr['name']
path = cfg.test_result_folder
store_path = join(path, self.name, name + '.npy')
if exists(store_path):
print("{} already exists.".format(store_path))
return True
else:
return False
"""Saves the output of a model.
Args:
results: The output of a model for the datum associated with the attribute passed.
attr: The attributes that correspond to the outputs passed in results.
"""
def save_test_result(self, results, attr):
cfg = self.cfg
name = attr['name'].split('.')[0]
path = cfg.test_result_folder
make_dir(path)
pred = results['predict_labels']
pred = np.array(pred)
for ign in cfg.ignored_label_inds:
pred[pred >= ign] += 1
store_path = join(path, self.name, name + '.npy')
make_dir(Path(store_path).parent)
np.save(store_path, pred)
log.info("Saved {} in {}.".format(name, store_path))
@staticmethod
def create_ply_files(dataset_path, class_names):
os.makedirs(join(dataset_path, 'original_pkl'), exist_ok=True)
anno_file = Path(abspath(
__file__)).parent / '_resources' / 's3dis_annotation_paths.txt'
anno_file = str(anno_file)
anno_paths = [line.rstrip() for line in open(anno_file)]
anno_paths = [Path(dataset_path) / p for p in anno_paths]
class_names = [val for key, val in class_names.items()]
label_to_idx = {l: i for i, l in enumerate(class_names)}
out_format = '.pkl'
for anno_path in tqdm(anno_paths):
elems = str(anno_path).split('/')
save_path = elems[-3] + '_' + elems[-2] + out_format
save_path = Path(dataset_path) / 'original_pkl' / save_path
data_list = []
bboxes = []
for file in glob.glob(str(anno_path / '*.txt')):
class_name = Path(file).name.split('_')[0]
if class_name not in class_names:
class_name = 'clutter'
pc = pd.read_csv(file, header=None,
delim_whitespace=True).values
labels = np.ones((pc.shape[0], 1)) * label_to_idx[class_name]
data_list.append(np.concatenate([pc, labels], 1))
bbox = [class_name] + get_min_bbox(pc)
bboxes.append(bbox)
pc_label = np.concatenate(data_list, 0)
info = [pc_label, bboxes]
with open(save_path, 'wb') as f:
pickle.dump(info, f)
@staticmethod
def read_bboxes(bboxes, ignored_objects):
objects = []
for box in bboxes:
name = box[0]
if name in ignored_objects:
continue
center = np.array([box[1], box[2], box[3]])
size = np.array([box[4], box[5], box[6]]) # w, h, l
yaw = box[7]
objects.append(Object3d(name, center, size, yaw))
return objects
class S3DISSplit(BaseDatasetSplit):
"""This class is used to create a split for S3DIS dataset.
Initialize the class.
Args:
dataset: The dataset to split.
split: A string identifying the dataset split that is usually one of
'training', 'test', 'validation', or 'all'.
**kwargs: The configuration of the model as keyword arguments.
Returns:
A dataset split object providing the requested subset of the data.
"""
def __init__(self, dataset, split='training'):
super().__init__(dataset, split=split)
self.cfg = dataset.cfg
path_list = dataset.get_split_list(split)
log.info("Found {} pointclouds for {}".format(len(path_list), split))
self.path_list = path_list
self.split = split
self.dataset = dataset
def __len__(self):
return len(self.path_list)
def get_data(self, idx):
pc_path = self.path_list[idx]
data = pickle.load(open(pc_path, 'rb'))
pc, bboxes = data
bboxes = self.dataset.read_bboxes(bboxes, self.cfg.ignored_objects)
points = np.array(pc[:, :3], dtype=np.float32)
feat = np.array(pc[:, 3:6], dtype=np.float32)
labels = np.array(pc[:, 6], dtype=np.int32).reshape((-1,))
data = {
'point': points,
'feat': feat,
'label': labels,
'bounding_boxes': bboxes
}
return data
def get_attr(self, idx):
pc_path = Path(self.path_list[idx])
name = pc_path.name.replace('.pkl', '')
pc_path = str(pc_path)
split = self.split
attr = {'idx': idx, 'name': name, 'path': pc_path, 'split': split}
return attr
class Object3d(BEVBox3D):
"""Stores object specific details like bbox coordinates."""
def __init__(self, name, center, size, yaw):
super().__init__(center, size, yaw, name, -1.0)
self.occlusion = 0.0
DATASET._register_module(S3DIS)
|
{"hexsha": "5de409f1c88e3e39caa32119aaaf36957101dbc9", "size": 10708, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml3d/datasets/s3dis.py", "max_stars_repo_name": "thomasbrockmeier-ams/Open3D-ML", "max_stars_repo_head_hexsha": "1e362bbf133537668923905a12a15c540d9b689d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-08-11T02:21:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T19:32:04.000Z", "max_issues_repo_path": "ml3d/datasets/s3dis.py", "max_issues_repo_name": "thomasbrockmeier-ams/Open3D-ML", "max_issues_repo_head_hexsha": "1e362bbf133537668923905a12a15c540d9b689d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-31T09:06:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-31T09:06:08.000Z", "max_forks_repo_path": "ml3d/datasets/s3dis.py", "max_forks_repo_name": "thomasbrockmeier-ams/Open3D-ML", "max_forks_repo_head_hexsha": "1e362bbf133537668923905a12a15c540d9b689d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2546583851, "max_line_length": 98, "alphanum_fraction": 0.5672394471, "include": true, "reason": "import numpy", "num_tokens": 2406}
|
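A usage sketch for the dataset class above (the dataset path is hypothetical; on first use the loader converts the raw annotations into per-room pickle files under original_pkl):

dataset = S3DIS("/data/S3DIS", test_area_idx=3)   # hypothetical local path
train = dataset.get_split("training")
print(len(train), "point clouds in the training split")
sample = train.get_data(0)
print(sample["point"].shape,   # (N, 3) xyz coordinates
      sample["feat"].shape,    # (N, 3) RGB features
      sample["label"].shape)   # (N,)  per-point class ids in 0..12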
########################################################################################
## This file is a part of YAP package of scripts. https://github.com/shpakoo/YAP
## Distributed under the MIT license: http://www.opensource.org/licenses/mit-license.php
## Copyright (c) 2011-2013 Sebastian Szpakowski
########################################################################################
library(ggplot2)
library(grid)
library(scales)
multiplot <- function(..., plotlist=NULL, cols=1, layout=NULL)
{
require(grid)
# Make a list from the ... arguments and plotlist
plots = c(list(...), plotlist)
numPlots = length(plots)
print (numPlots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout))
{
layout = grid.layout(ceiling(numPlots/cols), cols)
}
# create a panel that will be used to map ids later
panel = matrix(1:(layout$nrow * layout$ncol), nrow = layout$nrow, ncol = layout$ncol, byrow=TRUE )
if (numPlots==1)
{
print(plots[[1]])
}
else
{
# Set up the page
grid.newpage()
pushViewport(viewport(layout = layout))
for (i in 1:numPlots)
{
matching <- as.data.frame(which(panel == i, arr.ind = TRUE))
print ( plots[[i]], vp = viewport( layout.pos.row = matching$row,
layout.pos.col = matching$col)
)
}
}
}
makePlot = function(filename = "read.groups.tab.txt", minreads = 0, mingroups = 0, nlevels = 500, mode="OTU", xlim=c(0,0), ylim=c(0,0))
{
if (mode=="OTU")
{
x = read.table(filename, as.is=T, header=F, sep="\t")
x=x[-1,]
names(x) = c("reads", "groups")
ylab = "number of samples showing an OTU"
xlab = "OTU size (i.e. number of reads in OTU, log10 scale)"
histlab="OTU size"
groups = unique(x$groups)
labs = groups
alldata = x
subsetdata = x[x$reads>minreads & x$groups>mingroups,]
v = NA
h = NA
forhist = subsetdata$reads
forhist[forhist>100]=100
}
else if ( mode=="coverage")
{
x = read.table(filename, as.is=T, header=F, sep="", skip=34)
#print (dim(x))
#x=x[-1,]
### Contig Sites Reads Coverage
names(x) = c("x1", "reads", "x2", "groups")
ylab = "coverage"
xlab = "Contig Size (log10 scale)"
histlab="Contig Coverage"
x$groups = log10(x$groups)
#groups = round(seq(0,ceiling(max(x$groups)), length.out=20),2)
groups = log10(c(1,10,100,1000,10000))
labs = round(10^groups,0)
alldata = x
subsetdata = x[x$reads>minreads & x$groups>mingroups,]
forhist = 10^subsetdata$groups
h = mean(subsetdata$reads)
v = sum(subsetdata$x2) / sum(subsetdata$reads) * 100
xlim = log10(c(100,100000))
ylim = log10 (c(0.1,10000))
forhist[forhist>100]=100
print (v)
}
else
{
print ("unknown plotting mode.")
return (0)
}
if (nrow(subsetdata)>2 && nrow(alldata)>2)
{
alldata = alldata[is.finite(alldata$groups),]
subsetdata = subsetdata[is.finite(subsetdata$groups),]
### setup canvas using all data
nf <- layout(matrix(c(1,2,3,0), 2, 2, byrow=TRUE), respect=FALSE, width=c(0.8,0.2))
if (xlim[1]==xlim[2])
{
ylim = c(min(alldata$groups)-0.5,max(alldata$groups)+0.5)
plot (log10(alldata$reads), alldata$groups, type= "n", ylab = ylab, xlab = xlab, ylim=ylim, axes=FALSE, main=filename)
}
else
{
plot (log10(alldata$reads), alldata$groups, type= "n", ylab = ylab, xlab = xlab, ylim=ylim, xlim=xlim, axes=FALSE, main=filename)
}
axis(2, at=groups, lab=labs, las=2)
A = c(1:9)
B =seq(0, log10(max(alldata$reads)), by=1)
C =expand.grid(A, B)
C$at = C[,1] * 10^(C[,2])
C$lab = ifelse (C[,1]%%10==1, 10^C[,2], "")
axis(1, at=log10(C$at), lab=rep("", nrow(C)) , col.ticks="gray", lwd.ticks=2 )
C2 = C[C$lab!="", ]
axis(1, at=log10(C2$at), lab=C2$lab , col.ticks="black", lwd.ticks= 4 )
### use only subset data from now on:
subsetdata$reads = log10(subsetdata$reads)
# contour
require(MASS)
try( {
xykde = MASS::kde2d(subsetdata$reads, subsetdata$groups, lims = c( min(subsetdata$reads)-5, max(subsetdata$reads)+5, min(subsetdata$groups)-5, max(subsetdata$groups)+5) )
if (sum(is.nan(xykde$z))>0)
{
cat("have to nudge groups...\n")
b = subsetdata$groups + sample(c(-0.1, 0.1), length(subsetdata$groups), replace=T)
xykde = MASS::kde2d(subsetdata$reads, b, lims = c( min(subsetdata$reads)-5, max(subsetdata$reads)+5, min(subsetdata$groups)-5, max(subsetdata$groups)+5) )
}
zlim = range(xykde$z, finite = TRUE)
lev = seq(zlim[1], zlim[2], le = nlevels)
col = rev(heat.colors(length(lev)))
contour(xykde, add = TRUE, levels = lev,
drawlabels = FALSE, col=col )
# trendline
trendline = lowess((subsetdata$reads), subsetdata$groups, f=0.1)
points(trendline , type="l", lwd=5, col="gray30")
# points
points(subsetdata$reads, subsetdata$group, pch=19, col="gray60", cex=0.4)
# cutoff lines
if (mingroups>0)
{
abline(h=mingroups, col="red", lty=2, lwd=0.5)
}
if (minreads>0)
{
abline(v=log10(minreads), col="red", lty=2, lwd=0.5)
}
box(lwd=5)
# legend
plot (0,100, xlim=c(0,1), ylim = c(min(lev), max(lev)), xlab="", ylab="", axes=F, main="")
rect(0, lev[-length(lev)], 1, lev[-1L], col = col, border=NA)
#print (lev)
at = unique(round(seq(0, max(lev), length.out=10),2))
lab = paste(round((1-(at/ceiling(max(at)))) * 100,0) , "%", sep="")
axis(4, las=1, cex.axis=0.5)
axis(2, las=1, at = at, lab=lab)
box()
# histogram
hist ((forhist), col="gray", br=seq(0.5,100.5, by=1), axes =FALSE, xlab = histlab, main="")
axis(2)
at = c(1:10, seq(20,100, by=10) )
labs = paste(at)
labs [2:9]=""
labs[length(labs)] = paste(labs[length(labs)], "+", sep="")
axis(1, at = at, lab=labs, las=3)
if (! is.na(v))
{
#abline(v= v, lwd=5, col="green")
#abline(h= h, lwd=5, col="blue")
legend("top", c(
paste( "Average coverage: ", round(v,2), sep=" "),
paste( "Average length: ", round(h,2), sep=" ")
),
fill=c("green", "blue"))
}
#print ( nrow(x[x$groups<=1 & x$reads<=0.1,]) )
#print ( nrow(x))
} )
}
}
getData = function(files, mode="clccoverage")
{
global = data.frame()
for (f in files)
{
print (f)
if (mode =="clccoverage")
{
tmp = read.table(f, as.is=T, header=F, sep="", skip=34)
names(tmp) <- c("Contig", "Sites", "Reads", "Coverage")
}
else if (mode == "otu")
{
tmp = read.table(f, as.is=T, header=F, sep="\t")
names(tmp) <- c("Reads", "Groups")
}
else
{
return (global)
}
a = strsplit( f, "\\.")[[1]][2]
b = strsplit( a,"-")[[1]][1]
cat(f, "->", a, "->", b, "\n")
tmp$File = rep(b, nrow(tmp))
if (nrow(global)==0)
{
global=tmp
}
else
{
global=rbind(tmp,global)
}
}
return (global)
}
makeGGplotOTU = function(global)
{
print ("not there yet")
}
makeGGplotCOVERAGE = function(global)
{
### how do files compare w/r to contig sizes
plots = list()
for (S in c("Reads", "Sites", "Coverage"))
{
x1 = (ggplot(global, aes_string(y=S, x="File", color="File"))
+ geom_boxplot()
+ scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
+ coord_flip()
+ theme(axis.ticks = element_blank(), axis.text.y = element_blank())
)
x2 = (ggplot(global, aes_string(x=S, y="..count..", fill="File", group="File" ))
+ stat_bin(aes(y=..count..), geom="area", position="stack", weight=2)
+ scale_x_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
)
x3 = (ggplot(global, aes_string(x=S, y="..count..", fill="File", group="File" ))
+ stat_bin(aes(y=..count..), geom="area", position="dodge", alpha=0.5, weight=2)
+ scale_x_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
)
x4 = (ggplot(global, aes_string(x=S, y="..count..", fill="File", group="File" ))
+ geom_bar(position="fill")
+ scale_x_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
)
plots = append(list(x1, x2, x3, x4), plots)
}
y1 = (ggplot(global, aes(y=Reads / Sites, x=File, color=File))
+ geom_boxplot()
+ scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
+ coord_flip()
+ theme(axis.ticks = element_blank(), axis.text.y = element_blank())
)
y2 = (ggplot(global, aes(x= Reads/Sites , y=..count.., fill=File, group=File ))
+ stat_bin(aes(y=..count..), geom="area", position="stack", weight=2)
+ scale_x_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
)
y3 = (ggplot(global, aes(x= Reads/Sites , y=..count.., fill=File, group=File ))
+ stat_bin(aes(y=..count..), geom="area", position="dodge", alpha=0.5, weight=2)
+ scale_x_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
)
y4 = (ggplot(global, aes(x= Reads/Sites , y=..count.., fill=File, group=File ))
+ geom_bar(position="fill")
+ scale_x_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x)))
)
plots = append(plots, list(y1, y2, y3, y4))
#return (plots)
# B = (ggplot(global, aes(File, Sites, color=File))
# + geom_boxplot()
# + scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x),
# labels = trans_format("log10", math_format(10^.x)))
# + coord_flip()
# + theme(axis.ticks = element_blank(), axis.text.y = element_blank())
#
# )
# C = (ggplot(global, aes(File, Coverage, color=File))
# + geom_boxplot()
# + scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x),
# labels = trans_format("log10", math_format(10^.x)))
# + coord_flip()
# + theme(axis.ticks = element_blank(), axis.text.y = element_blank())
#
# )
png("AllAssemblyStats_GG.png", width=20, height = 20, units="in", res=250)
multiplot(plotlist=plots, cols=4)
dev.off()
invisible(plots)
}
makeBatchOTU = function(filename)
{
pdf(paste(filename,"otusizes.pdf", sep="."), paper="special", width=11, height=10)
makePlot(filename = filename, minreads=0, mingroups=0)
makePlot(filename = filename, minreads=1, mingroups=0)
makePlot(filename = filename, minreads=1, mingroups=1)
makePlot(filename = filename, minreads=5, mingroups=1)
dev.off()
}
makeBatchCoverage = function(filename)
{
pdf(paste(filename,"coveragestats.pdf", sep="."), paper="special", width=11, height=10)
makePlot(filename = filename, minreads=0, mingroups=0, mode="coverage", nlevels=50)
makePlot(filename = filename, minreads=0, mingroups=log10(10), mode="coverage", nlevels=50)
makePlot(filename = filename, minreads=1000, mingroups=log10(10), mode="coverage", nlevels=50)
dev.off()
}
###################################################################
##### OTU stats
files = dir( pattern=glob2rx("*.otustats"))
for (f in files)
{
print (f)
makeBatchOTU (f)
}
if (length(files)>0)
{
inputdata = getData(files, mode="otu")
makeGGplotOTU (inputdata)
}
##### COVERAGE stats
files = dir( pattern=glob2rx("*.clcassemblystats"))
for (f in files)
{
print (f)
makeBatchCoverage (f)
}
if (length(files)>0)
{
inputdata = getData(files, mode="clccoverage")
makeGGplotCOVERAGE (inputdata)
}
|
{"hexsha": "90e511f6b71389c7fc4b3d03c8b8ff2fc4fe2528", "size": 11587, "ext": "r", "lang": "R", "max_stars_repo_path": "OtuReadPlots.r", "max_stars_repo_name": "andreyto/YAP", "max_stars_repo_head_hexsha": "5e897b7bbc8d3dc7a7d1d5ac6485ad474f2d51c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OtuReadPlots.r", "max_issues_repo_name": "andreyto/YAP", "max_issues_repo_head_hexsha": "5e897b7bbc8d3dc7a7d1d5ac6485ad474f2d51c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OtuReadPlots.r", "max_forks_repo_name": "andreyto/YAP", "max_forks_repo_head_hexsha": "5e897b7bbc8d3dc7a7d1d5ac6485ad474f2d51c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1995305164, "max_line_length": 172, "alphanum_fraction": 0.5960990766, "num_tokens": 4048}
|
from datetime import timedelta
import pandas as pd
import numpy as np
import re
def groupact(x):
if x < 10:
return "[0-10)"
if x < 30:
return "[10-30)"
if x < 100:
return "[30-100)"
else:
return "[100,)"
def get_matched_dataframes(df_, reddit_venue, fringe_venue, migration_date, grace_period, days_before, days_after):
    def fix_percentages(df_content):
        # Convert the summed lexicon counts into rates per `count_repeated`,
        # then rescale all rate columns to percentages.
        for col in ["fixation_dict_incels", "fixation_dict_td", "NegativeEmotion",
                    "CoreHostility", "They:", "We:"]:
            df_content[col] = df_content[col] / df_content["count_repeated"]
        df_content.loc[:, ["SEVERE_TOXICITY", "SEVERE_TOXICITY80p", "fixation_dict_incels",
                           "fixation_dict_td", "NegativeEmotion", "CoreHostility",
                           "They:", "We:"]] *= 100
        df_content = df_content.rename({"We:": "We", "They:": "They"}, axis=1)
        return df_content
# gets only relevant dates
df = df_.loc[(((df_.venue == reddit_venue) | (df_.venue == fringe_venue)) &
(df_.date_post >= migration_date - timedelta(days=days_before)) &
(df_.date_post <= migration_date + timedelta(days=days_after)))]
# gets author names
authors_reddit = list(set(df.loc[(df.venue == reddit_venue) &
(df.date_post < migration_date)].author))
authors_fringe = list(set(df.loc[(df.venue == fringe_venue) &
(df.date_post >= migration_date)].author))
# performs exact matching
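    # Names are normalized by stripping non-alphanumeric characters and
    # lowercasing, so e.g. "Some_User" on Reddit matches "someuser" on the
    # fringe venue; the *_d dicts map normalized -> original names and the
    # *_r dicts invert that mapping.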
authors_reddit_d = {re.sub("[^A-Za-z0-9]", "", name).lower(): name for name in authors_reddit}
authors_reddit_r = {i: k for k, i in authors_reddit_d.items()}
authors_fringe_d = {re.sub("[^A-Za-z0-9]", "", name).lower(): name for name in authors_fringe}
authors_fringe_r = {i: k for k, i in authors_fringe_d.items()}
exact_match = set(authors_reddit_d.keys()).intersection(set(authors_fringe_d.keys()))
pairs = [(authors_reddit_d[m], authors_fringe_d[m]) for m in exact_match if len(m) > 0]
# gets count before and after
    df_before = df.loc[((df.venue == reddit_venue) &
                        (df.date_post >= migration_date - timedelta(days=days_before)) &
                        (df.date_post < migration_date))].copy()  # .copy() avoids SettingWithCopyWarning
    df_before["kind"] = "before"
    df_after = df.loc[((df.venue == fringe_venue) &
                       (df.date_post >= migration_date) &
                       (df.date_post <= migration_date + timedelta(days=days_after)))].copy()
    df_after["kind"] = "after"
dict_to_agg = {
"body": len,
"SEVERE_TOXICITY": np.max,
"SEVERE_TOXICITY80p": np.mean,
'fixation_dict_incels': np.nansum,
'fixation_dict_td': np.nansum,
'NegativeEmotion': np.nansum,
'CoreHostility': np.nansum,
'We:': np.nansum,
'They:': np.nansum,
'length': np.nanmedian,
'count_repeated': np.nansum
}
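    # `body: len` gives the per-author post count; the lexicon columns are
    # summed here and turned into percentages by fix_percentages below.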
    df_before_gb = df_before.groupby("author").agg(dict_to_agg).reset_index()
df_before_gb["kind"] = "before"
df_before_gb = fix_percentages(df_before_gb)
df_after_gb = df_after.groupby("author").agg(dict_to_agg).reset_index()
df_after_gb["kind"] = "after"
df_after_gb = fix_percentages(df_after_gb)
# Does matching
set_before = set([p[0] for p in pairs])
    df_before_matched = df_before_gb.loc[df_before_gb.author.apply(lambda x: x in set_before)].copy()
    df_before_matched["author"] = df_before_matched["author"].apply(lambda x: authors_reddit_r[x])
    df_before_matched = df_before_matched.set_index("author")
    set_after = set([p[1] for p in pairs])
    df_after_matched = df_after_gb.loc[df_after_gb.author.apply(lambda x: x in set_after)].copy()
    df_after_matched["author"] = df_after_matched["author"].apply(lambda x: authors_fringe_r[x])
    df_after_matched = df_after_matched.set_index("author")
df_before_after = df_before_matched.merge(df_after_matched, left_index=True, right_index=True)
df_before_after.rename({"body_x": "before", "body_y": "after"}, axis=1, inplace=True)
# get quartiles
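    # Binning trick used below: `x >= quartiles` is a boolean vector over the
    # four breakpoints (0/25/50/75th percentiles), np.argmin picks the first
    # False (the first breakpoint x falls under), and `+ 4 * (x >= max(...))`
    # routes values at or above the top breakpoint into the last bin.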
    quartiles = np.nanquantile(df_before_after.before.values, np.arange(0, 1, 0.25))
    print("quartiles before", quartiles)
    df_before_after["ptile"] = df_before_after.before.apply(lambda x: np.argmin(x >= quartiles)
                                                            + 4 * (x >= max(quartiles)))
    quartiles_after = np.nanquantile(df_before_after.after.values, np.arange(0, 1, 0.25))
    df_before_after["ptile_after"] = df_before_after.after.apply(lambda x: np.argmin(x >= quartiles_after)
                                                                 + 4 * (x >= max(quartiles_after)))
    df_before_after["group_after"] = df_before_after.after.apply(groupact)
    quartiles = np.nanquantile(df_after_gb.body.values, np.arange(0, 1, 0.25), interpolation="nearest")
    print("quartiles after", quartiles)
    df_before_gb["ptile"] = df_before_gb.body.apply(lambda x: np.argmin(x >= quartiles)
                                                    + 4 * (x >= max(quartiles)))
    df_after_gb["ptile"] = df_after_gb.body.apply(lambda x: np.argmin(x >= quartiles)
                                                  + 4 * (x >= max(quartiles)))
# concats
df_gb = pd.concat([df_before_gb, df_after_gb])
df_users_matched = pd.concat(
[df_before.loc[df_before.author.apply(lambda x: x in set_before)],
df_after.loc[df_after.author.apply(lambda x: x in set_after)]])
return pairs, df_users_matched, df_gb, df_before_after
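# Usage sketch (hypothetical column names and values, for illustration only):
#
#   pairs, df_matched, df_gb, df_ba = get_matched_dataframes(
#       df_posts,                          # one row per post, incl. `author`,
#                                          # `venue`, `date_post`, text metrics
#       reddit_venue="r/incels",
#       fringe_venue="incels.co",
#       migration_date=pd.Timestamp("2017-11-07"),
#       grace_period=None,                 # accepted but unused above
#       days_before=180, days_after=180)
#
# `df_ba` then holds one row per exact-matched user with before/after post
# counts and the quartile/group labels computed above.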
|
{"hexsha": "f35159d074d4e20084ffc2c9926122fcbe2d13b6", "size": 5980, "ext": "py", "lang": "Python", "max_stars_repo_path": "helpers/match_helpers.py", "max_stars_repo_name": "epfl-dlab/platform_bans", "max_stars_repo_head_hexsha": "a294477687ffb4f636eb69c2492d8858ab2621f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "helpers/match_helpers.py", "max_issues_repo_name": "epfl-dlab/platform_bans", "max_issues_repo_head_hexsha": "a294477687ffb4f636eb69c2492d8858ab2621f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "helpers/match_helpers.py", "max_forks_repo_name": "epfl-dlab/platform_bans", "max_forks_repo_head_hexsha": "a294477687ffb4f636eb69c2492d8858ab2621f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.2258064516, "max_line_length": 115, "alphanum_fraction": 0.6284280936, "include": true, "reason": "import numpy", "num_tokens": 1527}
|
program write_to_console
implicit none
character(len=:), allocatable :: chars
chars = 'Fortran is 💪, 😎, 🔥!'
write(*,*) chars
end program
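! Build/run sketch (assumes gfortran and a UTF-8 terminal; not part of the
! original example):
!   gfortran write_to_console.f90 -o write_to_console
!   ./write_to_console
! The source must be saved as UTF-8 so the emoji in the string literal survive.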
|
{"hexsha": "a17fec8cd002141111a0cdb3651afebf64d19b9f", "size": 153, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "example/write_to_console.f90", "max_stars_repo_name": "plevold/unicode-in-fortran", "max_stars_repo_head_hexsha": "1ab8fbb154bef172e947c9c8573f950359e2c308", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-10T19:34:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T19:46:06.000Z", "max_issues_repo_path": "example/write_to_console.f90", "max_issues_repo_name": "plevold/unicode-in-fortran", "max_issues_repo_head_hexsha": "1ab8fbb154bef172e947c9c8573f950359e2c308", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-10T20:38:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T21:43:32.000Z", "max_forks_repo_path": "example/write_to_console.f90", "max_forks_repo_name": "plevold/unicode-in-fortran", "max_forks_repo_head_hexsha": "1ab8fbb154bef172e947c9c8573f950359e2c308", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8571428571, "max_line_length": 42, "alphanum_fraction": 0.6339869281, "num_tokens": 44}
|
function UseCards(object, ~, inventory, main)
res = get(0, 'ScreenSize');
%% FIGURE WINDOW
handle = ...
figure('Name', 'Use Cards', ...
'Units', 'pixels', ...
'MenuBar', 'none', ...
'NumberTitle', 'off', ...
'Position', [res(3:4)/3, 150, 200]);
%% RADIO BUTTONS
str = {'3 x Infantry','3 x Cavalry','3 x Artillery','One of each'};
r = zeros(1,4);
for i = 1:4
r(i) = uicontrol('style', 'radiobutton', ...
'Parent', handle, ...
'Units', 'normalized', ...
'String', str{i}, ...
'FontSize', 12, ...
'BackgroundColor', get(handle, 'Color'), ...
'Position', [0.1, 0.85 - 0.2 * (i-1), 0.8, 0.1], ...
'CallBack', @RadioCallback);
end
%% USE BUTTON
uicontrol('style', 'pushbutton', ...
'Parent', handle, ...
'Units', 'normalized', ...
'String', 'Use', ...
'FontSize', 12, ...
'FontWeight', 'bold', ...
'BackgroundColor', get(handle, 'Color'), ...
'Position', [0.3, 0.06, 0.4, 0.1], ...
'CallBack', {@UseBtnCallback, inventory, r, object})
uiwait(handle) %10
uiresume(main); %9
function RadioCallback(button, ~)
r = findobj('Parent', get(button, 'Parent'), 'style', 'radiobutton');
for i = find(r ~= button)
set(r(i), 'Value', 0);
end
function UseBtnCallback(button, ~, inventory, r, object)
for i = 1:4
if get(r(i),'Value')
break
end
end
%% CHECK INVENTORY IF THIS OPTION IS AVAILABLE
N = [sum(inventory == 1), sum(inventory == 2), sum(inventory == 3)];
ok = ( (i < 4) && sum(inventory == i) >= 3 ) || ( (i == 4) && all(N) );
if ~ok
uiwait(msgbox('You don''t have these cards.'))
return
end
set(object, 'UserData', i);
uiresume(get(button, 'Parent')) %10
delete(get(button, 'Parent'))
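% Usage sketch (hypothetical handles, for illustration only): UseCards is a
% modal dialog meant to be launched from a parent figure's callback, e.g.
%   inventory = [1 1 1 2 3];               % card types held by the player
%   UseCards(hCards, [], inventory, hMain);
%   choice = get(hCards, 'UserData');      % 1..4 once a valid set was used
% where hCards is any graphics object whose UserData receives the selection
% and hMain is the main game window resumed on close.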
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/34438-risk/Final/UseCards.m"}
|