| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
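Each row below is one fill-in-the-middle (FIM) training example: `prefix` holds the file text before the hole, `suffix` the text after it, `middle` the span that was cut out, and `fim_type` labels how the hole was chosen (random_line_split, identifier_body, conditional_block, or identifier_name). Within each flattened row, the bare `|` separators mark these column boundaries. Below is a minimal sketch of how such a row would typically be assembled into a PSM-style (prefix-suffix-middle) training string; the sentinel token names are assumptions borrowed from common FIM setups, not tokens this dataset defines.

```python
# Sketch: assemble one row into a PSM-style (prefix-suffix-middle) training
# string. The sentinel names are assumptions from common FIM conventions,
# not tokens defined by this dataset.
def build_fim_sample(row,
                     pre_tok="<fim_prefix>",
                     suf_tok="<fim_suffix>",
                     mid_tok="<fim_middle>"):
    # The model sees prefix and suffix as context and learns to emit the
    # middle span after the final sentinel.
    return pre_tok + row["prefix"] + suf_tok + row["suffix"] + mid_tok + row["middle"]

# Example with a toy row shaped like this dataset's columns:
row = {
    "file_name": "example.py",
    "prefix": "def inc(x):\n    return ",
    "suffix": "\n",
    "middle": "x + 1",
    "fim_type": "random_line_split",
}
print(build_fim_sample(row))
```

During training the model conditions on everything up to the middle sentinel and is scored on generating the removed span; the four fim_type classes only vary which kind of span gets cut out.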
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or (None,None) if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => {}
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI | game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
}
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn ai_basic(monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused ...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
} | objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
}); | random_line_split |
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or (None,None) if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => {}
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI
objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
});
game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) |
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn ai_basic(monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused ...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
}
| {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
} | identifier_body |
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or (None,None) if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => |
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI
objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
});
game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
}
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn ai_basic(monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused ...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
}
| {} | conditional_block |
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or (None,None) if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => {}
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI
objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
});
game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
}
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn | (monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused ...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
}
| ai_basic | identifier_name |
ikdbtest_gl.py | #Python 2/3 compatibility
from __future__ import print_function,division,absolute_import
from builtins import input,range
from six import iteritems
from ikdb import *
from ikdb import functionfactory
from klampt import *
import pkg_resources
if pkg_resources.get_distribution('klampt').version >= '0.7':
NEW_KLAMPT = True
from klampt.model import ik
from klampt.io import loader
from klampt.vis.glrobotprogram import *
from klampt.vis.glcommon import *
from klampt import PointPoser,TransformPoser
from klampt.model import collide
from klampt.math import se3
#patch to Klamp't 0.6.X
class GLWidgetProgram(GLPluginProgram):
def __init__(self,world,name):
GLPluginProgram.__init__(self,name)
self.widgetPlugin = GLWidgetPlugin()
self.setPlugin(self.widgetPlugin)
self.widgetMaster = self.widgetPlugin.klamptwidgetmaster
self.world = world
def display(self):
GLPluginProgram.display(self)
self.world.drawGL()
else:
NEW_KLAMPT = False
from klampt import ik,loader
from klampt.glrobotprogram import *
from klampt import PointPoser,TransformPoser
from klampt import robotcollide as collide
from klampt import se3
import sys
import traceback
import numpy as np
#preload
from sklearn.neighbors import NearestNeighbors,BallTree
class IKDBVisualTester(GLWidgetProgram):
def __init__(self,visWorld,planningWorld,name="IK Database visual tester"):
GLWidgetProgram.__init__(self,visWorld,name)
self.planningWorld = planningWorld
self.collider = collide.WorldCollider(visWorld)
self.ikdb = ManagedIKDatabase(planningWorld.robot(0))
self.ikWidgets = []
self.ikIndices = []
self.ikProblem = IKProblem()
self.ikProblem.setFeasibilityTest('collisionFree',None)
qmin,qmax = planningWorld.robot(0).getJointLimits()
self.ikProblem.setCostFunction('jointRangeCost_dynamic',[qmin,qmax])
self.drawDb = False
self.continuous = False
self.reSolve = False
self.currentConfig = self.world.robot(0).getConfig()
def mousefunc(self,button,state,x,y):
#Put your mouse handler here
#the current example prints out the list of objects clicked whenever
#you right click
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.reSolve = False
dragging = False
if NEW_KLAMPT:
dragging = self.widgetPlugin.klamptwidgetdragging
else:
dragging = self.draggingWidget
if not dragging and button == 2 and state==0:
#down
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
lpt = se3.apply(se3.inv(link.getTransform()),wpt)
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(PointPoser())
self.ikWidgets[-1].set(wpt)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,local=lpt,world=wpt))
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.refresh()
return
#the dx,dy arguments are needed for cross-compatibility between 0.6.x and 0.7
def motionfunc(self,x,y,dx=0,dy=0):
dragging = False
if NEW_KLAMPT:
retval = GLWidgetProgram.motionfunc(self,x,y,dx,dy)
dragging = self.widgetPlugin.klamptwidgetdragging
else:
retval = GLWidgetProgram.motionfunc(self,x,y)
dragging = self.draggingWidget
if dragging:
#update all the IK objectives
for i in range(len(self.ikWidgets)):
index = self.ikIndices[i]
if isinstance(self.ikWidgets[i],PointPoser):
wptnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
lpt,wptold = obj.getPosition()
obj.setFixedPoint(link,lpt,wptnew)
#don't solve now, wait for refresh to process
if self.continuous and wptnew != wptold:
self.reSolve = True
elif isinstance(self.ikWidgets[i],TransformPoser):
Rnew,tnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
Rold,told = obj.getTransform()
obj.setFixedTransform(link,Rnew,tnew)
#don't solve now, wait for refresh to process
if self.continuous and (Rnew,tnew) != (Rold,told):
self.reSolve = True
return retval
def keyboardfunc(self,c,x,y):
if c=='h': | print ('[space]: tests the current configuration')
print ('d: deletes IK constraint')
print ('t: adds a new rotation-fixed IK constraint')
print ('f: flushes the current database to disk')
print ('s: saves the current database to disk')
print ('b: performs one background step')
print ('B: starts / stops the background thread')
print ('v: toggles display of the database')
print ('c: toggles continuous re-solving of IK constraints as they are being moved')
print ('o: toggles soft / hard IK constraints')
elif c==' ':
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
print ("Solved")
self.currentConfig = soln
self.refresh()
else:
print ("Failure")
elif c=='d':
for i,w in enumerate(self.ikWidgets):
if w.hasHighlight():
print ("Deleting IK widget")
#delete it
index = self.ikIndices[i]
self.widgetMaster.remove(w)
del self.ikWidgets[i]
del self.ikIndices[i]
del self.ikProblem.objectives[index]
for j in range(len(self.ikIndices)):
self.ikIndices[j] = j
self.refresh()
break
elif c=='t':
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
Tlink = link.getTransform()
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(TransformPoser())
self.ikWidgets[-1].set(*Tlink)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,R=Tlink[0],t=Tlink[1]))
self.refresh()
elif c=='f':
self.ikdb.flush()
elif c=='s':
self.ikdb.save()
elif c=='b':
self.ikdb.backgroundStep()
self.refresh()
elif c=='B':
if hasattr(self.ikdb,'thread'):
self.ikdb.stopBackgroundLoop()
else:
self.ikdb.startBackgroundLoop(0)
elif c=='v':
self.drawDb = not self.drawDb
elif c=='c':
self.continuous = not self.continuous
elif c == 'o':
self.ikProblem.setSoftObjectives(not self.ikProblem.softObjectives)
def display(self):
if self.reSolve:
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
self.currentConfig = soln
self.reSolve = False
self.world.robot(0).setConfig(self.currentConfig)
GLWidgetProgram.display(self)
glDisable(GL_LIGHTING)
#draw IK goals
for obj in self.ikProblem.objectives:
linkindex = obj.link()
link = self.world.robot(0).link(linkindex)
lp,wpdes = obj.getPosition()
wp = se3.apply(link.getTransform(),lp)
glLineWidth(4.0)
glDisable(GL_LIGHTING)
glColor3f(0,1,0)
glBegin(GL_LINES)
glVertex3f(*wp)
glVertex3f(*wpdes)
glEnd()
glLineWidth(1)
#draw end positions of solved problems
if self.drawDb:
glPointSize(3.0)
glBegin(GL_POINTS)
for k,db in iteritems(self.ikdb.databases):
for i in range(db.numProblems()):
try:
p = db.getProblem(i)
except Exception as e:
traceback.print_exc()
exit(0)
if db.solutions[i] is None:
glColor3f(1,0,0)
else:
glColor3f(0,0,1)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glColor3f(1,1,0)
for pjson,soln in self.ikdb.backburner:
p = IKProblem()
p.fromJson(pjson)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glEnd()
return
def click_world(self,x,y):
"""Helper: returns (obj,pt) where obj is the closest world object
clicked, and pt is the associated clicked point (in world coordinates).
If no point is clicked, returns None."""
#get the viewport ray
if NEW_KLAMPT:
(s,d) = self.view.click_ray(x,y)
else:
(s,d) = self.click_ray(x,y)
#run the collision tests
collided = []
for g in self.collider.geomList:
(hit,pt) = g[1].rayCast(s,d)
if hit:
dist = vectorops.dot(vectorops.sub(pt,s),d)
collided.append((dist,g[0]))
if len(collided)==0:
return None
dist,obj = min(collided,key=lambda x:x[0])
return obj,vectorops.madd(s,d,dist)
def main():
print ("ikdbtest2.py: This example visually shows the learning process")
print ("USAGE: ikdbtest2.py [ROBOT OR WORLD FILE]")
print ("Press h for help.")
import sys
import os
fn = os.path.expanduser("~/Klampt-examples/data/robots/tx90ball.rob")
if len(sys.argv) > 1:
fn = sys.argv[1]
world = WorldModel()
world.readFile(fn)
planningWorld = world.copy()
#for free base robots
qmin,qmax = world.robot(0).getJointLimits()
for i,(a,b) in enumerate(zip(qmin,qmax)):
if not np.isfinite(a):
print ("Setting finite bound on joint",i)
qmin[i] = -math.pi
if not np.isfinite(b):
print ("Setting finite bound on joint",i)
qmax[i] = math.pi
planningWorld.robot(0).setJointLimits(qmin,qmax)
functionfactory.registerDefaultFunctions()
functionfactory.registerCollisionFunction(planningWorld)
functionfactory.registerJointRangeCostFunction(planningWorld.robot(0))
tester = IKDBVisualTester(world,planningWorld)
tester.run()
if __name__ == "__main__":
main() | print ('HELP:')
print ('[right-click]: add a new IK constraint') | random_line_split |
ikdbtest_gl.py | #Python 2/3 compatibility
from __future__ import print_function,division,absolute_import
from builtins import input,range
from six import iteritems
from ikdb import *
from ikdb import functionfactory
from klampt import *
import pkg_resources
if pkg_resources.get_distribution('klampt').version >= '0.7':
NEW_KLAMPT = True
from klampt.model import ik
from klampt.io import loader
from klampt.vis.glrobotprogram import *
from klampt.vis.glcommon import *
from klampt import PointPoser,TransformPoser
from klampt.model import collide
from klampt.math import se3
#patch to Klamp't 0.6.X
class GLWidgetProgram(GLPluginProgram):
def __init__(self,world,name):
GLPluginProgram.__init__(self,name)
self.widgetPlugin = GLWidgetPlugin()
self.setPlugin(self.widgetPlugin)
self.widgetMaster = self.widgetPlugin.klamptwidgetmaster
self.world = world
def display(self):
GLPluginProgram.display(self)
self.world.drawGL()
else:
NEW_KLAMPT = False
from klampt import ik,loader
from klampt.glrobotprogram import *
from klampt import PointPoser,TransformPoser
from klampt import robotcollide as collide
from klampt import se3
import sys
import traceback
import numpy as np
#preload
from sklearn.neighbors import NearestNeighbors,BallTree
class IKDBVisualTester(GLWidgetProgram):
def __init__(self,visWorld,planningWorld,name="IK Database visual tester"):
GLWidgetProgram.__init__(self,visWorld,name)
self.planningWorld = planningWorld
self.collider = collide.WorldCollider(visWorld)
self.ikdb = ManagedIKDatabase(planningWorld.robot(0))
self.ikWidgets = []
self.ikIndices = []
self.ikProblem = IKProblem()
self.ikProblem.setFeasibilityTest('collisionFree',None)
qmin,qmax = planningWorld.robot(0).getJointLimits()
self.ikProblem.setCostFunction('jointRangeCost_dynamic',[qmin,qmax])
self.drawDb = False
self.continuous = False
self.reSolve = False
self.currentConfig = self.world.robot(0).getConfig()
def mousefunc(self,button,state,x,y):
#Put your mouse handler here
#the current example prints out the list of objects clicked whenever
#you right click
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.reSolve = False
dragging = False
if NEW_KLAMPT:
dragging = self.widgetPlugin.klamptwidgetdragging
else:
dragging = self.draggingWidget
if not dragging and button == 2 and state==0:
#down
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
lpt = se3.apply(se3.inv(link.getTransform()),wpt)
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(PointPoser())
self.ikWidgets[-1].set(wpt)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,local=lpt,world=wpt))
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.refresh()
return
#the dx,dy arguments are needed for cross-compatibility between 0.6.x and 0.7
def motionfunc(self,x,y,dx=0,dy=0):
dragging = False
if NEW_KLAMPT:
retval = GLWidgetProgram.motionfunc(self,x,y,dx,dy)
dragging = self.widgetPlugin.klamptwidgetdragging
else:
retval = GLWidgetProgram.motionfunc(self,x,y)
dragging = self.draggingWidget
if dragging:
#update all the IK objectives
for i in range(len(self.ikWidgets)):
index = self.ikIndices[i]
if isinstance(self.ikWidgets[i],PointPoser):
wptnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
lpt,wptold = obj.getPosition()
obj.setFixedPoint(link,lpt,wptnew)
#don't solve now, wait for refresh to process
if self.continuous and wptnew != wptold:
self.reSolve = True
elif isinstance(self.ikWidgets[i],TransformPoser):
Rnew,tnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
Rold,told = obj.getTransform()
obj.setFixedTransform(link,Rnew,tnew)
#don't solve now, wait for refresh to process
if self.continuous and (Rnew,tnew) != (Rold,told):
self.reSolve = True
return retval
def keyboardfunc(self,c,x,y):
if c=='h':
print ('HELP:')
print ('[right-click]: add a new IK constraint')
print ('[space]: tests the current configuration')
print ('d: deletes IK constraint')
print ('t: adds a new rotation-fixed IK constraint')
print ('f: flushes the current database to disk')
print ('s: saves the current database to disk')
print ('b: performs one background step')
print ('B: starts / stops the background thread')
print ('v: toggles display of the database')
print ('c: toggles continuous re-solving of IK constraints as they are being moved')
print ('o: toggles soft / hard IK constraints')
elif c==' ':
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
print ("Solved")
self.currentConfig = soln
self.refresh()
else:
print ("Failure")
elif c=='d':
for i,w in enumerate(self.ikWidgets):
if w.hasHighlight():
print ("Deleting IK widget")
#delete it
index = self.ikIndices[i]
self.widgetMaster.remove(w)
del self.ikWidgets[i]
del self.ikIndices[i]
del self.ikProblem.objectives[index]
for j in range(len(self.ikIndices)):
self.ikIndices[j] = j
self.refresh()
break
elif c=='t':
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
Tlink = link.getTransform()
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(TransformPoser())
self.ikWidgets[-1].set(*Tlink)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,R=Tlink[0],t=Tlink[1]))
self.refresh()
elif c=='f':
self.ikdb.flush()
elif c=='s':
self.ikdb.save()
elif c=='b':
self.ikdb.backgroundStep()
self.refresh()
elif c=='B':
if hasattr(self.ikdb,'thread'):
self.ikdb.stopBackgroundLoop()
else:
self.ikdb.startBackgroundLoop(0)
elif c=='v':
self.drawDb = not self.drawDb
elif c=='c':
self.continuous = not self.continuous
elif c == 'o':
self.ikProblem.setSoftObjectives(not self.ikProblem.softObjectives)
def display(self):
if self.reSolve:
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
self.currentConfig = soln
self.reSolve = False
self.world.robot(0).setConfig(self.currentConfig)
GLWidgetProgram.display(self)
glDisable(GL_LIGHTING)
#draw IK goals
for obj in self.ikProblem.objectives:
linkindex = obj.link()
link = self.world.robot(0).link(linkindex)
lp,wpdes = obj.getPosition()
wp = se3.apply(link.getTransform(),lp)
glLineWidth(4.0)
glDisable(GL_LIGHTING)
glColor3f(0,1,0)
glBegin(GL_LINES)
glVertex3f(*wp)
glVertex3f(*wpdes)
glEnd()
glLineWidth(1)
#draw end positions of solved problems
if self.drawDb:
glPointSize(3.0)
glBegin(GL_POINTS)
for k,db in iteritems(self.ikdb.databases):
for i in range(db.numProblems()):
try:
p = db.getProblem(i)
except Exception as e:
traceback.print_exc()
exit(0)
if db.solutions[i] is None:
glColor3f(1,0,0)
else:
glColor3f(0,0,1)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glColor3f(1,1,0)
for pjson,soln in self.ikdb.backburner:
p = IKProblem()
p.fromJson(pjson)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glEnd()
return
def click_world(self,x,y):
"""Helper: returns (obj,pt) where obj is the closest world object
clicked, and pt is the associated clicked point (in world coordinates).
If no point is clicked, returns None."""
#get the viewport ray
if NEW_KLAMPT:
|
else:
(s,d) = self.click_ray(x,y)
#run the collision tests
collided = []
for g in self.collider.geomList:
(hit,pt) = g[1].rayCast(s,d)
if hit:
dist = vectorops.dot(vectorops.sub(pt,s),d)
collided.append((dist,g[0]))
if len(collided)==0:
return None
dist,obj = min(collided,key=lambda x:x[0])
return obj,vectorops.madd(s,d,dist)
def main():
print ("ikdbtest2.py: This example visually shows the learning process")
print ("USAGE: ikdbtest2.py [ROBOT OR WORLD FILE]")
print ("Press h for help.")
import sys
import os
fn = os.path.expanduser("~/Klampt-examples/data/robots/tx90ball.rob")
if len(sys.argv) > 1:
fn = sys.argv[1]
world = WorldModel()
world.readFile(fn)
planningWorld = world.copy()
#for free base robots
qmin,qmax = world.robot(0).getJointLimits()
for i,(a,b) in enumerate(zip(qmin,qmax)):
if not np.isfinite(a):
print ("Setting finite bound on joint",i)
qmin[i] = -math.pi
if not np.isfinite(b):
print ("Setting finite bound on joint",i)
qmax[i] = math.pi
planningWorld.robot(0).setJointLimits(qmin,qmax)
functionfactory.registerDefaultFunctions()
functionfactory.registerCollisionFunction(planningWorld)
functionfactory.registerJointRangeCostFunction(planningWorld.robot(0))
tester = IKDBVisualTester(world,planningWorld)
tester.run()
if __name__ == "__main__":
main()
| (s,d) = self.view.click_ray(x,y) | conditional_block |
ikdbtest_gl.py | #Python 2/3 compatibility
from __future__ import print_function,division,absolute_import
from builtins import input,range
from six import iteritems
from ikdb import *
from ikdb import functionfactory
from klampt import *
import pkg_resources
if pkg_resources.get_distribution('klampt').version >= '0.7':
NEW_KLAMPT = True
from klampt.model import ik
from klampt.io import loader
from klampt.vis.glrobotprogram import *
from klampt.vis.glcommon import *
from klampt import PointPoser,TransformPoser
from klampt.model import collide
from klampt.math import se3
#patch to Klamp't 0.6.X
class GLWidgetProgram(GLPluginProgram):
def __init__(self,world,name):
GLPluginProgram.__init__(self,name)
self.widgetPlugin = GLWidgetPlugin()
self.setPlugin(self.widgetPlugin)
self.widgetMaster = self.widgetPlugin.klamptwidgetmaster
self.world = world
def display(self):
GLPluginProgram.display(self)
self.world.drawGL()
else:
NEW_KLAMPT = False
from klampt import ik,loader
from klampt.glrobotprogram import *
from klampt import PointPoser,TransformPoser
from klampt import robotcollide as collide
from klampt import se3
import sys
import traceback
import numpy as np
#preload
from sklearn.neighbors import NearestNeighbors,BallTree
class IKDBVisualTester(GLWidgetProgram):
def | (self,visWorld,planningWorld,name="IK Database visual tester"):
GLWidgetProgram.__init__(self,visWorld,name)
self.planningWorld = planningWorld
self.collider = collide.WorldCollider(visWorld)
self.ikdb = ManagedIKDatabase(planningWorld.robot(0))
self.ikWidgets = []
self.ikIndices = []
self.ikProblem = IKProblem()
self.ikProblem.setFeasibilityTest('collisionFree',None)
qmin,qmax = planningWorld.robot(0).getJointLimits()
self.ikProblem.setCostFunction('jointRangeCost_dynamic',[qmin,qmax])
self.drawDb = False
self.continuous = False
self.reSolve = False
self.currentConfig = self.world.robot(0).getConfig()
def mousefunc(self,button,state,x,y):
#Mouse handler: right-clicking (button 2) on a robot link adds a new
#point IK constraint at the clicked point
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.reSolve = False
dragging = False
if NEW_KLAMPT:
dragging = self.widgetPlugin.klamptwidgetdragging
else:
dragging = self.draggingWidget
if not dragging and button == 2 and state==0:
#down
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
lpt = se3.apply(se3.inv(link.getTransform()),wpt)
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(PointPoser())
self.ikWidgets[-1].set(wpt)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,local=lpt,world=wpt))
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.refresh()
return
#the dx,dy arguments are needed for cross-compatibility between 0.6.x and 0.7
def motionfunc(self,x,y,dx=0,dy=0):
dragging = False
if NEW_KLAMPT:
retval = GLWidgetProgram.motionfunc(self,x,y,dx,dy)
dragging = self.widgetPlugin.klamptwidgetdragging
else:
retval = GLWidgetProgram.motionfunc(self,x,y)
dragging = self.draggingWidget
if dragging:
#update all the IK objectives
for i in range(len(self.ikWidgets)):
index = self.ikIndices[i]
if isinstance(self.ikWidgets[i],PointPoser):
wptnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
lpt,wptold = obj.getPosition()
obj.setFixedPoint(link,lpt,wptnew)
#don't solve now, wait for refresh to process
if self.continuous and wptnew != wptold:
self.reSolve = True
elif isinstance(self.ikWidgets[i],TransformPoser):
Rnew,tnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
Rold,told = obj.getTransform()
obj.setFixedTransform(link,Rnew,tnew)
#don't solve now, wait for refresh to process
if self.continuous and (Rnew,tnew) != (Rold,told):
self.reSolve = True
return retval
def keyboardfunc(self,c,x,y):
if c=='h':
print ('HELP:')
print ('[right-click]: add a new IK constraint')
print ('[space]: tests the current configuration')
print ('d: deletes IK constraint')
print ('t: adds a new rotation-fixed IK constraint')
print ('f: flushes the current database to disk')
print ('s: saves the current database to disk')
print ('b: performs one background step')
print ('B: starts / stops the background thread')
print ('v: toggles display of the database')
print ('c: toggles continuous re-solving of IK constraints as they are moved')
print ('o: toggles soft / hard IK constraints')
elif c==' ':
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
print ("Solved")
self.currentConfig = soln
self.refresh()
else:
print ("Failure")
elif c=='d':
for i,w in enumerate(self.ikWidgets):
if w.hasHighlight():
print ("Deleting IK widget")
#delete it
index = self.ikIndices[i]
self.widgetMaster.remove(w)
del self.ikWidgets[i]
del self.ikIndices[i]
del self.ikProblem.objectives[index]
for j in range(len(self.ikIndices)):
self.ikIndices[j] = j
self.refresh()
break
elif c=='t':
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
Tlink = link.getTransform()
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(TransformPoser())
self.ikWidgets[-1].set(*Tlink)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,R=Tlink[0],t=Tlink[1]))
self.refresh()
elif c=='f':
self.ikdb.flush()
elif c=='s':
self.ikdb.save()
elif c=='b':
self.ikdb.backgroundStep()
self.refresh()
elif c=='B':
if hasattr(self.ikdb,'thread'):
self.ikdb.stopBackgroundLoop()
else:
self.ikdb.startBackgroundLoop(0)
elif c=='v':
self.drawDb = not self.drawDb
elif c=='c':
self.continuous = not self.continuous
elif c == 'o':
self.ikProblem.setSoftObjectives(not self.ikProblem.softObjectives)
def display(self):
if self.reSolve:
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
self.currentConfig = soln
self.reSolve = False
self.world.robot(0).setConfig(self.currentConfig)
GLWidgetProgram.display(self)
glDisable(GL_LIGHTING)
#draw IK goals
for obj in self.ikProblem.objectives:
linkindex = obj.link()
link = self.world.robot(0).link(linkindex)
lp,wpdes = obj.getPosition()
wp = se3.apply(link.getTransform(),lp)
glLineWidth(4.0)
glDisable(GL_LIGHTING)
glColor3f(0,1,0)
glBegin(GL_LINES)
glVertex3f(*wp)
glVertex3f(*wpdes)
glEnd()
glLineWidth(1)
#draw end positions of solved problems
if self.drawDb:
glPointSize(3.0)
glBegin(GL_POINTS)
for k,db in iteritems(self.ikdb.databases):
for i in range(db.numProblems()):
try:
p = db.getProblem(i)
except Exception as e:
traceback.print_exc()
exit(0)
if db.solutions[i] is None:
glColor3f(1,0,0)
else:
glColor3f(0,0,1)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glColor3f(1,1,0)
for pjson,soln in self.ikdb.backburner:
p = IKProblem()
p.fromJson(pjson)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glEnd()
return
def click_world(self,x,y):
"""Helper: returns (obj,pt) where obj is the closest world object
clicked, and pt is the associated clicked point (in world coordinates).
If no point is clicked, returns None."""
#get the viewport ray
if NEW_KLAMPT:
(s,d) = self.view.click_ray(x,y)
else:
(s,d) = self.click_ray(x,y)
#run the collision tests
collided = []
for g in self.collider.geomList:
(hit,pt) = g[1].rayCast(s,d)
if hit:
dist = vectorops.dot(vectorops.sub(pt,s),d)
collided.append((dist,g[0]))
if len(collided)==0:
return None
dist,obj = min(collided,key=lambda x:x[0])
return obj,vectorops.madd(s,d,dist)
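#Added sketch (not in the original): how a click_world result is typically
#consumed; this mirrors the handling in mousefunc/keyboardfunc above.
#
#   clicked = self.click_world(x, y)
#   if clicked is not None:
#       obj, wpt = clicked  # world object and clicked point (world coordinates)
#       if isinstance(obj, RobotModelLink):
#           print("clicked link", obj.getName(), "at", wpt)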
def main():
print ("ikdbtest2.py: This example visually shows the learning process")
print ("USAGE: ikdbtest2.py [ROBOT OR WORLD FILE]")
print ("Press h for help.")
import sys
import os
import math  # math.pi is used below but was never imported
fn = os.path.expanduser("~/Klampt-examples/data/robots/tx90ball.rob")
if len(sys.argv) > 1:
fn = sys.argv[1]
world = WorldModel()
world.readFile(fn)
planningWorld = world.copy()
#for free base robots
qmin,qmax = world.robot(0).getJointLimits()
for i,(a,b) in enumerate(zip(qmin,qmax)):
if not np.isfinite(a):
print ("Setting finite bound on joint",i)
qmin[i] = -math.pi
if not np.isfinite(b):
print ("Setting finite bound on joint",i)
qmax[i] = math.pi
planningWorld.robot(0).setJointLimits(qmin,qmax)
functionfactory.registerDefaultFunctions()
functionfactory.registerCollisionFunction(planningWorld)
functionfactory.registerJointRangeCostFunction(planningWorld.robot(0))
tester = IKDBVisualTester(world,planningWorld)
tester.run()
if __name__ == "__main__":
main()
| __init__ | identifier_name |
ikdbtest_gl.py | #Python 2/3 compatibility
from __future__ import print_function,division,absolute_import
from builtins import input,range
from six import iteritems
from ikdb import *
from ikdb import functionfactory
from klampt import *
import pkg_resources
if pkg_resources.get_distribution('klampt').version >= '0.7':
NEW_KLAMPT = True
from klampt.model import ik
from klampt.io import loader
from klampt.vis.glrobotprogram import *
from klampt.vis.glcommon import *
from klampt import PointPoser,TransformPoser
from klampt.model import collide
from klampt.math import se3
#shim exposing the Klamp't 0.6.x GLWidgetProgram API on top of 0.7's GLPluginProgram
class GLWidgetProgram(GLPluginProgram):
def __init__(self,world,name):
GLPluginProgram.__init__(self,name)
self.widgetPlugin = GLWidgetPlugin()
self.setPlugin(self.widgetPlugin)
self.widgetMaster = self.widgetPlugin.klamptwidgetmaster
self.world = world
def display(self):
GLPluginProgram.display(self)
self.world.drawGL()
else:
NEW_KLAMPT = False
from klampt import ik,loader
from klampt.glrobotprogram import *
from klampt import PointPoser,TransformPoser
from klampt import robotcollide as collide
from klampt import se3
import sys
import traceback
import numpy as np
#preload
from sklearn.neighbors import NearestNeighbors,BallTree
class IKDBVisualTester(GLWidgetProgram):
def __init__(self,visWorld,planningWorld,name="IK Database visual tester"):
|
def mousefunc(self,button,state,x,y):
#Mouse handler: right-clicking (button 2) on a robot link adds a new
#point IK constraint at the clicked point
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.reSolve = False
dragging = False
if NEW_KLAMPT:
dragging = self.widgetPlugin.klamptwidgetdragging
else:
dragging = self.draggingWidget
if not dragging and button == 2 and state==0:
#down
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
lpt = se3.apply(se3.inv(link.getTransform()),wpt)
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(PointPoser())
self.ikWidgets[-1].set(wpt)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,local=lpt,world=wpt))
GLWidgetProgram.mousefunc(self,button,state,x,y)
self.refresh()
return
#the dx,dy arguments are needed for cross-compatibility between 0.6.x and 0.7
def motionfunc(self,x,y,dx=0,dy=0):
dragging = False
if NEW_KLAMPT:
retval = GLWidgetProgram.motionfunc(self,x,y,dx,dy)
dragging = self.widgetPlugin.klamptwidgetdragging
else:
retval = GLWidgetProgram.motionfunc(self,x,y)
dragging = self.draggingWidget
if dragging:
#update all the IK objectives
for i in range(len(self.ikWidgets)):
index = self.ikIndices[i]
if isinstance(self.ikWidgets[i],PointPoser):
wptnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
lpt,wptold = obj.getPosition()
obj.setFixedPoint(link,lpt,wptnew)
#don't solve now, wait for refresh to process
if self.continuous and wptnew != wptold:
self.reSolve = True
elif isinstance(self.ikWidgets[i],TransformPoser):
Rnew,tnew = self.ikWidgets[i].get()
obj = self.ikProblem.objectives[index]
link = obj.link()
Rold,told = obj.getTransform()
obj.setFixedTransform(link,Rnew,tnew)
#don't solve now, wait for refresh to process
if self.continuous and (Rnew,tnew) != (Rold,told):
self.reSolve = True
return retval
def keyboardfunc(self,c,x,y):
if c=='h':
print ('HELP:')
print ('[right-click]: add a new IK constraint')
print ('[space]: tests the current configuration')
print ('d: deletes IK constraint')
print ('t: adds a new rotation-fixed IK constraint')
print ('f: flushes the current database to disk')
print ('s: saves the current database to disk')
print ('b: performs one background step')
print ('B: starts / stops the background thread')
print ('v: toggles display of the database')
print ('c: toggles continuous re-solving of IK constraints as they are moved')
print ('o: toggles soft / hard IK constraints')
elif c==' ':
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
print ("Solved")
self.currentConfig = soln
self.refresh()
else:
print ("Failure")
elif c=='d':
for i,w in enumerate(self.ikWidgets):
if w.hasHighlight():
print ("Deleting IK widget")
#delete it
index = self.ikIndices[i]
self.widgetMaster.remove(w)
del self.ikWidgets[i]
del self.ikIndices[i]
del self.ikProblem.objectives[index]
for j in range(len(self.ikIndices)):
self.ikIndices[j] = j
self.refresh()
break
elif c=='t':
clicked = self.click_world(x,y)
if clicked is not None and isinstance(clicked[0],RobotModelLink):
#make a new widget
link, wpt = clicked
Tlink = link.getTransform()
self.ikIndices.append(len(self.ikWidgets))
self.ikWidgets.append(TransformPoser())
self.ikWidgets[-1].set(*Tlink)
self.widgetMaster.add(self.ikWidgets[-1])
self.ikProblem.addObjective(ik.objective(link,R=Tlink[0],t=Tlink[1]))
self.refresh()
elif c=='f':
self.ikdb.flush()
elif c=='s':
self.ikdb.save()
elif c=='b':
self.ikdb.backgroundStep()
self.refresh()
elif c=='B':
if hasattr(self.ikdb,'thread'):
self.ikdb.stopBackgroundLoop()
else:
self.ikdb.startBackgroundLoop(0)
elif c=='v':
self.drawDb = not self.drawDb
elif c=='c':
self.continuous = not self.continuous
elif c == 'o':
self.ikProblem.setSoftObjectives(not self.ikProblem.softObjectives)
def display(self):
if self.reSolve:
self.planningWorld.robot(0).setConfig(self.currentConfig)
soln = self.ikdb.solve(self.ikProblem)
if soln:
self.currentConfig = soln
self.reSolve = False
self.world.robot(0).setConfig(self.currentConfig)
GLWidgetProgram.display(self)
glDisable(GL_LIGHTING)
#draw IK goals
for obj in self.ikProblem.objectives:
linkindex = obj.link()
link = self.world.robot(0).link(linkindex)
lp,wpdes = obj.getPosition()
wp = se3.apply(link.getTransform(),lp)
glLineWidth(4.0)
glDisable(GL_LIGHTING)
glColor3f(0,1,0)
glBegin(GL_LINES)
glVertex3f(*wp)
glVertex3f(*wpdes)
glEnd()
glLineWidth(1)
#draw end positions of solved problems
if self.drawDb:
glPointSize(3.0)
glBegin(GL_POINTS)
for k,db in iteritems(self.ikdb.databases):
for i in range(db.numProblems()):
try:
p = db.getProblem(i)
except Exception as e:
traceback.print_exc()
exit(0)
if db.solutions[i] is None:
glColor3f(1,0,0)
else:
glColor3f(0,0,1)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glColor3f(1,1,0)
for pjson,soln in self.ikdb.backburner:
p = IKProblem()
p.fromJson(pjson)
for obj in p.objectives:
lp,wpdes = obj.getPosition()
glVertex3f(*wpdes)
glEnd()
return
def click_world(self,x,y):
"""Helper: returns (obj,pt) where obj is the closest world object
clicked, and pt is the associated clicked point (in world coordinates).
If no point is clicked, returns None."""
#get the viewport ray
if NEW_KLAMPT:
(s,d) = self.view.click_ray(x,y)
else:
(s,d) = self.click_ray(x,y)
#run the collision tests
collided = []
for g in self.collider.geomList:
(hit,pt) = g[1].rayCast(s,d)
if hit:
dist = vectorops.dot(vectorops.sub(pt,s),d)
collided.append((dist,g[0]))
if len(collided)==0:
return None
dist,obj = min(collided,key=lambda x:x[0])
return obj,vectorops.madd(s,d,dist)
def main():
print ("ikdbtest2.py: This example visually shows the learning process")
print ("USAGE: ikdbtest2.py [ROBOT OR WORLD FILE]")
print ("Press h for help.")
import sys
import os
import math  # math.pi is used below but was never imported
fn = os.path.expanduser("~/Klampt-examples/data/robots/tx90ball.rob")
if len(sys.argv) > 1:
fn = sys.argv[1]
world = WorldModel()
world.readFile(fn)
planningWorld = world.copy()
#for free base robots
qmin,qmax = world.robot(0).getJointLimits()
for i,(a,b) in enumerate(zip(qmin,qmax)):
if not np.isfinite(a):
print ("Setting finite bound on joint",i)
qmin[i] = -math.pi
if not np.isfinite(b):
print ("Setting finite bound on joint",i)
qmax[i] = math.pi
planningWorld.robot(0).setJointLimits(qmin,qmax)
functionfactory.registerDefaultFunctions()
functionfactory.registerCollisionFunction(planningWorld)
functionfactory.registerJointRangeCostFunction(planningWorld.robot(0))
tester = IKDBVisualTester(world,planningWorld)
tester.run()
if __name__ == "__main__":
main()
| GLWidgetProgram.__init__(self,visWorld,name)
self.planningWorld = planningWorld
self.collider = collide.WorldCollider(visWorld)
self.ikdb = ManagedIKDatabase(planningWorld.robot(0))
self.ikWidgets = []
self.ikIndices = []
self.ikProblem = IKProblem()
self.ikProblem.setFeasibilityTest('collisionFree',None)
qmin,qmax = planningWorld.robot(0).getJointLimits()
self.ikProblem.setCostFunction('jointRangeCost_dynamic',[qmin,qmax])
self.drawDb = False
self.continuous = False
self.reSolve = False
self.currentConfig = self.world.robot(0).getConfig() | identifier_body |
index.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the infrastructure to create an
// (identifier) index for a set of Go files.
//
// Basic indexing algorithm:
// - traverse all .go files of the file tree specified by root
// - for each word (identifier) encountered, collect all occurrences (spots)
// into a list; this produces a list of spots for each word
// - reduce the lists: from a list of spots to a list of FileRuns,
// and from a list of FileRuns into a list of PakRuns
// - make a HitList from the PakRuns
//
// Details:
// - keep two lists per word: one containing package-level declarations
// that have snippets, and one containing all other spots
// - keep the snippets in a separate table indexed by snippet index
// and store the snippet index in place of the line number in a SpotInfo
// (the line number for spots with snippets is stored in the snippet)
// - at the end, create lists of alternative spellings for a given
// word
package main
import (
"container/vector";
"go/ast";
"go/parser";
"go/token";
"go/scanner";
"os";
pathutil "path";
"sort";
"strings";
)
// ----------------------------------------------------------------------------
// RunList
// A RunList is a vector of entries that can be sorted according to some
// criteria. A RunList may be compressed by grouping "runs" of entries
// which are equal (according to the sort criteria) into a new RunList of
// runs. For instance, a RunList containing pairs (x, y) may be compressed
// into a RunList containing pair runs (x, {y}) where each run consists of
// a list of y's with the same x.
type RunList struct {
vector.Vector;
less func(x, y interface{}) bool;
}
func (h *RunList) Less(i, j int) bool { return h.less(h.At(i), h.At(j)) }
func (h *RunList) sort(less func(x, y interface{}) bool) {
h.less = less;
sort.Sort(h);
}
// Compress entries which are the same according to a sort criteria
// (specified by less) into "runs".
func (h *RunList) reduce(less func(x, y interface{}) bool, newRun func(h *RunList, i, j int) interface{}) *RunList {
// create runs of entries with equal values
h.sort(less);
// for each run, make a new run object and collect them in a new RunList
var hh RunList;
i := 0;
for j := 0; j < h.Len(); j++ {
if less(h.At(i), h.At(j)) {
hh.Push(newRun(h, i, j));
i = j; // start a new run
}
}
// add final run, if any
if i < h.Len() {
hh.Push(newRun(h, i, h.Len()))
}
return &hh;
}
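// Added sketch (not in the original source): reducing a small RunList of
// SpotInfos by kind. makeSpotInfo, lessKind and newKindRun are the helpers
// declared elsewhere in this file.
//
//	var h RunList;
//	h.Push(makeSpotInfo(Use, 10, false));
//	h.Push(makeSpotInfo(Use, 12, false));
//	h.Push(makeSpotInfo(TypeDecl, 3, true));
//	runs := h.reduce(lessKind, newKindRun);
//	// runs.Len() == 2: reduce sorts by kind first, then groups the equal
//	// kinds, yielding one *KindRun for TypeDecl and one for Use.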
// ----------------------------------------------------------------------------
// SpotInfo
// A SpotInfo value describes a particular identifier spot in a given file;
// it encodes three values: the SpotKind (declaration or use), a line or
// snippet index "lori", and whether it's a line or index.
//
// The following encoding is used:
//
// bits 32 4 1 0
// value [lori|kind|isIndex]
//
type SpotInfo uint32
// SpotKind describes whether an identifier is declared (and what kind of
// declaration) or used.
type SpotKind uint32
const (
PackageClause SpotKind = iota;
ImportDecl;
ConstDecl;
TypeDecl;
VarDecl;
FuncDecl;
MethodDecl;
Use;
nKinds;
)
func init() {
// sanity check: if nKinds is too large, the SpotInfo
// accessor functions may need to be updated
if nKinds > 8 {
panic()
}
}
// makeSpotInfo makes a SpotInfo.
func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
// encode lori: bits [4..32)
x := SpotInfo(lori) << 4;
if int(x>>4) != lori {
// lori value doesn't fit - since snippet indices are
// almost certainly smaller than 1<<28, this can
// only happen for line numbers; give it no line number (= 0)
x = 0
}
// encode kind: bits [1..4)
x |= SpotInfo(kind) << 1;
// encode isIndex: bit 0
if isIndex {
x |= 1
}
return x;
}
func (x SpotInfo) Kind() SpotKind { return SpotKind(x >> 1 & 7) }
func (x SpotInfo) Lori() int { return int(x >> 4) }
func (x SpotInfo) IsIndex() bool { return x&1 != 0 }
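// Added sketch: round-tripping the packed encoding described above.
//
//	info := makeSpotInfo(FuncDecl, 42, false);
//	// info.Kind() == FuncDecl, info.Lori() == 42, info.IsIndex() == false
//	snip := makeSpotInfo(TypeDecl, 7, true);
//	// here snip.Lori() == 7 is a snippet index, as flagged by snip.IsIndex()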
// ----------------------------------------------------------------------------
// KindRun
// Debugging support. Disable to see multiple entries per line.
const removeDuplicates = true
// A KindRun is a run of SpotInfos of the same kind in a given file.
type KindRun struct {
Kind SpotKind;
Infos []SpotInfo;
}
// KindRuns are sorted by line number or index. Since the isIndex bit
// is always the same for all infos in one list we can compare lori's.
func (f *KindRun) Len() int { return len(f.Infos) }
func (f *KindRun) Less(i, j int) bool { return f.Infos[i].Lori() < f.Infos[j].Lori() }
func (f *KindRun) Swap(i, j int) { f.Infos[i], f.Infos[j] = f.Infos[j], f.Infos[i] }
// FileRun contents are sorted by Kind for the reduction into KindRuns.
func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() }
// newKindRun allocates a new KindRun from the SpotInfo run [i, j) in h.
func newKindRun(h *RunList, i, j int) interface{} {
kind := h.At(i).(SpotInfo).Kind();
infos := make([]SpotInfo, j-i);
k := 0;
for ; i < j; i++ {
infos[k] = h.At(i).(SpotInfo);
k++;
}
run := &KindRun{kind, infos};
// Spots were sorted by file and kind to create this run.
// Within this run, sort them by line number or index.
sort.Sort(run);
if removeDuplicates {
// Since both the lori and kind fields must be
// the same for duplicates, and since the isIndex
// bit is always the same for all infos in one
// list we can simply compare the entire info.
k := 0;
var prev SpotInfo;
for i, x := range infos {
if x != prev || i == 0 {
infos[k] = x;
k++;
prev = x;
}
}
run.Infos = infos[0:k];
}
return run;
}
// ----------------------------------------------------------------------------
// FileRun
// A Pak describes a Go package.
type Pak struct {
Path string; // path of directory containing the package
Name string; // package name as declared by package clause
}
// Paks are sorted by name (primary key) and by import path (secondary key).
func (p *Pak) less(q *Pak) bool {
return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path
}
// A File describes a Go file.
type File struct {
Path string; // complete file name
Pak Pak; // the package to which the file belongs
}
// A Spot describes a single occurrence of a word.
type Spot struct {
File *File;
Info SpotInfo;
}
// A FileRun is a list of KindRuns belonging to the same file.
type FileRun struct {
File *File;
Groups []*KindRun;
}
// Spots are sorted by path for the reduction into FileRuns.
func lessSpot(x, y interface{}) bool { return x.(Spot).File.Path < y.(Spot).File.Path }
// newFileRun allocates a new FileRun from the Spot run [i, j) in h.
func newFileRun(h0 *RunList, i, j int) interface{} {
file := h0.At(i).(Spot).File;
// reduce the list of Spots into a list of KindRuns
var h1 RunList;
h1.Vector.Init(j - i);
k := 0;
for ; i < j; i++ {
h1.Set(k, h0.At(i).(Spot).Info);
k++;
}
h2 := h1.reduce(lessKind, newKindRun);
// create the FileRun
groups := make([]*KindRun, h2.Len());
for i := 0; i < h2.Len(); i++ {
groups[i] = h2.At(i).(*KindRun)
}
return &FileRun{file, groups};
}
// ----------------------------------------------------------------------------
// PakRun
// A PakRun describes a run of *FileRuns of a package.
type PakRun struct {
Pak Pak;
Files []*FileRun;
}
// Sorting support for files within a PakRun.
func (p *PakRun) Len() int { return len(p.Files) }
func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Path < p.Files[j].File.Path }
func (p *PakRun) Swap(i, j int) { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] }
// FileRuns are sorted by package for the reduction into PakRuns.
func lessFileRun(x, y interface{}) bool {
return x.(*FileRun).File.Pak.less(&y.(*FileRun).File.Pak)
}
// newPakRun allocates a new PakRun from the *FileRun run [i, j) in h.
func newPakRun(h *RunList, i, j int) interface{} {
pak := h.At(i).(*FileRun).File.Pak;
files := make([]*FileRun, j-i);
k := 0;
for ; i < j; i++ |
run := &PakRun{pak, files};
sort.Sort(run); // files were sorted by package; sort them by file now
return run;
}
// ----------------------------------------------------------------------------
// HitList
// A HitList describes a list of PakRuns.
type HitList []*PakRun
// PakRuns are sorted by package.
func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(&y.(*PakRun).Pak) }
func reduce(h0 *RunList) HitList {
// reduce a list of Spots into a list of FileRuns
h1 := h0.reduce(lessSpot, newFileRun);
// reduce a list of FileRuns into a list of PakRuns
h2 := h1.reduce(lessFileRun, newPakRun);
// sort the list of PakRuns by package
h2.sort(lessPakRun);
// create a HitList
h := make(HitList, h2.Len());
for i := 0; i < h2.Len(); i++ {
h[i] = h2.At(i).(*PakRun)
}
return h;
}
func (h HitList) filter(pakname string) HitList {
// determine number of matching packages (most of the time just one)
n := 0;
for _, p := range h {
if p.Pak.Name == pakname {
n++
}
}
// create filtered HitList
hh := make(HitList, n);
i := 0;
for _, p := range h {
if p.Pak.Name == pakname {
hh[i] = p;
i++;
}
}
return hh;
}
// ----------------------------------------------------------------------------
// AltWords
type wordPair struct {
canon string; // canonical word spelling (all lowercase)
alt string; // alternative spelling
}
// An AltWords describes a list of alternative spellings for a
// canonical (all lowercase) spelling of a word.
type AltWords struct {
Canon string; // canonical word spelling (all lowercase)
Alts []string; // alternative spellings for the same word
}
// wordPairs are sorted by their canonical spelling.
func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon }
// newAltWords allocates a new AltWords from the *wordPair run [i, j) in h.
func newAltWords(h *RunList, i, j int) interface{} {
canon := h.At(i).(*wordPair).canon;
alts := make([]string, j-i);
k := 0;
for ; i < j; i++ {
alts[k] = h.At(i).(*wordPair).alt;
k++;
}
return &AltWords{canon, alts};
}
func (a *AltWords) filter(s string) *AltWords {
if len(a.Alts) == 1 && a.Alts[0] == s {
// there are no different alternatives
return nil
}
// make a new AltWords with the current spelling removed
alts := make([]string, len(a.Alts));
i := 0;
for _, w := range a.Alts {
if w != s {
alts[i] = w;
i++;
}
}
return &AltWords{a.Canon, alts[0:i]};
}
// ----------------------------------------------------------------------------
// Indexer
// Adjust these flags as seems best.
const excludeMainPackages = false
const excludeTestFiles = false
type IndexResult struct {
Decls RunList; // package-level declarations (with snippets)
Others RunList; // all other occurrences
}
// An Indexer maintains the data structures and provides the machinery
// for indexing .go files under a file tree. It implements the path.Visitor
// interface for walking file trees, and the ast.Visitor interface for
// walking Go ASTs.
type Indexer struct {
words map[string]*IndexResult; // RunLists of Spots
snippets vector.Vector; // vector of *Snippets, indexed by snippet indices
file *File; // current file
decl ast.Decl; // current decl
nspots int; // number of spots encountered
}
func (x *Indexer) addSnippet(s *Snippet) int {
index := x.snippets.Len();
x.snippets.Push(s);
return index;
}
func (x *Indexer) visitComment(c *ast.CommentGroup) {
if c != nil {
ast.Walk(x, c)
}
}
func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) {
if id != nil {
lists, found := x.words[id.Value];
if !found {
lists = new(IndexResult);
x.words[id.Value] = lists;
}
if kind == Use || x.decl == nil {
// not a declaration or no snippet required
info := makeSpotInfo(kind, id.Pos().Line, false);
lists.Others.Push(Spot{x.file, info});
} else {
// a declaration with snippet
index := x.addSnippet(NewSnippet(x.decl, id));
info := makeSpotInfo(kind, index, true);
lists.Decls.Push(Spot{x.file, info});
}
x.nspots++;
}
}
func (x *Indexer) visitSpec(spec ast.Spec, isVarDecl bool) {
switch n := spec.(type) {
case *ast.ImportSpec:
x.visitComment(n.Doc);
x.visitIdent(ImportDecl, n.Name);
for _, s := range n.Path {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.ValueSpec:
x.visitComment(n.Doc);
kind := ConstDecl;
if isVarDecl {
kind = VarDecl
}
for _, n := range n.Names {
x.visitIdent(kind, n)
}
ast.Walk(x, n.Type);
for _, v := range n.Values {
ast.Walk(x, v)
}
x.visitComment(n.Comment);
case *ast.TypeSpec:
x.visitComment(n.Doc);
x.visitIdent(TypeDecl, n.Name);
ast.Walk(x, n.Type);
x.visitComment(n.Comment);
}
}
func (x *Indexer) Visit(node interface{}) bool {
// TODO(gri): methods in interface types are categorized as VarDecl
switch n := node.(type) {
case *ast.Ident:
x.visitIdent(Use, n)
case *ast.Field:
x.decl = nil; // no snippets for fields
x.visitComment(n.Doc);
for _, m := range n.Names {
x.visitIdent(VarDecl, m)
}
ast.Walk(x, n.Type);
for _, s := range n.Tag {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.DeclStmt:
if decl, ok := n.Decl.(*ast.GenDecl); ok {
// local declarations can only be *ast.GenDecls
x.decl = nil; // no snippets for local declarations
x.visitComment(decl.Doc);
for _, s := range decl.Specs {
x.visitSpec(s, decl.Tok == token.VAR)
}
} else {
// handle error case gracefully
ast.Walk(x, n.Decl)
}
case *ast.GenDecl:
x.decl = n;
x.visitComment(n.Doc);
for _, s := range n.Specs {
x.visitSpec(s, n.Tok == token.VAR)
}
case *ast.FuncDecl:
x.visitComment(n.Doc);
kind := FuncDecl;
if n.Recv != nil {
kind = MethodDecl;
ast.Walk(x, n.Recv);
}
x.decl = n;
x.visitIdent(kind, n.Name);
ast.Walk(x, n.Type);
if n.Body != nil {
ast.Walk(x, n.Body)
}
case *ast.File:
x.visitComment(n.Doc);
x.decl = nil;
x.visitIdent(PackageClause, n.Name);
for _, d := range n.Decls {
ast.Walk(x, d)
}
// don't visit package level comments for now
// to avoid duplicate visiting from individual
// nodes
default:
return true
}
return false;
}
func (x *Indexer) VisitDir(path string, d *os.Dir) bool {
return true
}
func (x *Indexer) VisitFile(path string, d *os.Dir) {
if !isGoFile(d) {
return
}
if excludeTestFiles && (!isPkgFile(d) || strings.HasPrefix(path, "test/")) {
return
}
if excludeMainPackages && pkgName(path) == "main" {
return
}
file, err := parser.ParseFile(path, nil, parser.ParseComments);
if err != nil {
return // ignore files with (parse) errors
}
dir, _ := pathutil.Split(path);
pak := Pak{dir, file.Name.Value};
x.file = &File{path, pak};
ast.Walk(x, file);
}
// ----------------------------------------------------------------------------
// Index
type LookupResult struct {
Decls HitList; // package-level declarations (with snippets)
Others HitList; // all other occurrences
}
type Index struct {
words map[string]*LookupResult; // maps words to hit lists
alts map[string]*AltWords; // maps canonical(words) to lists of alternative spellings
snippets []*Snippet; // all snippets, indexed by snippet index
nspots int; // number of spots indexed (a measure of the index size)
}
func canonical(w string) string { return strings.ToLower(w) }
// NewIndex creates a new index for the file tree rooted at root.
func NewIndex(root string) *Index {
var x Indexer;
// initialize Indexer
x.words = make(map[string]*IndexResult);
// collect all Spots
pathutil.Walk(root, &x, nil);
// for each word, reduce the RunLists into a LookupResult;
// also collect the word with its canonical spelling in a
// word list for later computation of alternative spellings
words := make(map[string]*LookupResult);
var wlist RunList;
for w, h := range x.words {
decls := reduce(&h.Decls);
others := reduce(&h.Others);
words[w] = &LookupResult{
Decls: decls,
Others: others,
};
wlist.Push(&wordPair{canonical(w), w});
}
// reduce the word list {canonical(w), w} into
// a list of AltWords runs {canonical(w), {w}}
alist := wlist.reduce(lessWordPair, newAltWords);
// convert alist into a map of alternative spellings
alts := make(map[string]*AltWords);
for i := 0; i < alist.Len(); i++ {
a := alist.At(i).(*AltWords);
alts[a.Canon] = a;
}
// convert snippet vector into a list
snippets := make([]*Snippet, x.snippets.Len());
for i := 0; i < x.snippets.Len(); i++ {
snippets[i] = x.snippets.At(i).(*Snippet)
}
return &Index{words, alts, snippets, x.nspots};
}
// Size returns the number of different words and
// spots indexed as a measure for the index size.
func (x *Index) Size() (nwords int, nspots int) {
return len(x.words), x.nspots
}
func (x *Index) LookupWord(w string) (match *LookupResult, alt *AltWords) {
match, _ = x.words[w];
alt, _ = x.alts[canonical(w)];
// remove current spelling from alternatives
// (if there is no match, the alternatives do
// not contain the current spelling)
if match != nil && alt != nil {
alt = alt.filter(w)
}
return;
}
func isIdentifier(s string) bool {
var S scanner.Scanner;
S.Init("", strings.Bytes(s), nil, 0);
if _, tok, _ := S.Scan(); tok == token.IDENT {
_, tok, _ := S.Scan();
return tok == token.EOF;
}
return false;
}
// For a given query, which is either a single identifier or a qualified
// identifier, Lookup returns a LookupResult, and a list of alternative
// spellings, if any. If the query syntax is wrong, illegal is set.
func (x *Index) Lookup(query string) (match *LookupResult, alt *AltWords, illegal bool) {
ss := strings.Split(query, ".", 0);
// check query syntax
for _, s := range ss {
if !isIdentifier(s) {
illegal = true;
return;
}
}
switch len(ss) {
case 1:
match, alt = x.LookupWord(ss[0])
case 2:
pakname := ss[0];
match, alt = x.LookupWord(ss[1]);
if match != nil {
// found a match - filter by package name
decls := match.Decls.filter(pakname);
others := match.Others.filter(pakname);
match = &LookupResult{decls, others};
}
default:
illegal = true
}
return;
}
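// Added sketch: typical construction and query flow, assuming a Go source
// tree rooted at "src" (the path is hypothetical).
//
//	index := NewIndex("src");
//	match, alt, illegal := index.Lookup("fmt.Printf");
//	// illegal reports a malformed query; for a qualified identifier the
//	// hit lists in match are filtered to package "fmt", and alt lists
//	// alternative spellings of "Printf" (e.g. "printf"), if any.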
func (x *Index) Snippet(i int) *Snippet {
// handle illegal snippet indices gracefully
if 0 <= i && i < len(x.snippets) {
return x.snippets[i]
}
return nil;
}
| {
files[k] = h.At(i).(*FileRun);
k++;
} | conditional_block |
index.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the infrastructure to create an
// (identifier) index for a set of Go files.
//
// Basic indexing algorithm:
// - traverse all .go files of the file tree specified by root
// - for each word (identifier) encountered, collect all occurrences (spots)
// into a list; this produces a list of spots for each word
// - reduce the lists: from a list of spots to a list of FileRuns,
// and from a list of FileRuns into a list of PakRuns
// - make a HitList from the PakRuns
//
// Details:
// - keep two lists per word: one containing package-level declarations
// that have snippets, and one containing all other spots
// - keep the snippets in a separate table indexed by snippet index
// and store the snippet index in place of the line number in a SpotInfo
// (the line number for spots with snippets is stored in the snippet)
// - at the end, create lists of alternative spellings for a given
// word
package main
import (
"container/vector";
"go/ast";
"go/parser";
"go/token";
"go/scanner";
"os";
pathutil "path";
"sort";
"strings";
)
// ----------------------------------------------------------------------------
// RunList
// A RunList is a vector of entries that can be sorted according to some
// criteria. A RunList may be compressed by grouping "runs" of entries
// which are equal (according to the sort criteria) into a new RunList of
// runs. For instance, a RunList containing pairs (x, y) may be compressed
// into a RunList containing pair runs (x, {y}) where each run consists of
// a list of y's with the same x.
type RunList struct {
vector.Vector;
less func(x, y interface{}) bool;
}
func (h *RunList) Less(i, j int) bool { return h.less(h.At(i), h.At(j)) }
func (h *RunList) sort(less func(x, y interface{}) bool) {
h.less = less;
sort.Sort(h);
}
// Compress entries which are the same according to a sort criteria
// (specified by less) into "runs".
func (h *RunList) reduce(less func(x, y interface{}) bool, newRun func(h *RunList, i, j int) interface{}) *RunList {
// create runs of entries with equal values
h.sort(less);
// for each run, make a new run object and collect them in a new RunList
var hh RunList;
i := 0;
for j := 0; j < h.Len(); j++ {
if less(h.At(i), h.At(j)) {
hh.Push(newRun(h, i, j));
i = j; // start a new run
}
}
// add final run, if any
if i < h.Len() {
hh.Push(newRun(h, i, h.Len()))
}
return &hh;
}
// ----------------------------------------------------------------------------
// SpotInfo
// A SpotInfo value describes a particular identifier spot in a given file;
// it encodes three values: the SpotKind (declaration or use), a line or
// snippet index "lori", and whether it's a line or index.
//
// The following encoding is used:
//
// bits 32 4 1 0
// value [lori|kind|isIndex]
//
type SpotInfo uint32
// SpotKind describes whether an identifier is declared (and what kind of
// declaration) or used.
type SpotKind uint32
const (
PackageClause SpotKind = iota;
ImportDecl;
ConstDecl;
TypeDecl;
VarDecl;
FuncDecl;
MethodDecl;
Use;
nKinds;
)
func init() {
// sanity check: if nKinds is too large, the SpotInfo
// accessor functions may need to be updated
if nKinds > 8 {
panic()
}
}
// makeSpotInfo makes a SpotInfo.
func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
// encode lori: bits [4..32)
x := SpotInfo(lori) << 4;
if int(x>>4) != lori {
// lori value doesn't fit - since snippet indices are
// almost certainly smaller than 1<<28, this can
// only happen for line numbers; give it no line number (= 0)
x = 0
}
// encode kind: bits [1..4)
x |= SpotInfo(kind) << 1;
// encode isIndex: bit 0
if isIndex {
x |= 1
}
return x;
}
func (x SpotInfo) Kind() SpotKind |
func (x SpotInfo) Lori() int { return int(x >> 4) }
func (x SpotInfo) IsIndex() bool { return x&1 != 0 }
// ----------------------------------------------------------------------------
// KindRun
// Debugging support. Disable to see multiple entries per line.
const removeDuplicates = true
// A KindRun is a run of SpotInfos of the same kind in a given file.
type KindRun struct {
Kind SpotKind;
Infos []SpotInfo;
}
// KindRuns are sorted by line number or index. Since the isIndex bit
// is always the same for all infos in one list we can compare lori's.
func (f *KindRun) Len() int { return len(f.Infos) }
func (f *KindRun) Less(i, j int) bool { return f.Infos[i].Lori() < f.Infos[j].Lori() }
func (f *KindRun) Swap(i, j int) { f.Infos[i], f.Infos[j] = f.Infos[j], f.Infos[i] }
// FileRun contents are sorted by Kind for the reduction into KindRuns.
func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() }
// newKindRun allocates a new KindRun from the SpotInfo run [i, j) in h.
func newKindRun(h *RunList, i, j int) interface{} {
kind := h.At(i).(SpotInfo).Kind();
infos := make([]SpotInfo, j-i);
k := 0;
for ; i < j; i++ {
infos[k] = h.At(i).(SpotInfo);
k++;
}
run := &KindRun{kind, infos};
// Spots were sorted by file and kind to create this run.
// Within this run, sort them by line number or index.
sort.Sort(run);
if removeDuplicates {
// Since both the lori and kind fields must be
// the same for duplicates, and since the isIndex
// bit is always the same for all infos in one
// list we can simply compare the entire info.
k := 0;
var prev SpotInfo;
for i, x := range infos {
if x != prev || i == 0 {
infos[k] = x;
k++;
prev = x;
}
}
run.Infos = infos[0:k];
}
return run;
}
// ----------------------------------------------------------------------------
// FileRun
// A Pak describes a Go package.
type Pak struct {
Path string; // path of directory containing the package
Name string; // package name as declared by package clause
}
// Paks are sorted by name (primary key) and by import path (secondary key).
func (p *Pak) less(q *Pak) bool {
return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path
}
// A File describes a Go file.
type File struct {
Path string; // complete file name
Pak Pak; // the package to which the file belongs
}
// A Spot describes a single occurrence of a word.
type Spot struct {
File *File;
Info SpotInfo;
}
// A FileRun is a list of KindRuns belonging to the same file.
type FileRun struct {
File *File;
Groups []*KindRun;
}
// Spots are sorted by path for the reduction into FileRuns.
func lessSpot(x, y interface{}) bool { return x.(Spot).File.Path < y.(Spot).File.Path }
// newFileRun allocates a new FileRun from the Spot run [i, j) in h.
func newFileRun(h0 *RunList, i, j int) interface{} {
file := h0.At(i).(Spot).File;
// reduce the list of Spots into a list of KindRuns
var h1 RunList;
h1.Vector.Init(j - i);
k := 0;
for ; i < j; i++ {
h1.Set(k, h0.At(i).(Spot).Info);
k++;
}
h2 := h1.reduce(lessKind, newKindRun);
// create the FileRun
groups := make([]*KindRun, h2.Len());
for i := 0; i < h2.Len(); i++ {
groups[i] = h2.At(i).(*KindRun)
}
return &FileRun{file, groups};
}
// ----------------------------------------------------------------------------
// PakRun
// A PakRun describes a run of *FileRuns of a package.
type PakRun struct {
Pak Pak;
Files []*FileRun;
}
// Sorting support for files within a PakRun.
func (p *PakRun) Len() int { return len(p.Files) }
func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Path < p.Files[j].File.Path }
func (p *PakRun) Swap(i, j int) { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] }
// FileRuns are sorted by package for the reduction into PakRuns.
func lessFileRun(x, y interface{}) bool {
return x.(*FileRun).File.Pak.less(&y.(*FileRun).File.Pak)
}
// newPakRun allocates a new PakRun from the *FileRun run [i, j) in h.
func newPakRun(h *RunList, i, j int) interface{} {
pak := h.At(i).(*FileRun).File.Pak;
files := make([]*FileRun, j-i);
k := 0;
for ; i < j; i++ {
files[k] = h.At(i).(*FileRun);
k++;
}
run := &PakRun{pak, files};
sort.Sort(run); // files were sorted by package; sort them by file now
return run;
}
// ----------------------------------------------------------------------------
// HitList
// A HitList describes a list of PakRuns.
type HitList []*PakRun
// PakRuns are sorted by package.
func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(&y.(*PakRun).Pak) }
func reduce(h0 *RunList) HitList {
// reduce a list of Spots into a list of FileRuns
h1 := h0.reduce(lessSpot, newFileRun);
// reduce a list of FileRuns into a list of PakRuns
h2 := h1.reduce(lessFileRun, newPakRun);
// sort the list of PakRuns by package
h2.sort(lessPakRun);
// create a HitList
h := make(HitList, h2.Len());
for i := 0; i < h2.Len(); i++ {
h[i] = h2.At(i).(*PakRun)
}
return h;
}
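// Added sketch: the Spot -> FileRun -> PakRun pipeline in one call, given
// an IndexResult (here named lists, a hypothetical variable) collected by
// the Indexer defined below.
//
//	hits := reduce(&lists.Others);
//	// hits is a HitList sorted by package; hits.filter("fmt") keeps only
//	// the PakRuns whose package name is "fmt".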
func (h HitList) filter(pakname string) HitList {
// determine number of matching packages (most of the time just one)
n := 0;
for _, p := range h {
if p.Pak.Name == pakname {
n++
}
}
// create filtered HitList
hh := make(HitList, n);
i := 0;
for _, p := range h {
if p.Pak.Name == pakname {
hh[i] = p;
i++;
}
}
return hh;
}
// ----------------------------------------------------------------------------
// AltWords
type wordPair struct {
canon string; // canonical word spelling (all lowercase)
alt string; // alternative spelling
}
// An AltWords describes a list of alternative spellings for a
// canonical (all lowercase) spelling of a word.
type AltWords struct {
Canon string; // canonical word spelling (all lowercase)
Alts []string; // alternative spellings for the same word
}
// wordPairs are sorted by their canonical spelling.
func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon }
// newAltWords allocates a new AltWords from the *wordPair run [i, j) in h.
func newAltWords(h *RunList, i, j int) interface{} {
canon := h.At(i).(*wordPair).canon;
alts := make([]string, j-i);
k := 0;
for ; i < j; i++ {
alts[k] = h.At(i).(*wordPair).alt;
k++;
}
return &AltWords{canon, alts};
}
func (a *AltWords) filter(s string) *AltWords {
if len(a.Alts) == 1 && a.Alts[0] == s {
// there are no different alternatives
return nil
}
// make a new AltWords with the current spelling removed
alts := make([]string, len(a.Alts));
i := 0;
for _, w := range a.Alts {
if w != s {
alts[i] = w;
i++;
}
}
return &AltWords{a.Canon, alts[0:i]};
}
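// Added sketch: filtering the current spelling out of an AltWords list.
//
//	a := &AltWords{"foo", []string{"Foo", "foo", "FOO"}};
//	b := a.filter("foo");
//	// b.Alts == ["Foo", "FOO"]; filter returns nil only when the single
//	// remaining alternative is exactly the spelling being removed.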
// ----------------------------------------------------------------------------
// Indexer
// Adjust these flags as seems best.
const excludeMainPackages = false
const excludeTestFiles = false
type IndexResult struct {
Decls RunList; // package-level declarations (with snippets)
Others RunList; // all other occurrences
}
// An Indexer maintains the data structures and provides the machinery
// for indexing .go files under a file tree. It implements the path.Visitor
// interface for walking file trees, and the ast.Visitor interface for
// walking Go ASTs.
type Indexer struct {
words map[string]*IndexResult; // RunLists of Spots
snippets vector.Vector; // vector of *Snippets, indexed by snippet indices
file *File; // current file
decl ast.Decl; // current decl
nspots int; // number of spots encountered
}
func (x *Indexer) addSnippet(s *Snippet) int {
index := x.snippets.Len();
x.snippets.Push(s);
return index;
}
func (x *Indexer) visitComment(c *ast.CommentGroup) {
if c != nil {
ast.Walk(x, c)
}
}
func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) {
if id != nil {
lists, found := x.words[id.Value];
if !found {
lists = new(IndexResult);
x.words[id.Value] = lists;
}
if kind == Use || x.decl == nil {
// not a declaration or no snippet required
info := makeSpotInfo(kind, id.Pos().Line, false);
lists.Others.Push(Spot{x.file, info});
} else {
// a declaration with snippet
index := x.addSnippet(NewSnippet(x.decl, id));
info := makeSpotInfo(kind, index, true);
lists.Decls.Push(Spot{x.file, info});
}
x.nspots++;
}
}
func (x *Indexer) visitSpec(spec ast.Spec, isVarDecl bool) {
switch n := spec.(type) {
case *ast.ImportSpec:
x.visitComment(n.Doc);
x.visitIdent(ImportDecl, n.Name);
for _, s := range n.Path {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.ValueSpec:
x.visitComment(n.Doc);
kind := ConstDecl;
if isVarDecl {
kind = VarDecl
}
for _, n := range n.Names {
x.visitIdent(kind, n)
}
ast.Walk(x, n.Type);
for _, v := range n.Values {
ast.Walk(x, v)
}
x.visitComment(n.Comment);
case *ast.TypeSpec:
x.visitComment(n.Doc);
x.visitIdent(TypeDecl, n.Name);
ast.Walk(x, n.Type);
x.visitComment(n.Comment);
}
}
func (x *Indexer) Visit(node interface{}) bool {
// TODO(gri): methods in interface types are categorized as VarDecl
switch n := node.(type) {
case *ast.Ident:
x.visitIdent(Use, n)
case *ast.Field:
x.decl = nil; // no snippets for fields
x.visitComment(n.Doc);
for _, m := range n.Names {
x.visitIdent(VarDecl, m)
}
ast.Walk(x, n.Type);
for _, s := range n.Tag {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.DeclStmt:
if decl, ok := n.Decl.(*ast.GenDecl); ok {
// local declarations can only be *ast.GenDecls
x.decl = nil; // no snippets for local declarations
x.visitComment(decl.Doc);
for _, s := range decl.Specs {
x.visitSpec(s, decl.Tok == token.VAR)
}
} else {
// handle error case gracefully
ast.Walk(x, n.Decl)
}
case *ast.GenDecl:
x.decl = n;
x.visitComment(n.Doc);
for _, s := range n.Specs {
x.visitSpec(s, n.Tok == token.VAR)
}
case *ast.FuncDecl:
x.visitComment(n.Doc);
kind := FuncDecl;
if n.Recv != nil {
kind = MethodDecl;
ast.Walk(x, n.Recv);
}
x.decl = n;
x.visitIdent(kind, n.Name);
ast.Walk(x, n.Type);
if n.Body != nil {
ast.Walk(x, n.Body)
}
case *ast.File:
x.visitComment(n.Doc);
x.decl = nil;
x.visitIdent(PackageClause, n.Name);
for _, d := range n.Decls {
ast.Walk(x, d)
}
// don't visit package level comments for now
// to avoid duplicate visiting from individual
// nodes
default:
return true
}
return false;
}
func (x *Indexer) VisitDir(path string, d *os.Dir) bool {
return true
}
func (x *Indexer) VisitFile(path string, d *os.Dir) {
if !isGoFile(d) {
return
}
if excludeTestFiles && (!isPkgFile(d) || strings.HasPrefix(path, "test/")) {
return
}
if excludeMainPackages && pkgName(path) == "main" {
return
}
file, err := parser.ParseFile(path, nil, parser.ParseComments);
if err != nil {
return // ignore files with (parse) errors
}
dir, _ := pathutil.Split(path);
pak := Pak{dir, file.Name.Value};
x.file = &File{path, pak};
ast.Walk(x, file);
}
// ----------------------------------------------------------------------------
// Index
type LookupResult struct {
Decls HitList; // package-level declarations (with snippets)
Others HitList; // all other occurrences
}
type Index struct {
words map[string]*LookupResult; // maps words to hit lists
alts map[string]*AltWords; // maps canonical(words) to lists of alternative spellings
snippets []*Snippet; // all snippets, indexed by snippet index
nspots int; // number of spots indexed (a measure of the index size)
}
func canonical(w string) string { return strings.ToLower(w) }
// NewIndex creates a new index for the file tree rooted at root.
func NewIndex(root string) *Index {
var x Indexer;
// initialize Indexer
x.words = make(map[string]*IndexResult);
// collect all Spots
pathutil.Walk(root, &x, nil);
// for each word, reduce the RunLists into a LookupResult;
// also collect the word with its canonical spelling in a
// word list for later computation of alternative spellings
words := make(map[string]*LookupResult);
var wlist RunList;
for w, h := range x.words {
decls := reduce(&h.Decls);
others := reduce(&h.Others);
words[w] = &LookupResult{
Decls: decls,
Others: others,
};
wlist.Push(&wordPair{canonical(w), w});
}
// reduce the word list {canonical(w), w} into
// a list of AltWords runs {canonical(w), {w}}
alist := wlist.reduce(lessWordPair, newAltWords);
// convert alist into a map of alternative spellings
alts := make(map[string]*AltWords);
for i := 0; i < alist.Len(); i++ {
a := alist.At(i).(*AltWords);
alts[a.Canon] = a;
}
// convert snippet vector into a list
snippets := make([]*Snippet, x.snippets.Len());
for i := 0; i < x.snippets.Len(); i++ {
snippets[i] = x.snippets.At(i).(*Snippet)
}
return &Index{words, alts, snippets, x.nspots};
}
// Size returns the number of different words and
// spots indexed as a measure for the index size.
func (x *Index) Size() (nwords int, nspots int) {
return len(x.words), x.nspots
}
func (x *Index) LookupWord(w string) (match *LookupResult, alt *AltWords) {
match, _ = x.words[w];
alt, _ = x.alts[canonical(w)];
// remove current spelling from alternatives
// (if there is no match, the alternatives do
// not contain the current spelling)
if match != nil && alt != nil {
alt = alt.filter(w)
}
return;
}
func isIdentifier(s string) bool {
var S scanner.Scanner;
S.Init("", strings.Bytes(s), nil, 0);
if _, tok, _ := S.Scan(); tok == token.IDENT {
_, tok, _ := S.Scan();
return tok == token.EOF;
}
return false;
}
// For a given query, which is either a single identifier or a qualified
// identifier, Lookup returns a LookupResult, and a list of alternative
// spellings, if any. If the query syntax is wrong, illegal is set.
func (x *Index) Lookup(query string) (match *LookupResult, alt *AltWords, illegal bool) {
ss := strings.Split(query, ".", 0);
// check query syntax
for _, s := range ss {
if !isIdentifier(s) {
illegal = true;
return;
}
}
switch len(ss) {
case 1:
match, alt = x.LookupWord(ss[0])
case 2:
pakname := ss[0];
match, alt = x.LookupWord(ss[1]);
if match != nil {
// found a match - filter by package name
decls := match.Decls.filter(pakname);
others := match.Others.filter(pakname);
match = &LookupResult{decls, others};
}
default:
illegal = true
}
return;
}
func (x *Index) Snippet(i int) *Snippet {
// handle illegal snippet indices gracefully
if 0 <= i && i < len(x.snippets) {
return x.snippets[i]
}
return nil;
}
| { return SpotKind(x >> 1 & 7) } | identifier_body |
index.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the infrastructure to create an
// (identifier) index for a set of Go files.
//
// Basic indexing algorithm:
// - traverse all .go files of the file tree specified by root
// - for each word (identifier) encountered, collect all occurrences (spots)
// into a list; this produces a list of spots for each word
// - reduce the lists: from a list of spots to a list of FileRuns,
// and from a list of FileRuns into a list of PakRuns
// - make a HitList from the PakRuns
//
// Details:
// - keep two lists per word: one containing package-level declarations
// that have snippets, and one containing all other spots
// - keep the snippets in a separate table indexed by snippet index
// and store the snippet index in place of the line number in a SpotInfo
// (the line number for spots with snippets is stored in the snippet)
// - at the end, create lists of alternative spellings for a given
// word
package main
import (
"container/vector";
"go/ast";
"go/parser";
"go/token";
"go/scanner";
"os";
pathutil "path";
"sort";
"strings"; |
// ----------------------------------------------------------------------------
// RunList
// A RunList is a vector of entries that can be sorted according to some
// criteria. A RunList may be compressed by grouping "runs" of entries
// which are equal (according to the sort criteria) into a new RunList of
// runs. For instance, a RunList containing pairs (x, y) may be compressed
// into a RunList containing pair runs (x, {y}) where each run consists of
// a list of y's with the same x.
type RunList struct {
vector.Vector;
less func(x, y interface{}) bool;
}
func (h *RunList) Less(i, j int) bool { return h.less(h.At(i), h.At(j)) }
func (h *RunList) sort(less func(x, y interface{}) bool) {
h.less = less;
sort.Sort(h);
}
// Compress entries which are the same according to a sort criteria
// (specified by less) into "runs".
func (h *RunList) reduce(less func(x, y interface{}) bool, newRun func(h *RunList, i, j int) interface{}) *RunList {
// create runs of entries with equal values
h.sort(less);
// for each run, make a new run object and collect them in a new RunList
var hh RunList;
i := 0;
for j := 0; j < h.Len(); j++ {
if less(h.At(i), h.At(j)) {
hh.Push(newRun(h, i, j));
i = j; // start a new run
}
}
// add final run, if any
if i < h.Len() {
hh.Push(newRun(h, i, h.Len()))
}
return &hh;
}
// ----------------------------------------------------------------------------
// SpotInfo
// A SpotInfo value describes a particular identifier spot in a given file;
// it encodes three values: the SpotKind (declaration or use), a line or
// snippet index "lori", and whether it's a line or index.
//
// The following encoding is used:
//
// bits 32 4 1 0
// value [lori|kind|isIndex]
//
type SpotInfo uint32
// SpotKind describes whether an identifier is declared (and what kind of
// declaration) or used.
type SpotKind uint32
const (
PackageClause SpotKind = iota;
ImportDecl;
ConstDecl;
TypeDecl;
VarDecl;
FuncDecl;
MethodDecl;
Use;
nKinds;
)
func init() {
// sanity check: if nKinds is too large, the SpotInfo
// accessor functions may need to be updated
if nKinds > 8 {
panic()
}
}
// makeSpotInfo makes a SpotInfo.
func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
// encode lori: bits [4..32)
x := SpotInfo(lori) << 4;
if int(x>>4) != lori {
// lori value doesn't fit - since snippet indices are
// almost certainly smaller than 1<<28, this can
// only happen for line numbers; give it no line number (= 0)
x = 0
}
// encode kind: bits [1..4)
x |= SpotInfo(kind) << 1;
// encode isIndex: bit 0
if isIndex {
x |= 1
}
return x;
}
func (x SpotInfo) Kind() SpotKind { return SpotKind(x >> 1 & 7) }
func (x SpotInfo) Lori() int { return int(x >> 4) }
func (x SpotInfo) IsIndex() bool { return x&1 != 0 }
// ----------------------------------------------------------------------------
// KindRun
// Debugging support. Disable to see multiple entries per line.
const removeDuplicates = true
// A KindRun is a run of SpotInfos of the same kind in a given file.
type KindRun struct {
Kind SpotKind;
Infos []SpotInfo;
}
// KindRuns are sorted by line number or index. Since the isIndex bit
// is always the same for all infos in one list we can compare lori's.
func (f *KindRun) Len() int { return len(f.Infos) }
func (f *KindRun) Less(i, j int) bool { return f.Infos[i].Lori() < f.Infos[j].Lori() }
func (f *KindRun) Swap(i, j int) { f.Infos[i], f.Infos[j] = f.Infos[j], f.Infos[i] }
// FileRun contents are sorted by Kind for the reduction into KindRuns.
func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() }
// newKindRun allocates a new KindRun from the SpotInfo run [i, j) in h.
func newKindRun(h *RunList, i, j int) interface{} {
kind := h.At(i).(SpotInfo).Kind();
infos := make([]SpotInfo, j-i);
k := 0;
for ; i < j; i++ {
infos[k] = h.At(i).(SpotInfo);
k++;
}
run := &KindRun{kind, infos};
// Spots were sorted by file and kind to create this run.
// Within this run, sort them by line number or index.
sort.Sort(run);
if removeDuplicates {
// Since both the lori and kind fields must be
// the same for duplicates, and since the isIndex
// bit is always the same for all infos in one
// list we can simply compare the entire info.
k := 0;
var prev SpotInfo;
for i, x := range infos {
if x != prev || i == 0 {
infos[k] = x;
k++;
prev = x;
}
}
run.Infos = infos[0:k];
}
return run;
}
// ----------------------------------------------------------------------------
// FileRun
// A Pak describes a Go package.
type Pak struct {
Path string; // path of directory containing the package
Name string; // package name as declared by package clause
}
// Paks are sorted by name (primary key) and by import path (secondary key).
func (p *Pak) less(q *Pak) bool {
return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path
}
// A File describes a Go file.
type File struct {
Path string; // complete file name
Pak Pak; // the package to which the file belongs
}
// A Spot describes a single occurence of a word.
type Spot struct {
File *File;
Info SpotInfo;
}
// A FileRun is a list of KindRuns belonging to the same file.
type FileRun struct {
File *File;
Groups []*KindRun;
}
// Spots are sorted by path for the reduction into FileRuns.
func lessSpot(x, y interface{}) bool { return x.(Spot).File.Path < y.(Spot).File.Path }
// newFileRun allocates a new FileRun from the Spot run [i, j) in h.
func newFileRun(h0 *RunList, i, j int) interface{} {
file := h0.At(i).(Spot).File;
// reduce the list of Spots into a list of KindRuns
var h1 RunList;
h1.Vector.Init(j - i);
k := 0;
for ; i < j; i++ {
h1.Set(k, h0.At(i).(Spot).Info);
k++;
}
h2 := h1.reduce(lessKind, newKindRun);
// create the FileRun
groups := make([]*KindRun, h2.Len());
for i := 0; i < h2.Len(); i++ {
groups[i] = h2.At(i).(*KindRun)
}
return &FileRun{file, groups};
}
// ----------------------------------------------------------------------------
// PakRun
// A PakRun describes a run of *FileRuns of a package.
type PakRun struct {
Pak Pak;
Files []*FileRun;
}
// Sorting support for files within a PakRun.
func (p *PakRun) Len() int { return len(p.Files) }
func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Path < p.Files[j].File.Path }
func (p *PakRun) Swap(i, j int) { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] }
// FileRuns are sorted by package for the reduction into PakRuns.
func lessFileRun(x, y interface{}) bool {
return x.(*FileRun).File.Pak.less(&y.(*FileRun).File.Pak)
}
// newPakRun allocates a new PakRun from the *FileRun run [i, j) in h.
func newPakRun(h *RunList, i, j int) interface{} {
pak := h.At(i).(*FileRun).File.Pak;
files := make([]*FileRun, j-i);
k := 0;
for ; i < j; i++ {
files[k] = h.At(i).(*FileRun);
k++;
}
run := &PakRun{pak, files};
sort.Sort(run); // files were sorted by package; sort them by file now
return run;
}
// ----------------------------------------------------------------------------
// HitList
// A HitList describes a list of PakRuns.
type HitList []*PakRun
// PakRuns are sorted by package.
func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(&y.(*PakRun).Pak) }
func reduce(h0 *RunList) HitList {
// reduce a list of Spots into a list of FileRuns
h1 := h0.reduce(lessSpot, newFileRun);
// reduce a list of FileRuns into a list of PakRuns
h2 := h1.reduce(lessFileRun, newPakRun);
// sort the list of PakRuns by package
h2.sort(lessPakRun);
// create a HitList
h := make(HitList, h2.Len());
for i := 0; i < h2.Len(); i++ {
h[i] = h2.At(i).(*PakRun)
}
return h;
}
func (h HitList) filter(pakname string) HitList {
// determine number of matching packages (most of the time just one)
n := 0;
for _, p := range h {
if p.Pak.Name == pakname {
n++
}
}
// create filtered HitList
hh := make(HitList, n);
i := 0;
for _, p := range h {
if p.Pak.Name == pakname {
hh[i] = p;
i++;
}
}
return hh;
}
// ----------------------------------------------------------------------------
// AltWords
type wordPair struct {
canon string; // canonical word spelling (all lowercase)
alt string; // alternative spelling
}
// An AltWords describes a list of alternative spellings for a
// canonical (all lowercase) spelling of a word.
type AltWords struct {
Canon string; // canonical word spelling (all lowercase)
Alts []string; // alternative spelling for the same word
}
// wordPairs are sorted by their canonical spelling.
func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon }
// newAltWords allocates a new AltWords from the *wordPair run [i, j) in h.
func newAltWords(h *RunList, i, j int) interface{} {
canon := h.At(i).(*wordPair).canon;
alts := make([]string, j-i);
k := 0;
for ; i < j; i++ {
alts[k] = h.At(i).(*wordPair).alt;
k++;
}
return &AltWords{canon, alts};
}
func (a *AltWords) filter(s string) *AltWords {
if len(a.Alts) == 1 && a.Alts[0] == s {
// there are no different alternatives
return nil
}
// make a new AltWords with the current spelling removed
alts := make([]string, len(a.Alts));
i := 0;
for _, w := range a.Alts {
if w != s {
alts[i] = w;
i++;
}
}
return &AltWords{a.Canon, alts[0:i]};
}
// ----------------------------------------------------------------------------
// Indexer
// Adjust these flags as seems best.
const excludeMainPackages = false
const excludeTestFiles = false
type IndexResult struct {
Decls RunList; // package-level declarations (with snippets)
Others RunList; // all other occurences
}
// An Indexer maintains the data structures and provides the machinery
// for indexing .go files under a file tree. It implements the path.Visitor
// interface for walking file trees, and the ast.Visitor interface for
// walking Go ASTs.
type Indexer struct {
words map[string]*IndexResult; // RunLists of Spots
snippets vector.Vector; // vector of *Snippets, indexed by snippet indices
file *File; // current file
decl ast.Decl; // current decl
nspots int; // number of spots encountered
}
func (x *Indexer) addSnippet(s *Snippet) int {
index := x.snippets.Len();
x.snippets.Push(s);
return index;
}
func (x *Indexer) visitComment(c *ast.CommentGroup) {
if c != nil {
ast.Walk(x, c)
}
}
func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) {
if id != nil {
lists, found := x.words[id.Value];
if !found {
lists = new(IndexResult);
x.words[id.Value] = lists;
}
if kind == Use || x.decl == nil {
// not a declaration or no snippet required
info := makeSpotInfo(kind, id.Pos().Line, false);
lists.Others.Push(Spot{x.file, info});
} else {
// a declaration with snippet
index := x.addSnippet(NewSnippet(x.decl, id));
info := makeSpotInfo(kind, index, true);
lists.Decls.Push(Spot{x.file, info});
}
x.nspots++;
}
}
func (x *Indexer) visitSpec(spec ast.Spec, isVarDecl bool) {
switch n := spec.(type) {
case *ast.ImportSpec:
x.visitComment(n.Doc);
x.visitIdent(ImportDecl, n.Name);
for _, s := range n.Path {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.ValueSpec:
x.visitComment(n.Doc);
kind := ConstDecl;
if isVarDecl {
kind = VarDecl
}
for _, n := range n.Names {
x.visitIdent(kind, n)
}
ast.Walk(x, n.Type);
for _, v := range n.Values {
ast.Walk(x, v)
}
x.visitComment(n.Comment);
case *ast.TypeSpec:
x.visitComment(n.Doc);
x.visitIdent(TypeDecl, n.Name);
ast.Walk(x, n.Type);
x.visitComment(n.Comment);
}
}
func (x *Indexer) Visit(node interface{}) bool {
// TODO(gri): methods in interface types are categorized as VarDecl
switch n := node.(type) {
case *ast.Ident:
x.visitIdent(Use, n)
case *ast.Field:
x.decl = nil; // no snippets for fields
x.visitComment(n.Doc);
for _, m := range n.Names {
x.visitIdent(VarDecl, m)
}
ast.Walk(x, n.Type);
for _, s := range n.Tag {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.DeclStmt:
if decl, ok := n.Decl.(*ast.GenDecl); ok {
// local declarations can only be *ast.GenDecls
x.decl = nil; // no snippets for local declarations
x.visitComment(decl.Doc);
for _, s := range decl.Specs {
x.visitSpec(s, decl.Tok == token.VAR)
}
} else {
// handle error case gracefully
ast.Walk(x, n.Decl)
}
case *ast.GenDecl:
x.decl = n;
x.visitComment(n.Doc);
for _, s := range n.Specs {
x.visitSpec(s, n.Tok == token.VAR)
}
case *ast.FuncDecl:
x.visitComment(n.Doc);
kind := FuncDecl;
if n.Recv != nil {
kind = MethodDecl;
ast.Walk(x, n.Recv);
}
x.decl = n;
x.visitIdent(kind, n.Name);
ast.Walk(x, n.Type);
if n.Body != nil {
ast.Walk(x, n.Body)
}
case *ast.File:
x.visitComment(n.Doc);
x.decl = nil;
x.visitIdent(PackageClause, n.Name);
for _, d := range n.Decls {
ast.Walk(x, d)
}
// don't visit package level comments for now
// to avoid duplicate visiting from individual
// nodes
default:
return true
}
return false;
}
func (x *Indexer) VisitDir(path string, d *os.Dir) bool {
return true
}
func (x *Indexer) VisitFile(path string, d *os.Dir) {
if !isGoFile(d) {
return
}
if excludeTestFiles && (!isPkgFile(d) || strings.HasPrefix(path, "test/")) {
return
}
if excludeMainPackages && pkgName(path) == "main" {
return
}
file, err := parser.ParseFile(path, nil, parser.ParseComments);
if err != nil {
return // ignore files with (parse) errors
}
dir, _ := pathutil.Split(path);
pak := Pak{dir, file.Name.Value};
x.file = &File{path, pak};
ast.Walk(x, file);
}
// ----------------------------------------------------------------------------
// Index
type LookupResult struct {
Decls HitList; // package-level declarations (with snippets)
Others HitList; // all other occurences
}
type Index struct {
words map[string]*LookupResult; // maps words to hit lists
alts map[string]*AltWords; // maps canonical(words) to lists of alternative spellings
snippets []*Snippet; // all snippets, indexed by snippet index
nspots int; // number of spots indexed (a measure of the index size)
}
func canonical(w string) string { return strings.ToLower(w) }
// NewIndex creates a new index for the file tree rooted at root.
func NewIndex(root string) *Index {
var x Indexer;
// initialize Indexer
x.words = make(map[string]*IndexResult);
// collect all Spots
pathutil.Walk(root, &x, nil);
// for each word, reduce the RunLists into a LookupResult;
// also collect the word with its canonical spelling in a
// word list for later computation of alternative spellings
words := make(map[string]*LookupResult);
var wlist RunList;
for w, h := range x.words {
decls := reduce(&h.Decls);
others := reduce(&h.Others);
words[w] = &LookupResult{
Decls: decls,
Others: others,
};
wlist.Push(&wordPair{canonical(w), w});
}
// reduce the word list {canonical(w), w} into
// a list of AltWords runs {canonical(w), {w}}
alist := wlist.reduce(lessWordPair, newAltWords);
// convert alist into a map of alternative spellings
alts := make(map[string]*AltWords);
for i := 0; i < alist.Len(); i++ {
a := alist.At(i).(*AltWords);
alts[a.Canon] = a;
}
// convert snippet vector into a list
snippets := make([]*Snippet, x.snippets.Len());
for i := 0; i < x.snippets.Len(); i++ {
snippets[i] = x.snippets.At(i).(*Snippet)
}
return &Index{words, alts, snippets, x.nspots};
}
// Size returns the number of different words and
// spots indexed as a measure for the index size.
func (x *Index) Size() (nwords int, nspots int) {
return len(x.words), x.nspots
}
func (x *Index) LookupWord(w string) (match *LookupResult, alt *AltWords) {
match, _ = x.words[w];
alt, _ = x.alts[canonical(w)];
// remove current spelling from alternatives
// (if there is no match, the alternatives do
// not contain the current spelling)
if match != nil && alt != nil {
alt = alt.filter(w)
}
return;
}
func isIdentifier(s string) bool {
var S scanner.Scanner;
S.Init("", strings.Bytes(s), nil, 0);
if _, tok, _ := S.Scan(); tok == token.IDENT {
_, tok, _ := S.Scan();
return tok == token.EOF;
}
return false;
}
// For a given query, which is either a single identifier or a qualified
// identifier, Lookup returns a LookupResult, and a list of alternative
// spellings, if any. If the query syntax is wrong, illegal is set.
func (x *Index) Lookup(query string) (match *LookupResult, alt *AltWords, illegal bool) {
ss := strings.Split(query, ".", 0);
// check query syntax
for _, s := range ss {
if !isIdentifier(s) {
illegal = true;
return;
}
}
switch len(ss) {
case 1:
match, alt = x.LookupWord(ss[0])
case 2:
pakname := ss[0];
match, alt = x.LookupWord(ss[1]);
if match != nil {
// found a match - filter by package name
decls := match.Decls.filter(pakname);
others := match.Others.filter(pakname);
match = &LookupResult{decls, others};
}
default:
illegal = true
}
return;
}
func (x *Index) Snippet(i int) *Snippet {
// handle illegal snippet indices gracefully
if 0 <= i && i < len(x.snippets) {
return x.snippets[i]
}
return nil;
} | ) | random_line_split |
index.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the infrastructure to create an
// (identifier) index for a set of Go files.
//
// Basic indexing algorithm:
// - traverse all .go files of the file tree specified by root
// - for each word (identifier) encountered, collect all occurrences (spots)
// into a list; this produces a list of spots for each word
// - reduce the lists: from a list of spots to a list of FileRuns,
// and from a list of FileRuns into a list of PakRuns
// - make a HitList from the PakRuns
//
// Details:
// - keep two lists per word: one containing package-level declarations
// that have snippets, and one containing all other spots
// - keep the snippets in a separate table indexed by snippet index
// and store the snippet index in place of the line number in a SpotInfo
// (the line number for spots with snippets is stored in the snippet)
// - at the end, create lists of alternative spellings for a given
// word
package main
import (
"container/vector";
"go/ast";
"go/parser";
"go/token";
"go/scanner";
"os";
pathutil "path";
"sort";
"strings";
)
// ----------------------------------------------------------------------------
// RunList
// A RunList is a vector of entries that can be sorted according to some
// criteria. A RunList may be compressed by grouping "runs" of entries
// which are equal (according to the sort criteria) into a new RunList of
// runs. For instance, a RunList containing pairs (x, y) may be compressed
// into a RunList containing pair runs (x, {y}) where each run consists of
// a list of y's with the same x.
type RunList struct {
vector.Vector;
less func(x, y interface{}) bool;
}
func (h *RunList) Less(i, j int) bool { return h.less(h.At(i), h.At(j)) }
func (h *RunList) sort(less func(x, y interface{}) bool) {
h.less = less;
sort.Sort(h);
}
// Compress entries which are the same according to a sort criterion
// (specified by less) into "runs".
func (h *RunList) reduce(less func(x, y interface{}) bool, newRun func(h *RunList, i, j int) interface{}) *RunList {
// create runs of entries with equal values
h.sort(less);
// for each run, make a new run object and collect them in a new RunList
var hh RunList;
i := 0;
for j := 0; j < h.Len(); j++ {
if less(h.At(i), h.At(j)) {
hh.Push(newRun(h, i, j));
i = j; // start a new run
}
}
// add final run, if any
if i < h.Len() {
hh.Push(newRun(h, i, h.Len()))
}
return &hh;
}
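// Illustrative sketch, not part of the original source: reducing a RunList of
// hypothetical (key, value) pairs groups equal keys into runs. The pair type,
// lessPair, and newPairRun below are invented stand-ins for the Spot/word
// helpers defined later in this file.
//
//    var rl RunList;
//    rl.Push(pair{1, "a"});
//    rl.Push(pair{2, "c"});
//    rl.Push(pair{1, "b"});
//    runs := rl.reduce(lessPair, newPairRun);
//    // reduce sorts first, so runs holds two entries: {1, ["a" "b"]} and {2, ["c"]}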
// ----------------------------------------------------------------------------
// SpotInfo
// A SpotInfo value describes a particular identifier spot in a given file;
// It encodes three values: the SpotKind (declaration or use), a line or
// snippet index "lori", and whether it's a line or index.
//
// The following encoding is used:
//
// bits 32 4 1 0
// value [lori|kind|isIndex]
//
type SpotInfo uint32
// SpotKind describes whether an identifier is declared (and what kind of
// declaration) or used.
type SpotKind uint32
const (
PackageClause SpotKind = iota;
ImportDecl;
ConstDecl;
TypeDecl;
VarDecl;
FuncDecl;
MethodDecl;
Use;
nKinds;
)
func init() {
// sanity check: if nKinds is too large, the SpotInfo
// accessor functions may need to be updated
if nKinds > 8 {
panic("too many SpotKinds for the 3-bit kind field in SpotInfo")
}
}
// makeSpotInfo makes a SpotInfo.
func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
// encode lori: bits [4..32)
x := SpotInfo(lori) << 4;
if int(x>>4) != lori {
// lori value doesn't fit - since snippet indices are
// most certainly always smaller than 1<<28, this can
// only happen for line numbers; give it no line number (= 0)
x = 0
}
// encode kind: bits [1..4)
x |= SpotInfo(kind) << 1;
// encode isIndex: bit 0
if isIndex {
x |= 1
}
return x;
}
func (x SpotInfo) Kind() SpotKind { return SpotKind(x >> 1 & 7) }
func (x SpotInfo) Lori() int { return int(x >> 4) }
func (x SpotInfo) IsIndex() bool { return x&1 != 0 }
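// Worked example (illustrative only): a FuncDecl spot at line 42 survives the
// encode/decode round trip through the bit fields above.
//
//    info := makeSpotInfo(FuncDecl, 42, false);
//    // info.Kind() == FuncDecl, info.Lori() == 42, info.IsIndex() == false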
// ----------------------------------------------------------------------------
// KindRun
// Debugging support. Disable to see multiple entries per line.
const removeDuplicates = true
// A KindRun is a run of SpotInfos of the same kind in a given file.
type KindRun struct {
Kind SpotKind;
Infos []SpotInfo;
}
// KindRuns are sorted by line number or index. Since the isIndex bit
// is always the same for all infos in one list we can compare lori's.
func (f *KindRun) Len() int { return len(f.Infos) }
func (f *KindRun) Less(i, j int) bool { return f.Infos[i].Lori() < f.Infos[j].Lori() }
func (f *KindRun) Swap(i, j int) { f.Infos[i], f.Infos[j] = f.Infos[j], f.Infos[i] }
// FileRun contents are sorted by Kind for the reduction into KindRuns.
func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() }
// newKindRun allocates a new KindRun from the SpotInfo run [i, j) in h.
func newKindRun(h *RunList, i, j int) interface{} {
kind := h.At(i).(SpotInfo).Kind();
infos := make([]SpotInfo, j-i);
k := 0;
for ; i < j; i++ {
infos[k] = h.At(i).(SpotInfo);
k++;
}
run := &KindRun{kind, infos};
// Spots were sorted by file and kind to create this run.
// Within this run, sort them by line number or index.
sort.Sort(run);
if removeDuplicates {
// Since both the lori and kind fields must be
// the same for duplicates, and since the isIndex
// bit is always the same for all infos in one
// list we can simply compare the entire info.
k := 0;
var prev SpotInfo;
for i, x := range infos {
if x != prev || i == 0 {
infos[k] = x;
k++;
prev = x;
}
}
run.Infos = infos[0:k];
}
return run;
}
// ----------------------------------------------------------------------------
// FileRun
// A Pak describes a Go package.
type Pak struct {
Path string; // path of directory containing the package
Name string; // package name as declared by package clause
}
// Paks are sorted by name (primary key) and by import path (secondary key).
func (p *Pak) less(q *Pak) bool {
return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path
}
// A File describes a Go file.
type File struct {
Path string; // complete file name
Pak Pak; // the package to which the file belongs
}
// A Spot describes a single occurrence of a word.
type Spot struct {
File *File;
Info SpotInfo;
}
// A FileRun is a list of KindRuns belonging to the same file.
type FileRun struct {
File *File;
Groups []*KindRun;
}
// Spots are sorted by path for the reduction into FileRuns.
func lessSpot(x, y interface{}) bool { return x.(Spot).File.Path < y.(Spot).File.Path }
// newFileRun allocates a new FileRun from the Spot run [i, j) in h.
func newFileRun(h0 *RunList, i, j int) interface{} {
file := h0.At(i).(Spot).File;
// reduce the list of Spots into a list of KindRuns
var h1 RunList;
h1.Vector.Init(j - i);
k := 0;
for ; i < j; i++ {
h1.Set(k, h0.At(i).(Spot).Info);
k++;
}
h2 := h1.reduce(lessKind, newKindRun);
// create the FileRun
groups := make([]*KindRun, h2.Len());
for i := 0; i < h2.Len(); i++ {
groups[i] = h2.At(i).(*KindRun)
}
return &FileRun{file, groups};
}
// ----------------------------------------------------------------------------
// PakRun
// A PakRun describes a run of *FileRuns of a package.
type PakRun struct {
Pak Pak;
Files []*FileRun;
}
// Sorting support for files within a PakRun.
func (p *PakRun) Len() int { return len(p.Files) }
func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Path < p.Files[j].File.Path }
func (p *PakRun) Swap(i, j int) { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] }
// FileRuns are sorted by package for the reduction into PakRuns.
func lessFileRun(x, y interface{}) bool {
return x.(*FileRun).File.Pak.less(&y.(*FileRun).File.Pak)
}
// newPakRun allocates a new PakRun from the *FileRun run [i, j) in h.
func newPakRun(h *RunList, i, j int) interface{} {
pak := h.At(i).(*FileRun).File.Pak;
files := make([]*FileRun, j-i);
k := 0;
for ; i < j; i++ {
files[k] = h.At(i).(*FileRun);
k++;
}
run := &PakRun{pak, files};
sort.Sort(run); // files were sorted by package; sort them by file now
return run;
}
// ----------------------------------------------------------------------------
// HitList
// A HitList describes a list of PakRuns.
type HitList []*PakRun
// PakRuns are sorted by package.
func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(&y.(*PakRun).Pak) }
func reduce(h0 *RunList) HitList {
// reduce a list of Spots into a list of FileRuns
h1 := h0.reduce(lessSpot, newFileRun);
// reduce a list of FileRuns into a list of PakRuns
h2 := h1.reduce(lessFileRun, newPakRun);
// sort the list of PakRuns by package
h2.sort(lessPakRun);
// create a HitList
h := make(HitList, h2.Len());
for i := 0; i < h2.Len(); i++ {
h[i] = h2.At(i).(*PakRun)
}
return h;
}
func (h HitList) filter(pakname string) HitList {
// determine number of matching packages (most of the time just one)
n := 0;
for _, p := range h {
if p.Pak.Name == pakname {
n++
}
}
// create filtered HitList
hh := make(HitList, n);
i := 0;
for _, p := range h {
if p.Pak.Name == pakname {
hh[i] = p;
i++;
}
}
return hh;
}
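// Usage sketch (assumed, not from the original source): narrowing a HitList to
// the packages named "fmt"; spots here is any RunList of Spot values.
//
//    hits := reduce(&spots);
//    fmtHits := hits.filter("fmt");    // keeps only PakRuns with Pak.Name == "fmt"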
// ----------------------------------------------------------------------------
// AltWords
type wordPair struct {
canon string; // canonical word spelling (all lowercase)
alt string; // alternative spelling
}
// An AltWords describes a list of alternative spellings for a
// canonical (all lowercase) spelling of a word.
type AltWords struct {
Canon string; // canonical word spelling (all lowercase)
Alts []string; // alternative spelling for the same word
}
// wordPairs are sorted by their canonical spelling.
func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon }
// newAltWords allocates a new AltWords from the *wordPair run [i, j) in h.
func newAltWords(h *RunList, i, j int) interface{} {
canon := h.At(i).(*wordPair).canon;
alts := make([]string, j-i);
k := 0;
for ; i < j; i++ {
alts[k] = h.At(i).(*wordPair).alt;
k++;
}
return &AltWords{canon, alts};
}
func (a *AltWords) filter(s string) *AltWords {
if len(a.Alts) == 1 && a.Alts[0] == s {
// there are no different alternatives
return nil
}
// make a new AltWords with the current spelling removed
alts := make([]string, len(a.Alts));
i := 0;
for _, w := range a.Alts {
if w != s {
alts[i] = w;
i++;
}
}
return &AltWords{a.Canon, alts[0:i]};
}
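// Illustrative example (not in the original source): removing the spelling the
// user typed from the alternatives of the canonical word "foo".
//
//    a := &AltWords{"foo", []string{"foo", "Foo", "FOO"}};
//    b := a.filter("Foo");    // b.Alts == ["foo" "FOO"]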
// ----------------------------------------------------------------------------
// Indexer
// Adjust these flags as seems best.
const excludeMainPackages = false
const excludeTestFiles = false
type IndexResult struct {
Decls RunList; // package-level declarations (with snippets)
Others RunList; // all other occurrences
}
// An Indexer maintains the data structures and provides the machinery
// for indexing .go files under a file tree. It implements the path.Visitor
// interface for walking file trees, and the ast.Visitor interface for
// walking Go ASTs.
type Indexer struct {
words map[string]*IndexResult; // RunLists of Spots
snippets vector.Vector; // vector of *Snippets, indexed by snippet indices
file *File; // current file
decl ast.Decl; // current decl
nspots int; // number of spots encountered
}
func (x *Indexer) addSnippet(s *Snippet) int {
index := x.snippets.Len();
x.snippets.Push(s);
return index;
}
func (x *Indexer) visitComment(c *ast.CommentGroup) {
if c != nil {
ast.Walk(x, c)
}
}
func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) {
if id != nil {
lists, found := x.words[id.Value];
if !found {
lists = new(IndexResult);
x.words[id.Value] = lists;
}
if kind == Use || x.decl == nil {
// not a declaration or no snippet required
info := makeSpotInfo(kind, id.Pos().Line, false);
lists.Others.Push(Spot{x.file, info});
} else {
// a declaration with snippet
index := x.addSnippet(NewSnippet(x.decl, id));
info := makeSpotInfo(kind, index, true);
lists.Decls.Push(Spot{x.file, info});
}
x.nspots++;
}
}
func (x *Indexer) visitSpec(spec ast.Spec, isVarDecl bool) {
switch n := spec.(type) {
case *ast.ImportSpec:
x.visitComment(n.Doc);
x.visitIdent(ImportDecl, n.Name);
for _, s := range n.Path {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.ValueSpec:
x.visitComment(n.Doc);
kind := ConstDecl;
if isVarDecl {
kind = VarDecl
}
for _, n := range n.Names {
x.visitIdent(kind, n)
}
ast.Walk(x, n.Type);
for _, v := range n.Values {
ast.Walk(x, v)
}
x.visitComment(n.Comment);
case *ast.TypeSpec:
x.visitComment(n.Doc);
x.visitIdent(TypeDecl, n.Name);
ast.Walk(x, n.Type);
x.visitComment(n.Comment);
}
}
func (x *Indexer) Visit(node interface{}) bool {
// TODO(gri): methods in interface types are categorized as VarDecl
switch n := node.(type) {
case *ast.Ident:
x.visitIdent(Use, n)
case *ast.Field:
x.decl = nil; // no snippets for fields
x.visitComment(n.Doc);
for _, m := range n.Names {
x.visitIdent(VarDecl, m)
}
ast.Walk(x, n.Type);
for _, s := range n.Tag {
ast.Walk(x, s)
}
x.visitComment(n.Comment);
case *ast.DeclStmt:
if decl, ok := n.Decl.(*ast.GenDecl); ok {
// local declarations can only be *ast.GenDecls
x.decl = nil; // no snippets for local declarations
x.visitComment(decl.Doc);
for _, s := range decl.Specs {
x.visitSpec(s, decl.Tok == token.VAR)
}
} else {
// handle error case gracefully
ast.Walk(x, n.Decl)
}
case *ast.GenDecl:
x.decl = n;
x.visitComment(n.Doc);
for _, s := range n.Specs {
x.visitSpec(s, n.Tok == token.VAR)
}
case *ast.FuncDecl:
x.visitComment(n.Doc);
kind := FuncDecl;
if n.Recv != nil {
kind = MethodDecl;
ast.Walk(x, n.Recv);
}
x.decl = n;
x.visitIdent(kind, n.Name);
ast.Walk(x, n.Type);
if n.Body != nil {
ast.Walk(x, n.Body)
}
case *ast.File:
x.visitComment(n.Doc);
x.decl = nil;
x.visitIdent(PackageClause, n.Name);
for _, d := range n.Decls {
ast.Walk(x, d)
}
// don't visit package level comments for now
// to avoid duplicate visiting from individual
// nodes
default:
return true
}
return false;
}
func (x *Indexer) VisitDir(path string, d *os.Dir) bool {
return true
}
func (x *Indexer) VisitFile(path string, d *os.Dir) {
if !isGoFile(d) {
return
}
if excludeTestFiles && (!isPkgFile(d) || strings.HasPrefix(path, "test/")) {
return
}
if excludeMainPackages && pkgName(path) == "main" {
return
}
file, err := parser.ParseFile(path, nil, parser.ParseComments);
if err != nil {
return // ignore files with (parse) errors
}
dir, _ := pathutil.Split(path);
pak := Pak{dir, file.Name.Value};
x.file = &File{path, pak};
ast.Walk(x, file);
}
// ----------------------------------------------------------------------------
// Index
type LookupResult struct {
Decls HitList; // package-level declarations (with snippets)
Others HitList; // all other occurrences
}
type Index struct {
words map[string]*LookupResult; // maps words to hit lists
alts map[string]*AltWords; // maps canonical(words) to lists of alternative spellings
snippets []*Snippet; // all snippets, indexed by snippet index
nspots int; // number of spots indexed (a measure of the index size)
}
func canonical(w string) string { return strings.ToLower(w) }
// NewIndex creates a new index for the file tree rooted at root.
func NewIndex(root string) *Index {
var x Indexer;
// initialize Indexer
x.words = make(map[string]*IndexResult);
// collect all Spots
pathutil.Walk(root, &x, nil);
// for each word, reduce the RunLists into a LookupResult;
// also collect the word with its canonical spelling in a
// word list for later computation of alternative spellings
words := make(map[string]*LookupResult);
var wlist RunList;
for w, h := range x.words {
decls := reduce(&h.Decls);
others := reduce(&h.Others);
words[w] = &LookupResult{
Decls: decls,
Others: others,
};
wlist.Push(&wordPair{canonical(w), w});
}
// reduce the word list {canonical(w), w} into
// a list of AltWords runs {canonical(w), {w}}
alist := wlist.reduce(lessWordPair, newAltWords);
// convert alist into a map of alternative spellings
alts := make(map[string]*AltWords);
for i := 0; i < alist.Len(); i++ {
a := alist.At(i).(*AltWords);
alts[a.Canon] = a;
}
// convert snippet vector into a list
snippets := make([]*Snippet, x.snippets.Len());
for i := 0; i < x.snippets.Len(); i++ {
snippets[i] = x.snippets.At(i).(*Snippet)
}
return &Index{words, alts, snippets, x.nspots};
}
// Size returns the number of different words and
// spots indexed as a measure for the index size.
func (x *Index) Size() (nwords int, nspots int) {
return len(x.words), x.nspots
}
func (x *Index) LookupWord(w string) (match *LookupResult, alt *AltWords) {
match, _ = x.words[w];
alt, _ = x.alts[canonical(w)];
// remove current spelling from alternatives
// (if there is no match, the alternatives do
// not contain the current spelling)
if match != nil && alt != nil {
alt = alt.filter(w)
}
return;
}
func isIdentifier(s string) bool {
var S scanner.Scanner;
S.Init("", strings.Bytes(s), nil, 0);
if _, tok, _ := S.Scan(); tok == token.IDENT {
_, tok, _ := S.Scan();
return tok == token.EOF;
}
return false;
}
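// Behavior sketch (illustrative only):
//
//    isIdentifier("Reader")    // true: one IDENT token followed by EOF
//    isIdentifier("foo bar")   // false: more tokens follow the identifier
//    isIdentifier("42")        // false: the first token is not an IDENT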
// For a given query, which is either a single identifier or a qualified
// identifier, Lookup returns a LookupResult, and a list of alternative
// spellings, if any. If the query syntax is wrong, illegal is set.
func (x *Index) Lookup(query string) (match *LookupResult, alt *AltWords, illegal bool) {
ss := strings.Split(query, ".", 0);
// check query syntax
for _, s := range ss {
if !isIdentifier(s) {
illegal = true;
return;
}
}
switch len(ss) {
case 1:
match, alt = x.LookupWord(ss[0])
case 2:
pakname := ss[0];
match, alt = x.LookupWord(ss[1]);
if match != nil {
// found a match - filter by package name
decls := match.Decls.filter(pakname);
others := match.Others.filter(pakname);
match = &LookupResult{decls, others};
}
default:
illegal = true
}
return;
}
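// Query sketch (illustrative only; x is any *Index): a bare identifier searches
// all packages, a qualified identifier filters hits by package name, and
// anything longer is rejected.
//
//    match, alt, illegal := x.Lookup("Sort");       // hits from every package
//    match, alt, illegal = x.Lookup("sort.Sort");   // hits from package sort only
//    _, _, illegal = x.Lookup("a.b.c");             // illegal == true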
func (x *Index) Snippet(i int) *Snippet {
// handle illegal snippet indices gracefully
if 0 <= i && i < len(x.snippets) {
return x.snippets[i]
}
return nil;
}
allocator.rs | use std::rc::Rc;
use std::cell::RefCell;
use std::sync::{Arc, Mutex};
use std::any::Any;
use std::sync::mpsc::{Sender, Receiver, channel};
use std::collections::VecDeque;
use std::marker::PhantomData;
use columnar::{Columnar, ColumnarStack};
use communication::{Pushable, Pullable};
use networking::networking::MessageHeader;
use std::default::Default;
use drain::DrainExt;
// The Communicator trait presents the interface a worker has to the outside world.
// The worker can see its index, the total number of peers, and acquire channels to and from the other workers.
// There is an assumption that each worker performs the same channel allocation logic; things go wrong otherwise.
pub trait Communicator: 'static {
fn index(&self) -> u64; // number out of peers
fn peers(&self) -> u64; // number of peers
fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>);
}
// TODO : Would be nice if Communicator had associated types for its Pushable and Pullable types,
// TODO : but they would have to be generic over T, with the current set-up. Might require HKT?
// impl<'a, C: Communicator + 'a> Communicator for &'a mut C {
// fn index(&self) -> u64 { (**self).index() }
// fn peers(&self) -> u64 { (**self).peers() }
// fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) { (**self).new_channel() }
// }
// The simplest communicator remains worker-local and just queues sent messages.
pub struct ThreadCommunicator;
impl Communicator for ThreadCommunicator {
fn index(&self) -> u64 { 0 }
fn peers(&self) -> u64 { 1 }
fn new_channel<T:'static>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let shared = Rc::new(RefCell::new(VecDeque::<T>::new()));
return (vec![Box::new(shared.clone()) as Box<Pushable<T>>], Box::new(shared.clone()) as Box<Pullable<T>>)
}
}
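// Minimal usage sketch, not part of the original source; it assumes the
// Pushable/Pullable impls in the communication module behave as a FIFO queue.
//
//     let mut comm = ThreadCommunicator;
//     let (mut sends, mut recv) = comm.new_channel::<u64>();
//     sends[0].push(7);
//     assert_eq!(recv.pull(), Some(7));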
// A specific Communicator for inter-thread intra-process communication
pub struct ProcessCommunicator {
inner: ThreadCommunicator, // inner ThreadCommunicator
index: u64, // number out of peers
peers: u64, // number of peer allocators (for typed channel allocation).
allocated: u64, // indicates how many have been allocated (locally).
channels: Arc<Mutex<Vec<Box<Any+Send>>>>, // Box<Any+Send> -> Box<Vec<Option<(Vec<Sender<T>>, Receiver<T>)>>>
}
impl ProcessCommunicator {
pub fn inner<'a>(&'a mut self) -> &'a mut ThreadCommunicator { &mut self.inner }
pub fn new_vector(count: u64) -> Vec<ProcessCommunicator> {
let channels = Arc::new(Mutex::new(Vec::new()));
return (0 .. count).map(|index| ProcessCommunicator {
inner: ThreadCommunicator,
index: index,
peers: count,
allocated: 0,
channels: channels.clone(),
}).collect();
}
}
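// Usage sketch (illustrative only): one communicator per worker thread. Every
// worker must call new_channel the same number of times and in the same order,
// or the allocated channel indices will not line up across workers.
//
//     let mut comms = ProcessCommunicator::new_vector(2);
//     let mut worker0 = comms.remove(0);
//     // move the remaining communicator into a second thread, then on each:
//     let (_sends, _recv) = worker0.new_channel::<u64>();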
impl Communicator for ProcessCommunicator {
fn index(&self) -> u64 { self.index }
fn peers(&self) -> u64 { self.peers }
fn new_channel<T:Send+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let mut channels = self.channels.lock().ok().expect("mutex error?");
if self.allocated == channels.len() as u64 { // we need a new channel ...
let mut senders = Vec::new();
let mut receivers = Vec::new();
for _ in (0..self.peers) {
let (s, r): (Sender<T>, Receiver<T>) = channel();
senders.push(s);
receivers.push(r);
}
let mut to_box = Vec::new();
for recv in receivers.drain_temp() {
to_box.push(Some((senders.clone(), recv)));
}
channels.push(Box::new(to_box));
}
match channels[self.allocated as usize].downcast_mut::<(Vec<Option<(Vec<Sender<T>>, Receiver<T>)>>)>() {
Some(ref mut vector) => {
self.allocated += 1;
let (mut send, recv) = vector[self.index as usize].take().unwrap();
let mut temp = Vec::new();
for s in send.drain_temp() { temp.push(Box::new(s) as Box<Pushable<T>>); }
return (temp, Box::new(recv) as Box<Pullable<T>>)
}
_ => { panic!("unable to cast channel correctly"); }
}
}
}
// A communicator intended for binary channels (networking, pipes, shared memory)
pub struct BinaryCommunicator {
pub inner: ProcessCommunicator, // inner ProcessCommunicator (use for process-local channels)
pub index: u64, // index of this worker
pub peers: u64, // number of peer workers
pub graph: u64, // identifier for the current graph
pub allocated: u64, // indicates how many channels have been allocated (locally).
// for loading up state in the networking threads.
pub writers: Vec<Sender<((u64, u64, u64), Sender<Vec<u8>>)>>, // (index, back-to-worker)
pub readers: Vec<Sender<((u64, u64, u64), Sender<Vec<u8>>, Receiver<Vec<u8>>)>>, // (index, data-to-worker, back-from-worker)
pub senders: Vec<Sender<(MessageHeader, Vec<u8>)>> // for sending bytes!
}
impl BinaryCommunicator {
pub fn inner<'a>(&'a mut self) -> &'a mut ProcessCommunicator { &mut self.inner }
}
// A Communicator backed by Sender<Vec<u8>>/Receiver<Vec<u8>> pairs (e.g. networking, shared memory, files, pipes)
impl Communicator for BinaryCommunicator {
fn index(&self) -> u64 { self.index }
fn peers(&self) -> u64 { self.peers }
fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let mut pushers: Vec<Box<Pushable<T>>> = Vec::new(); // built-up vector of Box<Pushable<T>> to return
// we'll need process-local channels as well (no self-loop binary connection in this design; perhaps should allow)
let inner_peers = self.inner.peers();
let (inner_sends, inner_recv) = self.inner.new_channel();
// prep a pushable for each endpoint, multiplied by inner_peers
for (index, writer) in self.writers.iter().enumerate() {
for _ in (0..inner_peers) {
let (s,r) = channel(); // generate a binary (Vec<u8>) channel pair of (back_to_worker, back_from_net)
let target_index = if index as u64 >= (self.index * inner_peers) { index as u64 + inner_peers } else { index as u64 };
println!("init'ing send channel: ({} {} {})", self.index, self.graph, self.allocated);
writer.send(((self.index, self.graph, self.allocated), s)).unwrap();
let header = MessageHeader {
graph: self.graph,
channel: self.allocated,
source: self.index,
target: target_index,
length: 0,
};
pushers.push(Box::new(BinaryPushable::new(header, self.senders[index].clone(), r)));
}
}
// splice inner_sends into the vector of pushables
for (index, writer) in inner_sends.into_iter().enumerate() {
pushers.insert((self.index * inner_peers) as usize + index, writer);
}
// prep a Box<Pullable<T>> using inner_recv and fresh registered pullables
let (send,recv) = channel(); // binary channel from binary listener to BinaryPullable<T>
let mut pullsends = Vec::new();
for reader in self.readers.iter() {
let (s,r) = channel();
pullsends.push(s);
println!("init'ing recv channel: ({} {} {})", self.index, self.graph, self.allocated);
reader.send(((self.index, self.graph, self.allocated), send.clone(), r)).unwrap();
}
let pullable = Box::new(BinaryPullable {
inner: inner_recv,
senders: pullsends,
receiver: recv,
stack: Default::default(),
});
self.allocated += 1;
return (pushers, pullable);
}
}
struct BinaryPushable<T: Columnar> {
header: MessageHeader,
sender: Sender<(MessageHeader, Vec<u8>)>, // targets for each remote destination
receiver: Receiver<Vec<u8>>, // source of empty binary vectors
phantom: PhantomData<T>,
buffer: Vec<u8>,
stack: <T as Columnar>::Stack,
}
impl<T: Columnar> BinaryPushable<T> {
pub fn new(header: MessageHeader, sender: Sender<(MessageHeader, Vec<u8>)>, receiver: Receiver<Vec<u8>>) -> BinaryPushable<T> {
BinaryPushable {
header: header,
sender: sender,
receiver: receiver,
phantom: PhantomData,
buffer: Vec::new(),
stack: Default::default(),
}
}
}
impl<T:Columnar+'static> Pushable<T> for BinaryPushable<T> {
#[inline]
fn push(&mut self, data: T) {
let mut bytes = if let Some(buffer) = self.receiver.try_recv().ok() { buffer } else { Vec::new() };
bytes.clear();
self.stack.push(data);
self.stack.encode(&mut bytes).unwrap();
let mut header = self.header;
header.length = bytes.len() as u64;
self.sender.send((header, bytes)).ok();
}
}
struct BinaryPullable<T: Columnar> {
inner: Box<Pullable<T>>, // inner pullable (e.g. intra-process typed queue)
senders: Vec<Sender<Vec<u8>>>, // places to put used binary vectors
receiver: Receiver<Vec<u8>>, // source of serialized buffers
stack: <T as Columnar>::Stack,
}
impl<T:Columnar+'static> Pullable<T> for BinaryPullable<T> {
#[inline]
fn pull(&mut self) -> Option<T> {
if let Some(data) = self.inner.pull() { Some(data) }
else if let Some(bytes) = self.receiver.try_recv().ok() {
self.stack.decode(&mut &bytes[..]).unwrap();
self.senders[0].send(bytes).unwrap(); // TODO : Not clear where bytes came from; find out!
self.stack.pop()
}
else { None }
}
}
load_balancer.rs | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::convert::TryFrom;
use std::sync::atomic::{AtomicUsize, Ordering};
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use crate::{config::UpstreamEndpoints, filters::prelude::*, map_proto_enum};
crate::include_proto!("quilkin.extensions.filters.load_balancer.v1alpha1");
use self::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::Policy as ProtoPolicy, LoadBalancer as ProtoConfig,
};
/// Policy represents how a [`LoadBalancerFilter`] distributes
/// packets across endpoints.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq)]
pub enum Policy {
/// Send packets to endpoints in turns.
#[serde(rename = "ROUND_ROBIN")]
RoundRobin,
/// Send packets to endpoints chosen at random.
#[serde(rename = "RANDOM")]
Random,
}
impl Default for Policy {
fn default() -> Self {
Policy::RoundRobin
}
}
/// Config represents configuration for a [`LoadBalancerFilter`].
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Config {
#[serde(default)]
policy: Policy,
}
impl TryFrom<ProtoConfig> for Config {
type Error = ConvertProtoConfigError;
fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> {
let policy = p
.policy
.map(|policy| {
map_proto_enum!(
value = policy.value,
field = "policy",
proto_enum_type = ProtoPolicy,
target_enum_type = Policy,
variants = [RoundRobin, Random]
)
})
.transpose()?
.unwrap_or_else(Policy::default);
Ok(Self { policy })
}
}
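// Illustrative sketch (not part of the original source): the YAML form of this
// config as exercised by the tests below; omitting `policy` falls back to
// ROUND_ROBIN via the serde default.
//
//     let cfg: Config = serde_yaml::from_str("policy: RANDOM").unwrap();
//     assert_eq!(cfg.policy, Policy::Random);
//     let cfg: Config = serde_yaml::from_str("{}").unwrap();
//     assert_eq!(cfg.policy, Policy::RoundRobin);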
/// EndpointChooser chooses from a set of endpoints that a proxy is connected to.
trait EndpointChooser: Send + Sync {
/// choose_endpoints asks for the next endpoint(s) to use.
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints);
}
/// RoundRobinEndpointChooser chooses endpoints in round-robin order.
pub struct RoundRobinEndpointChooser {
next_endpoint: AtomicUsize,
}
impl RoundRobinEndpointChooser {
fn new() -> Self {
RoundRobinEndpointChooser {
next_endpoint: AtomicUsize::new(0),
}
}
}
impl EndpointChooser for RoundRobinEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
let count = self.next_endpoint.fetch_add(1, Ordering::Relaxed);
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let num_endpoints = endpoints.size();
endpoints.keep(count % num_endpoints)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
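// Behavior sketch (illustrative only; building `eps` is elided): each call
// keeps exactly one endpoint, advancing count % num_endpoints, so a fresh
// three-endpoint set is narrowed to endpoint 0, then 1, then 2, then 0 again.
//
//     let chooser = RoundRobinEndpointChooser::new();
//     chooser.choose_endpoints(&mut eps);    // keeps endpoint 0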
/// RandomEndpointChooser chooses endpoints in random order.
pub struct RandomEndpointChooser;
impl EndpointChooser for RandomEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let idx = (&mut thread_rng()).gen_range(0..endpoints.size());
endpoints.keep(idx)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// Creates instances of LoadBalancerFilter.
#[derive(Default)]
pub struct LoadBalancerFilterFactory;
/// LoadBalancerFilter load balances packets over the upstream endpoints.
#[crate::filter("quilkin.extensions.filters.load_balancer.v1alpha1.LoadBalancer")]
struct LoadBalancerFilter {
endpoint_chooser: Box<dyn EndpointChooser>,
}
impl FilterFactory for LoadBalancerFilterFactory {
fn name(&self) -> &'static str {
LoadBalancerFilter::FILTER_NAME
}
fn create_filter(&self, args: CreateFilterArgs) -> Result<Box<dyn Filter>, Error> {
let config: Config = self
.require_config(args.config)?
.deserialize::<Config, ProtoConfig>(self.name())?;
let endpoint_chooser: Box<dyn EndpointChooser> = match config.policy {
Policy::RoundRobin => Box::new(RoundRobinEndpointChooser::new()),
Policy::Random => Box::new(RandomEndpointChooser),
};
Ok(Box::new(LoadBalancerFilter { endpoint_chooser }))
}
}
impl Filter for LoadBalancerFilter {
fn read(&self, mut ctx: ReadContext) -> Option<ReadResponse> {
self.endpoint_chooser.choose_endpoints(&mut ctx.endpoints);
Some(ctx.into())
}
}
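// Configuration sketch (hedged: the exact proxy config schema is assumed, not
// taken from this file): enabling the filter in a static filter chain.
//
//     # quilkin.yaml (assumed layout)
//     filters:
//       - name: quilkin.extensions.filters.load_balancer.v1alpha1.LoadBalancer
//         config:
//           policy: ROUND_ROBIN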
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::convert::TryFrom;
use std::net::SocketAddr;
use super::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::{Policy as ProtoPolicy, PolicyValue},
LoadBalancer as ProtoConfig,
};
use super::{Config, Policy};
use crate::cluster::Endpoint;
use crate::config::Endpoints;
use crate::filters::{
extensions::load_balancer::LoadBalancerFilterFactory, CreateFilterArgs, Filter,
FilterFactory, ReadContext,
};
use prometheus::Registry;
fn create_filter(config: &str) -> Box<dyn Filter> {
let factory = LoadBalancerFilterFactory;
factory
.create_filter(CreateFilterArgs::fixed(
Registry::default(),
Some(&serde_yaml::from_str(config).unwrap()),
))
.unwrap()
}
fn get_response_addresses(
filter: &dyn Filter,
input_addresses: &[SocketAddr],
) -> Vec<SocketAddr> {
filter
.read(ReadContext::new(
Endpoints::new(
input_addresses
.iter()
.map(|addr| Endpoint::from_address(*addr))
.collect(),
)
.unwrap()
.into(),
"127.0.0.1:8080".parse().unwrap(),
vec![],
))
.unwrap()
.endpoints
.iter()
.map(|ep| ep.address)
.collect::<Vec<_>>()
}
#[test]
fn convert_proto_config() {
let test_cases = vec![
(
"RandomPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::Random as i32,
}),
},
Some(Config {
policy: Policy::Random,
}),
),
(
"RoundRobinPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::RoundRobin as i32,
}),
},
Some(Config {
policy: Policy::RoundRobin,
}),
),
(
"should fail when invalid policy is provided",
ProtoConfig {
policy: Some(PolicyValue { value: 42 }),
},
None,
),
(
"should use correct default values",
ProtoConfig { policy: None },
Some(Config {
policy: Policy::default(),
}),
),
];
for (name, proto_config, expected) in test_cases {
let result = Config::try_from(proto_config);
assert_eq!(
result.is_err(),
expected.is_none(),
"{}: error expectation does not match",
name
);
if let Some(expected) = expected {
assert_eq!(expected, result.unwrap(), "{}", name);
}
}
}
#[test]
fn round_robin_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: ROUND_ROBIN
";
let filter = create_filter(yaml);
// Check that we repeat the same addresses in sequence forever.
let expected_sequence = addresses.iter().map(|addr| vec![*addr]).collect::<Vec<_>>();
for _ in 0..10 {
assert_eq!(
expected_sequence,
(0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>()
);
}
}
#[test]
fn random_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: RANDOM
";
let filter = create_filter(yaml);
// Run a few selection rounds through the addresses.
let mut result_sequences = vec![];
for _ in 0..10 {
let sequence = (0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>();
result_sequences.push(sequence);
}
// Check that every address was chosen at least once.
assert_eq!(
addresses.into_iter().collect::<HashSet<_>>(),
result_sequences
.clone()
.into_iter()
.flatten()
.flatten()
.collect::<HashSet<_>>(),
);
// Check that there is at least one different sequence of addresses.
assert!(
&result_sequences[1..]
.iter()
.any(|seq| seq != &result_sequences[0]),
"the same sequence of addresses were chosen for random load balancer"
);
}
}
load_balancer.rs | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::convert::TryFrom;
use std::sync::atomic::{AtomicUsize, Ordering};
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use crate::{config::UpstreamEndpoints, filters::prelude::*, map_proto_enum};
crate::include_proto!("quilkin.extensions.filters.load_balancer.v1alpha1");
use self::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::Policy as ProtoPolicy, LoadBalancer as ProtoConfig,
};
/// Policy represents how a [`LoadBalancerFilter`] distributes
/// packets across endpoints.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq)]
pub enum Policy {
/// Send packets to endpoints in turns.
#[serde(rename = "ROUND_ROBIN")]
RoundRobin,
/// Send packets to endpoints chosen at random.
#[serde(rename = "RANDOM")]
Random,
}
impl Default for Policy {
fn default() -> Self {
Policy::RoundRobin
}
}
/// Config represents configuration for a [`LoadBalancerFilter`].
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct | {
#[serde(default)]
policy: Policy,
}
impl TryFrom<ProtoConfig> for Config {
type Error = ConvertProtoConfigError;
fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> {
let policy = p
.policy
.map(|policy| {
map_proto_enum!(
value = policy.value,
field = "policy",
proto_enum_type = ProtoPolicy,
target_enum_type = Policy,
variants = [RoundRobin, Random]
)
})
.transpose()?
.unwrap_or_else(Policy::default);
Ok(Self { policy })
}
}
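// Usage sketch (assuming serde_yaml; illustrative only). Both inputs below
// deserialize to a valid Config, the second via the serde field default:
//
//   serde_yaml::from_str::<Config>("policy: RANDOM") // => Policy::Random
//   serde_yaml::from_str::<Config>("{}")             // => Policy::RoundRobin (default)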
/// EndpointChooser chooses from a set of endpoints that a proxy is connected to.
trait EndpointChooser: Send + Sync {
/// choose_endpoints asks for the next endpoint(s) to use.
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints);
}
/// RoundRobinEndpointChooser chooses endpoints in round-robin order.
pub struct RoundRobinEndpointChooser {
next_endpoint: AtomicUsize,
}
impl RoundRobinEndpointChooser {
fn new() -> Self {
RoundRobinEndpointChooser {
next_endpoint: AtomicUsize::new(0),
}
}
}
impl EndpointChooser for RoundRobinEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
let count = self.next_endpoint.fetch_add(1, Ordering::Relaxed);
// Note: the expect below cannot fail because count % num_endpoints is always in range.
let num_endpoints = endpoints.size();
endpoints.keep(count % num_endpoints)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
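// For example, with three endpoints the kept indices across successive calls
// are 0, 1, 2, 0, 1, ... since the fetch_add counter is reduced modulo
// num_endpoints.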
/// RandomEndpointChooser chooses endpoints in random order.
pub struct RandomEndpointChooser;
impl EndpointChooser for RandomEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
// Note: the expect below cannot fail because idx is sampled within 0..endpoints.size().
let idx = thread_rng().gen_range(0..endpoints.size());
endpoints.keep(idx)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// Creates instances of LoadBalancerFilter.
#[derive(Default)]
pub struct LoadBalancerFilterFactory;
/// LoadBalancerFilter load balances packets over the upstream endpoints.
#[crate::filter("quilkin.extensions.filters.load_balancer.v1alpha1.LoadBalancer")]
struct LoadBalancerFilter {
endpoint_chooser: Box<dyn EndpointChooser>,
}
impl FilterFactory for LoadBalancerFilterFactory {
fn name(&self) -> &'static str {
LoadBalancerFilter::FILTER_NAME
}
fn create_filter(&self, args: CreateFilterArgs) -> Result<Box<dyn Filter>, Error> {
let config: Config = self
.require_config(args.config)?
.deserialize::<Config, ProtoConfig>(self.name())?;
let endpoint_chooser: Box<dyn EndpointChooser> = match config.policy {
Policy::RoundRobin => Box::new(RoundRobinEndpointChooser::new()),
Policy::Random => Box::new(RandomEndpointChooser),
};
Ok(Box::new(LoadBalancerFilter { endpoint_chooser }))
}
}
impl Filter for LoadBalancerFilter {
fn read(&self, mut ctx: ReadContext) -> Option<ReadResponse> {
self.endpoint_chooser.choose_endpoints(&mut ctx.endpoints);
Some(ctx.into())
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::convert::TryFrom;
use std::net::SocketAddr;
use super::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::{Policy as ProtoPolicy, PolicyValue},
LoadBalancer as ProtoConfig,
};
use super::{Config, Policy};
use crate::cluster::Endpoint;
use crate::config::Endpoints;
use crate::filters::{
extensions::load_balancer::LoadBalancerFilterFactory, CreateFilterArgs, Filter,
FilterFactory, ReadContext,
};
use prometheus::Registry;
fn create_filter(config: &str) -> Box<dyn Filter> {
let factory = LoadBalancerFilterFactory;
factory
.create_filter(CreateFilterArgs::fixed(
Registry::default(),
Some(&serde_yaml::from_str(config).unwrap()),
))
.unwrap()
}
fn get_response_addresses(
filter: &dyn Filter,
input_addresses: &[SocketAddr],
) -> Vec<SocketAddr> {
filter
.read(ReadContext::new(
Endpoints::new(
input_addresses
.iter()
.map(|addr| Endpoint::from_address(*addr))
.collect(),
)
.unwrap()
.into(),
"127.0.0.1:8080".parse().unwrap(),
vec![],
))
.unwrap()
.endpoints
.iter()
.map(|ep| ep.address)
.collect::<Vec<_>>()
}
#[test]
fn convert_proto_config() {
let test_cases = vec![
(
"RandomPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::Random as i32,
}),
},
Some(Config {
policy: Policy::Random,
}),
),
(
"RoundRobinPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::RoundRobin as i32,
}),
},
Some(Config {
policy: Policy::RoundRobin,
}),
),
(
"should fail when invalid policy is provided",
ProtoConfig {
policy: Some(PolicyValue { value: 42 }),
},
None,
),
(
"should use correct default values",
ProtoConfig { policy: None },
Some(Config {
policy: Policy::default(),
}),
),
];
for (name, proto_config, expected) in test_cases {
let result = Config::try_from(proto_config);
assert_eq!(
result.is_err(),
expected.is_none(),
"{}: error expectation does not match",
name
);
if let Some(expected) = expected {
assert_eq!(expected, result.unwrap(), "{}", name);
}
}
}
#[test]
fn round_robin_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: ROUND_ROBIN
";
let filter = create_filter(yaml);
// Check that we repeat the same addresses in sequence forever.
let expected_sequence = addresses.iter().map(|addr| vec![*addr]).collect::<Vec<_>>();
for _ in 0..10 {
assert_eq!(
expected_sequence,
(0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>()
);
}
}
#[test]
fn random_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: RANDOM
";
let filter = create_filter(yaml);
// Run a few selection rounds through the addresses.
let mut result_sequences = vec![];
for _ in 0..10 {
let sequence = (0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>();
result_sequences.push(sequence);
}
// Check that every address was chosen at least once.
assert_eq!(
addresses.into_iter().collect::<HashSet<_>>(),
result_sequences
.clone()
.into_iter()
.flatten()
.flatten()
.collect::<HashSet<_>>(),
);
// Check that there is at least one different sequence of addresses.
assert!(
&result_sequences[1..]
.iter()
.any(|seq| seq != &result_sequences[0]),
"the same sequence of addresses were chosen for random load balancer"
);
}
}
| Config | identifier_name |
load_balancer.rs | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::convert::TryFrom;
use std::sync::atomic::{AtomicUsize, Ordering};
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use crate::{config::UpstreamEndpoints, filters::prelude::*, map_proto_enum};
crate::include_proto!("quilkin.extensions.filters.load_balancer.v1alpha1");
use self::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::Policy as ProtoPolicy, LoadBalancer as ProtoConfig,
};
/// Policy represents how a [`LoadBalancerFilter`] distributes
/// packets across endpoints.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq)]
pub enum Policy {
/// Send packets to each endpoint in turn.
#[serde(rename = "ROUND_ROBIN")]
RoundRobin,
/// Send packets to endpoints chosen at random.
#[serde(rename = "RANDOM")]
Random,
}
impl Default for Policy {
fn default() -> Self {
Policy::RoundRobin
}
}
/// Config represents configuration for a [`LoadBalancerFilter`].
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Config {
#[serde(default)]
policy: Policy,
}
impl TryFrom<ProtoConfig> for Config {
type Error = ConvertProtoConfigError;
fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> {
let policy = p
.policy
.map(|policy| {
map_proto_enum!(
value = policy.value,
field = "policy",
proto_enum_type = ProtoPolicy,
target_enum_type = Policy,
variants = [RoundRobin, Random]
)
})
.transpose()?
.unwrap_or_else(Policy::default);
Ok(Self { policy })
}
}
/// EndpointChooser chooses from a set of endpoints that a proxy is connected to.
trait EndpointChooser: Send + Sync {
/// choose_endpoints asks for the next endpoint(s) to use.
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints);
}
/// RoundRobinEndpointChooser chooses endpoints in round-robin order.
pub struct RoundRobinEndpointChooser {
next_endpoint: AtomicUsize,
}
impl RoundRobinEndpointChooser {
fn new() -> Self {
RoundRobinEndpointChooser {
next_endpoint: AtomicUsize::new(0),
}
}
}
impl EndpointChooser for RoundRobinEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
let count = self.next_endpoint.fetch_add(1, Ordering::Relaxed);
// Note: the expect below cannot fail because count % num_endpoints is always in range.
let num_endpoints = endpoints.size();
endpoints.keep(count % num_endpoints)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// RandomEndpointChooser chooses endpoints in random order.
pub struct RandomEndpointChooser;
impl EndpointChooser for RandomEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
// Note: the expect below cannot fail because idx is sampled within 0..endpoints.size().
let idx = thread_rng().gen_range(0..endpoints.size());
endpoints.keep(idx)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// Creates instances of LoadBalancerFilter.
#[derive(Default)]
pub struct LoadBalancerFilterFactory;
/// LoadBalancerFilter load balances packets over the upstream endpoints.
#[crate::filter("quilkin.extensions.filters.load_balancer.v1alpha1.LoadBalancer")]
struct LoadBalancerFilter {
endpoint_chooser: Box<dyn EndpointChooser>,
}
impl FilterFactory for LoadBalancerFilterFactory {
fn name(&self) -> &'static str {
LoadBalancerFilter::FILTER_NAME
}
fn create_filter(&self, args: CreateFilterArgs) -> Result<Box<dyn Filter>, Error> {
let config: Config = self
.require_config(args.config)?
.deserialize::<Config, ProtoConfig>(self.name())?;
let endpoint_chooser: Box<dyn EndpointChooser> = match config.policy {
Policy::RoundRobin => Box::new(RoundRobinEndpointChooser::new()),
Policy::Random => Box::new(RandomEndpointChooser),
};
Ok(Box::new(LoadBalancerFilter { endpoint_chooser }))
}
}
impl Filter for LoadBalancerFilter {
fn read(&self, mut ctx: ReadContext) -> Option<ReadResponse> {
self.endpoint_chooser.choose_endpoints(&mut ctx.endpoints);
Some(ctx.into())
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::convert::TryFrom;
use std::net::SocketAddr;
use super::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::{Policy as ProtoPolicy, PolicyValue},
LoadBalancer as ProtoConfig,
};
use super::{Config, Policy};
use crate::cluster::Endpoint;
use crate::config::Endpoints;
use crate::filters::{
extensions::load_balancer::LoadBalancerFilterFactory, CreateFilterArgs, Filter,
FilterFactory, ReadContext,
};
use prometheus::Registry;
fn create_filter(config: &str) -> Box<dyn Filter> {
let factory = LoadBalancerFilterFactory;
factory
.create_filter(CreateFilterArgs::fixed(
Registry::default(),
Some(&serde_yaml::from_str(config).unwrap()),
))
.unwrap()
}
fn get_response_addresses(
filter: &dyn Filter,
input_addresses: &[SocketAddr],
) -> Vec<SocketAddr> {
filter
.read(ReadContext::new(
Endpoints::new(
input_addresses
.iter()
.map(|addr| Endpoint::from_address(*addr))
.collect(),
)
.unwrap()
.into(),
"127.0.0.1:8080".parse().unwrap(),
vec![],
))
.unwrap()
.endpoints
.iter()
.map(|ep| ep.address)
.collect::<Vec<_>>()
}
#[test] | policy: Some(PolicyValue {
value: ProtoPolicy::Random as i32,
}),
},
Some(Config {
policy: Policy::Random,
}),
),
(
"RoundRobinPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::RoundRobin as i32,
}),
},
Some(Config {
policy: Policy::RoundRobin,
}),
),
(
"should fail when invalid policy is provided",
ProtoConfig {
policy: Some(PolicyValue { value: 42 }),
},
None,
),
(
"should use correct default values",
ProtoConfig { policy: None },
Some(Config {
policy: Policy::default(),
}),
),
];
for (name, proto_config, expected) in test_cases {
let result = Config::try_from(proto_config);
assert_eq!(
result.is_err(),
expected.is_none(),
"{}: error expectation does not match",
name
);
if let Some(expected) = expected {
assert_eq!(expected, result.unwrap(), "{}", name);
}
}
}
#[test]
fn round_robin_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: ROUND_ROBIN
";
let filter = create_filter(yaml);
// Check that we repeat the same addresses in sequence forever.
let expected_sequence = addresses.iter().map(|addr| vec![*addr]).collect::<Vec<_>>();
for _ in 0..10 {
assert_eq!(
expected_sequence,
(0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>()
);
}
}
#[test]
fn random_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: RANDOM
";
let filter = create_filter(yaml);
// Run a few selection rounds through the addresses.
let mut result_sequences = vec![];
for _ in 0..10 {
let sequence = (0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>();
result_sequences.push(sequence);
}
// Check that every address was chosen at least once.
assert_eq!(
addresses.into_iter().collect::<HashSet<_>>(),
result_sequences
.clone()
.into_iter()
.flatten()
.flatten()
.collect::<HashSet<_>>(),
);
// Check that there is at least one different sequence of addresses.
assert!(
&result_sequences[1..]
.iter()
.any(|seq| seq != &result_sequences[0]),
"the same sequence of addresses were chosen for random load balancer"
);
}
} | fn convert_proto_config() {
let test_cases = vec![
(
"RandomPolicy",
ProtoConfig { | random_line_split |
usher.go | /*
usher is a tiny personal url shortener.
This library provides the maintenance functions for our simple
database of code => url mappings (a yaml file in
filepath.Join(os.UserConfigDir(), "usher")).
*/
package usher
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
yaml "gopkg.in/yaml.v3"
)
const configfile = "usher.yml"
const indexCode = "INDEX"
// Random Code generation constants
const minRandomCodeLen = 5
const maxRandomCodeLen = 8
const digits = "23456789" // omit 0 and 1 as easily confused with o and l
const chars = "abcdefghijkmnpqrstuvwxyz" // omit o and l as easily confused with 0 and 1
// Errors
var (
ErrNotFound = errors.New("not found")
ErrCodeExists = errors.New("code already used")
ErrNoChange = errors.New("mapping unchanged")
ErrPushTypeUnconfigured = errors.New("config backend type is unconfigured")
ErrPushTypeBad = errors.New("config backend type is bad")
)
type DB struct {
Root string // full path to usher root directory containing databases
Domain string // fully-qualified domain whose mappings we want
DBPath string // full path to database for Domain
ConfigPath string // full path to usher config file
}
type Entry struct {
Code string
Url string
}
type ConfigEntry struct {
Type string `yaml:"type"`
AWSKey string `yaml:"aws_key,omitempty"`
AWSSecret string `yaml:"aws_secret,omitempty"`
AWSRegion string `yaml:"aws_region,omitempty"`
}
// NewDB creates a DB struct with members derived from parameters,
// the environment, or defaults (in that order). It does no checking
// that the values produced are sane or exist on the filesystem.
func NewDB(domain string) (*DB, error) {
// Get root
root := os.Getenv("USHER_ROOT")
if root == "" {
// If USHER_ROOT is unset, check if there is an usher.yml in the cwd
stat, err := os.Stat("usher.yml")
if err == nil && !stat.IsDir() |
}
if root == "" {
// If root is still unset, default to "os.UserConfigDir()/usher"
configDir, err := os.UserConfigDir()
if err != nil {
return nil, err
}
root = filepath.Join(configDir, "usher")
}
// Derive domain if not set - check for USHER_DOMAIN in environment
if domain == "" {
domain = os.Getenv("USHER_DOMAIN")
}
// Else infer the domain if only one database exists
if domain == "" {
matches, _ := filepath.Glob(filepath.Join(root, "*.*.yml"))
if len(matches) == 1 {
// Exactly one match - strip .yml suffix to get domain
re := regexp.MustCompile(`.yml$`)
domain = re.ReplaceAllLiteralString(filepath.Base(matches[0]), "")
}
}
// Else give up with an error
if domain == "" {
return nil, errors.New("Domain not passed as parameter or set in env USHER_DOMAIN")
}
// Set DBPath
dbpath := filepath.Join(root, domain+".yml")
// Set ConfigPath
configpath := filepath.Join(root, configfile)
return &DB{Root: root, Domain: domain, DBPath: dbpath, ConfigPath: configpath}, nil
}
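// Usage sketch (hypothetical paths; resolution order is parameter, then
// environment, then defaults):
//
//   // with USHER_ROOT=/home/alice/.config/usher in the environment:
//   db, _ := NewDB("example.me")
//   // db.DBPath == "/home/alice/.config/usher/example.me.yml"
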
// Init checks and creates the following, if they don't exist:
// - an usher root directory
// - an usher database for the db.Domain
// - an entry in the user config file for db.Domain
func (db *DB) Init() (dbCreated bool, err error) {
dbCreated = false
// Ensure root exists
err = os.MkdirAll(db.Root, 0755)
if err != nil {
return dbCreated, err
}
// Ensure database exists
_, err = os.Stat(db.DBPath)
if err == nil {
return dbCreated, nil // exists
}
if err != nil && !os.IsNotExist(err) {
return dbCreated, err // unexpected error
}
// Database does not exist - create
fh, err := os.Create(db.DBPath)
if err != nil {
return dbCreated, err
}
fh.Close()
dbCreated = true
// Ensure configfile exists
_, err = os.Stat(db.ConfigPath)
if err == nil {
_, err := db.readConfig()
if err != nil {
if err != ErrNotFound {
return dbCreated, err
}
}
err = db.appendConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
} else {
// Create a placeholder config file for domain
err = db.writeConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
}
return dbCreated, nil
}
// List returns the set of database entries whose code matches glob
func (db *DB) List(glob string) ([]Entry, error) {
// FIXME: first-pass - ignore glob
mappings, err := db.readDB()
if err != nil {
return nil, err
}
// Extract codes and sort
codes := make([]string, len(mappings))
i := 0
for code := range mappings {
codes[i] = code
i++
}
sort.Strings(codes)
// Compile entries
var entries = make([]Entry, len(mappings))
i = 0
for _, code := range codes {
entries[i] = Entry{Code: code, Url: mappings[code]}
i++
}
return entries, nil
}
// Add a mapping for url and code to the database.
// If code is missing, a random code will be generated and returned.
func (db *DB) Add(url, code string) (string, error) {
mappings, err := db.readDB()
if err != nil {
return "", err
}
if code == "" {
code = randomCode(mappings)
} else {
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// Check whether code is already used
dburl, exists := mappings[code]
if exists {
if dburl == url {
// Trying to re-add the same url is not an error, just a noop
return code, nil
}
return code, ErrCodeExists
}
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return code, err
}
return code, nil
}
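// For example (the generated code is random; the value below is illustrative):
//
//   code, err := db.Add("https://example.com/some/long/url", "")
//   // code might be "7kmqz"; re-adding the same url under the same code is a noop
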
// Update an existing mapping in the database, changing the URL.
func (db *DB) Update(url, code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// If code is missing, abort
dburl, exists := mappings[code]
if !exists {
return ErrNotFound
}
// Trying to update to the same url is not an error, just a noop
if dburl == url {
return nil
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Remove the mapping with code from the database
// Returns ErrNotFound if code does not exist in the database
func (db *DB) Remove(code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
_, exists := mappings[code]
if !exists {
return ErrNotFound
}
delete(mappings, code)
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Push syncs all current mappings with the backend configured for db.Domain
// in db.ConfigPath
func (db *DB) Push() error {
config, err := db.readConfig()
if err != nil {
return err
}
if config.Type == "" {
return fmt.Errorf("no 'type' field found for %q in config %q\n",
db.Domain, db.ConfigPath)
}
switch config.Type {
case "s3":
err = db.pushS3(config)
if err != nil {
return err
}
case "render":
err = db.pushRender()
if err != nil {
return err
}
case "unconfigured":
return ErrPushTypeUnconfigured
default:
return fmt.Errorf("invalid config backend type %q found for %q: %w",
config.Type, db.Domain, ErrPushTypeBad)
}
return nil
}
// readDB is a utility function to read all mappings from db.DBPath
// and return as a go map
func (db *DB) readDB() (map[string]string, error) {
data, err := ioutil.ReadFile(db.DBPath)
if err != nil {
return nil, err
}
var mappings map[string]string
err = yaml.Unmarshal(data, &mappings)
if err != nil {
return nil, err
}
if len(mappings) == 0 {
mappings = make(map[string]string)
}
return mappings, nil
}
// writeDB is a utility function to write mappings (as yaml) to db.DBPath
func (db *DB) writeDB(mappings map[string]string) error {
var data []byte
var err error
if len(mappings) > 0 {
data, err = yaml.Marshal(mappings)
if err != nil {
return err
}
}
tmpfile := db.DBPath + ".tmp"
err = ioutil.WriteFile(tmpfile, data, 0644)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.DBPath)
if err != nil {
return err
}
return nil
}
// readConfig is a utility function to read the config entry for
// db.Domain from db.ConfigPath file
func (db *DB) readConfig() (*ConfigEntry, error) {
data, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return nil, err
}
var entries map[string]ConfigEntry
err = yaml.Unmarshal(data, &entries)
if err != nil {
return nil, err
}
entry, exists := entries[db.Domain]
if !exists {
return nil, ErrNotFound
}
return &entry, nil
}
// writeConfigString is a utility function to write data to db.ConfigPath
func (db *DB) writeConfigString(data string) error {
tmpfile := db.ConfigPath + ".tmp"
err := ioutil.WriteFile(tmpfile, []byte(data), 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
}
// appendConfigString is a utility function to write data to db.ConfigPath
func (db *DB) appendConfigString(data string) error {
config, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return err
}
config = append(config, []byte(data)...)
tmpfile := db.ConfigPath + ".tmp"
err = ioutil.WriteFile(tmpfile, config, 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
}
// randomCode is a utility function to generate a random code
// and check that it doesn't exist in mappings.
// Random codes use the following pattern: 1 digit, then 4-7
// lowercase ascii characters. This usually allows them to be
// relatively easily distinguished from explicit codes, while
// still being easy to communicate orally.
func randomCode(mappings map[string]string) string {
rand.Seed(time.Now().UnixNano())
var b strings.Builder
b.WriteByte(digits[rand.Intn(len(digits))])
for i := 1; i < maxRandomCodeLen; i++ {
b.WriteByte(chars[rand.Intn(len(chars))])
// If long enough, check if exists in mappings, and return if not
if i+1 >= minRandomCodeLen {
s := b.String()
if _, exists := mappings[s]; !exists {
return s
}
}
}
// Failed to find an unused code? Just retry?
return randomCode(mappings)
}
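// Illustrative only (output depends on the time-based seed): with an empty
// mappings table, randomCode might return "7kmqz", i.e. one digit from
// `digits` followed by at least four characters from `chars`.
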
func (db *DB) configPlaceholder() string {
return db.Domain + `:
type: unconfigured
# Replace the line above with one of the 'type' sections below for the backend
# you wish to use.
# 'render' uses render.com as a backend, and needs no additional config here.
# See https://github.com/gavincarr/usher/blob/master/Render.md for render configuration details.
# type: render
# 's3' uses Amazon S3 as a backend, and requires the 3 'aws_*' parameters below.
# See https://github.com/gavincarr/usher/blob/master/S3.md for full S3 configuration details.
# type: s3
# aws_key: foo
# aws_secret: bar
# aws_region: us-east-1
`
}
| {
cwd, err := os.Getwd()
if err == nil {
root = cwd
}
} | conditional_block |
usher.go | /*
usher is a tiny personal url shortener.
This library provides the maintenance functions for our simple
database of code => url mappings (a yaml file in
filepath.Join(os.UserConfigDir(), "usher")).
*/
package usher
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
yaml "gopkg.in/yaml.v3"
)
const configfile = "usher.yml"
const indexCode = "INDEX"
// Random Code generation constants
const minRandomCodeLen = 5
const maxRandomCodeLen = 8
const digits = "23456789" // omit 0 and 1 as easily confused with o and l
const chars = "abcdefghijkmnpqrstuvwxyz" // omit o and l as easily confused with 0 and 1
// Errors
var (
ErrNotFound = errors.New("not found")
ErrCodeExists = errors.New("code already used")
ErrNoChange = errors.New("mapping unchanged")
ErrPushTypeUnconfigured = errors.New("config backend type is unconfigured")
ErrPushTypeBad = errors.New("config backend type is bad")
)
type DB struct {
Root string // full path to usher root directory containing databases
Domain string // fully-qualified domain whose mappings we want
DBPath string // full path to database for Domain
ConfigPath string // full path to usher config file
}
type Entry struct {
Code string
Url string
}
type ConfigEntry struct {
Type string `yaml:"type"`
AWSKey string `yaml:"aws_key,omitempty"`
AWSSecret string `yaml:"aws_secret,omitempty"`
AWSRegion string `yaml:"aws_region,omitempty"`
}
// NewDB creates a DB struct with members derived from parameters,
// the environment, or defaults (in that order). It does no checking
// that the values produced are sane or exist on the filesystem.
func NewDB(domain string) (*DB, error) {
// Get root
root := os.Getenv("USHER_ROOT")
if root == "" {
// If USHER_ROOT is unset, check if there is an usher.yml in the cwd
stat, err := os.Stat("usher.yml")
if err == nil && !stat.IsDir() {
cwd, err := os.Getwd()
if err == nil {
root = cwd
}
}
}
if root == "" {
// If root is still unset, default to "os.UserConfigDir()/usher"
configDir, err := os.UserConfigDir()
if err != nil {
return nil, err
}
root = filepath.Join(configDir, "usher")
}
// Derive domain if not set - check for USHER_DOMAIN in environment
if domain == "" {
domain = os.Getenv("USHER_DOMAIN")
}
// Else infer the domain if only one database exists
if domain == "" {
matches, _ := filepath.Glob(filepath.Join(root, "*.*.yml"))
if len(matches) == 1 {
// Exactly one match - strip .yml suffix to get domain
re := regexp.MustCompile(`.yml$`)
domain = re.ReplaceAllLiteralString(filepath.Base(matches[0]), "")
}
}
// Else give up with an error
if domain == "" {
return nil, errors.New("Domain not passed as parameter or set in env USHER_DOMAIN")
}
// Set DBPath
dbpath := filepath.Join(root, domain+".yml")
// Set ConfigPath
configpath := filepath.Join(root, configfile)
return &DB{Root: root, Domain: domain, DBPath: dbpath, ConfigPath: configpath}, nil
}
// Init checks and creates the following, if they don't exist:
// - an usher root directory
// - an usher database for the db.Domain
// - an entry in the user config file for db.Domain
func (db *DB) Init() (dbCreated bool, err error) {
dbCreated = false
// Ensure root exists
err = os.MkdirAll(db.Root, 0755)
if err != nil {
return dbCreated, err
}
// Ensure database exists
_, err = os.Stat(db.DBPath)
if err == nil {
return dbCreated, nil // exists
}
if err != nil && !os.IsNotExist(err) {
return dbCreated, err // unexpected error
}
// Database does not exist - create
fh, err := os.Create(db.DBPath)
if err != nil {
return dbCreated, err
}
fh.Close()
dbCreated = true
// Ensure configfile exists
_, err = os.Stat(db.ConfigPath)
if err == nil {
_, err := db.readConfig()
if err != nil {
if err != ErrNotFound {
return dbCreated, err
}
}
err = db.appendConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
} else {
// Create a placeholder config file for domain
err = db.writeConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
}
return dbCreated, nil
}
// List returns the set of database entries whose code matches glob
func (db *DB) List(glob string) ([]Entry, error) {
// FIXME: first-pass - ignore glob
mappings, err := db.readDB()
if err != nil {
return nil, err
}
// Extract codes and sort
codes := make([]string, len(mappings))
i := 0
for code := range mappings {
codes[i] = code
i++
}
sort.Strings(codes)
// Compile entries
var entries = make([]Entry, len(mappings))
i = 0
for _, code := range codes {
entries[i] = Entry{Code: code, Url: mappings[code]}
i++
}
return entries, nil
}
// Add a mapping for url and code to the database.
// If code is missing, a random code will be generated and returned.
func (db *DB) Add(url, code string) (string, error) {
mappings, err := db.readDB()
if err != nil {
return "", err
}
if code == "" {
code = randomCode(mappings)
} else {
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// Check whether code is already used
dburl, exists := mappings[code]
if exists {
if dburl == url {
// Trying to re-add the same url is not an error, just a noop
return code, nil
}
return code, ErrCodeExists
}
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return code, err
}
return code, nil
}
// Update an existing mapping in the database, changing the URL.
func (db *DB) Update(url, code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// If code is missing, abort
dburl, exists := mappings[code]
if !exists {
return ErrNotFound
}
// Trying to update to the same url is not an error, just a noop
if dburl == url {
return nil
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Remove the mapping with code from the database
// Returns ErrNotFound if code does not exist in the database
func (db *DB) Remove(code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
_, exists := mappings[code]
if !exists {
return ErrNotFound
}
delete(mappings, code)
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Push syncs all current mappings with the backend configured for db.Domain
// in db.ConfigPath
func (db *DB) Push() error {
config, err := db.readConfig()
if err != nil {
return err
}
if config.Type == "" {
return fmt.Errorf("no 'type' field found for %q in config %q\n",
db.Domain, db.ConfigPath)
}
switch config.Type {
case "s3":
err = db.pushS3(config)
if err != nil {
return err
}
case "render":
err = db.pushRender()
if err != nil {
return err
}
case "unconfigured":
return ErrPushTypeUnconfigured
default:
return fmt.Errorf("invalid config backend type %q found for %q: %w",
config.Type, db.Domain, ErrPushTypeBad)
}
return nil
}
// readDB is a utility function to read all mappings from db.DBPath
// and return as a go map
func (db *DB) readDB() (map[string]string, error) {
data, err := ioutil.ReadFile(db.DBPath)
if err != nil {
return nil, err
}
var mappings map[string]string
err = yaml.Unmarshal(data, &mappings)
if err != nil {
return nil, err
}
if len(mappings) == 0 {
mappings = make(map[string]string)
}
return mappings, nil
}
// writeDB is a utility function to write mappings (as yaml) to db.DBPath
func (db *DB) writeDB(mappings map[string]string) error {
var data []byte
var err error
if len(mappings) > 0 {
data, err = yaml.Marshal(mappings)
if err != nil {
return err
}
}
tmpfile := db.DBPath + ".tmp"
err = ioutil.WriteFile(tmpfile, data, 0644)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.DBPath)
if err != nil {
return err
}
return nil
}
// readConfig is a utility function to read the config entry for
// db.Domain from db.ConfigPath file
func (db *DB) readConfig() (*ConfigEntry, error) {
data, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return nil, err
}
var entries map[string]ConfigEntry
err = yaml.Unmarshal(data, &entries)
if err != nil {
return nil, err
}
entry, exists := entries[db.Domain]
if !exists {
return nil, ErrNotFound
}
return &entry, nil
}
// writeConfigString is a utility function to write data to db.ConfigPath
func (db *DB) writeConfigString(data string) error {
tmpfile := db.ConfigPath + ".tmp"
err := ioutil.WriteFile(tmpfile, []byte(data), 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
}
// appendConfigString is a utility function to write data to db.ConfigPath
func (db *DB) appendConfigString(data string) error |
// randomCode is a utility function to generate a random code
// and check that it doesn't exist in mappings.
// Random codes use the following pattern: 1 digit, then 4-7
// lowercase ascii characters. This usually allows them to be
// relatively easily distinguished from explicit codes, while
// still being easy to communicate orally.
func randomCode(mappings map[string]string) string {
rand.Seed(time.Now().UnixNano())
var b strings.Builder
b.WriteByte(digits[rand.Intn(len(digits))])
for i := 1; i < maxRandomCodeLen; i++ {
b.WriteByte(chars[rand.Intn(len(chars))])
// If long enough, check if exists in mappings, and return if not
if i+1 >= minRandomCodeLen {
s := b.String()
if _, exists := mappings[s]; !exists {
return s
}
}
}
// Failed to find an unused code? Just retry?
return randomCode(mappings)
}
func (db *DB) configPlaceholder() string {
return db.Domain + `:
type: unconfigured
# Replace the line above with one of the 'type' sections below for the backend
# you wish to use.
# 'render' uses render.com as a backend, and needs no additional config here.
# See https://github.com/gavincarr/usher/blob/master/Render.md for render configuration details.
# type: render
# 's3' uses Amazon S3 as a backend, and requires the 3 'aws_*' parameters below.
# See https://github.com/gavincarr/usher/blob/master/S3.md for full S3 configuration details.
# type: s3
# aws_key: foo
# aws_secret: bar
# aws_region: us-east-1
`
}
| {
config, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return err
}
config = append(config, []byte(data)...)
tmpfile := db.ConfigPath + ".tmp"
err = ioutil.WriteFile(tmpfile, config, 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
} | identifier_body |
usher.go | /*
usher is a tiny personal url shortener.
This library provides the maintenance functions for our simple
database of code => url mappings (a yaml file in
filepath.Join(os.UserConfigDir(), "usher")).
*/
package usher
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
yaml "gopkg.in/yaml.v3"
)
const configfile = "usher.yml"
const indexCode = "INDEX"
// Random Code generation constants
const minRandomCodeLen = 5
const maxRandomCodeLen = 8
const digits = "23456789" // omit 0 and 1 as easily confused with o and l
const chars = "abcdefghijkmnpqrstuvwxyz" // omit o and l as easily confused with 0 and 1
// Errors
var (
ErrNotFound = errors.New("not found")
ErrCodeExists = errors.New("code already used")
ErrNoChange = errors.New("mapping unchanged")
ErrPushTypeUnconfigured = errors.New("config backend type is unconfigured")
ErrPushTypeBad = errors.New("config backend type is bad")
)
type DB struct {
Root string // full path to usher root directory containing databases
Domain string // fully-qualified domain whose mappings we want
DBPath string // full path to database for Domain
ConfigPath string // full path to usher config file
}
type Entry struct {
Code string
Url string
}
type ConfigEntry struct {
Type string `yaml:"type"`
AWSKey string `yaml:"aws_key,omitempty"`
AWSSecret string `yaml:"aws_secret,omitempty"`
AWSRegion string `yaml:"aws_region,omitempty"`
}
// NewDB creates a DB struct with members derived from parameters,
// the environment, or defaults (in that order). It does no checking
// that the values produced are sane or exist on the filesystem.
func NewDB(domain string) (*DB, error) {
// Get root
root := os.Getenv("USHER_ROOT")
if root == "" {
// If USHER_ROOT is unset, check if there is an usher.yml in the cwd
stat, err := os.Stat("usher.yml")
if err == nil && !stat.IsDir() {
cwd, err := os.Getwd()
if err == nil {
root = cwd
}
}
}
if root == "" {
// If root is still unset, default to "os.UserConfigDir()/usher"
configDir, err := os.UserConfigDir()
if err != nil {
return nil, err
}
root = filepath.Join(configDir, "usher")
}
// Derive domain if not set - check for USHER_DOMAIN in environment
if domain == "" {
domain = os.Getenv("USHER_DOMAIN")
}
// Else infer the domain if only one database exists
if domain == "" {
matches, _ := filepath.Glob(filepath.Join(root, "*.*.yml"))
if len(matches) == 1 {
// Exactly one match - strip .yml suffix to get domain
re := regexp.MustCompile(`.yml$`)
domain = re.ReplaceAllLiteralString(filepath.Base(matches[0]), "")
}
}
// Else give up with an error
if domain == "" {
return nil, errors.New("Domain not passed as parameter or set in env USHER_DOMAIN")
}
// Set DBPath
dbpath := filepath.Join(root, domain+".yml")
// Set ConfigPath
configpath := filepath.Join(root, configfile)
return &DB{Root: root, Domain: domain, DBPath: dbpath, ConfigPath: configpath}, nil
}
// Init checks and creates the following, if they don't exist:
// - an usher root directory
// - an usher database for the db.Domain
// - an entry in the user config file for db.Domain
func (db *DB) Init() (dbCreated bool, err error) {
dbCreated = false
// Ensure root exists
err = os.MkdirAll(db.Root, 0755)
if err != nil {
return dbCreated, err
}
// Ensure database exists
_, err = os.Stat(db.DBPath)
if err == nil {
return dbCreated, nil // exists
}
if err != nil && !os.IsNotExist(err) {
return dbCreated, err // unexpected error
}
// Database does not exist - create
fh, err := os.Create(db.DBPath)
if err != nil {
return dbCreated, err
}
fh.Close()
dbCreated = true
// Ensure configfile exists
_, err = os.Stat(db.ConfigPath)
if err == nil {
_, err := db.readConfig()
if err != nil {
if err != ErrNotFound {
return dbCreated, err
}
}
err = db.appendConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
} else {
// Create a placeholder config file for domain
err = db.writeConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
}
return dbCreated, nil
}
// List returns the set of database entries whose code matches glob
func (db *DB) List(glob string) ([]Entry, error) {
// FIXME: first-pass - ignore glob
mappings, err := db.readDB()
if err != nil {
return nil, err
}
// Extract codes and sort
codes := make([]string, len(mappings))
i := 0
for code := range mappings { | // Compile entries
var entries = make([]Entry, len(mappings))
i = 0
for _, code := range codes {
entries[i] = Entry{Code: code, Url: mappings[code]}
i++
}
return entries, nil
}
// Add a mapping for url and code to the database.
// If code is missing, a random code will be generated and returned.
func (db *DB) Add(url, code string) (string, error) {
mappings, err := db.readDB()
if err != nil {
return "", err
}
if code == "" {
code = randomCode(mappings)
} else {
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// Check whether code is already used
dburl, exists := mappings[code]
if exists {
if dburl == url {
// Trying to re-add the same url is not an error, just a noop
return code, nil
}
return code, ErrCodeExists
}
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return code, err
}
return code, nil
}
// Update an existing mapping in the database, changing the URL.
func (db *DB) Update(url, code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// If code is missing, abort
dburl, exists := mappings[code]
if !exists {
return ErrNotFound
}
// Trying to update to the same url is not an error, just a noop
if dburl == url {
return nil
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Remove the mapping with code from the database
// Returns ErrNotFound if code does not exist in the database
func (db *DB) Remove(code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
_, exists := mappings[code]
if !exists {
return ErrNotFound
}
delete(mappings, code)
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Push syncs all current mappings with the backend configured for db.Domain
// in db.ConfigPath
func (db *DB) Push() error {
config, err := db.readConfig()
if err != nil {
return err
}
if config.Type == "" {
return fmt.Errorf("no 'type' field found for %q in config %q\n",
db.Domain, db.ConfigPath)
}
switch config.Type {
case "s3":
err = db.pushS3(config)
if err != nil {
return err
}
case "render":
err = db.pushRender()
if err != nil {
return err
}
case "unconfigured":
return ErrPushTypeUnconfigured
default:
return fmt.Errorf("invalid config backend type %q found for %q: %w",
config.Type, db.Domain, ErrPushTypeBad)
}
return nil
}
// readDB is a utility function to read all mappings from db.DBPath
// and return as a go map
func (db *DB) readDB() (map[string]string, error) {
data, err := ioutil.ReadFile(db.DBPath)
if err != nil {
return nil, err
}
var mappings map[string]string
err = yaml.Unmarshal(data, &mappings)
if err != nil {
return nil, err
}
if len(mappings) == 0 {
mappings = make(map[string]string)
}
return mappings, nil
}
// writeDB is a utility function to write mappings (as yaml) to db.DBPath
func (db *DB) writeDB(mappings map[string]string) error {
var data []byte
var err error
if len(mappings) > 0 {
data, err = yaml.Marshal(mappings)
if err != nil {
return err
}
}
tmpfile := db.DBPath + ".tmp"
err = ioutil.WriteFile(tmpfile, data, 0644)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.DBPath)
if err != nil {
return err
}
return nil
}
// readConfig is a utility function to read the config entry for
// db.Domain from db.ConfigPath file
func (db *DB) readConfig() (*ConfigEntry, error) {
data, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return nil, err
}
var entries map[string]ConfigEntry
err = yaml.Unmarshal(data, &entries)
if err != nil {
return nil, err
}
entry, exists := entries[db.Domain]
if !exists {
return nil, ErrNotFound
}
return &entry, nil
}
// writeConfigString is a utility function to write data to db.ConfigPath
func (db *DB) writeConfigString(data string) error {
tmpfile := db.ConfigPath + ".tmp"
err := ioutil.WriteFile(tmpfile, []byte(data), 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
}
// appendConfigString is a utility function to write data to db.ConfigPath
func (db *DB) appendConfigString(data string) error {
config, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return err
}
config = append(config, []byte(data)...)
tmpfile := db.ConfigPath + ".tmp"
err = ioutil.WriteFile(tmpfile, config, 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
}
// randomCode is a utility function to generate a random code
// and check that it doesn't exist in mappings.
// Random codes use the following pattern: 1 digit, then 4-7
// lowercase ascii characters. This usually allows them to be
// relatively easily distinguished from explicit codes, while
// still being easy to communicate orally.
func randomCode(mappings map[string]string) string {
rand.Seed(time.Now().UnixNano())
var b strings.Builder
b.WriteByte(digits[rand.Intn(len(digits))])
for i := 1; i < maxRandomCodeLen; i++ {
b.WriteByte(chars[rand.Intn(len(chars))])
// If long enough, check if exists in mappings, and return if not
if i+1 >= minRandomCodeLen {
s := b.String()
if _, exists := mappings[s]; !exists {
return s
}
}
}
// Failed to find an unused code? Just retry?
return randomCode(mappings)
}
func (db *DB) configPlaceholder() string {
return db.Domain + `:
type: unconfigured
# Replace the line above with one of the 'type' sections below for the backend
# you wish to use.
# 'render' uses render.com as a backend, and needs no additional config here.
# See https://github.com/gavincarr/usher/blob/master/Render.md for render configuration details.
# type: render
# 's3' uses Amazon S3 as a backend, and requires the 3 'aws_*' parameters below.
# See https://github.com/gavincarr/usher/blob/master/S3.md for full S3 configuration details.
# type: s3
# aws_key: foo
# aws_secret: bar
# aws_region: us-east-1
`
} | codes[i] = code
i++
}
sort.Strings(codes)
| random_line_split |
usher.go | /*
usher is a tiny personal url shortener.
This library provides the maintenance functions for our simple
database of code => url mappings (a yaml file in
filepath.Join(os.UserConfigDir(), "usher")).
*/
package usher
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
yaml "gopkg.in/yaml.v3"
)
const configfile = "usher.yml"
const indexCode = "INDEX"
// Random Code generation constants
const minRandomCodeLen = 5
const maxRandomCodeLen = 8
const digits = "23456789" // omit 0 and 1 as easily confused with o and l
const chars = "abcdefghijkmnpqrstuvwxyz" // omit o and l as easily confused with 0 and 1
// Errors
var (
ErrNotFound = errors.New("not found")
ErrCodeExists = errors.New("code already used")
ErrNoChange = errors.New("mapping unchanged")
ErrPushTypeUnconfigured = errors.New("config backend type is unconfigured")
ErrPushTypeBad = errors.New("config backend type is bad")
)
type DB struct {
Root string // full path to usher root directory containing databases
Domain string // fully-qualified domain whose mappings we want
DBPath string // full path to database for Domain
ConfigPath string // full path to usher config file
}
type Entry struct {
Code string
Url string
}
type ConfigEntry struct {
Type string `yaml:"type"`
AWSKey string `yaml:"aws_key,omitempty"`
AWSSecret string `yaml:"aws_secret,omitempty"`
AWSRegion string `yaml:"aws_region,omitempty"`
}
// NewDB creates a DB struct with members derived from parameters,
// the environment, or defaults (in that order). It does no checking
// that the values produced are sane or exist on the filesystem.
func NewDB(domain string) (*DB, error) {
// Get root
root := os.Getenv("USHER_ROOT")
if root == "" {
// If USHER_ROOT is unset, check if there is an usher.yml in the cwd
stat, err := os.Stat("usher.yml")
if err == nil && !stat.IsDir() {
cwd, err := os.Getwd()
if err == nil {
root = cwd
}
}
}
if root == "" {
// If root is still unset, default to "os.UserConfigDir()/usher"
configDir, err := os.UserConfigDir()
if err != nil {
return nil, err
}
root = filepath.Join(configDir, "usher")
}
// Derive domain if not set - check for USHER_DOMAIN in environment
if domain == "" {
domain = os.Getenv("USHER_DOMAIN")
}
// Else infer the domain if only one database exists
if domain == "" {
matches, _ := filepath.Glob(filepath.Join(root, "*.*.yml"))
if len(matches) == 1 {
// Exactly one match - strip .yml suffix to get domain
re := regexp.MustCompile(`.yml$`)
domain = re.ReplaceAllLiteralString(filepath.Base(matches[0]), "")
}
}
// Else give up with an error
if domain == "" {
return nil, errors.New("Domain not passed as parameter or set in env USHER_DOMAIN")
}
// Set DBPath
dbpath := filepath.Join(root, domain+".yml")
// Set ConfigPath
configpath := filepath.Join(root, configfile)
return &DB{Root: root, Domain: domain, DBPath: dbpath, ConfigPath: configpath}, nil
}
// Init checks and creates the following, if they don't exist:
// - an usher root directory
// - an usher database for the db.Domain
// - an entry in the user config file for db.Domain
func (db *DB) Init() (dbCreated bool, err error) {
dbCreated = false
// Ensure root exists
err = os.MkdirAll(db.Root, 0755)
if err != nil {
return dbCreated, err
}
// Ensure database exists
_, err = os.Stat(db.DBPath)
if err == nil {
return dbCreated, nil // exists
}
if err != nil && !os.IsNotExist(err) {
return dbCreated, err // unexpected error
}
// Database does not exist - create
fh, err := os.Create(db.DBPath)
if err != nil {
return dbCreated, err
}
fh.Close()
dbCreated = true
// Ensure configfile exists
_, err = os.Stat(db.ConfigPath)
if err == nil {
_, err := db.readConfig()
if err != nil {
if err != ErrNotFound {
return dbCreated, err
}
}
err = db.appendConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
} else {
// Create a placeholder config file for domain
err = db.writeConfigString(db.configPlaceholder())
if err != nil {
return dbCreated, err
}
}
return dbCreated, nil
}
// List returns the set of database entries whose code matches glob
func (db *DB) List(glob string) ([]Entry, error) {
// FIXME: first-pass - ignore glob
mappings, err := db.readDB()
if err != nil {
return nil, err
}
// Extract codes and sort
codes := make([]string, len(mappings))
i := 0
for code := range mappings {
codes[i] = code
i++
}
sort.Strings(codes)
// Compile entries
var entries = make([]Entry, len(mappings))
i = 0
for _, code := range codes {
entries[i] = Entry{Code: code, Url: mappings[code]}
i++
}
return entries, nil
}
// Add a mapping for url and code to the database.
// If code is missing, a random code will be generated and returned.
func (db *DB) Add(url, code string) (string, error) {
mappings, err := db.readDB()
if err != nil {
return "", err
}
if code == "" {
code = randomCode(mappings)
} else {
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// Check whether code is already used
dburl, exists := mappings[code]
if exists {
if dburl == url {
// Trying to re-add the same url is not an error, just a noop
return code, nil
}
return code, ErrCodeExists
}
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return code, err
}
return code, nil
}
// Update an existing mapping in the database, changing the URL.
func (db *DB) Update(url, code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
// Check for parameter inversion
reUrl := regexp.MustCompile(`^https?://`)
if !reUrl.MatchString(url) && reUrl.MatchString(code) {
url, code = code, url
}
// If code is missing, abort
dburl, exists := mappings[code]
if !exists {
return ErrNotFound
}
// Trying to update to the same url is not an error, just a noop
if dburl == url {
return nil
}
mappings[code] = url
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Remove the mapping with code from the database
// Returns ErrNotFound if code does not exist in the database
func (db *DB) Remove(code string) error {
mappings, err := db.readDB()
if err != nil {
return err
}
_, exists := mappings[code]
if !exists {
return ErrNotFound
}
delete(mappings, code)
err = db.writeDB(mappings)
if err != nil {
return err
}
return nil
}
// Push syncs all current mappings with the backend configured for db.Domain
// in db.ConfigPath
func (db *DB) Push() error {
config, err := db.readConfig()
if err != nil {
return err
}
if config.Type == "" {
return fmt.Errorf("no 'type' field found for %q in config %q\n",
db.Domain, db.ConfigPath)
}
switch config.Type {
case "s3":
err = db.pushS3(config)
if err != nil {
return err
}
case "render":
err = db.pushRender()
if err != nil {
return err
}
case "unconfigured":
return ErrPushTypeUnconfigured
default:
return fmt.Errorf("invalid config backend type %q found for %q: %w",
config.Type, db.Domain, ErrPushTypeBad)
}
return nil
}
// readDB is a utility function to read all mappings from db.DBPath
// and return as a go map
func (db *DB) readDB() (map[string]string, error) {
data, err := ioutil.ReadFile(db.DBPath)
if err != nil {
return nil, err
}
var mappings map[string]string
err = yaml.Unmarshal(data, &mappings)
if err != nil {
return nil, err
}
if len(mappings) == 0 {
mappings = make(map[string]string)
}
return mappings, nil
}
// writeDB is a utility function to write mappings (as yaml) to db.DBPath
func (db *DB) writeDB(mappings map[string]string) error {
var data []byte
var err error
if len(mappings) > 0 {
data, err = yaml.Marshal(mappings)
if err != nil {
return err
}
}
tmpfile := db.DBPath + ".tmp"
err = ioutil.WriteFile(tmpfile, data, 0644)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.DBPath)
if err != nil {
return err
}
return nil
}
// readConfig is a utility function to read the config entry for
// db.Domain from db.ConfigPath file
func (db *DB) readConfig() (*ConfigEntry, error) {
data, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return nil, err
}
var entries map[string]ConfigEntry
err = yaml.Unmarshal(data, &entries)
if err != nil {
return nil, err
}
entry, exists := entries[db.Domain]
if !exists {
return nil, ErrNotFound
}
return &entry, nil
}
// writeConfigString is a utility function to write data to db.ConfigPath
func (db *DB) writeConfigString(data string) error {
tmpfile := db.ConfigPath + ".tmp"
err := ioutil.WriteFile(tmpfile, []byte(data), 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
}
// appendConfigString is a utility function to write data to db.ConfigPath
func (db *DB) appendConfigString(data string) error {
config, err := ioutil.ReadFile(db.ConfigPath)
if err != nil {
return err
}
config = append(config, []byte(data)...)
tmpfile := db.ConfigPath + ".tmp"
err = ioutil.WriteFile(tmpfile, config, 0600)
if err != nil {
return err
}
err = os.Rename(tmpfile, db.ConfigPath)
if err != nil {
return err
}
return nil
}
// randomCode is a utility function to generate a random code
// and check that it doesn't exist in mappings.
// Random codes use the following pattern: 1 digit, then 4-7
// lowercase ascii characters. This usually allows them to be
// relatively easily distinguished from explicit codes, while
// still being easy to communicate orally.
func | (mappings map[string]string) string {
rand.Seed(time.Now().UnixNano())
var b strings.Builder
b.WriteByte(digits[rand.Intn(len(digits))])
for i := 1; i < maxRandomCodeLen; i++ {
b.WriteByte(chars[rand.Intn(len(chars))])
// If long enough, check if exists in mappings, and return if not
if i+1 >= minRandomCodeLen {
s := b.String()
if _, exists := mappings[s]; !exists {
return s
}
}
}
// Failed to find an unused code? Just retry?
return randomCode(mappings)
}
func (db *DB) configPlaceholder() string {
return db.Domain + `:
type: unconfigured
# Replace the line above with one of the 'type' sections below for the backend
# you wish to use.
# 'render' uses render.com as a backend, and needs no additional config here.
# See https://github.com/gavincarr/usher/blob/master/Render.md for render configuration details.
# type: render
# 's3' uses Amazon S3 as a backend, and requires the 3 'aws_*' parameters below.
# See https://github.com/gavincarr/usher/blob/master/S3.md for full S3 configuration details.
# type: s3
# aws_key: foo
# aws_secret: bar
# aws_region: us-east-1
`
}
| randomCode | identifier_name |
forall.rs | use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
/// Comb enumerates every way to write `n` as an ordered sum `state`,
/// subject to the per-position caps `state[i] <= max[i]`.
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
fn new(n : usize, max : Vec<usize>) -> Self {
let mut state = vec![0;max.len()];
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(max[i],res);
state[i] = cur;
res -= cur;
i += 1;
}
Comb {
max, state, first:true
}
}
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
if i == v.len()-1 { | return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
struct Matches {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next() != None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize , bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1] != 0 && x[i2].state[j2] != 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1] != 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
if x[i2].state[j2] != 0 {
let u1 = c1[i1].0.clone() & c2[j1].0.clone();
let u2 = c1[i2].0.clone() & c2[j2].0.clone();
let u3 = c1[i1].0.clone() & c2[j2].0.clone();
let u4 = c1[i2].0.clone() & c2[j1].0.clone();
if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1 != u4 || u2 != u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1 != u3 || u2 != u4)) {
oldbad = Some((i1,i2,j1,j2));
continue 'outer;
}
}
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if !groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline| !perm_includes(&newline, oldline));
let l2 = lines.len();
if l1 != l2 || lines.iter().all(|oldline|!perm_includes(oldline,&newline)) {
lines.push(newline);
}
}
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc,yc)| !(xc.is_superset(x) && yc.is_superset(y)) );
if unis.len() != len || unis.iter().all(|(xc,yc)| !(x.is_superset(xc) && y.is_superset(yc)) ) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if !h.contains_key(&g[i]){
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if !seen.contains(&newline){
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
} | random_line_split | |
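For readers tracing the Rust rows: Comb enumerates, lazily and in place, every vector state with state[i] <= max[i] and sum equal to n, and Matches stitches those enumerations together. A rough Go re-derivation of the core enumeration, written recursively for readability, is below; it is a sketch of the idea only, does not preserve Comb's iteration order, and none of these names come from the row:

```go
package main

import "fmt"

// compositions visits every vector s with len(s) == len(max),
// 0 <= s[i] <= max[i], and sum(s) == n — the same family of
// states the Rust Comb iterator yields.
func compositions(n int, max []int, visit func([]int)) {
	s := make([]int, len(max))
	var rec func(i, left int)
	rec = func(i, left int) {
		if i == len(max) {
			if left == 0 {
				visit(append([]int(nil), s...)) // hand out a copy
			}
			return
		}
		for v := 0; v <= max[i] && v <= left; v++ {
			s[i] = v
			rec(i+1, left-v)
		}
		s[i] = 0
	}
	rec(0, n)
}

func main() {
	// All ways to split 3 across slots capped at 2, 2, 1.
	compositions(3, []int{2, 2, 1}, func(s []int) { fmt.Println(s) })
}
```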
forall.rs |
use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
fn new(n : usize, max : Vec<usize>) -> Self {
let mut state = vec![0;max.len()];
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(max[i],res);
state[i] = cur;
res -= cur;
i += 1;
}
Comb {
max, state, first:true
}
}
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
if i == v.len()-1 {
return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
struct Matches {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next() != None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize , bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1] != 0 && x[i2].state[j2] != 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1] != 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
if x[i2].state[j2] != 0 |
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if !groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline| !perm_includes(&newline, oldline));
let l2 = lines.len();
if l1 != l2 || lines.iter().all(|oldline|!perm_includes(oldline,&newline)) {
lines.push(newline);
}
}
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc,yc)| !(xc.is_superset(x) && yc.is_superset(y)) );
if unis.len() != len || unis.iter().all(|(xc,yc)| !(x.is_superset(xc) && y.is_superset(yc)) ) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if !h.contains_key(&g[i]){
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if !seen.contains(&newline){
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
}
| {
let u1 = c1[i1].0.clone() & c2[j1].0.clone();
let u2 = c1[i2].0.clone() & c2[j2].0.clone();
let u3 = c1[i1].0.clone() & c2[j2].0.clone();
let u4 = c1[i2].0.clone() & c2[j1].0.clone();
if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1 != u4 || u2 != u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1 != u3 || u2 != u4)) {
oldbad = Some((i1,i2,j1,j2));
continue 'outer;
}
} | conditional_block |
forall.rs |
use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
fn new(n : usize, max : Vec<usize>) -> Self |
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
if i == v.len()-1 {
return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
struct Matches {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next() != None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize , bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1] != 0 && x[i2].state[j2] != 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1] != 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
if x[i2].state[j2] != 0 {
let u1 = c1[i1].0.clone() & c2[j1].0.clone();
let u2 = c1[i2].0.clone() & c2[j2].0.clone();
let u3 = c1[i1].0.clone() & c2[j2].0.clone();
let u4 = c1[i2].0.clone() & c2[j1].0.clone();
if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1 != u4 || u2 != u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1 != u3 || u2 != u4)) {
oldbad = Some((i1,i2,j1,j2));
continue 'outer;
}
}
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if !groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline| !perm_includes(&newline, oldline));
let l2 = lines.len();
if l1 != l2 || lines.iter().all(|oldline|!perm_includes(oldline,&newline)) {
lines.push(newline);
}
}
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc,yc)| !(xc.is_superset(x) && yc.is_superset(y)) );
if unis.len() != len || unis.iter().all(|(xc,yc)| !(x.is_superset(xc) && y.is_superset(yc)) ) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if !h.contains_key(&g[i]){
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if !seen.contains(&newline){
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
}
| {
let mut state = vec![0;max.len()];
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(max[i],res);
state[i] = cur;
res -= cur;
i += 1;
}
Comb {
max, state, first:true
}
} | identifier_body |
forall.rs |
use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
fn new(n : usize, max : Vec<usize>) -> Self {
let mut state = vec![0;max.len()];
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(max[i],res);
state[i] = cur;
res -= cur;
i += 1;
}
Comb {
max, state, first:true
}
}
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
if i == v.len()-1 {
return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
struct | {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next() != None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize , bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1] != 0 && x[i2].state[j2] != 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1] != 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
if x[i2].state[j2] != 0 {
let u1 = c1[i1].0.clone() & c2[j1].0.clone();
let u2 = c1[i2].0.clone() & c2[j2].0.clone();
let u3 = c1[i1].0.clone() & c2[j2].0.clone();
let u4 = c1[i2].0.clone() & c2[j1].0.clone();
if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1 != u4 || u2 != u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1 != u3 || u2 != u4)) {
oldbad = Some((i1,i2,j1,j2));
continue 'outer;
}
}
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if !groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline| !perm_includes(&newline, oldline));
let l2 = lines.len();
if l1 != l2 || lines.iter().all(|oldline|!perm_includes(oldline,&newline)) {
lines.push(newline);
}
}
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc,yc)| !(xc.is_superset(x) && yc.is_superset(y)) );
if unis.len() != len || unis.iter().all(|(xc,yc)| !(x.is_superset(xc) && y.is_superset(yc)) ) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if !h.contains_key(&g[i]){
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if !seen.contains(&newline){
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
}
| Matches | identifier_name |
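The perm_includes helper in the rows above decides whether one constraint line dominates another: it builds a bipartite graph with an edge whenever group i of the first line is a superset of group j of the second, then checks for a perfect matching with Dinic's max-flow. A hedged Go sketch of the same decision via Kuhn's augmenting paths, modeling groups as uint64 bitsets for simplicity (the Rust code uses an arbitrary-width BigNum and a flow library):

```go
package main

import "fmt"

// perfectSuperset reports whether each group of g1 can be paired with a
// distinct group of g2 that it is a superset of — the matching question
// perm_includes answers with max-flow.
func perfectSuperset(g1, g2 []uint64) bool {
	d := len(g1)
	match := make([]int, d) // match[j] = index in g1 matched to g2[j]
	for j := range match {
		match[j] = -1
	}
	var try func(i int, seen []bool) bool
	try = func(i int, seen []bool) bool {
		for j := 0; j < d; j++ {
			// g1[i] ⊇ g2[j] exactly when all of g2[j]'s bits lie in g1[i].
			if !seen[j] && g1[i]&g2[j] == g2[j] {
				seen[j] = true
				if match[j] == -1 || try(match[j], seen) {
					match[j] = i
					return true
				}
			}
		}
		return false
	}
	for i := 0; i < d; i++ {
		if !try(i, make([]bool, d)) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(perfectSuperset([]uint64{0b111, 0b011}, []uint64{0b001, 0b011})) // true
}
```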
api_op_CreateFleet.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates a fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host
// your custom game server or Realtime Servers. Use this operation to configure the
// computing resources for your fleet and provide instructions for running game
// servers on each instance. Most Amazon GameLift fleets can deploy instances to
// multiple locations, including the home Region (where the fleet is created) and
// an optional set of remote locations. Fleets that are created in the following
// Amazon Web Services Regions support multiple locations: us-east-1 (N. Virginia),
// us-west-2 (Oregon), eu-central-1 (Frankfurt), eu-west-1 (Ireland),
// ap-southeast-2 (Sydney), ap-northeast-1 (Tokyo), and ap-northeast-2 (Seoul).
// Fleets that are created in other Amazon GameLift Regions can deploy instances in
// the fleet's home Region only. All fleet instances use the same configuration
// regardless of location; however, you can adjust capacity settings and turn
// auto-scaling on/off for each location. To create a fleet, choose the hardware
// for your instances, specify a game server build or Realtime script to deploy,
// and provide a runtime configuration to direct Amazon GameLift how to start and
// run game servers on each instance in the fleet. Set permissions for inbound
// traffic to your game servers, and enable optional features as needed. When
// creating a multi-location fleet, provide a list of additional remote locations.
// If you need to debug your fleet, fetch logs, view performance metrics or other
// actions on the fleet, create the development fleet with port 22/3389 open. As a
// best practice, we recommend opening ports for remote access only when you need
// them and closing them when you're finished. If successful, this operation
// creates a new Fleet resource and places it in NEW status, which prompts Amazon
// GameLift to initiate the fleet creation workflow (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-all.html#fleets-creation-workflow)
// . Learn more Setting up fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
// Debug fleet creation issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation)
// Multi-location fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
func (c *Client) CreateFleet(ctx context.Context, params *CreateFleetInput, optFns ...func(*Options)) (*CreateFleetOutput, error) |
type CreateFleetInput struct {
// A descriptive label that is associated with a fleet. Fleet names do not need to
// be unique.
//
// This member is required.
Name *string
// Amazon GameLift Anywhere configuration options.
AnywhereConfiguration *types.AnywhereConfiguration
// The unique identifier for a custom game server build to be deployed on fleet
// instances. You can use either the build ID or ARN. The build must be uploaded to
// Amazon GameLift and in READY status. This fleet property cannot be changed
// later.
BuildId *string
// Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon
// GameLift uses the certificates to encrypt traffic between game clients and the
// game servers running on Amazon GameLift. By default, the
// CertificateConfiguration is DISABLED . You can't change this property after you
// create the fleet. Certificate Manager (ACM) certificates expire after 13 months.
// Certificate expiration can cause fleets to fail, preventing players from
// connecting to instances in the fleet. We recommend you replace fleets before 13
// months; consider using fleet aliases for a smooth transition. ACM isn't
// available in all Amazon Web Services regions. A fleet creation request with
// certificate generation enabled in an unsupported Region fails with a 4xx error.
// For more information about the supported Regions, see Supported Regions (https://docs.aws.amazon.com/acm/latest/userguide/acm-regions.html)
// in the Certificate Manager User Guide.
CertificateConfiguration *types.CertificateConfiguration
// The type of compute resource used to host your game servers. You can use your
// own compute resources with Amazon GameLift Anywhere or use Amazon EC2 instances
// with managed Amazon GameLift. By default, this property is set to EC2 .
ComputeType types.ComputeType
// A description for the fleet.
Description *string
// The allowed IP address ranges and port settings that allow inbound traffic to
// access game sessions on this fleet. If the fleet is hosting a custom game build,
// this property must be set before players can connect to game sessions. For
// Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.
EC2InboundPermissions []types.IpPermission
// The Amazon GameLift-supported Amazon EC2 instance type to use for all fleet
// instances. Instance type determines the computing resources that will be used to
// host your game servers, including CPU, memory, storage, and networking capacity.
// See Amazon Elastic Compute Cloud Instance Types (http://aws.amazon.com/ec2/instance-types/)
// for detailed descriptions of Amazon EC2 instance types.
EC2InstanceType types.EC2InstanceType
// Indicates whether to use On-Demand or Spot instances for this fleet. By
// default, this property is set to ON_DEMAND . Learn more about when to use
// On-Demand versus Spot Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot)
// . This property cannot be changed after the fleet is created.
FleetType types.FleetType
// A unique identifier for an IAM role that manages access to your Amazon Web
// Services services. With an instance role ARN set, any application that runs on
// an instance in this fleet can assume the role, including install scripts, server
// processes, and daemons (background processes). Create a role or look up a role's
// ARN by using the IAM dashboard (https://console.aws.amazon.com/iam/) in the
// Amazon Web Services Management Console. Learn more about using on-box
// credentials for your game servers at Access external resources from a game
// server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html)
// . This property cannot be changed after the fleet is created.
InstanceRoleArn *string
// A set of remote locations to deploy additional instances to and manage as part
// of the fleet. This parameter can only be used when creating fleets in Amazon Web
// Services Regions that support multiple locations. You can add any Amazon
// GameLift-supported Amazon Web Services Region as a remote location, in the form
// of an Amazon Web Services Region code such as us-west-2 . To create a fleet with
// instances in the home Region only, don't use this parameter. To use this
// parameter, Amazon GameLift requires you to use your home location in the
// request.
Locations []types.LocationConfiguration
// This parameter is no longer used. To specify where Amazon GameLift should store
// log files once a server process shuts down, use the Amazon GameLift server API
// ProcessReady() and specify one or more directory paths in logParameters . For
// more information, see Initialize the server process (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-initialize)
// in the Amazon GameLift Developer Guide.
LogPaths []string
// The name of an Amazon Web Services CloudWatch metric group to add this fleet
// to. A metric group is used to aggregate the metrics for multiple fleets. You can
// specify an existing metric group name or set a new name to create a new metric
// group. A fleet can be included in only one metric group at a time.
MetricGroups []string
// The status of termination protection for active game sessions on the fleet. By
// default, this property is set to NoProtection . You can also set game session
// protection for an individual game session by calling UpdateGameSession .
// - NoProtection - Game sessions can be terminated during active gameplay as a
// result of a scale-down event.
// - FullProtection - Game sessions in ACTIVE status cannot be terminated during
// a scale-down event.
NewGameSessionProtectionPolicy types.ProtectionPolicy
// Used when peering your Amazon GameLift fleet with a VPC, the unique identifier
// for the Amazon Web Services account that owns the VPC. You can find your account
// ID in the Amazon Web Services Management Console under account settings.
PeerVpcAwsAccountId *string
// A unique identifier for a VPC with resources to be accessed by your Amazon
// GameLift fleet. The VPC must be in the same Region as your fleet. To look up a
// VPC ID, use the VPC Dashboard (https://console.aws.amazon.com/vpc/) in the
// Amazon Web Services Management Console. Learn more about VPC peering in VPC
// Peering with Amazon GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html)
// .
PeerVpcId *string
// A policy that limits the number of game sessions that an individual player can
// create on instances in this fleet within a specified span of time.
ResourceCreationLimitPolicy *types.ResourceCreationLimitPolicy
// Instructions for how to launch and maintain server processes on instances in
// the fleet. The runtime configuration defines one or more server process
// configurations, each identifying a build executable or Realtime script file and
// the number of processes of that type to run concurrently. The
// RuntimeConfiguration parameter is required unless the fleet is being configured
// using the older parameters ServerLaunchPath and ServerLaunchParameters , which
// are still supported for backward compatibility.
RuntimeConfiguration *types.RuntimeConfiguration
// The unique identifier for a Realtime configuration script to be deployed on
// fleet instances. You can use either the script ID or ARN. Scripts must be
// uploaded to Amazon GameLift prior to creating the fleet. This fleet property
// cannot be changed later.
ScriptId *string
// This parameter is no longer used. Specify server launch parameters using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchParameters *string
// This parameter is no longer used. Specify a server launch path using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchPath *string
// A list of labels to assign to the new fleet resource. Tags are
// developer-defined key-value pairs. Tagging Amazon Web Services resources is
// useful for resource management, access management and cost allocation. For more
// information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
// in the Amazon Web Services General Reference.
Tags []types.Tag
noSmithyDocumentSerde
}
type CreateFleetOutput struct {
// The properties for the new fleet, including the current status. All fleets are
// placed in NEW status on creation.
FleetAttributes *types.FleetAttributes
// The fleet's locations and life-cycle status of each location. For new fleets,
// the status of all locations is set to NEW . During fleet creation, Amazon
// GameLift updates each location status as instances are deployed there and
// prepared for game hosting. This list includes an entry for the fleet's home
// Region. For fleets with no remote locations, only one entry, representing the
// home Region, is returned.
LocationStates []types.LocationState
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateFleetMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateFleet{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateFleet{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateFleetResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateFleetValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateFleet(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateFleet(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "CreateFleet",
}
}
type opCreateFleetResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateFleetResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateFleetResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCreateFleetResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opCreateFleetResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| {
if params == nil {
params = &CreateFleetInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateFleet", params, optFns, c.addOperationCreateFleetMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateFleetOutput)
out.ResultMetadata = metadata
return out, nil
} | identifier_body |
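A hedged usage sketch for the generated operation above: it wires up a client from the default credential chain and issues a CreateFleet call. Every concrete value here (fleet name, build ID, launch path, instance type) is a placeholder, and a real call needs an uploaded build in READY status:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := gamelift.NewFromConfig(cfg)

	out, err := client.CreateFleet(context.TODO(), &gamelift.CreateFleetInput{
		Name:            aws.String("my-fleet"),                                   // placeholder
		BuildId:         aws.String("build-1111aaaa-22bb-33cc-44dd-5555eeee66ff"), // placeholder
		EC2InstanceType: types.EC2InstanceTypeC5Large,
		RuntimeConfiguration: &types.RuntimeConfiguration{
			ServerProcesses: []types.ServerProcess{{
				LaunchPath:           aws.String("/local/game/MyServer"), // placeholder
				ConcurrentExecutions: aws.Int32(1),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("fleet status:", out.FleetAttributes.Status)
}
```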
api_op_CreateFleet.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http" | // computing resources for your fleet and provide instructions for running game
// servers on each instance. Most Amazon GameLift fleets can deploy instances to
// multiple locations, including the home Region (where the fleet is created) and
// an optional set of remote locations. Fleets that are created in the following
// Amazon Web Services Regions support multiple locations: us-east-1 (N. Virginia),
// us-west-2 (Oregon), eu-central-1 (Frankfurt), eu-west-1 (Ireland),
// ap-southeast-2 (Sydney), ap-northeast-1 (Tokyo), and ap-northeast-2 (Seoul).
// Fleets that are created in other Amazon GameLift Regions can deploy instances in
// the fleet's home Region only. All fleet instances use the same configuration
// regardless of location; however, you can adjust capacity settings and turn
// auto-scaling on/off for each location. To create a fleet, choose the hardware
// for your instances, specify a game server build or Realtime script to deploy,
// and provide a runtime configuration to direct Amazon GameLift how to start and
// run game servers on each instance in the fleet. Set permissions for inbound
// traffic to your game servers, and enable optional features as needed. When
// creating a multi-location fleet, provide a list of additional remote locations.
// If you need to debug your fleet, fetch logs, view performance metrics or other
// actions on the fleet, create the development fleet with port 22/3389 open. As a
// best practice, we recommend opening ports for remote access only when you need
// them and closing them when you're finished. If successful, this operation
// creates a new Fleet resource and places it in NEW status, which prompts Amazon
// GameLift to initiate the fleet creation workflow (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-all.html#fleets-creation-workflow)
// . Learn more Setting up fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
// Debug fleet creation issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation)
// Multi-location fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
func (c *Client) CreateFleet(ctx context.Context, params *CreateFleetInput, optFns ...func(*Options)) (*CreateFleetOutput, error) {
if params == nil {
params = &CreateFleetInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateFleet", params, optFns, c.addOperationCreateFleetMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateFleetOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateFleetInput struct {
// A descriptive label that is associated with a fleet. Fleet names do not need to
// be unique.
//
// This member is required.
Name *string
// Amazon GameLift Anywhere configuration options.
AnywhereConfiguration *types.AnywhereConfiguration
// The unique identifier for a custom game server build to be deployed on fleet
// instances. You can use either the build ID or ARN. The build must be uploaded to
// Amazon GameLift and in READY status. This fleet property cannot be changed
// later.
BuildId *string
// Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon
// GameLift uses the certificates to encrypt traffic between game clients and the
// game servers running on Amazon GameLift. By default, the
// CertificateConfiguration is DISABLED . You can't change this property after you
// create the fleet. Certificate Manager (ACM) certificates expire after 13 months.
// Certificate expiration can cause fleets to fail, preventing players from
// connecting to instances in the fleet. We recommend you replace fleets before 13
// months; consider using fleet aliases for a smooth transition. ACM isn't
// available in all Amazon Web Services regions. A fleet creation request with
// certificate generation enabled in an unsupported Region, fails with a 4xx error.
// For more information about the supported Regions, see Supported Regions (https://docs.aws.amazon.com/acm/latest/userguide/acm-regions.html)
// in the Certificate Manager User Guide.
CertificateConfiguration *types.CertificateConfiguration
// The type of compute resource used to host your game servers. You can use your
// own compute resources with Amazon GameLift Anywhere or use Amazon EC2 instances
// with managed Amazon GameLift. By default, this property is set to EC2 .
ComputeType types.ComputeType
// A description for the fleet.
Description *string
// The allowed IP address ranges and port settings that allow inbound traffic to
// access game sessions on this fleet. If the fleet is hosting a custom game build,
// this property must be set before players can connect to game sessions. For
// Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.
EC2InboundPermissions []types.IpPermission
// The Amazon GameLift-supported Amazon EC2 instance type to use for all fleet
// instances. Instance type determines the computing resources that will be used to
// host your game servers, including CPU, memory, storage, and networking capacity.
// See Amazon Elastic Compute Cloud Instance Types (http://aws.amazon.com/ec2/instance-types/)
// for detailed descriptions of Amazon EC2 instance types.
EC2InstanceType types.EC2InstanceType
// Indicates whether to use On-Demand or Spot instances for this fleet. By
// default, this property is set to ON_DEMAND . Learn more about when to use
// On-Demand versus Spot Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot)
// . This property cannot be changed after the fleet is created.
FleetType types.FleetType
// A unique identifier for an IAM role that manages access to your Amazon Web
// Services services. With an instance role ARN set, any application that runs on
// an instance in this fleet can assume the role, including install scripts, server
// processes, and daemons (background processes). Create a role or look up a role's
// ARN by using the IAM dashboard (https://console.aws.amazon.com/iam/) in the
// Amazon Web Services Management Console. Learn more about using on-box
// credentials for your game servers at Access external resources from a game
// server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html)
// . This property cannot be changed after the fleet is created.
InstanceRoleArn *string
// A set of remote locations to deploy additional instances to and manage as part
// of the fleet. This parameter can only be used when creating fleets in Amazon Web
// Services Regions that support multiple locations. You can add any Amazon
// GameLift-supported Amazon Web Services Region as a remote location, in the form
// of an Amazon Web Services Region code such as us-west-2 . To create a fleet with
// instances in the home Region only, don't use this parameter. To use this
// parameter, Amazon GameLift requires you to use your home location in the
// request.
Locations []types.LocationConfiguration
// This parameter is no longer used. To specify where Amazon GameLift should store
// log files once a server process shuts down, use the Amazon GameLift server API
// ProcessReady() and specify one or more directory paths in logParameters . For
// more information, see Initialize the server process (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-initialize)
// in the Amazon GameLift Developer Guide.
LogPaths []string
// The name of an Amazon Web Services CloudWatch metric group to add this fleet
// to. A metric group is used to aggregate the metrics for multiple fleets. You can
// specify an existing metric group name or set a new name to create a new metric
// group. A fleet can be included in only one metric group at a time.
MetricGroups []string
// The status of termination protection for active game sessions on the fleet. By
// default, this property is set to NoProtection . You can also set game session
// protection for an individual game session by calling UpdateGameSession .
// - NoProtection - Game sessions can be terminated during active gameplay as a
// result of a scale-down event.
// - FullProtection - Game sessions in ACTIVE status cannot be terminated during
// a scale-down event.
NewGameSessionProtectionPolicy types.ProtectionPolicy
// Used when peering your Amazon GameLift fleet with a VPC, the unique identifier
// for the Amazon Web Services account that owns the VPC. You can find your account
// ID in the Amazon Web Services Management Console under account settings.
PeerVpcAwsAccountId *string
// A unique identifier for a VPC with resources to be accessed by your Amazon
// GameLift fleet. The VPC must be in the same Region as your fleet. To look up a
// VPC ID, use the VPC Dashboard (https://console.aws.amazon.com/vpc/) in the
// Amazon Web Services Management Console. Learn more about VPC peering in VPC
// Peering with Amazon GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html)
// .
PeerVpcId *string
// A policy that limits the number of game sessions that an individual player can
// create on instances in this fleet within a specified span of time.
ResourceCreationLimitPolicy *types.ResourceCreationLimitPolicy
// Instructions for how to launch and maintain server processes on instances in
// the fleet. The runtime configuration defines one or more server process
// configurations, each identifying a build executable or Realtime script file and
// the number of processes of that type to run concurrently. The
// RuntimeConfiguration parameter is required unless the fleet is being configured
// using the older parameters ServerLaunchPath and ServerLaunchParameters , which
// are still supported for backward compatibility.
RuntimeConfiguration *types.RuntimeConfiguration
// The unique identifier for a Realtime configuration script to be deployed on
// fleet instances. You can use either the script ID or ARN. Scripts must be
// uploaded to Amazon GameLift prior to creating the fleet. This fleet property
// cannot be changed later.
ScriptId *string
// This parameter is no longer used. Specify server launch parameters using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchParameters *string
// This parameter is no longer used. Specify a server launch path using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchPath *string
// A list of labels to assign to the new fleet resource. Tags are
// developer-defined key-value pairs. Tagging Amazon Web Services resources is
// useful for resource management, access management, and cost allocation. For more
// information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
// in the Amazon Web Services General Reference.
Tags []types.Tag
noSmithyDocumentSerde
}
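// Editor's example: a hypothetical CreateFleetInput for a multi-location fleet,
// following the Locations and EC2InboundPermissions field docs above. The
// location codes and the UDP port range are illustrative placeholders.
func exampleMultiLocationInput() *CreateFleetInput {
	return &CreateFleetInput{
		Name:            aws.String("multi-location-fleet"),
		BuildId:         aws.String("build-1234"),
		EC2InstanceType: types.EC2InstanceTypeC5Large,
		Locations: []types.LocationConfiguration{
			{Location: aws.String("us-west-2")},    // remote location
			{Location: aws.String("eu-central-1")}, // remote location
		},
		EC2InboundPermissions: []types.IpPermission{{
			FromPort: aws.Int32(7777),
			ToPort:   aws.Int32(7877),
			IpRange:  aws.String("0.0.0.0/0"), // tighten for production use
			Protocol: types.IpProtocolUdp,
		}},
	}
}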
type CreateFleetOutput struct {
// The properties for the new fleet, including the current status. All fleets are
// placed in NEW status on creation.
FleetAttributes *types.FleetAttributes
// The fleet's locations and life-cycle status of each location. For new fleets,
// the status of all locations is set to NEW . During fleet creation, Amazon
// GameLift updates each location status as instances are deployed there and
// prepared for game hosting. This list includes an entry for the fleet's home
// Region. For fleets with no remote locations, only one entry, representing the
// home Region, is returned.
LocationStates []types.LocationState
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateFleetMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateFleet{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateFleet{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateFleetResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateFleetValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateFleet(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
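// Editor's example: the middleware stack assembled above can be extended on a
// per-call basis through the variadic options. This hypothetical sketch appends
// a caller-supplied Initialize middleware; myMiddleware is an assumption, not a
// helper defined in this package.
func exampleWithCustomMiddleware(ctx context.Context, c *Client, in *CreateFleetInput, myMiddleware middleware.InitializeMiddleware) (*CreateFleetOutput, error) {
	return c.CreateFleet(ctx, in, func(o *Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Initialize.Add(myMiddleware, middleware.Before)
		})
	})
}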
func newServiceMetadataMiddleware_opCreateFleet(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "CreateFleet",
}
}
type opCreateFleetResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateFleetResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateFleetResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(&params)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCreateFleetResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opCreateFleetResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
} | )
// Creates a fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host
// your custom game server or Realtime Servers. Use this operation to configure the | random_line_split |
api_op_CreateFleet.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates a fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host
// your custom game server or Realtime Servers. Use this operation to configure the
// computing resources for your fleet and provide instructions for running game
// servers on each instance. Most Amazon GameLift fleets can deploy instances to
// multiple locations, including the home Region (where the fleet is created) and
// an optional set of remote locations. Fleets that are created in the following
// Amazon Web Services Regions support multiple locations: us-east-1 (N. Virginia),
// us-west-2 (Oregon), eu-central-1 (Frankfurt), eu-west-1 (Ireland),
// ap-southeast-2 (Sydney), ap-northeast-1 (Tokyo), and ap-northeast-2 (Seoul).
// Fleets that are created in other Amazon GameLift Regions can deploy instances in
// the fleet's home Region only. All fleet instances use the same configuration
// regardless of location; however, you can adjust capacity settings and turn
// auto-scaling on/off for each location. To create a fleet, choose the hardware
// for your instances, specify a game server build or Realtime script to deploy,
// and provide a runtime configuration to direct Amazon GameLift how to start and
// run game servers on each instance in the fleet. Set permissions for inbound
// traffic to your game servers, and enable optional features as needed. When
// creating a multi-location fleet, provide a list of additional remote locations.
// If you need to debug your fleet, fetch logs, view performance metrics, or
// perform other actions on the fleet, create the development fleet with ports
// 22/3389 open. As a
// best practice, we recommend opening ports for remote access only when you need
// them and closing them when you're finished. If successful, this operation
// creates a new Fleet resource and places it in NEW status, which prompts Amazon
// GameLift to initiate the fleet creation workflow (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-all.html#fleets-creation-workflow)
// . Learn more Setting up fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
// Debug fleet creation issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation)
// Multi-location fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
func (c *Client) CreateFleet(ctx context.Context, params *CreateFleetInput, optFns ...func(*Options)) (*CreateFleetOutput, error) {
if params == nil {
params = &CreateFleetInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateFleet", params, optFns, c.addOperationCreateFleetMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateFleetOutput)
out.ResultMetadata = metadata
return out, nil
}
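// Editor's example: a hypothetical sketch of inspecting a CreateFleet result.
// New fleets start in NEW status, so a caller might poll
// DescribeFleetAttributes (defined elsewhere in this package) until the fleet
// reaches ACTIVE; the single-element indexing assumes the fleet is returned.
func exampleInspectNewFleet(ctx context.Context, c *Client, in *CreateFleetInput) error {
	out, err := c.CreateFleet(ctx, in)
	if err != nil {
		return err
	}
	// LocationStates covers the home Region plus any remote locations.
	for _, ls := range out.LocationStates {
		fmt.Printf("%s: %v\n", aws.ToString(ls.Location), ls.Status)
	}
	desc, err := c.DescribeFleetAttributes(ctx, &DescribeFleetAttributesInput{
		FleetIds: []string{aws.ToString(out.FleetAttributes.FleetId)},
	})
	if err != nil {
		return err
	}
	fmt.Println(desc.FleetAttributes[0].Status) // types.FleetStatusNew until creation completes
	return nil
}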
type CreateFleetInput struct {
// A descriptive label that is associated with a fleet. Fleet names do not need to
// be unique.
//
// This member is required.
Name *string
// Amazon GameLift Anywhere configuration options.
AnywhereConfiguration *types.AnywhereConfiguration
// The unique identifier for a custom game server build to be deployed on fleet
// instances. You can use either the build ID or ARN. The build must be uploaded to
// Amazon GameLift and in READY status. This fleet property cannot be changed
// later.
BuildId *string
// Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon
// GameLift uses the certificates to encrypt traffic between game clients and the
// game servers running on Amazon GameLift. By default, the
// CertificateConfiguration is DISABLED . You can't change this property after you
// create the fleet. Certificate Manager (ACM) certificates expire after 13 months.
// Certificate expiration can cause fleets to fail, preventing players from
// connecting to instances in the fleet. We recommend that you replace fleets
// before 13 months and consider using fleet aliases for a smooth transition. ACM
// isn't available in all Amazon Web Services Regions. A fleet creation request
// with certificate generation enabled in an unsupported Region fails with a 4xx
// error.
// For more information about the supported Regions, see Supported Regions (https://docs.aws.amazon.com/acm/latest/userguide/acm-regions.html)
// in the Certificate Manager User Guide.
CertificateConfiguration *types.CertificateConfiguration
// The type of compute resource used to host your game servers. You can use your
// own compute resources with Amazon GameLift Anywhere or use Amazon EC2 instances
// with managed Amazon GameLift. By default, this property is set to EC2 .
ComputeType types.ComputeType
// A description for the fleet.
Description *string
// The allowed IP address ranges and port settings that allow inbound traffic to
// access game sessions on this fleet. If the fleet is hosting a custom game build,
// this property must be set before players can connect to game sessions. For
// Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.
EC2InboundPermissions []types.IpPermission
// The Amazon GameLift-supported Amazon EC2 instance type to use for all fleet
// instances. Instance type determines the computing resources that will be used to
// host your game servers, including CPU, memory, storage, and networking capacity.
// See Amazon Elastic Compute Cloud Instance Types (http://aws.amazon.com/ec2/instance-types/)
// for detailed descriptions of Amazon EC2 instance types.
EC2InstanceType types.EC2InstanceType
// Indicates whether to use On-Demand or Spot instances for this fleet. By
// default, this property is set to ON_DEMAND . Learn more about when to use
// On-Demand versus Spot Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot)
// . This property cannot be changed after the fleet is created.
FleetType types.FleetType
// A unique identifier for an IAM role that manages access to your Amazon Web
// Services services. With an instance role ARN set, any application that runs on
// an instance in this fleet can assume the role, including install scripts, server
// processes, and daemons (background processes). Create a role or look up a role's
// ARN by using the IAM dashboard (https://console.aws.amazon.com/iam/) in the
// Amazon Web Services Management Console. Learn more about using on-box
// credentials for your game servers at Access external resources from a game
// server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html)
// . This property cannot be changed after the fleet is created.
InstanceRoleArn *string
// A set of remote locations to deploy additional instances to and manage as part
// of the fleet. This parameter can only be used when creating fleets in Amazon Web
// Services Regions that support multiple locations. You can add any Amazon
// GameLift-supported Amazon Web Services Region as a remote location, in the form
// of an Amazon Web Services Region code such as us-west-2 . To create a fleet with
// instances in the home Region only, don't use this parameter. To use this
// parameter, Amazon GameLift requires you to use your home location in the
// request.
Locations []types.LocationConfiguration
// This parameter is no longer used. To specify where Amazon GameLift should store
// log files once a server process shuts down, use the Amazon GameLift server API
// ProcessReady() and specify one or more directory paths in logParameters . For
// more information, see Initialize the server process (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-initialize)
// in the Amazon GameLift Developer Guide.
LogPaths []string
// The name of an Amazon Web Services CloudWatch metric group to add this fleet
// to. A metric group is used to aggregate the metrics for multiple fleets. You can
// specify an existing metric group name or set a new name to create a new metric
// group. A fleet can be included in only one metric group at a time.
MetricGroups []string
// The status of termination protection for active game sessions on the fleet. By
// default, this property is set to NoProtection . You can also set game session
// protection for an individual game session by calling UpdateGameSession .
// - NoProtection - Game sessions can be terminated during active gameplay as a
// result of a scale-down event.
// - FullProtection - Game sessions in ACTIVE status cannot be terminated during
// a scale-down event.
NewGameSessionProtectionPolicy types.ProtectionPolicy
// Used when peering your Amazon GameLift fleet with a VPC, the unique identifier
// for the Amazon Web Services account that owns the VPC. You can find your account
// ID in the Amazon Web Services Management Console under account settings.
PeerVpcAwsAccountId *string
// A unique identifier for a VPC with resources to be accessed by your Amazon
// GameLift fleet. The VPC must be in the same Region as your fleet. To look up a
// VPC ID, use the VPC Dashboard (https://console.aws.amazon.com/vpc/) in the
// Amazon Web Services Management Console. Learn more about VPC peering in VPC
// Peering with Amazon GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html)
// .
PeerVpcId *string
// A policy that limits the number of game sessions that an individual player can
// create on instances in this fleet within a specified span of time.
ResourceCreationLimitPolicy *types.ResourceCreationLimitPolicy
// Instructions for how to launch and maintain server processes on instances in
// the fleet. The runtime configuration defines one or more server process
// configurations, each identifying a build executable or Realtime script file and
// the number of processes of that type to run concurrently. The
// RuntimeConfiguration parameter is required unless the fleet is being configured
// using the older parameters ServerLaunchPath and ServerLaunchParameters , which
// are still supported for backward compatibility.
RuntimeConfiguration *types.RuntimeConfiguration
// The unique identifier for a Realtime configuration script to be deployed on
// fleet instances. You can use either the script ID or ARN. Scripts must be
// uploaded to Amazon GameLift prior to creating the fleet. This fleet property
// cannot be changed later.
ScriptId *string
// This parameter is no longer used. Specify server launch parameters using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchParameters *string
// This parameter is no longer used. Specify a server launch path using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchPath *string
// A list of labels to assign to the new fleet resource. Tags are
// developer-defined key-value pairs. Tagging Amazon Web Services resources is
// useful for resource management, access management, and cost allocation. For more
// information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
// in the Amazon Web Services General Reference.
Tags []types.Tag
noSmithyDocumentSerde
}
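// Editor's example: a hypothetical RuntimeConfiguration that runs two
// differently parameterized server processes per instance, as described for the
// RuntimeConfiguration field above. Paths, flags, and limits are placeholders.
func exampleRuntimeConfiguration() *types.RuntimeConfiguration {
	return &types.RuntimeConfiguration{
		GameSessionActivationTimeoutSeconds: aws.Int32(300),
		MaxConcurrentGameSessionActivations: aws.Int32(2),
		ServerProcesses: []types.ServerProcess{
			{
				LaunchPath:           aws.String("/local/game/server"),
				Parameters:           aws.String("-mode arena"),
				ConcurrentExecutions: aws.Int32(2),
			},
			{
				LaunchPath:           aws.String("/local/game/server"),
				Parameters:           aws.String("-mode coop"),
				ConcurrentExecutions: aws.Int32(1),
			},
		},
	}
}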
type CreateFleetOutput struct {
// The properties for the new fleet, including the current status. All fleets are
// placed in NEW status on creation.
FleetAttributes *types.FleetAttributes
// The fleet's locations and life-cycle status of each location. For new fleets,
// the status of all locations is set to NEW . During fleet creation, Amazon
// GameLift updates each location status as instances are deployed there and
// prepared for game hosting. This list includes an entry for the fleet's home
// Region. For fleets with no remote locations, only one entry, representing the
// home Region, is returned.
LocationStates []types.LocationState
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateFleetMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateFleet{}, middleware.After)
if err != nil |
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateFleet{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateFleetResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateFleetValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateFleet(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateFleet(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "CreateFleet",
}
}
type opCreateFleetResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateFleetResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateFleetResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(&params)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCreateFleetResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opCreateFleetResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| {
return err
} | conditional_block |
api_op_CreateFleet.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates a fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host
// your custom game server or Realtime Servers. Use this operation to configure the
// computing resources for your fleet and provide instructions for running game
// servers on each instance. Most Amazon GameLift fleets can deploy instances to
// multiple locations, including the home Region (where the fleet is created) and
// an optional set of remote locations. Fleets that are created in the following
// Amazon Web Services Regions support multiple locations: us-east-1 (N. Virginia),
// us-west-2 (Oregon), eu-central-1 (Frankfurt), eu-west-1 (Ireland),
// ap-southeast-2 (Sydney), ap-northeast-1 (Tokyo), and ap-northeast-2 (Seoul).
// Fleets that are created in other Amazon GameLift Regions can deploy instances in
// the fleet's home Region only. All fleet instances use the same configuration
// regardless of location; however, you can adjust capacity settings and turn
// auto-scaling on/off for each location. To create a fleet, choose the hardware
// for your instances, specify a game server build or Realtime script to deploy,
// and provide a runtime configuration to direct Amazon GameLift how to start and
// run game servers on each instance in the fleet. Set permissions for inbound
// traffic to your game servers, and enable optional features as needed. When
// creating a multi-location fleet, provide a list of additional remote locations.
// If you need to debug your fleet, fetch logs, view performance metrics, or
// perform other actions on the fleet, create the development fleet with ports
// 22/3389 open. As a
// best practice, we recommend opening ports for remote access only when you need
// them and closing them when you're finished. If successful, this operation
// creates a new Fleet resource and places it in NEW status, which prompts Amazon
// GameLift to initiate the fleet creation workflow (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-all.html#fleets-creation-workflow)
// . Learn more Setting up fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
// Debug fleet creation issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation)
// Multi-location fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
func (c *Client) | (ctx context.Context, params *CreateFleetInput, optFns ...func(*Options)) (*CreateFleetOutput, error) {
if params == nil {
params = &CreateFleetInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateFleet", params, optFns, c.addOperationCreateFleetMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateFleetOutput)
out.ResultMetadata = metadata
return out, nil
}
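// Editor's example: the variadic optFns parameter lets a caller override client
// options for a single call. A hypothetical sketch that targets a different
// Region than the client default; the Region code is illustrative.
func exampleCreateFleetInRegion(ctx context.Context, c *Client, in *CreateFleetInput) (*CreateFleetOutput, error) {
	return c.CreateFleet(ctx, in, func(o *Options) {
		o.Region = "eu-west-1"
	})
}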
type CreateFleetInput struct {
// A descriptive label that is associated with a fleet. Fleet names do not need to
// be unique.
//
// This member is required.
Name *string
// Amazon GameLift Anywhere configuration options.
AnywhereConfiguration *types.AnywhereConfiguration
// The unique identifier for a custom game server build to be deployed on fleet
// instances. You can use either the build ID or ARN. The build must be uploaded to
// Amazon GameLift and in READY status. This fleet property cannot be changed
// later.
BuildId *string
// Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon
// GameLift uses the certificates to encrypt traffic between game clients and the
// game servers running on Amazon GameLift. By default, the
// CertificateConfiguration is DISABLED . You can't change this property after you
// create the fleet. Certificate Manager (ACM) certificates expire after 13 months.
// Certificate expiration can cause fleets to fail, preventing players from
// connecting to instances in the fleet. We recommend that you replace fleets
// before 13 months and consider using fleet aliases for a smooth transition. ACM
// isn't available in all Amazon Web Services Regions. A fleet creation request
// with certificate generation enabled in an unsupported Region fails with a 4xx
// error.
// For more information about the supported Regions, see Supported Regions (https://docs.aws.amazon.com/acm/latest/userguide/acm-regions.html)
// in the Certificate Manager User Guide.
CertificateConfiguration *types.CertificateConfiguration
// The type of compute resource used to host your game servers. You can use your
// own compute resources with Amazon GameLift Anywhere or use Amazon EC2 instances
// with managed Amazon GameLift. By default, this property is set to EC2 .
ComputeType types.ComputeType
// A description for the fleet.
Description *string
// The allowed IP address ranges and port settings that allow inbound traffic to
// access game sessions on this fleet. If the fleet is hosting a custom game build,
// this property must be set before players can connect to game sessions. For
// Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.
EC2InboundPermissions []types.IpPermission
// The Amazon GameLift-supported Amazon EC2 instance type to use for all fleet
// instances. Instance type determines the computing resources that will be used to
// host your game servers, including CPU, memory, storage, and networking capacity.
// See Amazon Elastic Compute Cloud Instance Types (http://aws.amazon.com/ec2/instance-types/)
// for detailed descriptions of Amazon EC2 instance types.
EC2InstanceType types.EC2InstanceType
// Indicates whether to use On-Demand or Spot instances for this fleet. By
// default, this property is set to ON_DEMAND . Learn more about when to use
// On-Demand versus Spot Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot)
// . This property cannot be changed after the fleet is created.
FleetType types.FleetType
// A unique identifier for an IAM role that manages access to your Amazon Web
// Services services. With an instance role ARN set, any application that runs on
// an instance in this fleet can assume the role, including install scripts, server
// processes, and daemons (background processes). Create a role or look up a role's
// ARN by using the IAM dashboard (https://console.aws.amazon.com/iam/) in the
// Amazon Web Services Management Console. Learn more about using on-box
// credentials for your game servers at Access external resources from a game
// server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html)
// . This property cannot be changed after the fleet is created.
InstanceRoleArn *string
// A set of remote locations to deploy additional instances to and manage as part
// of the fleet. This parameter can only be used when creating fleets in Amazon Web
// Services Regions that support multiple locations. You can add any Amazon
// GameLift-supported Amazon Web Services Region as a remote location, in the form
// of an Amazon Web Services Region code such as us-west-2 . To create a fleet with
// instances in the home Region only, don't use this parameter. To use this
// parameter, Amazon GameLift requires you to use your home location in the
// request.
Locations []types.LocationConfiguration
// This parameter is no longer used. To specify where Amazon GameLift should store
// log files once a server process shuts down, use the Amazon GameLift server API
// ProcessReady() and specify one or more directory paths in logParameters . For
// more information, see Initialize the server process (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-initialize)
// in the Amazon GameLift Developer Guide.
LogPaths []string
// The name of an Amazon Web Services CloudWatch metric group to add this fleet
// to. A metric group is used to aggregate the metrics for multiple fleets. You can
// specify an existing metric group name or set a new name to create a new metric
// group. A fleet can be included in only one metric group at a time.
MetricGroups []string
// The status of termination protection for active game sessions on the fleet. By
// default, this property is set to NoProtection . You can also set game session
// protection for an individual game session by calling UpdateGameSession .
// - NoProtection - Game sessions can be terminated during active gameplay as a
// result of a scale-down event.
// - FullProtection - Game sessions in ACTIVE status cannot be terminated during
// a scale-down event.
NewGameSessionProtectionPolicy types.ProtectionPolicy
// Used when peering your Amazon GameLift fleet with a VPC, the unique identifier
// for the Amazon Web Services account that owns the VPC. You can find your account
// ID in the Amazon Web Services Management Console under account settings.
PeerVpcAwsAccountId *string
// A unique identifier for a VPC with resources to be accessed by your Amazon
// GameLift fleet. The VPC must be in the same Region as your fleet. To look up a
// VPC ID, use the VPC Dashboard (https://console.aws.amazon.com/vpc/) in the
// Amazon Web Services Management Console. Learn more about VPC peering in VPC
// Peering with Amazon GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html)
// .
PeerVpcId *string
// A policy that limits the number of game sessions that an individual player can
// create on instances in this fleet within a specified span of time.
ResourceCreationLimitPolicy *types.ResourceCreationLimitPolicy
// Instructions for how to launch and maintain server processes on instances in
// the fleet. The runtime configuration defines one or more server process
// configurations, each identifying a build executable or Realtime script file and
// the number of processes of that type to run concurrently. The
// RuntimeConfiguration parameter is required unless the fleet is being configured
// using the older parameters ServerLaunchPath and ServerLaunchParameters , which
// are still supported for backward compatibility.
RuntimeConfiguration *types.RuntimeConfiguration
// The unique identifier for a Realtime configuration script to be deployed on
// fleet instances. You can use either the script ID or ARN. Scripts must be
// uploaded to Amazon GameLift prior to creating the fleet. This fleet property
// cannot be changed later.
ScriptId *string
// This parameter is no longer used. Specify server launch parameters using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchParameters *string
// This parameter is no longer used. Specify a server launch path using the
// RuntimeConfiguration parameter. Requests that use this parameter instead
// continue to be valid.
ServerLaunchPath *string
// A list of labels to assign to the new fleet resource. Tags are
// developer-defined key-value pairs. Tagging Amazon Web Services resources is
// useful for resource management, access management, and cost allocation. For more
// information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
// in the Amazon Web Services General Reference.
Tags []types.Tag
noSmithyDocumentSerde
}
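// Editor's example: hypothetical settings for two immutable properties
// discussed above, TLS certificate generation and Spot instances. Neither can
// be changed after the fleet is created.
func exampleImmutableOptions(in *CreateFleetInput) {
	in.CertificateConfiguration = &types.CertificateConfiguration{
		CertificateType: types.CertificateTypeGenerated, // default is DISABLED
	}
	in.FleetType = types.FleetTypeSpot // default is ON_DEMAND
}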
type CreateFleetOutput struct {
// The properties for the new fleet, including the current status. All fleets are
// placed in NEW status on creation.
FleetAttributes *types.FleetAttributes
// The fleet's locations and life-cycle status of each location. For new fleets,
// the status of all locations is set to NEW . During fleet creation, Amazon
// GameLift updates each location status as instances are deployed there and
// prepared for game hosting. This list includes an entry for the fleet's home
// Region. For fleets with no remote locations, only one entry, representing the
// home Region, is returned.
LocationStates []types.LocationState
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateFleetMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateFleet{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateFleet{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateFleetResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateFleetValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateFleet(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateFleet(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "CreateFleet",
}
}
type opCreateFleetResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateFleetResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateFleetResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(&params)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCreateFleetResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opCreateFleetResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| CreateFleet | identifier_name |
policy_handler.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request, Response as SettingResponse};
use crate::handler::setting_handler::{SettingHandlerResult, StorageFactory};
use crate::message::base::Audience;
use crate::policy::response::{Error as PolicyError, Response};
use crate::policy::{
BoxedHandler, Context, GenerateHandlerResult, HasPolicyType, PolicyInfo, PolicyType,
Request as PolicyRequest,
};
use crate::service;
use crate::storage::{self, StorageInfo};
use anyhow::Error;
use async_trait::async_trait;
use fuchsia_syslog::fx_log_err;
use fuchsia_trace as ftrace;
use futures::future::BoxFuture;
use settings_storage::device_storage::DeviceStorage;
use settings_storage::UpdateState;
use std::convert::{TryFrom, TryInto};
/// PolicyHandlers are in charge of applying and persisting policies set by clients.
#[async_trait]
pub trait PolicyHandler {
/// Called when a policy client makes a request on the policy API this handler controls.
async fn handle_policy_request(&mut self, request: PolicyRequest) -> Response;
/// Called when a setting request is intercepted for the setting this policy handler supervises.
///
/// If there are no policies or the request does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to consume the request and respond to the client directly, it should
/// return [`RequestTransform::Result`].
///
/// If this handler wants to modify the request and then let the setting handler
/// handle it, [`RequestTransform::Request`] should be returned with the modified
/// request.
///
/// [`RequestTransform::Result`]: enum.RequestTransform.html
/// [`RequestTransform::Request`]: enum.RequestTransform.html
async fn handle_setting_request(&mut self, request: Request) -> Option<RequestTransform>;
/// Called when a setting response is intercepted from the setting this policy handler
/// supervises.
///
/// If there are no policies or the response does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to modify the response and still let the original audience handle it,
/// [`Response`] should be returned, containing the modified response.
///
/// [`Response`]: ResponseTransform::Response
async fn handle_setting_response(
&mut self,
response: SettingResponse,
) -> Option<ResponseTransform>;
}
/// `RequestTransform` is returned by a [`PolicyHandler`] in response to a setting request that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum RequestTransform {
/// A new, modified request that should be forwarded to the setting handler for processing.
Request(Request),
/// A result to return directly to the settings client.
Result(SettingHandlerResult),
}
/// `ResponseTransform` is returned by a [`PolicyHandler`] in response to a setting response that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum ResponseTransform {
/// A new, modified response that should be forwarded.
Response(SettingResponse),
}
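// Editor's example: a hypothetical handler, not part of this codebase, showing
// how the transforms above are used. It rewrites every non-Get setting request
// into a `Get` and leaves responses untouched; a real policy would often return
// `RequestTransform::Result` to answer the client directly.
struct ExamplePolicyHandler;

#[async_trait]
impl PolicyHandler for ExamplePolicyHandler {
    async fn handle_policy_request(&mut self, _request: PolicyRequest) -> Response {
        // This sketch exposes no policy API of its own.
        unimplemented!("hypothetical handler; no policy requests supported")
    }

    async fn handle_setting_request(&mut self, request: Request) -> Option<RequestTransform> {
        match request {
            // Pass reads through unmodified.
            Request::Get => None,
            // Rewrite anything else before the setting handler sees it.
            _ => Some(RequestTransform::Request(Request::Get)),
        }
    }

    async fn handle_setting_response(
        &mut self,
        _response: SettingResponse,
    ) -> Option<ResponseTransform> {
        None
    }
}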
/// Trait used to create policy handlers.
#[async_trait]
pub trait Create: Sized {
async fn create(handler: ClientProxy) -> Result<Self, Error>;
}
/// Creates a [`PolicyHandler`] from the given [`Context`].
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`Context`]: ../base/struct.Context.html
pub(crate) fn create_handler<C, T: StorageFactory<Storage = DeviceStorage> + 'static>(
context: Context<T>,
) -> BoxFuture<'static, GenerateHandlerResult>
where
C: Create + PolicyHandler + Send + Sync + 'static,
{
Box::pin(async move {
let _ = &context;
let proxy = ClientProxy::new(context.service_messenger);
C::create(proxy).await.map(|handler| Box::new(handler) as BoxedHandler)
})
}
/// `ClientProxy` provides common functionality, like messaging and persistence to policy handlers.
#[derive(Clone)]
pub struct ClientProxy {
service_messenger: service::message::Messenger,
}
impl ClientProxy {
/// Sends a setting request to the underlying setting proxy this policy handler controls.
pub(crate) fn send_setting_request(
&self,
setting_type: SettingType,
request: Request,
) -> service::message::Receptor {
self.service_messenger
.message(
HandlerPayload::Request(request).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send()
}
/// Requests the setting handler to rebroadcast a settings changed event to its listeners.
pub(crate) fn request_rebroadcast(&self, setting_type: SettingType) {
// Ignore the receptor result.
let _ = self
.service_messenger
.message(
HandlerPayload::Request(Request::Rebroadcast).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send();
}
}
impl ClientProxy {
pub(crate) fn new(service_messenger: service::message::Messenger) -> Self {
Self { service_messenger }
}
/// The type `T` is any type that has a [`PolicyType`] associated with it and that can be
/// converted into a [`PolicyInfo`]. This is usually a variant of the `PolicyInfo` enum.
pub(crate) async fn read_policy<T: HasPolicyType + TryFrom<PolicyInfo>>(
&self,
id: ftrace::Id,
) -> T {
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Read(T::POLICY_TYPE.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Read(
StorageInfo::PolicyInfo(policy_info),
)) = payload
{
let policy_type: PolicyType = (&policy_info).into();
if let Ok(info) = policy_info.try_into() {
return info;
}
panic!(
"Mismatching type during read. Expected {:?}, but got {:?}",
T::POLICY_TYPE,
policy_type
);
} else {
panic!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
panic!("Error reading from storage: {:?}", err);
}
}
}
    /// Writes a policy info object to storage, blocking until the write completes and
    /// returning the resulting [`UpdateState`], or a [`PolicyError::WriteFailure`] on error.
pub(crate) async fn write_policy(
&self,
policy_info: PolicyInfo,
id: ftrace::Id,
) -> Result<UpdateState, PolicyError> {
let policy_type = (&policy_info).into();
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Write(policy_info.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Write(result)) = payload
{
return result.map_err(|e| {
fx_log_err!("Failed to write policy: {:?}", e);
PolicyError::WriteFailure(policy_type)
});
} else {
fx_log_err!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
fx_log_err!("Error writing to storage: {:?}", err);
}
}
Err(PolicyError::WriteFailure(policy_type))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request};
use crate::message::base::MessengerType;
use crate::message::MessageHubUtil;
use crate::policy::PolicyType;
use crate::service;
use crate::tests::message_utils::verify_payload;
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_send_setting_request() {
let policy_type = PolicyType::Unknown;
let setting_request = Request::Get;
let target_setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut setting_proxy_receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(
policy_type.setting_type(),
)))
.await
.expect("setting proxy messenger created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
let _ = client_proxy.send_setting_request(target_setting_type, setting_request.clone());
verify_payload(
service::Payload::Setting(HandlerPayload::Request(setting_request)),
&mut setting_proxy_receptor,
None,
)
.await
}
#[fuchsia_async::run_until_stalled(test)]
    async fn test_client_proxy_request_rebroadcast() {
let setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(setting_type)))
.await
.expect("service receptor created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
client_proxy.request_rebroadcast(setting_type);
verify_payload(HandlerPayload::Request(Request::Rebroadcast).into(), &mut receptor, None)
.await
    }
}
| identifier_body |
policy_handler.rs |
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request, Response as SettingResponse};
use crate::handler::setting_handler::{SettingHandlerResult, StorageFactory};
use crate::message::base::Audience;
use crate::policy::response::{Error as PolicyError, Response};
use crate::policy::{
BoxedHandler, Context, GenerateHandlerResult, HasPolicyType, PolicyInfo, PolicyType,
Request as PolicyRequest,
};
use crate::service;
use crate::storage::{self, StorageInfo};
use anyhow::Error;
use async_trait::async_trait;
use fuchsia_syslog::fx_log_err;
use fuchsia_trace as ftrace;
use futures::future::BoxFuture;
use settings_storage::device_storage::DeviceStorage;
use settings_storage::UpdateState;
use std::convert::{TryFrom, TryInto};
/// PolicyHandlers are in charge of applying and persisting policies set by clients.
#[async_trait]
pub trait PolicyHandler {
/// Called when a policy client makes a request on the policy API this handler controls.
async fn handle_policy_request(&mut self, request: PolicyRequest) -> Response;
/// Called when a setting request is intercepted for the setting this policy handler supervises.
///
/// If there are no policies or the request does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to consume the request and respond to the client directly, it should
/// return [`RequestTransform::Result`].
///
/// If this handler wants to modify the request and then let the setting handler handle it,
/// [`RequestTransform::Request`] should be returned, with the modified request.
///
/// [`RequestTransform::Result`]: enum.RequestTransform.html
/// [`RequestTransform::Request`]: enum.RequestTransform.html
async fn handle_setting_request(&mut self, request: Request) -> Option<RequestTransform>;
/// Called when a setting response is intercepted from the setting this policy handler
/// supervises.
///
/// If there are no policies or the response does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to modify the response and still let the original audience handle it,
/// [`Response`] should be returned, containing the modified response.
///
/// [`Response`]: ResponseTransform::Response
async fn handle_setting_response(
&mut self,
response: SettingResponse,
) -> Option<ResponseTransform>;
}
/// `RequestTransform` is returned by a [`PolicyHandler`] in response to a setting request that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum RequestTransform {
/// A new, modified request that should be forwarded to the setting handler for processing.
Request(Request),
/// A result to return directly to the settings client.
Result(SettingHandlerResult),
}
/// `ResponseTransform` is returned by a [`PolicyHandler`] in response to a setting response that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum ResponseTransform {
/// A new, modified response that should be forwarded.
Response(SettingResponse),
}
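// Illustrative sketch only (assumed helper, not part of this module): a policy
// handler could rewrite an intercepted response before it continues to the
// original audience, where `apply_policy_to` is a hypothetical method returning
// `Option<SettingResponse>`:
//
//     async fn handle_setting_response(
//         &mut self,
//         response: SettingResponse,
//     ) -> Option<ResponseTransform> {
//         self.apply_policy_to(response).map(ResponseTransform::Response)
//     }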
/// Trait used to create policy handlers.
#[async_trait]
pub trait Create: Sized {
async fn create(handler: ClientProxy) -> Result<Self, Error>;
}
/// Creates a [`PolicyHandler`] from the given [`Context`].
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`Context`]: ../base/struct.Context.html
pub(crate) fn create_handler<C, T: StorageFactory<Storage = DeviceStorage> + 'static>(
context: Context<T>,
) -> BoxFuture<'static, GenerateHandlerResult>
where
C: Create + PolicyHandler + Send + Sync + 'static,
{
Box::pin(async move {
let _ = &context;
let proxy = ClientProxy::new(context.service_messenger);
C::create(proxy).await.map(|handler| Box::new(handler) as BoxedHandler)
})
}
/// `ClientProxy` provides common functionality, like messaging and persistence, to policy handlers.
#[derive(Clone)]
pub struct ClientProxy {
service_messenger: service::message::Messenger,
}
impl ClientProxy {
/// Sends a setting request to the underlying setting proxy this policy handler controls.
pub(crate) fn send_setting_request(
&self,
setting_type: SettingType,
request: Request,
) -> service::message::Receptor {
self.service_messenger
.message(
HandlerPayload::Request(request).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send()
}
/// Requests the setting handler to rebroadcast a settings changed event to its listeners.
pub(crate) fn request_rebroadcast(&self, setting_type: SettingType) {
// Ignore the receptor result.
let _ = self
.service_messenger
.message(
HandlerPayload::Request(Request::Rebroadcast).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send();
}
}
impl ClientProxy {
pub(crate) fn new(service_messenger: service::message::Messenger) -> Self {
Self { service_messenger }
}
/// The type `T` is any type that has a [`PolicyType`] associated with it and that can be
/// converted into a [`PolicyInfo`]. This is usually a variant of the `PolicyInfo` enum.
pub(crate) async fn read_policy<T: HasPolicyType + TryFrom<PolicyInfo>>(
&self,
id: ftrace::Id,
) -> T {
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Read(T::POLICY_TYPE.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Read(
StorageInfo::PolicyInfo(policy_info),
)) = payload
{
let policy_type: PolicyType = (&policy_info).into();
if let Ok(info) = policy_info.try_into() {
return info;
}
panic!(
"Mismatching type during read. Expected {:?}, but got {:?}",
T::POLICY_TYPE,
policy_type
);
} else {
panic!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
panic!("Error reading from storage: {:?}", err);
}
}
}
    /// Writes a policy info object to storage, blocking until the write completes and
    /// returning the resulting [`UpdateState`], or a [`PolicyError::WriteFailure`] on error.
    pub(crate) async fn write_policy(
&self,
policy_info: PolicyInfo,
id: ftrace::Id,
) -> Result<UpdateState, PolicyError> {
let policy_type = (&policy_info).into();
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Write(policy_info.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Write(result)) = payload
{
return result.map_err(|e| {
fx_log_err!("Failed to write policy: {:?}", e);
PolicyError::WriteFailure(policy_type)
});
} else {
fx_log_err!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
fx_log_err!("Error writing to storage: {:?}", err);
}
}
Err(PolicyError::WriteFailure(policy_type))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request};
use crate::message::base::MessengerType;
use crate::message::MessageHubUtil;
use crate::policy::PolicyType;
use crate::service;
use crate::tests::message_utils::verify_payload;
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_send_setting_request() {
let policy_type = PolicyType::Unknown;
let setting_request = Request::Get;
let target_setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut setting_proxy_receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(
policy_type.setting_type(),
)))
.await
.expect("setting proxy messenger created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
let _ = client_proxy.send_setting_request(target_setting_type, setting_request.clone());
verify_payload(
service::Payload::Setting(HandlerPayload::Request(setting_request)),
&mut setting_proxy_receptor,
None,
)
.await
}
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_request_rebroadcast() {
let setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(setting_type)))
.await
.expect("service receptor created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
client_proxy.request_rebroadcast(setting_type);
verify_payload(HandlerPayload::Request(Request::Rebroadcast).into(), &mut receptor, None)
.await
}
}
| identifier_name |
policy_handler.rs |
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request, Response as SettingResponse};
use crate::handler::setting_handler::{SettingHandlerResult, StorageFactory};
use crate::message::base::Audience;
use crate::policy::response::{Error as PolicyError, Response};
use crate::policy::{
BoxedHandler, Context, GenerateHandlerResult, HasPolicyType, PolicyInfo, PolicyType,
Request as PolicyRequest,
};
use crate::service;
use crate::storage::{self, StorageInfo};
use anyhow::Error;
use async_trait::async_trait;
use fuchsia_syslog::fx_log_err;
use fuchsia_trace as ftrace;
use futures::future::BoxFuture;
use settings_storage::device_storage::DeviceStorage;
use settings_storage::UpdateState;
use std::convert::{TryFrom, TryInto};
/// PolicyHandlers are in charge of applying and persisting policies set by clients.
#[async_trait]
pub trait PolicyHandler {
/// Called when a policy client makes a request on the policy API this handler controls.
async fn handle_policy_request(&mut self, request: PolicyRequest) -> Response;
/// Called when a setting request is intercepted for the setting this policy handler supervises.
///
/// If there are no policies or the request does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to consume the request and respond to the client directly, it should
/// return [`RequestTransform::Result`].
///
/// If this handler wants to modify the request and then let the setting handler handle it,
/// [`RequestTransform::Request`] should be returned, with the modified request.
///
/// [`RequestTransform::Result`]: enum.RequestTransform.html
/// [`RequestTransform::Request`]: enum.RequestTransform.html
async fn handle_setting_request(&mut self, request: Request) -> Option<RequestTransform>;
/// Called when a setting response is intercepted from the setting this policy handler
/// supervises.
///
/// If there are no policies or the response does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to modify the response and still let the original audience handle it,
/// [`Response`] should be returned, containing the modified response.
///
/// [`Response`]: ResponseTransform::Response
async fn handle_setting_response(
&mut self,
response: SettingResponse,
) -> Option<ResponseTransform>;
}
/// `RequestTransform` is returned by a [`PolicyHandler`] in response to a setting request that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum RequestTransform {
/// A new, modified request that should be forwarded to the setting handler for processing.
Request(Request),
/// A result to return directly to the settings client.
Result(SettingHandlerResult),
}
/// `ResponseTransform` is returned by a [`PolicyHandler`] in response to a setting response that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum ResponseTransform {
/// A new, modified response that should be forwarded.
Response(SettingResponse),
}
/// Trait used to create policy handlers.
#[async_trait]
pub trait Create: Sized {
async fn create(handler: ClientProxy) -> Result<Self, Error>;
}
/// Creates a [`PolicyHandler`] from the given [`Context`].
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`Context`]: ../base/struct.Context.html
pub(crate) fn create_handler<C, T: StorageFactory<Storage = DeviceStorage> + 'static>(
context: Context<T>,
) -> BoxFuture<'static, GenerateHandlerResult>
where
C: Create + PolicyHandler + Send + Sync + 'static,
{
Box::pin(async move {
let _ = &context;
let proxy = ClientProxy::new(context.service_messenger);
C::create(proxy).await.map(|handler| Box::new(handler) as BoxedHandler)
})
}
/// `ClientProxy` provides common functionality, like messaging and persistence, to policy handlers.
#[derive(Clone)]
pub struct ClientProxy {
service_messenger: service::message::Messenger,
}
impl ClientProxy {
/// Sends a setting request to the underlying setting proxy this policy handler controls.
pub(crate) fn send_setting_request(
&self,
setting_type: SettingType,
request: Request,
) -> service::message::Receptor {
self.service_messenger
.message(
HandlerPayload::Request(request).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send()
}
/// Requests the setting handler to rebroadcast a settings changed event to its listeners.
pub(crate) fn request_rebroadcast(&self, setting_type: SettingType) {
// Ignore the receptor result.
let _ = self
.service_messenger
.message(
HandlerPayload::Request(Request::Rebroadcast).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send();
}
}
impl ClientProxy {
pub(crate) fn new(service_messenger: service::message::Messenger) -> Self {
Self { service_messenger }
}
/// The type `T` is any type that has a [`PolicyType`] associated with it and that can be
/// converted into a [`PolicyInfo`]. This is usually a variant of the `PolicyInfo` enum.
pub(crate) async fn read_policy<T: HasPolicyType + TryFrom<PolicyInfo>>(
&self,
id: ftrace::Id,
) -> T {
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Read(T::POLICY_TYPE.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Read(
StorageInfo::PolicyInfo(policy_info),
)) = payload
{
let policy_type: PolicyType = (&policy_info).into();
if let Ok(info) = policy_info.try_into() {
return info;
}
panic!(
"Mismatching type during read. Expected {:?}, but got {:?}",
T::POLICY_TYPE,
policy_type
);
} else {
panic!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
panic!("Error reading from storage: {:?}", err);
}
}
}
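    // Failure-handling note: `read_policy` above panics on a storage error or a
    // type mismatch, whereas `write_policy` below logs the failure and returns
    // `PolicyError::WriteFailure`, so reads are treated as must-succeed while
    // writes surface a recoverable error to the caller.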
    /// Writes a policy info object to storage, blocking until the write completes and
    /// returning the resulting [`UpdateState`], or a [`PolicyError::WriteFailure`] on error.
pub(crate) async fn write_policy(
&self,
policy_info: PolicyInfo,
id: ftrace::Id,
) -> Result<UpdateState, PolicyError> {
let policy_type = (&policy_info).into();
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Write(policy_info.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Write(result)) = payload
{
return result.map_err(|e| {
fx_log_err!("Failed to write policy: {:?}", e);
PolicyError::WriteFailure(policy_type)
});
} else {
fx_log_err!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
fx_log_err!("Error writing to storage: {:?}", err);
}
}
Err(PolicyError::WriteFailure(policy_type))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request};
use crate::message::base::MessengerType;
use crate::message::MessageHubUtil;
use crate::policy::PolicyType;
use crate::service;
use crate::tests::message_utils::verify_payload;
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_send_setting_request() {
let policy_type = PolicyType::Unknown;
let setting_request = Request::Get;
let target_setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut setting_proxy_receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(
policy_type.setting_type(),
)))
.await
.expect("setting proxy messenger created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
let _ = client_proxy.send_setting_request(target_setting_type, setting_request.clone());
verify_payload(
service::Payload::Setting(HandlerPayload::Request(setting_request)),
&mut setting_proxy_receptor,
None,
)
.await
}
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_request_rebroadcast() {
let setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(setting_type)))
.await
.expect("service receptor created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
client_proxy.request_rebroadcast(setting_type);
verify_payload(HandlerPayload::Request(Request::Rebroadcast).into(), &mut receptor, None)
.await
}
}
| random_line_split |
projectsvmglove.py |
# -*- coding: utf-8 -*-
"""ProjectSVMGlove.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oP_lPuxcr0qYni8HvGSS-rCFlUCtfUYE
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import pickle
from google.colab import drive
drive.mount('/content/gdrive')
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/gdrive/My\ Drive/NLP_Project
with open(r"processed_english.pkl", "rb") as input_file:
data = pickle.load(input_file)
import re
from pymongo import MongoClient
from collections import defaultdict
from nltk.corpus import wordnet as wn
#from transliteration import transliterate_word
import nltk
from nltk import word_tokenize
from nltk.util import ngrams as ngrams_creator
#from googletrans import Translator
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
import pickle
#nltk.download('punkt')
nltk.download('vader_lexicon')
from nltk.corpus import sentiwordnet as swn
from nltk.sentiment.vader import SentimentIntensityAnalyzer
'''
Utility function to handle commonly used short forms
'''
def handle_short_forms(w):
    if w == 'h':
        return 'hai'
    elif w == 'n':
        return 'na'
    elif w == 'da':
        return 'the'
    elif w == 'wid':
        return 'with'
    elif w == 'pr':
        return 'par'
    elif w == 'mattt':
        return 'mat'
    elif w == 'vo':
        return 'woh'
    elif w == 'ki':
        return 'kee'
    elif w == 'ap':
        return 'aap'
    elif w == 'bs':
        return 'bas'
    elif w == 'goood':
        return 'very good'
    elif w == 'tera':
        return 'teraa'
    elif w == 'cnfsn':
        return 'confusion'
    elif w == 'ka':
        return 'kaa'
    elif w == 'rkhi':
        return 'rakhi'
    elif w == 'thts':
        return 'thats'
    elif w == 'cald':
        return 'called'
    elif w == 'tabhe':
        return 'tabhi'
    elif w == 'pta':
        return 'pata'
    elif w == 'b':
        return 'bhi'
    elif w == 'nai':
        return 'nahi'
    elif w == 'f':
        return 'of'
    elif w == 'd':
        return 'the'
    else:
        return w
'''
Translate a word from Hindi to English. Requires a googletrans `translator`
instance; note that the googletrans import above is commented out.
'''
def translate(word):
return translator.translate(word,src='hi' , dest='en').text
'''
Self defined contractions
'''
def load_dict_contractions():
return {
"ain't":"is not",
"amn't":"am not",
"aren't":"are not",
"can't":"cannot",
"'cause":"because",
"couldn't":"could not",
"couldn't've":"could not have",
"could've":"could have",
"daren't":"dare not",
"daresn't":"dare not",
"dasn't":"dare not",
"didn't":"did not",
"doesn't":"does not",
"don't":"do not",
"e'er":"ever",
"em":"them",
"everyone's":"everyone is",
"finna":"fixing to",
"gimme":"give me",
"gonna":"going to",
"gon't":"go not",
"gotta":"got to",
"hadn't":"had not",
"hasn't":"has not",
"haven't":"have not",
"he'd":"he would",
"he'll":"he will",
"he's":"he is",
"he've":"he have",
"how'd":"how would",
"how'll":"how will",
"how're":"how are",
"how's":"how is",
"I'd":"I would",
"I'll":"I will",
"i'll":"I will",
"I'm":"I am",
"I'm'a":"I am about to",
"I'm'o":"I am going to",
"isn't":"is not",
"it'd":"it would",
"it'll":"it will",
"it's":"it is",
"I've":"I have",
"kinda":"kind of",
"let's":"let us",
"mayn't":"may not",
"may've":"may have",
"mightn't":"might not",
"might've":"might have",
"mustn't":"must not",
"mustn't've":"must not have",
"must've":"must have",
"needn't":"need not",
"ne'er":"never",
"o'":"of",
"o'er":"over",
"ol'":"old",
"oughtn't":"ought not",
"shalln't":"shall not",
"shan't":"shall not",
"she'd":"she would",
"she'll":"she will",
"she's":"she is",
"shouldn't":"should not",
"shouldn't've":"should not have",
"should've":"should have",
"somebody's":"somebody is",
"someone's":"someone is",
"something's":"something is",
"that'd":"that would",
"that'll":"that will",
"that're":"that are",
"that's":"that is",
"there'd":"there would",
"there'll":"there will",
"there're":"there are",
"there's":"there is",
"these're":"these are",
"they'd":"they would",
"they'll":"they will",
"they're":"they are",
"they've":"they have",
"this's":"this is",
"those're":"those are",
"'tis":"it is",
"'twas":"it was",
"wanna":"want to",
"wasn't":"was not",
"we'd":"we would",
"we'd've":"we would have",
"we'll":"we will",
"we're":"we are",
"weren't":"were not",
"we've":"we have",
"what'd":"what did",
"what'll":"what will",
"what're":"what are",
"what's":"what is",
"what've":"what have",
"when's":"when is",
"where'd":"where did",
"where're":"where are",
"where's":"where is",
"where've":"where have",
"which's":"which is",
"who'd":"who would",
"who'd've":"who would have",
"who'll":"who will",
"who're":"who are",
"who's":"who is",
"who've":"who have",
"why'd":"why did",
"why're":"why are",
"why's":"why is",
"won't":"will not",
"wouldn't":"would not",
"would've":"would have",
"y'all":"you all",
"you'd":"you would",
"you'll":"you will",
"you're":"you are",
"you've":"you have",
"Whatcha":"What are you",
"whatcha":"What are you",
"luv":"love",
"sux":"sucks"
}
'''
Handling short forms and contractions in the sentences
'''
long_form_dict = load_dict_contractions()
def expand_sent(sentence):
final_sent =""
res = " ".join(long_form_dict.get(ele, ele) for ele in sentence.split())
for word in res.split():
final_sent += (handle_short_forms(word))+ " "
return final_sent
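# Quick sanity check of the normalization pipeline on a made-up sentence:
# contractions are expanded first, then per-word short forms are replaced.
print(expand_sent("I'm gonna chat wid u"))  # -> "I am going to chat with u "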
data.shape
l=[]
for sent in data.sentence_eng:
l.append(expand_sent(sent))
print(l[2:5])
data['sentence_eng']=l
'''
Emoticon processing
'''
emoji_list = pd.read_csv('emoji.csv',sep=",")
def find_emoji(sentence):
positive = 0
negative = 0
neutral = 0
sentiment =[]
for word in sentence.split():
if not emoji_list[emoji_list['Emoji']==word].empty:
positive += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Positive']
negative += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Negative']
neutral += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Neutral']
return positive,negative,neutral
'''
Slang word processing
'''
slang_list = pd.read_csv('Hinglish_Profanity_List.csv',sep=",",header=None)
slang_list.columns=['hindi_word','meaning','rating']
def find_slang(sentence):
slang_exists = False
total_rating = 0
for word in sentence.split():
if not slang_list[slang_list['hindi_word']==word].empty:
try:
total_rating += slang_list.iloc[slang_list.index[slang_list['hindi_word'] == word].tolist()[0]]['rating']
except:
total_rating +=0
slang_exists = True
try:
total_rating = int(total_rating)
except:
total_rating = 0
return slang_exists,total_rating
data['slang_exists']= data['sentence_mixed'].apply(find_slang)
data[['slang_existance', 'slang_rating']] = pd.DataFrame(data['slang_exists'].tolist(), index=data.index)
data =data.drop(columns=['slang_exists'])
data['emoji_sentiment'] = data['sentence_mixed'].apply(find_emoji)
data[['positive_emoji', 'negative_emoji','neutral_emoji']] = pd.DataFrame(data['emoji_sentiment'].tolist(), index=data.index)
data =data.drop(columns=['emoji_sentiment'])
'''
Calculating the sentiment polarity score
'''
sid = SentimentIntensityAnalyzer()
neg_score=[]
pos_score=[]
neu_score=[]
for sent in data.sentence_eng:
ss = sid.polarity_scores(sent)
neg_score.append(ss['neg'])
pos_score.append(ss['pos'])
neu_score.append(ss['neu'])
data['neg_score']= neg_score
data['pos_score']= pos_score
data['neu_score']= neu_score
data.tail(10)
# removing unwanted patterns from the data
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
#Splitting the data
train = data[:5000]
test = data[5000:]
'''
fetching the sentences/comments from the training dataset and cleaning them
'''
train_corpus = []
for i in range(0, 5000):
review = re.sub('[^a-zA-Z]', ' ', train['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
train_corpus.append(review)
'''
fetching the sentences/comments from the test dataset and cleaning them
'''
test_corpus = []
for i in range(5000, 6137):
review = re.sub('[^a-zA-Z]', ' ', test['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
test_corpus.append(review)
# Conversion of sentiment labels into numerical sentiment
train =train.replace("positive",1)
train =train.replace("negative",2)
train = train.replace("neutral",0)
train.head()
y = train.iloc[:, 2]
print(y)
from nltk.tokenize import WordPunctTokenizer
tokenizer = WordPunctTokenizer()
data_tok = [tokenizer.tokenize(d.lower()) for d in train_corpus]
import gensim.downloader as api
model = api.load('glove-twitter-50')
import numpy as np
def get_phrase_embedding(phrase):
"""
    Convert a phrase to a vector by aggregating its word embeddings: a simple
    unweighted average over all tokens found in the model's vocabulary.
"""
vector = np.zeros([model.vector_size], dtype='float32')
# 1. lowercase phrase
phrase = phrase.lower()
# 2. tokenize phrase
phrase_tokens = tokenizer.tokenize(phrase)
# 3. average word vectors for all words in tokenized phrase, skip words that are not in model's vocabulary
divisor = 0
for word in phrase_tokens:
if word in model.vocab:
divisor += 1
vector = vector + model.get_vector(word)
if divisor != 0: vector /= divisor
return vector
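# Small illustration on hypothetical phrases: because a phrase embedding is an
# average of GloVe vectors, semantically close phrases should score a high
# cosine similarity; the zero-denominator guard covers fully out-of-vocabulary input.
def cosine_similarity(u, v):
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    return float(np.dot(u, v) / denom) if denom else 0.0

print(cosine_similarity(get_phrase_embedding("this movie is great"),
                        get_phrase_embedding("this film is good")))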
'''
Embeddings for Training Corpus
'''
vector_matrix_x_train = list(map(get_phrase_embedding, train_corpus))
'''
Embeddings for Test Corpus
'''
vector_matrix_x_test = list(map(get_phrase_embedding, test_corpus))
numerical_features_t = train[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_t.values.tolist())
'''
Stacking numerical features along with textual features
'''
combinedFeatures = np.hstack([numerical_features_t, np.array(vector_matrix_x_train)])
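# With glove-twitter-50 each row is 7 handcrafted columns plus a 50-dimensional
# embedding average:
print(combinedFeatures.shape)  # expected: (5000, 57)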
numerical_features_test = test[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_test.values.tolist())
combinedFeatures_test = np.hstack([numerical_features_test, np.array(vector_matrix_x_test)])
from sklearn.model_selection import train_test_split
'''
Train test validation split
'''
x_train, x_valid, y_train, y_valid = train_test_split(combinedFeatures, y, test_size = 0.25, random_state = 42)
print(x_train.shape)
print(x_valid.shape)
print(y_train.shape)
print(y_valid.shape)
print(combinedFeatures_test.shape)
# standardization
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_valid = sc.transform(x_valid)
x_test = sc.transform(combinedFeatures_test)
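# Note: the scaler is fit on the training split only and reused to transform the
# validation and test matrices, which avoids leaking their statistics into training.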
'''
Random Forest Classifer
'''
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
model = RandomForestClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("F1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
Logistic Regression Classifier
'''
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
Decision Tree Classifier
'''
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
SVM Classifier
'''
from sklearn.svm import SVC
model = SVC()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
xgBoost Classifier
'''
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
pd.options.display.max_colwidth = 300
train.head(2)
t = train[1:]
t.head()
#print(test)
print(y_valid)
print(y_pred)
| identifier_body |
projectsvmglove.py |
# -*- coding: utf-8 -*-
"""ProjectSVMGlove.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oP_lPuxcr0qYni8HvGSS-rCFlUCtfUYE
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import pickle
from google.colab import drive
drive.mount('/content/gdrive')
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/gdrive/My\ Drive/NLP_Project
with open(r"processed_english.pkl", "rb") as input_file:
data = pickle.load(input_file)
import re
from pymongo import MongoClient
from collections import defaultdict
from nltk.corpus import wordnet as wn
#from transliteration import transliterate_word
import nltk
from nltk import word_tokenize
from nltk.util import ngrams as ngrams_creator
#from googletrans import Translator
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
import pickle
#nltk.download('punkt')
nltk.download('vader_lexicon')
from nltk.corpus import sentiwordnet as swn
from nltk.sentiment.vader import SentimentIntensityAnalyzer
'''
Utility function to handle commonly used short forms
'''
def handle_short_forms(w):
if w == 'h':
return 'hai'
elif w == 'n':
return 'na'
elif w == 'da':
return 'the'
elif w == 'wid':
return 'with'
elif w == 'pr':
return 'par'
elif w == 'mattt':
return 'mat'
elif w == 'vo':
return 'woh'
elif w == 'ki':
return 'kee'
elif w == 'ap':
return 'aap'
elif w == 'bs':
return 'bas'
elif w == 'goood':
return 'very good'
elif w == 'tera':
return 'teraa'
elif w == 'cnfsn':
return 'confusion'
elif w == 'ka':
return 'kaa'
elif w == 'rkhi':
return 'rakhi'
elif w == 'thts':
return 'thats'
elif w == 'cald':
return 'called'
elif w == 'tabhe':
return 'tabhi'
elif w == 'pta':
return 'pata'
elif w == 'b':
return 'bhi'
elif w == 'nai':
return 'nahi'
elif w == 'f':
return 'of'
elif w == 'd':
return 'the'
else:
return w
'''
Translate a word from Hindi to English. Requires a googletrans `translator`
instance; note that the googletrans import above is commented out.
'''
def translate(word):
return translator.translate(word,src='hi' , dest='en').text
'''
Self defined contractions
'''
def load_dict_contractions():
return {
"ain't":"is not",
"amn't":"am not",
"aren't":"are not",
"can't":"cannot",
"'cause":"because",
"couldn't":"could not",
"couldn't've":"could not have",
"could've":"could have",
"daren't":"dare not",
"daresn't":"dare not",
"dasn't":"dare not",
"didn't":"did not",
"doesn't":"does not",
"don't":"do not",
"e'er":"ever",
"em":"them",
"everyone's":"everyone is",
"finna":"fixing to",
"gimme":"give me",
"gonna":"going to",
"gon't":"go not",
"gotta":"got to",
"hadn't":"had not",
"hasn't":"has not",
"haven't":"have not",
"he'd":"he would",
"he'll":"he will",
"he's":"he is",
"he've":"he have",
"how'd":"how would",
"how'll":"how will",
"how're":"how are",
"how's":"how is",
"I'd":"I would",
"I'll":"I will",
"i'll":"I will",
"I'm":"I am",
"I'm'a":"I am about to",
"I'm'o":"I am going to",
"isn't":"is not",
"it'd":"it would",
"it'll":"it will",
"it's":"it is",
"I've":"I have",
"kinda":"kind of",
"let's":"let us",
"mayn't":"may not",
"may've":"may have",
"mightn't":"might not",
"might've":"might have",
"mustn't":"must not",
"mustn't've":"must not have",
"must've":"must have",
"needn't":"need not",
"ne'er":"never",
"o'":"of",
"o'er":"over",
"ol'":"old",
"oughtn't":"ought not",
"shalln't":"shall not",
"shan't":"shall not",
"she'd":"she would",
"she'll":"she will",
"she's":"she is",
"shouldn't":"should not",
"shouldn't've":"should not have",
"should've":"should have",
"somebody's":"somebody is",
"someone's":"someone is",
"something's":"something is",
"that'd":"that would",
"that'll":"that will",
"that're":"that are",
"that's":"that is",
"there'd":"there would",
"there'll":"there will",
"there're":"there are",
"there's":"there is",
"these're":"these are",
"they'd":"they would",
"they'll":"they will",
"they're":"they are",
"they've":"they have",
"this's":"this is",
"those're":"those are",
"'tis":"it is",
"'twas":"it was",
"wanna":"want to",
"wasn't":"was not",
"we'd":"we would",
"we'd've":"we would have",
"we'll":"we will",
"we're":"we are",
"weren't":"were not",
"we've":"we have",
"what'd":"what did",
"what'll":"what will",
"what're":"what are",
"what's":"what is",
"what've":"what have",
"when's":"when is",
"where'd":"where did",
"where're":"where are",
"where's":"where is",
"where've":"where have",
"which's":"which is",
"who'd":"who would",
"who'd've":"who would have",
"who'll":"who will",
"who're":"who are",
"who's":"who is",
"who've":"who have",
"why'd":"why did",
"why're":"why are",
"why's":"why is",
"won't":"will not",
"wouldn't":"would not",
"would've":"would have",
"y'all":"you all",
"you'd":"you would",
"you'll":"you will",
"you're":"you are",
"you've":"you have",
"Whatcha":"What are you",
"whatcha":"What are you",
"luv":"love",
"sux":"sucks"
}
'''
Handling short forms and contractions in the sentences
'''
long_form_dict = load_dict_contractions()
def expand_sent(sentence):
final_sent =""
res = " ".join(long_form_dict.get(ele, ele) for ele in sentence.split())
for word in res.split():
final_sent += (handle_short_forms(word))+ " "
return final_sent
data.shape
l=[]
for sent in data.sentence_eng:
l.append(expand_sent(sent))
print(l[2:5])
data['sentence_eng']=l
'''
Emoticon processing
'''
emoji_list = pd.read_csv('emoji.csv',sep=",")
def find_emoji(sentence):
positive = 0
negative = 0
neutral = 0
sentiment =[]
for word in sentence.split():
if not emoji_list[emoji_list['Emoji']==word].empty:
positive += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Positive']
negative += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Negative']
neutral += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Neutral']
return positive,negative,neutral
'''
Slang word processing
'''
slang_list = pd.read_csv('Hinglish_Profanity_List.csv',sep=",",header=None)
slang_list.columns=['hindi_word','meaning','rating']
def find_slang(sentence):
slang_exists = False
total_rating = 0
for word in sentence.split():
if not slang_list[slang_list['hindi_word']==word].empty:
try:
total_rating += slang_list.iloc[slang_list.index[slang_list['hindi_word'] == word].tolist()[0]]['rating']
except:
total_rating +=0
slang_exists = True
try:
total_rating = int(total_rating)
except:
total_rating = 0
return slang_exists,total_rating
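# Illustrative usage (the rating depends on Hinglish_Profanity_List.csv):
# exists, rating = find_slang("some hypothetical sentence")
# `exists` flags whether any listed word appeared; `rating` sums the per-word
# severity scores of all matches.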
data['slang_exists']= data['sentence_mixed'].apply(find_slang)
data[['slang_existance', 'slang_rating']] = pd.DataFrame(data['slang_exists'].tolist(), index=data.index)
data =data.drop(columns=['slang_exists'])
data['emoji_sentiment'] = data['sentence_mixed'].apply(find_emoji)
data[['positive_emoji', 'negative_emoji','neutral_emoji']] = pd.DataFrame(data['emoji_sentiment'].tolist(), index=data.index)
data =data.drop(columns=['emoji_sentiment'])
'''
Calculating the sentiment polarity score
'''
sid = SentimentIntensityAnalyzer()
neg_score=[]
pos_score=[]
neu_score=[]
for sent in data.sentence_eng:
ss = sid.polarity_scores(sent)
neg_score.append(ss['neg'])
pos_score.append(ss['pos'])
neu_score.append(ss['neu'])
data['neg_score']= neg_score
data['pos_score']= pos_score
data['neu_score']= neu_score
data.tail(10)
# removing unwanted patterns from the data
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
#Splitting the data
train = data[:5000]
test = data[5000:]
'''
fetching the sentences/comments from the training dataset and cleaning them
'''
train_corpus = []
for i in range(0, 5000):
review = re.sub('[^a-zA-Z]', ' ', train['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
train_corpus.append(review)
'''
fetching the sentences/comments from the test dataset and cleaning them
'''
test_corpus = []
for i in range(5000, 6137):
review = re.sub('[^a-zA-Z]', ' ', test['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
test_corpus.append(review)
# Conversion of sentiment labels into numerical sentiment
train =train.replace("positive",1)
train =train.replace("negative",2)
train = train.replace("neutral",0)
train.head()
y = train.iloc[:, 2]
print(y)
from nltk.tokenize import WordPunctTokenizer
tokenizer = WordPunctTokenizer()
data_tok = [tokenizer.tokenize(d.lower()) for d in train_corpus]
import gensim.downloader as api
model = api.load('glove-twitter-50')
import numpy as np
def get_phrase_embedding(phrase):
"""
    Convert a phrase to a vector by aggregating its word embeddings: a simple
    unweighted average over all tokens found in the model's vocabulary.
"""
vector = np.zeros([model.vector_size], dtype='float32')
# 1. lowercase phrase
phrase = phrase.lower()
# 2. tokenize phrase
phrase_tokens = tokenizer.tokenize(phrase)
# 3. average word vectors for all words in tokenized phrase, skip words that are not in model's vocabulary
divisor = 0
for word in phrase_tokens:
if word in model.vocab:
divisor += 1
vector = vector + model.get_vector(word)
if divisor != 0: vector /= divisor
return vector
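# Edge case: a phrase with no in-vocabulary tokens is embedded as the all-zero
# vector, so it contributes no distributional signal to the feature matrix.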
'''
Embeddings for Training Corpus
'''
vector_matrix_x_train = list(map(get_phrase_embedding, train_corpus))
'''
Embeddings for Test Corpus
'''
vector_matrix_x_test = list(map(get_phrase_embedding, test_corpus))
numerical_features_t = train[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_t.values.tolist())
'''
Stacking numerical features along with textual features
'''
combinedFeatures = np.hstack([numerical_features_t, np.array(vector_matrix_x_train)])
numerical_features_test = test[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_test.values.tolist())
combinedFeatures_test = np.hstack([numerical_features_test, np.array(vector_matrix_x_test)])
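# Train and test feature matrices must agree on width before scaling and modeling:
assert combinedFeatures.shape[1] == combinedFeatures_test.shape[1]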
from sklearn.model_selection import train_test_split
'''
Train test validation split
'''
x_train, x_valid, y_train, y_valid = train_test_split(combinedFeatures, y, test_size = 0.25, random_state = 42)
print(x_train.shape)
print(x_valid.shape)
print(y_train.shape)
print(y_valid.shape)
print(combinedFeatures_test.shape)
# standardization
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_valid = sc.transform(x_valid)
x_test = sc.transform(combinedFeatures_test)
'''
Random Forest Classifer
'''
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
model = RandomForestClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("F1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
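# Macro-averaged F1 weights the three classes (neutral/positive/negative)
# equally, so a skewed label distribution cannot hide poor minority-class
# performance the way plain accuracy can.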
'''
Logistic Regression Classifier
'''
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
Decision Tree Classifier
'''
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
SVM Classifier
'''
from sklearn.svm import SVC
model = SVC()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
xgBoost Classifier
'''
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
pd.options.display.max_colwidth = 300
train.head(2)
t = train[1:]
t.head()
#print(test)
print(y_valid)
print(y_pred)
| random_line_split |
projectsvmglove.py |
# -*- coding: utf-8 -*-
"""ProjectSVMGlove.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oP_lPuxcr0qYni8HvGSS-rCFlUCtfUYE
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import pickle
from google.colab import drive
drive.mount('/content/gdrive')
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/gdrive/My\ Drive/NLP_Project
with open(r"processed_english.pkl", "rb") as input_file:
data = pickle.load(input_file)
import re
from pymongo import MongoClient
from collections import defaultdict
from nltk.corpus import wordnet as wn
#from transliteration import transliterate_word
import nltk
from nltk import word_tokenize
from nltk.util import ngrams as ngrams_creator
#from googletrans import Translator
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
import pickle
#nltk.download('punkt')
nltk.download('vader_lexicon')
from nltk.corpus import sentiwordnet as swn
from nltk.sentiment.vader import SentimentIntensityAnalyzer
'''
Utility function to handle commonly used short forms
'''
def handle_short_forms(w):
if w == 'h':
return 'hai'
elif w == 'n':
return 'na'
elif w == 'da':
return 'the'
elif w == 'wid':
return 'with'
elif w == 'pr':
return 'par'
elif w == 'mattt':
return 'mat'
elif w == 'vo':
return 'woh'
elif w == 'ki':
return 'kee'
elif w == 'ap':
return 'aap'
elif w == 'bs':
        return 'bas'
elif w == 'goood':
return 'very good'
elif w == 'tera':
return 'teraa'
elif w == 'cnfsn':
return 'confusion'
elif w == 'ka':
return 'kaa'
elif w == 'rkhi':
return 'rakhi'
elif w == 'thts':
return 'thats'
elif w == 'cald':
return 'called'
elif w == 'tabhe':
return 'tabhi'
elif w == 'pta':
return 'pata'
elif w == 'b':
return 'bhi'
elif w == 'nai':
return 'nahi'
elif w == 'f':
return 'of'
elif w == 'd':
return 'the'
else:
return w
'''
Translate a word from Hindi to English. Requires a googletrans `translator`
instance; note that the googletrans import above is commented out.
'''
def translate(word):
return translator.translate(word,src='hi' , dest='en').text
'''
Self defined contractions
'''
def load_dict_contractions():
return {
"ain't":"is not",
"amn't":"am not",
"aren't":"are not",
"can't":"cannot",
"'cause":"because",
"couldn't":"could not",
"couldn't've":"could not have",
"could've":"could have",
"daren't":"dare not",
"daresn't":"dare not",
"dasn't":"dare not",
"didn't":"did not",
"doesn't":"does not",
"don't":"do not",
"e'er":"ever",
"em":"them",
"everyone's":"everyone is",
"finna":"fixing to",
"gimme":"give me",
"gonna":"going to",
"gon't":"go not",
"gotta":"got to",
"hadn't":"had not",
"hasn't":"has not",
"haven't":"have not",
"he'd":"he would",
"he'll":"he will",
"he's":"he is",
"he've":"he have",
"how'd":"how would",
"how'll":"how will",
"how're":"how are",
"how's":"how is",
"I'd":"I would",
"I'll":"I will",
"i'll":"I will",
"I'm":"I am",
"I'm'a":"I am about to",
"I'm'o":"I am going to",
"isn't":"is not",
"it'd":"it would",
"it'll":"it will",
"it's":"it is",
"I've":"I have",
"kinda":"kind of",
"let's":"let us",
"mayn't":"may not",
"may've":"may have",
"mightn't":"might not",
"might've":"might have",
"mustn't":"must not",
"mustn't've":"must not have",
"must've":"must have",
"needn't":"need not",
"ne'er":"never",
"o'":"of",
"o'er":"over",
"ol'":"old",
"oughtn't":"ought not",
"shalln't":"shall not",
"shan't":"shall not",
"she'd":"she would",
"she'll":"she will",
"she's":"she is",
"shouldn't":"should not",
"shouldn't've":"should not have",
"should've":"should have",
"somebody's":"somebody is",
"someone's":"someone is",
"something's":"something is",
"that'd":"that would",
"that'll":"that will",
"that're":"that are",
"that's":"that is",
"there'd":"there would",
"there'll":"there will",
"there're":"there are",
"there's":"there is",
"these're":"these are",
"they'd":"they would",
"they'll":"they will",
"they're":"they are",
"they've":"they have",
"this's":"this is",
"those're":"those are",
"'tis":"it is",
"'twas":"it was",
"wanna":"want to",
"wasn't":"was not",
"we'd":"we would",
"we'd've":"we would have",
"we'll":"we will",
"we're":"we are",
"weren't":"were not",
"we've":"we have",
"what'd":"what did",
"what'll":"what will",
"what're":"what are",
"what's":"what is",
"what've":"what have",
"when's":"when is",
"where'd":"where did",
"where're":"where are",
"where's":"where is",
"where've":"where have",
"which's":"which is",
"who'd":"who would",
"who'd've":"who would have",
"who'll":"who will",
"who're":"who are",
"who's":"who is",
"who've":"who have",
"why'd":"why did",
"why're":"why are",
"why's":"why is",
"won't":"will not",
"wouldn't":"would not",
"would've":"would have",
"y'all":"you all",
"you'd":"you would",
"you'll":"you will",
"you're":"you are",
"you've":"you have",
"Whatcha":"What are you",
"whatcha":"What are you",
"luv":"love",
"sux":"sucks"
}
'''
Handling short forms and contractions in the sentences
'''
long_form_dict = load_dict_contractions()
def expand_sent(sentence):
final_sent =""
res = " ".join(long_form_dict.get(ele, ele) for ele in sentence.split())
for word in res.split():
final_sent += (handle_short_forms(word))+ " "
return final_sent
data.shape
l=[]
for sent in data.sentence_eng:
l.append(expand_sent(sent))
print(l[2:5])
data['sentence_eng']=l
'''
Emoticon processing
'''
emoji_list = pd.read_csv('emoji.csv',sep=",")
def find_emoji(sentence):
positive = 0
negative = 0
neutral = 0
sentiment =[]
for word in sentence.split():
if not emoji_list[emoji_list['Emoji']==word].empty:
positive += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Positive']
negative += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Negative']
neutral += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Neutral']
return positive,negative,neutral
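# Illustrative usage (counts depend on the emoji.csv lexicon):
# find_emoji("kya baat hai 😀") would return e.g. (1, 0, 0) if the grinning
# face were listed as purely positive.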
'''
Slang word processing
'''
slang_list = pd.read_csv('Hinglish_Profanity_List.csv',sep=",",header=None)
slang_list.columns=['hindi_word','meaning','rating']
def find_slang(sentence):
slang_exists = False
total_rating = 0
for word in sentence.split():
if not slang_list[slang_list['hindi_word']==word].empty:
try:
total_rating += slang_list.iloc[slang_list.index[slang_list['hindi_word'] == word].tolist()[0]]['rating']
except:
total_rating +=0
slang_exists = True
try:
total_rating = int(total_rating)
except:
total_rating = 0
return slang_exists,total_rating
data['slang_exists']= data['sentence_mixed'].apply(find_slang)
data[['slang_existance', 'slang_rating']] = pd.DataFrame(data['slang_exists'].tolist(), index=data.index)
data =data.drop(columns=['slang_exists'])
data['emoji_sentiment'] = data['sentence_mixed'].apply(find_emoji)
data[['positive_emoji', 'negative_emoji','neutral_emoji']] = pd.DataFrame(data['emoji_sentiment'].tolist(), index=data.index)
data =data.drop(columns=['emoji_sentiment'])
'''
Calculating the sentiment polarity score
'''
sid = SentimentIntensityAnalyzer()
neg_score=[]
pos_score=[]
neu_score=[]
for sent in data.sentence_eng:
ss = sid.polarity_scores(sent)
neg_score.append(ss['neg'])
pos_score.append(ss['pos'])
neu_score.append(ss['neu'])
data['neg_score']= neg_score
data['pos_score']= pos_score
data['neu_score']= neu_score
data.tail(10)
# removing unwanted patterns from the data
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
#Splitting the data
train = data[:5000]
test = data[5000:]
'''
fetching the sentences/comments from the training dataset and cleaning them
'''
train_corpus = []
for i in range(0, 5000):
review = re.sub('[^a-zA-Z]', ' ', train['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
train_corpus.append(review)
'''
fetching the sentence/comments from the test dataset and cleaning it
'''
test_corpus = []
for i in range(5000, 6137):
review = re.sub('[^a-zA-Z]', ' ', test['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
test_corpus.append(review)
# Conversion of sentiment labels into numerical values
train =train.replace("positive",1)
train =train.replace("negative",2)
train = train.replace("neutral",0)
train.head()
y = train.iloc[:, 2]
print(y)
from nltk.tokenize import WordPunctTokenizer
tokenizer = WordPunctTokenizer()
data_tok = [tokenizer.tokenize(d.lower()) for d in train_corpus]
import gensim.downloader as api
model = api.load('glove-twitter-50')
import numpy as np
def get_phrase_embedding(phrase):
"""
    Convert a phrase to a vector by aggregating its word embeddings: take the
    unweighted average of the vectors of all in-vocabulary tokens in the phrase.
"""
vector = np.zeros([model.vector_size], dtype='float32')
# 1. lowercase phrase
phrase = phrase.lower()
# 2. tokenize phrase
phrase_tokens = tokenizer.tokenize(phrase)
# 3. average word vectors for all words in tokenized phrase, skip words that are not in model's vocabulary
divisor = 0
for word in phrase_tokens:
if word in model.vocab:
divisor += 1
vector = vector + model.get_vector(word)
if divisor != 0: vector /= divisor
return vector
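# Minimal sketch of the averaging (values illustrative; the real vectors come from
# the 50-dimensional glove-twitter-50 model loaded above):
#   get_phrase_embedding("good day") -> (v_good + v_day) / 2, a single 50-d vector.
# Out-of-vocabulary tokens are skipped, and a phrase with no in-vocabulary tokens
# yields the zero vector.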
'''
Embeddings for Training Corpus
'''
vector_matrix_x_train = list(map(get_phrase_embedding, train_corpus))
'''
Embeddings for Test Corpus
'''
vector_matrix_x_test = list(map(get_phrase_embedding, test_corpus))
numerical_features_t = train[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_t.values.tolist())
'''
Stacking numerical features along with textual features
'''
combinedFeatures = np.hstack([numerical_features_t, np.array(vector_matrix_x_train)])
numerical_features_test = test[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_test.values.tolist())
combinedFeatures_test = np.hstack([numerical_features_test, np.array(vector_matrix_x_test)])
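# Sanity note (derived from the code above): each row now carries the 7 numeric
# features followed by the 50 GloVe dimensions, so combinedFeatures has shape
# (5000, 57) and combinedFeatures_test has shape (1137, 57) for this 6137-row dataset.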
from sklearn.model_selection import train_test_split
'''
Train test validation split
'''
x_train, x_valid, y_train, y_valid = train_test_split(combinedFeatures, y, test_size = 0.25, random_state = 42)
print(x_train.shape)
print(x_valid.shape)
print(y_train.shape)
print(y_valid.shape)
print(combinedFeatures_test.shape)
# standardization
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_valid = sc.transform(x_valid)
x_test = sc.transform(combinedFeatures_test)
'''
Random Forest Classifier
'''
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
model = RandomForestClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("F1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
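# Note (added for clarity): with average='macro', the F1 score is the unweighted
# mean of the per-class F1 values, i.e. (F1_neutral + F1_positive + F1_negative) / 3,
# so the minority sentiment classes count as much as the majority class.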
'''
Logistic Regression Classifier
'''
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
Decision Tree Classifier
'''
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
SVM Classifier
'''
from sklearn.svm import SVC
model = SVC()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
XGBoost Classifier
'''
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
pd.options.display.max_colwidth = 300
train.head(2)
t = train[1:]
t.head()
#print(test)
print(y_valid)
print(y_pred)
| return 'bas' | conditional_block |
projectsvmglove.py | # -*- coding: utf-8 -*-
"""ProjectSVMGlove.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oP_lPuxcr0qYni8HvGSS-rCFlUCtfUYE
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import pickle
from google.colab import drive
drive.mount('/content/gdrive')
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/gdrive/My\ Drive/NLP_Project
with open(r"processed_english.pkl", "rb") as input_file:
data = pickle.load(input_file)
import re
from pymongo import MongoClient
from collections import defaultdict
from nltk.corpus import wordnet as wn
#from transliteration import transliterate_word
import nltk
from nltk import word_tokenize
from nltk.util import ngrams as ngrams_creator
#from googletrans import Translator
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
import pickle
#nltk.download('punkt')
nltk.download('vader_lexicon')
from nltk.corpus import sentiwordnet as swn
from nltk.sentiment.vader import SentimentIntensityAnalyzer
'''
Utility function to handle commonly used short forms
'''
def handle_short_forms(w):
if w == 'h':
return 'hai'
elif w == 'n':
return 'na'
elif w == 'da':
return 'the'
elif w == 'wid':
return 'with'
elif w == 'pr':
return 'par'
elif w == 'mattt':
return 'mat'
elif w == 'vo':
return 'woh'
elif w == 'ki':
return 'kee'
elif w == 'ap':
return 'aap'
elif w == 'bs':
return 'bas'
elif w == 'goood':
return 'very good'
elif w == 'tera':
return 'teraa'
elif w == 'cnfsn':
return 'confusion'
elif w == 'ka':
return 'kaa'
elif w == 'rkhi':
return 'rakhi'
elif w == 'thts':
return 'thats'
elif w == 'cald':
return 'called'
elif w == 'tabhe':
return 'tabhi'
elif w == 'pta':
return 'pata'
elif w == 'b':
return 'bhi'
elif w == 'nai':
return 'nahi'
elif w == 'f':
return 'of'
elif w == 'd':
return 'the'
else:
return w
'''
Translate a word from Hindi to English (relies on the googletrans Translator, whose import is commented out above).
'''
def translate(word):
return translator.translate(word,src='hi' , dest='en').text
'''
Self defined contractions
'''
def load_dict_contractions():
return {
"ain't":"is not",
"amn't":"am not",
"aren't":"are not",
"can't":"cannot",
"'cause":"because",
"couldn't":"could not",
"couldn't've":"could not have",
"could've":"could have",
"daren't":"dare not",
"daresn't":"dare not",
"dasn't":"dare not",
"didn't":"did not",
"doesn't":"does not",
"don't":"do not",
"e'er":"ever",
"em":"them",
"everyone's":"everyone is",
"finna":"fixing to",
"gimme":"give me",
"gonna":"going to",
"gon't":"go not",
"gotta":"got to",
"hadn't":"had not",
"hasn't":"has not",
"haven't":"have not",
"he'd":"he would",
"he'll":"he will",
"he's":"he is",
"he've":"he have",
"how'd":"how would",
"how'll":"how will",
"how're":"how are",
"how's":"how is",
"I'd":"I would",
"I'll":"I will",
"i'll":"I will",
"I'm":"I am",
"I'm'a":"I am about to",
"I'm'o":"I am going to",
"isn't":"is not",
"it'd":"it would",
"it'll":"it will",
"it's":"it is",
"I've":"I have",
"kinda":"kind of",
"let's":"let us",
"mayn't":"may not",
"may've":"may have",
"mightn't":"might not",
"might've":"might have",
"mustn't":"must not",
"mustn't've":"must not have",
"must've":"must have",
"needn't":"need not",
"ne'er":"never",
"o'":"of",
"o'er":"over",
"ol'":"old",
"oughtn't":"ought not",
"shalln't":"shall not",
"shan't":"shall not",
"she'd":"she would",
"she'll":"she will",
"she's":"she is",
"shouldn't":"should not",
"shouldn't've":"should not have",
"should've":"should have",
"somebody's":"somebody is",
"someone's":"someone is",
"something's":"something is",
"that'd":"that would",
"that'll":"that will",
"that're":"that are",
"that's":"that is",
"there'd":"there would",
"there'll":"there will",
"there're":"there are",
"there's":"there is",
"these're":"these are",
"they'd":"they would",
"they'll":"they will",
"they're":"they are",
"they've":"they have",
"this's":"this is",
"those're":"those are",
"'tis":"it is",
"'twas":"it was",
"wanna":"want to",
"wasn't":"was not",
"we'd":"we would",
"we'd've":"we would have",
"we'll":"we will",
"we're":"we are",
"weren't":"were not",
"we've":"we have",
"what'd":"what did",
"what'll":"what will",
"what're":"what are",
"what's":"what is",
"what've":"what have",
"when's":"when is",
"where'd":"where did",
"where're":"where are",
"where's":"where is",
"where've":"where have",
"which's":"which is",
"who'd":"who would",
"who'd've":"who would have",
"who'll":"who will",
"who're":"who are",
"who's":"who is",
"who've":"who have",
"why'd":"why did",
"why're":"why are",
"why's":"why is",
"won't":"will not",
"wouldn't":"would not",
"would've":"would have",
"y'all":"you all",
"you'd":"you would",
"you'll":"you will",
"you're":"you are",
"you've":"you have",
"Whatcha":"What are you",
"whatcha":"What are you",
"luv":"love",
"sux":"sucks"
}
'''
Handling short forms and contractions in the sentences
'''
long_form_dict = load_dict_contractions()
def | (sentence):
final_sent =""
res = " ".join(long_form_dict.get(ele, ele) for ele in sentence.split())
for word in res.split():
final_sent += (handle_short_forms(word))+ " "
return final_sent
data.shape
l=[]
for sent in data.sentence_eng:
l.append(expand_sent(sent))
print(l[2:5])
data['sentence_eng']=l
'''
Emoticon processing
'''
emoji_list = pd.read_csv('emoji.csv',sep=",")
def find_emoji(sentence):
positive = 0
negative = 0
neutral = 0
sentiment =[]
for word in sentence.split():
if not emoji_list[emoji_list['Emoji']==word].empty:
positive += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Positive']
negative += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Negative']
neutral += emoji_list.iloc[emoji_list.index[emoji_list['Emoji'] == word].tolist()[0]]['Neutral']
return positive,negative,neutral
'''
Slang word processing
'''
slang_list = pd.read_csv('Hinglish_Profanity_List.csv',sep=",",header=None)
slang_list.columns=['hindi_word','meaning','rating']
def find_slang(sentence):
slang_exists = False
total_rating = 0
for word in sentence.split():
if not slang_list[slang_list['hindi_word']==word].empty:
try:
total_rating += slang_list.iloc[slang_list.index[slang_list['hindi_word'] == word].tolist()[0]]['rating']
except:
total_rating +=0
slang_exists = True
try:
total_rating = int(total_rating)
except:
total_rating = 0
return slang_exists,total_rating
data['slang_exists']= data['sentence_mixed'].apply(find_slang)
data[['slang_existance', 'slang_rating']] = pd.DataFrame(data['slang_exists'].tolist(), index=data.index)
data =data.drop(columns=['slang_exists'])
data['emoji_sentiment'] = data['sentence_mixed'].apply(find_emoji)
data[['positive_emoji', 'negative_emoji','neutral_emoji']] = pd.DataFrame(data['emoji_sentiment'].tolist(), index=data.index)
data =data.drop(columns=['emoji_sentiment'])
'''
Calculating the sentiment polarity score
'''
sid = SentimentIntensityAnalyzer()
neg_score=[]
pos_score=[]
neu_score=[]
for sent in data.sentence_eng:
ss = sid.polarity_scores(sent)
neg_score.append(ss['neg'])
pos_score.append(ss['pos'])
neu_score.append(ss['neu'])
data['neg_score']= neg_score
data['pos_score']= pos_score
data['neu_score']= neu_score
data.tail(10)
# removing unwanted patterns from the data
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
#Splitting the data
train = data[:5000]
test = data[5000:]
'''
fetching the sentences/comments from the training dataset and cleaning them
'''
train_corpus = []
for i in range(0, 5000):
review = re.sub('[^a-zA-Z]', ' ', train['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
train_corpus.append(review)
'''
fetching the sentence/comments from the test dataset and cleaning it
'''
test_corpus = []
for i in range(5000, 6137):
review = re.sub('[^a-zA-Z]', ' ', test['sentence_eng'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
# stemming
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# joining them back with space
review = ' '.join(review)
test_corpus.append(review)
# Conversion of sentiment labels into numerical values
train =train.replace("positive",1)
train =train.replace("negative",2)
train = train.replace("neutral",0)
train.head()
y = train.iloc[:, 2]
print(y)
from nltk.tokenize import WordPunctTokenizer
tokenizer = WordPunctTokenizer()
data_tok = [tokenizer.tokenize(d.lower()) for d in train_corpus]
import gensim.downloader as api
model = api.load('glove-twitter-50')
import numpy as np
def get_phrase_embedding(phrase):
"""
    Convert a phrase to a vector by aggregating its word embeddings: take the
    unweighted average of the vectors of all in-vocabulary tokens in the phrase.
"""
vector = np.zeros([model.vector_size], dtype='float32')
# 1. lowercase phrase
phrase = phrase.lower()
# 2. tokenize phrase
phrase_tokens = tokenizer.tokenize(phrase)
# 3. average word vectors for all words in tokenized phrase, skip words that are not in model's vocabulary
divisor = 0
for word in phrase_tokens:
if word in model.vocab:
divisor += 1
vector = vector + model.get_vector(word)
if divisor != 0: vector /= divisor
return vector
'''
Embeddings for Training Corpus
'''
vector_matrix_x_train = list(map(get_phrase_embedding, train_corpus))
'''
Embeddings for Test Corpus
'''
vector_matrix_x_test = list(map(get_phrase_embedding, test_corpus))
numerical_features_t = train[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_t.values.tolist())
'''
Stacking numerical features along with textual features
'''
combinedFeatures = np.hstack([numerical_features_t, np.array(vector_matrix_x_train)])
numerical_features_test = test[['neg_score','pos_score', 'neu_score', 'positive_emoji', 'negative_emoji', 'neutral_emoji', 'slang_rating']]
print(numerical_features_test.values.tolist())
combinedFeatures_test = np.hstack([numerical_features_test, np.array(vector_matrix_x_test)])
from sklearn.model_selection import train_test_split
'''
Train test validation split
'''
x_train, x_valid, y_train, y_valid = train_test_split(combinedFeatures, y, test_size = 0.25, random_state = 42)
print(x_train.shape)
print(x_valid.shape)
print(y_train.shape)
print(y_valid.shape)
print(combinedFeatures_test.shape)
# standardization
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_valid = sc.transform(x_valid)
x_test = sc.transform(combinedFeatures_test)
'''
Random Forest Classifier
'''
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
model = RandomForestClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("F1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
Logistic Regression Classifier
'''
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
Decision Tree Classifier
'''
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
SVM Classifier
'''
from sklearn.svm import SVC
model = SVC()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
'''
XGBoost Classifier
'''
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_valid)
print("Training Accuracy :", model.score(x_train, y_train))
print("Validation Accuracy :", model.score(x_valid, y_valid))
# calculating the f1 score for the validation set
print("f1 score :", f1_score(y_valid, y_pred,average='macro'))
# confusion matrix
cm = confusion_matrix(y_valid, y_pred)
print(cm)
pd.options.display.max_colwidth = 300
train.head(2)
t = train[1:]
t.head()
#print(test)
print(y_valid)
print(y_pred)
| expand_sent | identifier_name |
main.py | """
With microservices in mind, this file contains the Python code for the functions that will be deployed to AWS
using the Serverless framework.
The Serverless framework is used to deploy the code to AWS; it is an appealing choice because the same
framework can target other providers such as Google Cloud and Azure without major changes to the YML file.
"""
import uuid
from os import getenv
from infra.S3 import S3
from utils import valid_new_card_request, score_to_text
from random import randint
def get_requests_card_list_handler(event, context):
"""
    GET - This code is invoked via a GET request to the API created in the serverless file.
    Returns the list of card requests that have been made.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON containing the list of requests.
"""
try:
s3_bucket = getenv('S3_BUCKET', '')
s3 = S3(s3_bucket)
s3.create_s3_instance()
request_list, msg = s3.get_bucket_files(s3_bucket)
_requests = list()
for request in request_list:
obj, msg = s3.get_s3_obj('', request['Key'])
_requests.append(obj)
if _requests:
return {'status': 200, 'requests_list': _requests}
else:
return {'status': 404, 'msg': 'Lista de requisições não encontrada'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def request_new_card_handler(event, context):
"""
    POST - This code is invoked via a POST request to the API created in the serverless file.
    Makes a request for a new card; the candidate's score will be evaluated.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was approved or not.
"""
try:
body = event.get('body', {})
s3_bucket = getenv('S3_BUCKET', '')
if body:
            # Check whether the JSON is valid
json_valido, msg = valid_new_card_request(body)
if json_valido:
body['id'] = str(uuid.uuid4())
s3 = S3(s3_bucket)
s3.create_s3_instance()
                # store the JSON in S3
resp, msg = s3.put_s3_obj('', body, body['cpf'])
print(resp)
print(msg)
json_ret = {'status': 200, 'msg': 'Requisição enviada para aprovação!'}
else:
json_ret = {'status': 500, 'msg': msg}
else:
json_ret = {'status': 500, 'msg': 'Json inválido!'}
return json_ret
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def delete_card_request_handler(event, context):
"""
    DELETE - This code is invoked via a DELETE request to the API created in the serverless file.
    Makes a request to delete a card request.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was deleted or not.
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
del_response, msg = s3.delete_s3_obj('', path['id'])
if del_response['ResponseMetadata']['HTTPStatusCode'] == 204:
return {'status': 200, 'msg': 'Requisição deletada!'}
else:
return {'status': 404, 'msg': 'Requisição não encontrado'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def process_card_request_handler(event, context):
"""
    Processes a new card request.
    This code is triggered when a new request file is saved to S3.
    :param event: Event received from the cloud
    :param context: Context with information about the function
:return: None.
"""
try:
for obj in event['Records']:
            # Information about the file that was inserted into S3
bucket_name = obj['s3']['bucket']['name']
obj = obj['s3']['object']['key']
s3 = S3(bucket_name)
s3.create_s3_instance()
obj_json, msg = s3.get_s3_obj('', obj)
            # compute the client's score
score = randint(1, 999)
obj_json['credit'] = score_to_text(score, obj_json['income'])
resp, msg = s3.put_s3_obj('', obj_json, obj_json['cpf'])
except Exception as err:
print({'status': 500, 'msg': 'Erro interno ao processar a requisição', "error": f'{err}'})
def get_new_card_request_response_handler(event, context):
"""
    GET - This code is invoked via a GET request to the API created in the serverless file.
    Receives the requester's id as a URL parameter and returns the information about the requester and their credit.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: information about the requester
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
print(event)
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
request_json, msg = s3.get_s3_obj('', path['id'])
if request_json:
request_json['status'] = 200
return request_json
else:
return {'status': 404, 'msg': 'Requisição não encontrada'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
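# Minimal local smoke test (illustrative only; assumes valid AWS credentials, an
# S3_BUCKET environment variable, and a previously stored request):
#
#   import os
#   os.environ['S3_BUCKET'] = 'cards-requests-example'  # hypothetical bucket name
#   resp = get_new_card_request_response_handler({'path': {'id': '999.999.999-99'}}, None)
#   print(resp)  # the stored request JSON with status 200, or {'status': 404, ...}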
if __name__ == '__main__':
ex_event = {'body':
{
'name': 'Leonardo Roberto Gazziro',
'phone': '99999999999',
'age': 26,
'cpf': '999.999.999-99',
'income': 3000
},
'method': 'POST', 'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {},
'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br',
'Cache-Control': 'no-cache', 'CloudFront-Forwarded-Proto': 'https',
'CloudFront-Is-Desktop-Viewer': 'true',
'CloudFront-Is-Mobile-Viewer': 'false',
'CloudFront-Is-SmartTV-Viewer': 'false',
'CloudFront-Is-Tablet-Viewer': 'false',
'CloudFront-Viewer-Country': 'BR',
'Content-Type': 'application/json',
'Host': '1nhucniq8b.execute-api.us-east-1.amazonaws.com',
'Postman-Token': '0d40ddc9-494c-438c-9e34-30f9f44ea018',
'User-Agent': 'PostmanRuntime/7.26.3',
'Via': '1.1 3fff6e22f8d6795a61bfdca17d362ca5.cloudfront.net (CloudFront)',
'X-Amz-Cf-Id': 'OKNx6jkzKLQ3nbtD0t4JTNynGlc2TZDSemAepsPC-8Kv0ZV1f6Tz7w==',
'X-Amzn-Trace-Id': 'Root=1-5f4bf435-7d8aa2621cebaaabfa719a0c',
'X-Forwarded-For': '138.204.24.213, 64.252.179.69',
'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '',
'sourceIp': '138.204.24.213', 'principalOrgId': '', 'accessKey': '',
'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '',
'userAgent': 'PostmanRuntime/7.26.3', 'user': ''},
'stageVariables': {},
'requestPath': ''}
ex_event_s3 = {'Records': [{'eventVersion': '2.1', 'eventSource': 'aws:s3', 'awsRegion': 'us-east-1',
'eventTime': '2020-09-06T18:11:57.722Z', 'eventName': 'ObjectCreated:Put',
'userIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'requestParameters': {'sourceIPAddress': '177.42.49.149'},
'responseElements': {'x-amz-request-id': '7857E87B7E4BA60D',
'x-amz-id-2': 'lmNysI3mKLmMQoOzCtPnjT8usl2fMUYbIyipfE59v3oWuyu44XxI/L2tXxRPkqjC6uUNu3rGB/eekMpWqOj6RceGfrLLLTeg'},
's3': {'s3SchemaVersion': '1.0',
'configurationId': 'cc7a7b1c-d354-413c-94e0-c4c2e3cf153a',
'bucket': {'name': 'cards-requests-leogazziro',
'ownerIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'arn': 'arn:aws:s3:::novo-produto'},
'object': {'key': '999.999.999-99.json', 'size': 1172,
'eTag': 'f64c165d9209eb645b660f04b27dc8d2',
'sequencer': '005F552670F6AF15F4'}}}]}
ex_event_att_price = {'body': {},
'method': 'GET', | 'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'CloudFront-Forwarded-Proto': 'https', 'CloudFront-Is-Desktop-Viewer': 'true', 'CloudFront-Is-Mobile-Viewer': 'false', 'CloudFront-Is-SmartTV-Viewer': 'false', 'CloudFront-Is-Tablet-Viewer': 'false', 'CloudFront-Viewer-Country': 'BR', 'Host': 'ug0whmmoab.execute-api.us-east-1.amazonaws.com', 'Postman-Token': 'b4156348-2b52-4528-9fe5-f990b6e0b002', 'User-Agent': 'PostmanRuntime/7.26.8', 'Via': '1.1 563ebd37505bdef43c0d2cf809086a89.cloudfront.net (CloudFront)', 'X-Amz-Cf-Id': '3GsBNWObU21N4Duq6GXK-NVydq7LdeTnEVsiU9o4i3WB4WZcY95iLQ==', 'X-Amzn-Trace-Id': 'Root=1-5fc44260-324117fb66e2aaa37e74b01d', 'X-Forwarded-For': '177.220.174.33, 64.252.179.71', 'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {'id': '999.999.999-99'},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '', 'sourceIp': '177.220.174.33', 'principalOrgId': '', 'accessKey': '', 'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '', 'userAgent': 'PostmanRuntime/7.26.8', 'user': ''}, 'stageVariables': {}, 'requestPath': '/get_new_card_request_response/{id}'}
# request_new_card_handler(ex_event, '')
# get_requests_card_list_handler('', '')
# process_card_request(ex_event_s3, '')
# get_new_card_request_response_handler(ex_event_att_price, '')
# delete_card_request_handler(ex_event_att_price, '') | 'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {}, | random_line_split |
main.py | """
With microservices in mind, this file contains the Python code for the functions that will be deployed to AWS
using the Serverless framework.
The Serverless framework is used to deploy the code to AWS; it is an appealing choice because the same
framework can target other providers such as Google Cloud and Azure without major changes to the YML file.
"""
import uuid
from os import getenv
from infra.S3 import S3
from utils import valid_new_card_request, score_to_text
from random import randint
def get_requests_card_list_handler(event, context):
"""
    GET - This code is invoked via a GET request to the API created in the serverless file.
    Returns the list of card requests that have been made.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON containing the list of requests.
"""
try:
s3_bucket = getenv('S3_BUCKET', '')
s3 = S3(s3_bucket)
s3.create_s3_instance()
request_list, msg = s3.get_bucket_files(s3_bucket)
_requests = list()
for request in request_list:
obj, msg = s3.get_s3_obj('', request['Key'])
_requests.append(obj)
if _requests:
return {'status': 200, 'requests_list': _requests}
else:
return {'status': 404, 'msg': 'Lista de requisições não encontrada'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def request_new_card_handler(event, context):
"""
POST - This code is inv | """
    DELETE - This code is invoked via a DELETE request to the API created in the serverless file.
    Makes a request to delete a card request.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was deleted or not.
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
del_response, msg = s3.delete_s3_obj('', path['id'])
if del_response['ResponseMetadata']['HTTPStatusCode'] == 204:
return {'status': 200, 'msg': 'Requisição deletada!'}
else:
return {'status': 404, 'msg': 'Requisição não encontrado'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def process_card_request_handler(event, context):
"""
    Processes a new card request.
    This code is triggered when a new request file is saved to S3.
    :param event: Event received from the cloud
    :param context: Context with information about the function
:return: None.
"""
try:
for obj in event['Records']:
            # Information about the file that was inserted into S3
bucket_name = obj['s3']['bucket']['name']
obj = obj['s3']['object']['key']
s3 = S3(bucket_name)
s3.create_s3_instance()
obj_json, msg = s3.get_s3_obj('', obj)
            # compute the client's score
score = randint(1, 999)
obj_json['credit'] = score_to_text(score, obj_json['income'])
resp, msg = s3.put_s3_obj('', obj_json, obj_json['cpf'])
except Exception as err:
print({'status': 500, 'msg': 'Erro interno ao processar a requisição', "error": f'{err}'})
def get_new_card_request_response_handler(event, context):
"""
    GET - This code is invoked via a GET request to the API created in the serverless file.
    Receives the requester's id as a URL parameter and returns the information about the requester and their credit.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: information about the requester
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
print(event)
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
request_json, msg = s3.get_s3_obj('', path['id'])
if request_json:
request_json['status'] = 200
return request_json
else:
return {'status': 404, 'msg': 'Requisição não encontrada'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
if __name__ == '__main__':
ex_event = {'body':
{
'name': 'Leonardo Roberto Gazziro',
'phone': '99999999999',
'age': 26,
'cpf': '999.999.999-99',
'income': 3000
},
'method': 'POST', 'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {},
'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br',
'Cache-Control': 'no-cache', 'CloudFront-Forwarded-Proto': 'https',
'CloudFront-Is-Desktop-Viewer': 'true',
'CloudFront-Is-Mobile-Viewer': 'false',
'CloudFront-Is-SmartTV-Viewer': 'false',
'CloudFront-Is-Tablet-Viewer': 'false',
'CloudFront-Viewer-Country': 'BR',
'Content-Type': 'application/json',
'Host': '1nhucniq8b.execute-api.us-east-1.amazonaws.com',
'Postman-Token': '0d40ddc9-494c-438c-9e34-30f9f44ea018',
'User-Agent': 'PostmanRuntime/7.26.3',
'Via': '1.1 3fff6e22f8d6795a61bfdca17d362ca5.cloudfront.net (CloudFront)',
'X-Amz-Cf-Id': 'OKNx6jkzKLQ3nbtD0t4JTNynGlc2TZDSemAepsPC-8Kv0ZV1f6Tz7w==',
'X-Amzn-Trace-Id': 'Root=1-5f4bf435-7d8aa2621cebaaabfa719a0c',
'X-Forwarded-For': '138.204.24.213, 64.252.179.69',
'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '',
'sourceIp': '138.204.24.213', 'principalOrgId': '', 'accessKey': '',
'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '',
'userAgent': 'PostmanRuntime/7.26.3', 'user': ''},
'stageVariables': {},
'requestPath': ''}
ex_event_s3 = {'Records': [{'eventVersion': '2.1', 'eventSource': 'aws:s3', 'awsRegion': 'us-east-1',
'eventTime': '2020-09-06T18:11:57.722Z', 'eventName': 'ObjectCreated:Put',
'userIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'requestParameters': {'sourceIPAddress': '177.42.49.149'},
'responseElements': {'x-amz-request-id': '7857E87B7E4BA60D',
'x-amz-id-2': 'lmNysI3mKLmMQoOzCtPnjT8usl2fMUYbIyipfE59v3oWuyu44XxI/L2tXxRPkqjC6uUNu3rGB/eekMpWqOj6RceGfrLLLTeg'},
's3': {'s3SchemaVersion': '1.0',
'configurationId': 'cc7a7b1c-d354-413c-94e0-c4c2e3cf153a',
'bucket': {'name': 'cards-requests-leogazziro',
'ownerIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'arn': 'arn:aws:s3:::novo-produto'},
'object': {'key': '999.999.999-99.json', 'size': 1172,
'eTag': 'f64c165d9209eb645b660f04b27dc8d2',
'sequencer': '005F552670F6AF15F4'}}}]}
ex_event_att_price = {'body': {},
'method': 'GET',
'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {},
'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'CloudFront-Forwarded-Proto': 'https', 'CloudFront-Is-Desktop-Viewer': 'true', 'CloudFront-Is-Mobile-Viewer': 'false', 'CloudFront-Is-SmartTV-Viewer': 'false', 'CloudFront-Is-Tablet-Viewer': 'false', 'CloudFront-Viewer-Country': 'BR', 'Host': 'ug0whmmoab.execute-api.us-east-1.amazonaws.com', 'Postman-Token': 'b4156348-2b52-4528-9fe5-f990b6e0b002', 'User-Agent': 'PostmanRuntime/7.26.8', 'Via': '1.1 563ebd37505bdef43c0d2cf809086a89.cloudfront.net (CloudFront)', 'X-Amz-Cf-Id': '3GsBNWObU21N4Duq6GXK-NVydq7LdeTnEVsiU9o4i3WB4WZcY95iLQ==', 'X-Amzn-Trace-Id': 'Root=1-5fc44260-324117fb66e2aaa37e74b01d', 'X-Forwarded-For': '177.220.174.33, 64.252.179.71', 'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {'id': '999.999.999-99'},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '', 'sourceIp': '177.220.174.33', 'principalOrgId': '', 'accessKey': '', 'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '', 'userAgent': 'PostmanRuntime/7.26.8', 'user': ''}, 'stageVariables': {}, 'requestPath': '/get_new_card_request_response/{id}'}
# request_new_card_handler(ex_event, '')
# get_requests_card_list_handler('', '')
# process_card_request(ex_event_s3, '')
# get_new_card_request_response_handler(ex_event_att_price, '')
# delete_card_request_handler(ex_event_att_price, '')
| oked via a POST request to the API created in the serverless file.
    Makes a request for a new card; the candidate's score will be evaluated.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was approved or not.
"""
try:
body = event.get('body', {})
s3_bucket = getenv('S3_BUCKET', '')
if body:
            # Check whether the JSON is valid
json_valido, msg = valid_new_card_request(body)
if json_valido:
body['id'] = str(uuid.uuid4())
s3 = S3(s3_bucket)
s3.create_s3_instance()
                # store the JSON in S3
resp, msg = s3.put_s3_obj('', body, body['cpf'])
print(resp)
print(msg)
json_ret = {'status': 200, 'msg': 'Requisição enviada para aprovação!'}
else:
json_ret = {'status': 500, 'msg': msg}
else:
json_ret = {'status': 500, 'msg': 'Json inválido!'}
return json_ret
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def delete_card_request_handler(event, context):
| identifier_body |
main.py | """
With microservices in mind, this file contains the Python code for the functions that will be deployed to AWS
using the Serverless framework.
The Serverless framework is used to deploy the code to AWS; it is an appealing choice because the same
framework can target other providers such as Google Cloud and Azure without major changes to the YML file.
"""
import uuid
from os import getenv
from infra.S3 import S3
from utils import valid_new_card_request, score_to_text
from random import randint
def get_requests_card_list_handler(event, context):
"""
    GET - This code is invoked via a GET request to the API created in the serverless file.
    Returns the list of card requests that have been made.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON containing the list of requests.
"""
try:
s3_bucket = getenv('S3_BUCKET', '')
s3 = S3(s3_bucket)
s3.create_s3_instance()
request_list, msg = s3.get_bucket_files(s3_bucket)
_requests = list()
for request in request_list:
obj, msg = s3.get_s3_obj('', request['Key'])
_requests.append(obj)
if _requests:
return {'status': 200, 'requests_list': _requests}
else:
return {'status': 404, ' | r:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def request_new_card_handler(event, context):
"""
    POST - This code is invoked via a POST request to the API created in the serverless file.
    Makes a request for a new card; the candidate's score will be evaluated.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was approved or not.
"""
try:
body = event.get('body', {})
s3_bucket = getenv('S3_BUCKET', '')
if body:
            # Check whether the JSON is valid
json_valido, msg = valid_new_card_request(body)
if json_valido:
body['id'] = str(uuid.uuid4())
s3 = S3(s3_bucket)
s3.create_s3_instance()
                # store the JSON in S3
resp, msg = s3.put_s3_obj('', body, body['cpf'])
print(resp)
print(msg)
json_ret = {'status': 200, 'msg': 'Requisição enviada para aprovação!'}
else:
json_ret = {'status': 500, 'msg': msg}
else:
json_ret = {'status': 500, 'msg': 'Json inválido!'}
return json_ret
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def delete_card_request_handler(event, context):
"""
    DELETE - This code is invoked via a DELETE request to the API created in the serverless file.
    Makes a request to delete a card request.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was deleted or not.
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
del_response, msg = s3.delete_s3_obj('', path['id'])
if del_response['ResponseMetadata']['HTTPStatusCode'] == 204:
return {'status': 200, 'msg': 'Requisição deletada!'}
else:
return {'status': 404, 'msg': 'Requisição não encontrado'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def process_card_request_handler(event, context):
"""
    Processes a new card request.
    This code is triggered when a new request file is saved to S3.
    :param event: Event received from the cloud
    :param context: Context with information about the function
:return: None.
"""
try:
for obj in event['Records']:
            # Information about the file that was inserted into S3
bucket_name = obj['s3']['bucket']['name']
obj = obj['s3']['object']['key']
s3 = S3(bucket_name)
s3.create_s3_instance()
obj_json, msg = s3.get_s3_obj('', obj)
            # compute the client's score
score = randint(1, 999)
obj_json['credit'] = score_to_text(score, obj_json['income'])
resp, msg = s3.put_s3_obj('', obj_json, obj_json['cpf'])
except Exception as err:
print({'status': 500, 'msg': 'Erro interno ao processar a requisição', "error": f'{err}'})
def get_new_card_request_response_handler(event, context):
"""
    GET - This code is invoked via a GET request to the API created in the serverless file.
    Receives the requester's id as a URL parameter and returns the information about the requester and their credit.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: information about the requester
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
print(event)
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
request_json, msg = s3.get_s3_obj('', path['id'])
if request_json:
request_json['status'] = 200
return request_json
else:
return {'status': 404, 'msg': 'Requisição não encontrada'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
if __name__ == '__main__':
ex_event = {'body':
{
'name': 'Leonardo Roberto Gazziro',
'phone': '99999999999',
'age': 26,
'cpf': '999.999.999-99',
'income': 3000
},
'method': 'POST', 'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {},
'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br',
'Cache-Control': 'no-cache', 'CloudFront-Forwarded-Proto': 'https',
'CloudFront-Is-Desktop-Viewer': 'true',
'CloudFront-Is-Mobile-Viewer': 'false',
'CloudFront-Is-SmartTV-Viewer': 'false',
'CloudFront-Is-Tablet-Viewer': 'false',
'CloudFront-Viewer-Country': 'BR',
'Content-Type': 'application/json',
'Host': '1nhucniq8b.execute-api.us-east-1.amazonaws.com',
'Postman-Token': '0d40ddc9-494c-438c-9e34-30f9f44ea018',
'User-Agent': 'PostmanRuntime/7.26.3',
'Via': '1.1 3fff6e22f8d6795a61bfdca17d362ca5.cloudfront.net (CloudFront)',
'X-Amz-Cf-Id': 'OKNx6jkzKLQ3nbtD0t4JTNynGlc2TZDSemAepsPC-8Kv0ZV1f6Tz7w==',
'X-Amzn-Trace-Id': 'Root=1-5f4bf435-7d8aa2621cebaaabfa719a0c',
'X-Forwarded-For': '138.204.24.213, 64.252.179.69',
'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '',
'sourceIp': '138.204.24.213', 'principalOrgId': '', 'accessKey': '',
'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '',
'userAgent': 'PostmanRuntime/7.26.3', 'user': ''},
'stageVariables': {},
'requestPath': ''}
ex_event_s3 = {'Records': [{'eventVersion': '2.1', 'eventSource': 'aws:s3', 'awsRegion': 'us-east-1',
'eventTime': '2020-09-06T18:11:57.722Z', 'eventName': 'ObjectCreated:Put',
'userIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'requestParameters': {'sourceIPAddress': '177.42.49.149'},
'responseElements': {'x-amz-request-id': '7857E87B7E4BA60D',
'x-amz-id-2': 'lmNysI3mKLmMQoOzCtPnjT8usl2fMUYbIyipfE59v3oWuyu44XxI/L2tXxRPkqjC6uUNu3rGB/eekMpWqOj6RceGfrLLLTeg'},
's3': {'s3SchemaVersion': '1.0',
'configurationId': 'cc7a7b1c-d354-413c-94e0-c4c2e3cf153a',
'bucket': {'name': 'cards-requests-leogazziro',
'ownerIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'arn': 'arn:aws:s3:::novo-produto'},
'object': {'key': '999.999.999-99.json', 'size': 1172,
'eTag': 'f64c165d9209eb645b660f04b27dc8d2',
'sequencer': '005F552670F6AF15F4'}}}]}
ex_event_att_price = {'body': {},
'method': 'GET',
'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {},
'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'CloudFront-Forwarded-Proto': 'https', 'CloudFront-Is-Desktop-Viewer': 'true', 'CloudFront-Is-Mobile-Viewer': 'false', 'CloudFront-Is-SmartTV-Viewer': 'false', 'CloudFront-Is-Tablet-Viewer': 'false', 'CloudFront-Viewer-Country': 'BR', 'Host': 'ug0whmmoab.execute-api.us-east-1.amazonaws.com', 'Postman-Token': 'b4156348-2b52-4528-9fe5-f990b6e0b002', 'User-Agent': 'PostmanRuntime/7.26.8', 'Via': '1.1 563ebd37505bdef43c0d2cf809086a89.cloudfront.net (CloudFront)', 'X-Amz-Cf-Id': '3GsBNWObU21N4Duq6GXK-NVydq7LdeTnEVsiU9o4i3WB4WZcY95iLQ==', 'X-Amzn-Trace-Id': 'Root=1-5fc44260-324117fb66e2aaa37e74b01d', 'X-Forwarded-For': '177.220.174.33, 64.252.179.71', 'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {'id': '999.999.999-99'},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '', 'sourceIp': '177.220.174.33', 'principalOrgId': '', 'accessKey': '', 'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '', 'userAgent': 'PostmanRuntime/7.26.8', 'user': ''}, 'stageVariables': {}, 'requestPath': '/get_new_card_request_response/{id}'}
# request_new_card_handler(ex_event, '')
# get_requests_card_list_handler('', '')
# process_card_request(ex_event_s3, '')
# get_new_card_request_response_handler(ex_event_att_price, '')
# delete_card_request_handler(ex_event_att_price, '')
| msg': 'Lista de requisições não encontrada'}
except Exception as er | conditional_block |
main.py | """
With microservices in mind, this file contains the Python code for the functions that will be deployed to AWS
using the Serverless framework.
The Serverless framework is used to deploy the code to AWS; it is an appealing choice because the same
framework can target other providers such as Google Cloud and Azure without major changes to the YML file.
"""
import uuid
from os import getenv
from infra.S3 import S3
from utils import valid_new_card_request, score_to_text
from random import randint
def get_requests_card_list_handler(event, context):
"""
    GET - This code is invoked via a GET request to the API created in the serverless file.
    Returns the list of card requests that have been made.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON containing the list of requests.
"""
try:
s3_bucket = getenv('S3_BUCKET', '')
s3 = S3(s3_bucket)
s3.create_s3_instance()
request_list, msg = s3.get_bucket_files(s3_bucket)
_requests = list()
for request in request_list:
obj, msg = s3.get_s3_obj('', request['Key'])
_requests.append(obj)
if _requests:
return {'status': 200, 'requests_list': _requests}
else:
return {'status': 404, 'msg': 'Lista de requisições não encontrada'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def request_new_card_handler(event, context):
"""
    POST - This code is invoked via a POST request to the API created in the serverless file.
    Makes a request for a new card; the candidate's score will be evaluated.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was approved or not.
"""
try:
body = event.get('body', {})
s3_bucket = getenv('S3_BUCKET', '')
if body:
            # Check whether the JSON is valid
json_valido, msg = valid_new_card_request(body)
if json_valido:
body['id'] = str(uuid.uuid4())
s3 = S3(s3_bucket)
s3.create_s3_instance()
                # store the JSON in S3
resp, msg = s3.put_s3_obj('', body, body['cpf'])
print(resp)
print(msg)
json_ret = {'status': 200, 'msg': 'Requisição enviada para aprovação!'}
else:
json_ret = {'status': 500, 'msg': msg}
else:
json_ret = {'status': 500, 'msg': 'Json inválido!'}
return json_ret
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def delete_card_request_handler(event, context):
"""
    DELETE - This code is invoked via a DELETE request to the API created in the serverless file.
    Makes a request to delete a card request.
    :param event: Event received from the cloud
    :param context: Context with information about the function
    :return: JSON with details about the request, indicating whether the card request was deleted or not.
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
del_response, msg = s3.delete_s3_obj('', path['id'])
if del_response['ResponseMetadata']['HTTPStatusCode'] == 204:
return {'status': 200, 'msg': 'Requisição deletada!'}
else:
return {'status': 404, 'msg': 'Requisição não encontrado'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
def process_card_request_handler(event, context):
"""
    Processes a new card request.
    This code is triggered when a new request file is saved to S3.
    :param event: Event received from the cloud
    :param context: Context with information about the function
:return: None.
"""
try:
for obj in event['Records']:
            # Information about the file that was inserted into S3
bucket_name = obj['s3']['bucket']['name']
obj = obj['s3']['object']['key']
s3 = S3(bucket_name)
s3.create_s3_instance()
obj_json, msg = s3.get_s3_obj('', obj)
            # compute the client's score
score = randint(1, 999)
obj_json['credit'] = score_to_text(score, obj_json['income'])
resp, msg = s3.put_s3_obj('', obj_json, obj_json['cpf'])
except Exception as err:
print({'status': 500, 'msg': 'Erro interno ao processar a requisição', "error": f'{err}'})
def get_new_card_request_response_handler(event, context):
"""
GET - Esse código será cham | será criada no arquivo serverless.
Recebe o id do solicitante via parametro de URL e retorna as informações referente ao solicitante e seu crédito
:param event: Event recebido pela nuvem
:param context: Contexto com informações da função
:return: informações do solicitante
"""
try:
s3_bucket_crawler = getenv('S3_BUCKET', '')
print(event)
request_json, msg = None, None
path = event.get('path', {})
if 'id' in path.keys():
s3 = S3(s3_bucket_crawler)
s3.create_s3_instance()
request_json, msg = s3.get_s3_obj('', path['id'])
if request_json:
request_json['status'] = 200
return request_json
else:
return {'status': 404, 'msg': 'Requisição não encontrada'}
except Exception as err:
return {'status': 500, 'msg': 'Erro interno ao processar a requisição'}
if __name__ == '__main__':
ex_event = {'body':
{
'name': 'Leonardo Roberto Gazziro',
'phone': '99999999999',
'age': 26,
'cpf': '999.999.999-99',
'income': 3000
},
'method': 'POST', 'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {},
'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br',
'Cache-Control': 'no-cache', 'CloudFront-Forwarded-Proto': 'https',
'CloudFront-Is-Desktop-Viewer': 'true',
'CloudFront-Is-Mobile-Viewer': 'false',
'CloudFront-Is-SmartTV-Viewer': 'false',
'CloudFront-Is-Tablet-Viewer': 'false',
'CloudFront-Viewer-Country': 'BR',
'Content-Type': 'application/json',
'Host': '1nhucniq8b.execute-api.us-east-1.amazonaws.com',
'Postman-Token': '0d40ddc9-494c-438c-9e34-30f9f44ea018',
'User-Agent': 'PostmanRuntime/7.26.3',
'Via': '1.1 3fff6e22f8d6795a61bfdca17d362ca5.cloudfront.net (CloudFront)',
'X-Amz-Cf-Id': 'OKNx6jkzKLQ3nbtD0t4JTNynGlc2TZDSemAepsPC-8Kv0ZV1f6Tz7w==',
'X-Amzn-Trace-Id': 'Root=1-5f4bf435-7d8aa2621cebaaabfa719a0c',
'X-Forwarded-For': '138.204.24.213, 64.252.179.69',
'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '',
'sourceIp': '138.204.24.213', 'principalOrgId': '', 'accessKey': '',
'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '',
'userAgent': 'PostmanRuntime/7.26.3', 'user': ''},
'stageVariables': {},
'requestPath': ''}
ex_event_s3 = {'Records': [{'eventVersion': '2.1', 'eventSource': 'aws:s3', 'awsRegion': 'us-east-1',
'eventTime': '2020-09-06T18:11:57.722Z', 'eventName': 'ObjectCreated:Put',
'userIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'requestParameters': {'sourceIPAddress': '177.42.49.149'},
'responseElements': {'x-amz-request-id': '7857E87B7E4BA60D',
'x-amz-id-2': 'lmNysI3mKLmMQoOzCtPnjT8usl2fMUYbIyipfE59v3oWuyu44XxI/L2tXxRPkqjC6uUNu3rGB/eekMpWqOj6RceGfrLLLTeg'},
's3': {'s3SchemaVersion': '1.0',
'configurationId': 'cc7a7b1c-d354-413c-94e0-c4c2e3cf153a',
'bucket': {'name': 'cards-requests-leogazziro',
'ownerIdentity': {'principalId': 'A1GN6EPM0JKA8K'},
'arn': 'arn:aws:s3:::novo-produto'},
'object': {'key': '999.999.999-99.json', 'size': 1172,
'eTag': 'f64c165d9209eb645b660f04b27dc8d2',
'sequencer': '005F552670F6AF15F4'}}}]}
ex_event_att_price = {'body': {},
'method': 'GET',
'principalId': '',
'stage': 'dev',
'cognitoPoolClaims': {'sub': ''},
'enhancedAuthContext': {},
'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'CloudFront-Forwarded-Proto': 'https', 'CloudFront-Is-Desktop-Viewer': 'true', 'CloudFront-Is-Mobile-Viewer': 'false', 'CloudFront-Is-SmartTV-Viewer': 'false', 'CloudFront-Is-Tablet-Viewer': 'false', 'CloudFront-Viewer-Country': 'BR', 'Host': 'ug0whmmoab.execute-api.us-east-1.amazonaws.com', 'Postman-Token': 'b4156348-2b52-4528-9fe5-f990b6e0b002', 'User-Agent': 'PostmanRuntime/7.26.8', 'Via': '1.1 563ebd37505bdef43c0d2cf809086a89.cloudfront.net (CloudFront)', 'X-Amz-Cf-Id': '3GsBNWObU21N4Duq6GXK-NVydq7LdeTnEVsiU9o4i3WB4WZcY95iLQ==', 'X-Amzn-Trace-Id': 'Root=1-5fc44260-324117fb66e2aaa37e74b01d', 'X-Forwarded-For': '177.220.174.33, 64.252.179.71', 'X-Forwarded-Port': '443', 'X-Forwarded-Proto': 'https'},
'query': {},
'path': {'id': '999.999.999-99'},
'identity': {'cognitoIdentityPoolId': '', 'accountId': '', 'cognitoIdentityId': '', 'caller': '', 'sourceIp': '177.220.174.33', 'principalOrgId': '', 'accessKey': '', 'cognitoAuthenticationType': '', 'cognitoAuthenticationProvider': '', 'userArn': '', 'userAgent': 'PostmanRuntime/7.26.8', 'user': ''}, 'stageVariables': {}, 'requestPath': '/get_new_card_request_response/{id}'}
# request_new_card_handler(ex_event, '')
# get_requests_card_list_handler('', '')
# process_card_request(ex_event_s3, '')
# get_new_card_request_response_handler(ex_event_att_price, '')
# delete_card_request_handler(ex_event_att_price, '')
| ado através de um GET para a API que | identifier_name |
config.go | package config
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
version "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/yext/edward/services"
)
// Config defines the structure for the Edward project configuration file
type Config struct {
workingDir string
TelemetryScript string `json:"telemetryScript,omitempty"`
MinEdwardVersion string `json:"edwardVersion,omitempty"`
Imports []string `json:"imports,omitempty"`
ImportedGroups []GroupDef `json:"-"`
ImportedServices []services.ServiceConfig `json:"-"`
Env []string `json:"env,omitempty"`
Groups []GroupDef `json:"groups,omitempty"`
Services []services.ServiceConfig `json:"services"`
ServiceMap map[string]*services.ServiceConfig `json:"-"`
GroupMap map[string]*services.ServiceGroupConfig `json:"-"`
FilePath string `json:"-"`
}
// GroupDef defines a group based on a list of children specified by name
type GroupDef struct {
Name string `json:"name"`
Aliases []string `json:"aliases,omitempty"`
Description string `json:"description,omitempty"`
Children []string `json:"children"`
Env []string `json:"env,omitempty"`
}
// LoadConfig loads Edward project configuration from the file at the given path and validates it against the current Edward version
func LoadConfig(filePath string, edwardVersion string) (Config, error) {
reader, err := os.Open(filePath)
if err != nil {
return Config{}, errors.WithStack(err)
}
workingDir := path.Dir(filePath)
config, err := loadConfigContents(reader, workingDir)
config.FilePath = filePath
if err != nil {
return Config{}, errors.WithStack(err)
}
if config.MinEdwardVersion != "" && edwardVersion != "" {
// Check that this config is supported by this version
minVersion, err1 := version.NewVersion(config.MinEdwardVersion)
if err1 != nil {
return Config{}, errors.WithStack(err1)
}
currentVersion, err2 := version.NewVersion(edwardVersion)
if err2 != nil {
return Config{}, errors.WithStack(err2)
}
if currentVersion.LessThan(minVersion) {
return Config{}, errors.New("this config requires at least version " + config.MinEdwardVersion)
}
}
err = config.initMaps()
log.Printf("Config loaded with: %d groups and %d services\n", len(config.GroupMap), len(config.ServiceMap))
return config, errors.WithStack(err)
}
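// Usage sketch (illustrative; the file name and versions are hypothetical):
//
//	cfg, err := LoadConfig("edward.json", "1.8.0")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if svc, ok := cfg.ServiceMap["my-service"]; ok {
//		log.Printf("found service %q", svc.Name)
//	}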
// loadConfigContents loads configuration from an io.Reader with the working directory explicitly specified
func loadConfigContents(reader io.Reader, workingDir string) (Config, error) {
log.Printf("Loading config with working dir %v.\n", workingDir)
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(reader)
if err != nil {
return Config{}, errors.Wrap(err, "could not read config")
}
data := buf.Bytes()
var config Config
err = json.Unmarshal(data, &config)
if err != nil {
if syntax, ok := err.(*json.SyntaxError); ok && syntax.Offset != 0 {
start := strings.LastIndex(string(data[:syntax.Offset]), "\n") + 1
line, pos := strings.Count(string(data[:start]), "\n")+1, int(syntax.Offset)-start-1
return Config{}, errors.Wrapf(err, "could not parse config file (line %v, char %v)", line, pos)
}
return Config{}, errors.Wrap(err, "could not parse config file")
}
config.workingDir = workingDir
err = config.loadImports()
if err != nil {
return Config{}, errors.WithStack(err)
}
return config, nil
}
// Save saves config to an io.Writer
func (c Config) Save(writer io.Writer) error {
log.Printf("Saving config")
content, err := json.MarshalIndent(c, "", " ")
if err != nil {
return errors.WithStack(err)
}
_, err = writer.Write(content)
return errors.WithStack(err)
}
// NewConfig creates a Config from slices of services and groups
func NewConfig(newServices []services.ServiceConfig, newGroups []services.ServiceGroupConfig) Config {
log.Printf("Creating new config with %d services and %d groups.\n", len(newServices), len(newGroups))
// Find Env settings common to all services
var allEnvSlices [][]string
for _, s := range newServices {
allEnvSlices = append(allEnvSlices, s.Env)
}
env := stringSliceIntersect(allEnvSlices)
// Remove common settings from services
var svcs []services.ServiceConfig
for _, s := range newServices {
s.Env = stringSliceRemoveCommon(env, s.Env)
svcs = append(svcs, s)
}
cfg := Config{
Env: env,
Services: svcs,
Groups: []GroupDef{},
}
cfg.AddGroups(newGroups)
log.Printf("Config created: %v", cfg)
return cfg
}
// EmptyConfig creates a Config with no services or groups
func EmptyConfig(workingDir string) Config {
log.Printf("Creating empty config\n")
cfg := Config{
workingDir: workingDir,
}
cfg.ServiceMap = make(map[string]*services.ServiceConfig)
cfg.GroupMap = make(map[string]*services.ServiceGroupConfig)
return cfg
}
// NormalizeServicePaths will modify the Paths for each of the provided services
// to be relative to the working directory of this config file
func (c *Config) NormalizeServicePaths(searchPath string, newServices []*services.ServiceConfig) ([]*services.ServiceConfig, error) {
log.Printf("Normalizing paths for %d services.\n", len(newServices))
var outServices []*services.ServiceConfig
for _, s := range newServices {
curService := *s
fullPath := filepath.Join(searchPath, *curService.Path)
relPath, err := filepath.Rel(c.workingDir, fullPath)
if err != nil {
return outServices, errors.WithStack(err)
}
curService.Path = &relPath
outServices = append(outServices, &curService)
}
return outServices, nil
}
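// Behavior sketch (paths are hypothetical): with this config's workingDir at
// "/repo" and searchPath "/repo/tools", a service whose Path is "svc" resolves
// to "/repo/tools/svc" and is rewritten to the relative path "tools/svc".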
// AppendServices adds services to an existing config without replacing existing services
func (c *Config) AppendServices(newServices []*services.ServiceConfig) error {
log.Printf("Appending %d services.\n", len(newServices))
if c.ServiceMap == nil {
c.ServiceMap = make(map[string]*services.ServiceConfig)
}
for _, s := range newServices {
if _, found := c.ServiceMap[s.Name]; !found {
c.ServiceMap[s.Name] = s
c.Services = append(c.Services, *s)
}
}
return nil
}
// AppendGroups adds groups to an existing config without replacing existing groups
func (c *Config) AppendGroups(groups []*services.ServiceGroupConfig) error {
var groupsDereferenced []services.ServiceGroupConfig
for _, group := range groups {
groupsDereferenced = append(groupsDereferenced, *group)
}
return errors.WithStack(c.AddGroups(groupsDereferenced))
}
func (c *Config) RemoveGroup(name string) error {
if _, ok := c.GroupMap[name]; !ok {
return errors.New("Group not found")
}
delete(c.GroupMap, name)
existingGroupDefs := c.Groups
c.Groups = make([]GroupDef, 0, len(existingGroupDefs))
for _, group := range existingGroupDefs {
if group.Name != name {
c.Groups = append(c.Groups, group)
}
}
return nil
}
// AddGroups adds a slice of groups to the Config
func (c *Config) AddGroups(groups []services.ServiceGroupConfig) error {
log.Printf("Adding %d groups.\n", len(groups))
for _, group := range groups {
grp := GroupDef{
Name: group.Name,
Aliases: group.Aliases,
Description: group.Description,
Children: []string{},
Env: group.Env,
}
for _, cg := range group.Groups {
if cg != nil {
grp.Children = append(grp.Children, cg.Name)
}
}
for _, cs := range group.Services {
if cs != nil {
grp.Children = append(grp.Children, cs.Name)
}
}
c.Groups = append(c.Groups, grp)
}
return nil
}
func (c *Config) loadImports() error {
log.Printf("Loading imports\n")
for _, i := range c.Imports {
var cPath string
if filepath.IsAbs(i) {
cPath = i
} else {
cPath = filepath.Join(c.workingDir, i)
}
log.Printf("Loading: %v\n", cPath)
r, err := os.Open(cPath)
if err != nil {
return errors.WithStack(err)
}
cfg, err := loadConfigContents(r, filepath.Dir(cPath))
if err != nil {
return errors.WithMessage(err, i)
}
err = c.importConfig(cfg)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (c *Config) importConfig(second Config) error {
for _, service := range append(second.Services, second.ImportedServices...) {
c.ImportedServices = append(c.ImportedServices, service)
}
for _, group := range append(second.Groups, second.ImportedGroups...) {
c.ImportedGroups = append(c.ImportedGroups, group)
}
return nil
}
func (c *Config) combinePath(path string) *string {
if filepath.IsAbs(path) || strings.HasPrefix(path, "$") {
return &path
}
fullPath := filepath.Join(c.workingDir, path)
return &fullPath
}
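// Behavior sketch (paths are hypothetical): with workingDir "/repo",
// combinePath("svc") yields "/repo/svc", while combinePath("/abs/svc") and
// combinePath("$SVCROOT/svc") are returned unchanged.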
func addToMap(m map[string]struct{}, values ...string) {
for _, v := range values {
m[v] = struct{}{}
}
}
func intersect(m map[string]struct{}, values ...string) []string {
var out []string
for _, v := range values {
if _, ok := m[v]; ok {
out = append(out, v)
}
}
sort.Strings(out)
return out
}
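// Worked example (illustrative): addToMap and intersect together detect
// name collisions.
//
//	used := make(map[string]struct{})
//	addToMap(used, "api", "web")
//	intersect(used, "web", "db") // -> ["web"]; "db" is not yet in use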
func (c *Config) initMaps() error {
var err error
var svcs = make(map[string]*services.ServiceConfig)
var servicesSkipped = make(map[string]struct{})
var namesInUse = make(map[string]struct{})
for _, s := range append(c.Services, c.ImportedServices...) {
sc := s
sc.Env = append(sc.Env, c.Env...)
sc.ConfigFile, err = filepath.Abs(c.FilePath)
if err != nil {
return errors.WithStack(err)
}
if sc.MatchesPlatform() {
if i := intersect(namesInUse, append(sc.Aliases, sc.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
svcs[sc.Name] = &sc
addToMap(namesInUse, append(sc.Aliases, sc.Name)...)
} else {
servicesSkipped[sc.Name] = struct{}{}
}
}
var groups = make(map[string]*services.ServiceGroupConfig)
// First pass: Services
var orphanNames = make(map[string]struct{})
for _, g := range append(c.Groups, c.ImportedGroups...) {
var childServices []*services.ServiceConfig
for _, name := range g.Children {
if s, ok := svcs[name]; ok {
if s.Path != nil {
s.Path = c.combinePath(*s.Path)
}
childServices = append(childServices, s)
} else if _, skipped := servicesSkipped[name]; !skipped {
orphanNames[name] = struct{}{}
}
}
if i := intersect(namesInUse, append(g.Aliases, g.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
groups[g.Name] = &services.ServiceGroupConfig{
Name: g.Name,
Aliases: g.Aliases,
Description: g.Description,
Services: childServices,
Groups: []*services.ServiceGroupConfig{},
Env: g.Env,
ChildOrder: g.Children,
}
addToMap(namesInUse, append(g.Aliases, g.Name)...)
}
// Second pass: Groups
for _, g := range append(c.Groups, c.ImportedGroups...) {
childGroups := []*services.ServiceGroupConfig{}
for _, name := range g.Children {
if gr, ok := groups[name]; ok {
delete(orphanNames, name)
childGroups = append(childGroups, gr)
}
if hasChildCycle(groups[g.Name], childGroups) {
return errors.New("group cycle: " + g.Name)
}
}
groups[g.Name].Groups = childGroups
}
if len(orphanNames) > 0 {
var keys []string
for k := range orphanNames {
keys = append(keys, k)
}
return errors.New("A service or group could not be found for the following names: " + strings.Join(keys, ", "))
}
c.ServiceMap = svcs
c.GroupMap = groups
return nil
}
func hasChildCycle(parent *services.ServiceGroupConfig, children []*services.ServiceGroupConfig) bool {
for _, sg := range children {
if parent == sg {
return true
}
if hasChildCycle(parent, sg.Groups) {
return true
}
}
return false
}
func stringSliceIntersect(slices [][]string) []string {
var counts = make(map[string]int)
for _, s := range slices {
for _, v := range s {
counts[v]++
}
}
var outSlice []string
for v, count := range counts {
if count == len(slices) {
outSlice = append(outSlice, v)
}
}
return outSlice
}
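// Example (illustrative): only values present in every slice survive, so
// stringSliceIntersect([][]string{{"A=1", "B=2"}, {"A=1", "C=3"}}) returns
// []string{"A=1"}.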
func stringSliceRemoveCommon(common []string, original []string) []string {
var commonMap = make(map[string]interface{}) | if _, ok := commonMap[s]; !ok {
outSlice = append(outSlice, s)
}
}
return outSlice
} | for _, s := range common {
commonMap[s] = struct{}{}
}
var outSlice []string
for _, s := range original { | random_line_split |
config.go | package config
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
version "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/yext/edward/services"
)
// Config defines the structure for the Edward project configuration file
type Config struct {
workingDir string
TelemetryScript string `json:"telemetryScript,omitempty"`
MinEdwardVersion string `json:"edwardVersion,omitempty"`
Imports []string `json:"imports,omitempty"`
ImportedGroups []GroupDef `json:"-"`
ImportedServices []services.ServiceConfig `json:"-"`
Env []string `json:"env,omitempty"`
Groups []GroupDef `json:"groups,omitempty"`
Services []services.ServiceConfig `json:"services"`
ServiceMap map[string]*services.ServiceConfig `json:"-"`
GroupMap map[string]*services.ServiceGroupConfig `json:"-"`
FilePath string `json:"-"`
}
// GroupDef defines a group based on a list of children specified by name
type GroupDef struct {
Name string `json:"name"`
Aliases []string `json:"aliases,omitempty"`
Description string `json:"description,omitempty"`
Children []string `json:"children"`
Env []string `json:"env,omitempty"`
}
// LoadConfig loads configuration from the file at the given path, checking any minimum Edward version requirement
func LoadConfig(filePath string, edwardVersion string) (Config, error) {
reader, err := os.Open(filePath)
if err != nil {
return Config{}, errors.WithStack(err)
}
workingDir := path.Dir(filePath)
config, err := loadConfigContents(reader, workingDir)
config.FilePath = filePath
if err != nil {
return Config{}, errors.WithStack(err)
}
if config.MinEdwardVersion != "" && edwardVersion != "" {
// Check that this config is supported by this version
minVersion, err1 := version.NewVersion(config.MinEdwardVersion)
if err1 != nil {
return Config{}, errors.WithStack(err1)
}
currentVersion, err2 := version.NewVersion(edwardVersion)
if err2 != nil {
return Config{}, errors.WithStack(err2)
}
if currentVersion.LessThan(minVersion) {
return Config{}, errors.New("this config requires at least version " + config.MinEdwardVersion)
}
}
err = config.initMaps()
log.Printf("Config loaded with: %d groups and %d services\n", len(config.GroupMap), len(config.ServiceMap))
return config, errors.WithStack(err)
}
// loadConfigContents loads configuration from an io.Reader with the working directory explicitly specified
func loadConfigContents(reader io.Reader, workingDir string) (Config, error) {
log.Printf("Loading config with working dir %v.\n", workingDir)
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(reader)
if err != nil {
return Config{}, errors.Wrap(err, "could not read config")
}
data := buf.Bytes()
var config Config
err = json.Unmarshal(data, &config)
if err != nil {
if syntax, ok := err.(*json.SyntaxError); ok && syntax.Offset != 0 {
start := strings.LastIndex(string(data[:syntax.Offset]), "\n") + 1
line, pos := strings.Count(string(data[:start]), "\n")+1, int(syntax.Offset)-start-1
return Config{}, errors.Wrapf(err, "could not parse config file (line %v, char %v)", line, pos)
}
return Config{}, errors.Wrap(err, "could not parse config file")
}
config.workingDir = workingDir
err = config.loadImports()
if err != nil {
return Config{}, errors.WithStack(err)
}
return config, nil
}
// Save saves config to an io.Writer
func (c Config) Save(writer io.Writer) error {
log.Printf("Saving config")
content, err := json.MarshalIndent(c, "", " ")
if err != nil {
return errors.WithStack(err)
}
_, err = writer.Write(content)
return errors.WithStack(err)
}
// NewConfig creates a Config from slices of services and groups
func NewConfig(newServices []services.ServiceConfig, newGroups []services.ServiceGroupConfig) Config {
log.Printf("Creating new config with %d services and %d groups.\n", len(newServices), len(newGroups))
// Find Env settings common to all services
var allEnvSlices [][]string
for _, s := range newServices {
allEnvSlices = append(allEnvSlices, s.Env)
}
env := stringSliceIntersect(allEnvSlices)
// Remove common settings from services
var svcs []services.ServiceConfig
for _, s := range newServices {
s.Env = stringSliceRemoveCommon(env, s.Env)
svcs = append(svcs, s)
}
cfg := Config{
Env: env,
Services: svcs,
Groups: []GroupDef{},
}
cfg.AddGroups(newGroups)
log.Printf("Config created: %v", cfg)
return cfg
}
// EmptyConfig creates a Config with no services or groups
func EmptyConfig(workingDir string) Config {
log.Printf("Creating empty config\n")
cfg := Config{
workingDir: workingDir,
}
cfg.ServiceMap = make(map[string]*services.ServiceConfig)
cfg.GroupMap = make(map[string]*services.ServiceGroupConfig)
return cfg
}
// NormalizeServicePaths will modify the Paths for each of the provided services
// to be relative to the working directory of this config file
func (c *Config) NormalizeServicePaths(searchPath string, newServices []*services.ServiceConfig) ([]*services.ServiceConfig, error) {
log.Printf("Normalizing paths for %d services.\n", len(newServices))
var outServices []*services.ServiceConfig
for _, s := range newServices {
curService := *s
fullPath := filepath.Join(searchPath, *curService.Path)
relPath, err := filepath.Rel(c.workingDir, fullPath)
if err != nil {
return outServices, errors.WithStack(err)
}
curService.Path = &relPath
outServices = append(outServices, &curService)
}
return outServices, nil
}
// AppendServices adds services to an existing config without replacing existing services
func (c *Config) AppendServices(newServices []*services.ServiceConfig) error {
log.Printf("Appending %d services.\n", len(newServices))
if c.ServiceMap == nil {
c.ServiceMap = make(map[string]*services.ServiceConfig)
}
for _, s := range newServices {
if _, found := c.ServiceMap[s.Name]; !found {
c.ServiceMap[s.Name] = s
c.Services = append(c.Services, *s)
}
}
return nil
}
// AppendGroups adds groups to an existing config without replacing existing groups
func (c *Config) AppendGroups(groups []*services.ServiceGroupConfig) error {
var groupsDereferenced []services.ServiceGroupConfig
for _, group := range groups {
groupsDereferenced = append(groupsDereferenced, *group)
}
return errors.WithStack(c.AddGroups(groupsDereferenced))
}
func (c *Config) RemoveGroup(name string) error {
if _, ok := c.GroupMap[name]; !ok {
return errors.New("Group not found")
}
delete(c.GroupMap, name)
existingGroupDefs := c.Groups
c.Groups = make([]GroupDef, 0, len(existingGroupDefs))
for _, group := range existingGroupDefs {
if group.Name != name {
c.Groups = append(c.Groups, group)
}
}
return nil
}
// AddGroups adds a slice of groups to the Config
func (c *Config) AddGroups(groups []services.ServiceGroupConfig) error |
func (c *Config) loadImports() error {
log.Printf("Loading imports\n")
for _, i := range c.Imports {
var cPath string
if filepath.IsAbs(i) {
cPath = i
} else {
cPath = filepath.Join(c.workingDir, i)
}
log.Printf("Loading: %v\n", cPath)
r, err := os.Open(cPath)
if err != nil {
return errors.WithStack(err)
}
cfg, err := loadConfigContents(r, filepath.Dir(cPath))
if err != nil {
return errors.WithMessage(err, i)
}
err = c.importConfig(cfg)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (c *Config) importConfig(second Config) error {
for _, service := range append(second.Services, second.ImportedServices...) {
c.ImportedServices = append(c.ImportedServices, service)
}
for _, group := range append(second.Groups, second.ImportedGroups...) {
c.ImportedGroups = append(c.ImportedGroups, group)
}
return nil
}
func (c *Config) combinePath(path string) *string {
if filepath.IsAbs(path) || strings.HasPrefix(path, "$") {
return &path
}
fullPath := filepath.Join(c.workingDir, path)
return &fullPath
}
func addToMap(m map[string]struct{}, values ...string) {
for _, v := range values {
m[v] = struct{}{}
}
}
func intersect(m map[string]struct{}, values ...string) []string {
var out []string
for _, v := range values {
if _, ok := m[v]; ok {
out = append(out, v)
}
}
sort.Strings(out)
return out
}
func (c *Config) initMaps() error {
var err error
var svcs = make(map[string]*services.ServiceConfig)
var servicesSkipped = make(map[string]struct{})
var namesInUse = make(map[string]struct{})
for _, s := range append(c.Services, c.ImportedServices...) {
sc := s
sc.Env = append(sc.Env, c.Env...)
sc.ConfigFile, err = filepath.Abs(c.FilePath)
if err != nil {
return errors.WithStack(err)
}
if sc.MatchesPlatform() {
if i := intersect(namesInUse, append(sc.Aliases, sc.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
svcs[sc.Name] = &sc
addToMap(namesInUse, append(sc.Aliases, sc.Name)...)
} else {
servicesSkipped[sc.Name] = struct{}{}
}
}
var groups = make(map[string]*services.ServiceGroupConfig)
// First pass: Services
var orphanNames = make(map[string]struct{})
for _, g := range append(c.Groups, c.ImportedGroups...) {
var childServices []*services.ServiceConfig
for _, name := range g.Children {
if s, ok := svcs[name]; ok {
if s.Path != nil {
s.Path = c.combinePath(*s.Path)
}
childServices = append(childServices, s)
} else if _, skipped := servicesSkipped[name]; !skipped {
orphanNames[name] = struct{}{}
}
}
if i := intersect(namesInUse, append(g.Aliases, g.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
groups[g.Name] = &services.ServiceGroupConfig{
Name: g.Name,
Aliases: g.Aliases,
Description: g.Description,
Services: childServices,
Groups: []*services.ServiceGroupConfig{},
Env: g.Env,
ChildOrder: g.Children,
}
addToMap(namesInUse, append(g.Aliases, g.Name)...)
}
// Second pass: Groups
for _, g := range append(c.Groups, c.ImportedGroups...) {
childGroups := []*services.ServiceGroupConfig{}
for _, name := range g.Children {
if gr, ok := groups[name]; ok {
delete(orphanNames, name)
childGroups = append(childGroups, gr)
}
if hasChildCycle(groups[g.Name], childGroups) {
return errors.New("group cycle: " + g.Name)
}
}
groups[g.Name].Groups = childGroups
}
if len(orphanNames) > 0 {
var keys []string
for k := range orphanNames {
keys = append(keys, k)
}
return errors.New("A service or group could not be found for the following names: " + strings.Join(keys, ", "))
}
c.ServiceMap = svcs
c.GroupMap = groups
return nil
}
func hasChildCycle(parent *services.ServiceGroupConfig, children []*services.ServiceGroupConfig) bool {
for _, sg := range children {
if parent == sg {
return true
}
if hasChildCycle(parent, sg.Groups) {
return true
}
}
return false
}
func stringSliceIntersect(slices [][]string) []string {
var counts = make(map[string]int)
for _, s := range slices {
for _, v := range s {
counts[v]++
}
}
var outSlice []string
for v, count := range counts {
if count == len(slices) {
outSlice = append(outSlice, v)
}
}
return outSlice
}
func stringSliceRemoveCommon(common []string, original []string) []string {
var commonMap = make(map[string]interface{})
for _, s := range common {
commonMap[s] = struct{}{}
}
var outSlice []string
for _, s := range original {
if _, ok := commonMap[s]; !ok {
outSlice = append(outSlice, s)
}
}
return outSlice
}
| {
log.Printf("Adding %d groups.\n", len(groups))
for _, group := range groups {
grp := GroupDef{
Name: group.Name,
Aliases: group.Aliases,
Description: group.Description,
Children: []string{},
Env: group.Env,
}
for _, cg := range group.Groups {
if cg != nil {
grp.Children = append(grp.Children, cg.Name)
}
}
for _, cs := range group.Services {
if cs != nil {
grp.Children = append(grp.Children, cs.Name)
}
}
c.Groups = append(c.Groups, grp)
}
return nil
} | identifier_body |
config.go | package config
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
version "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/yext/edward/services"
)
// Config defines the structure for the Edward project configuration file
type Config struct {
workingDir string
TelemetryScript string `json:"telemetryScript,omitempty"`
MinEdwardVersion string `json:"edwardVersion,omitempty"`
Imports []string `json:"imports,omitempty"`
ImportedGroups []GroupDef `json:"-"`
ImportedServices []services.ServiceConfig `json:"-"`
Env []string `json:"env,omitempty"`
Groups []GroupDef `json:"groups,omitempty"`
Services []services.ServiceConfig `json:"services"`
ServiceMap map[string]*services.ServiceConfig `json:"-"`
GroupMap map[string]*services.ServiceGroupConfig `json:"-"`
FilePath string `json:"-"`
}
// GroupDef defines a group based on a list of children specified by name
type GroupDef struct {
Name string `json:"name"`
Aliases []string `json:"aliases,omitempty"`
Description string `json:"description,omitempty"`
Children []string `json:"children"`
Env []string `json:"env,omitempty"`
}
// LoadConfig loads configuration from the file at the given path, checking any minimum Edward version requirement
func LoadConfig(filePath string, edwardVersion string) (Config, error) {
reader, err := os.Open(filePath)
if err != nil {
return Config{}, errors.WithStack(err)
}
workingDir := path.Dir(filePath)
config, err := loadConfigContents(reader, workingDir)
config.FilePath = filePath
if err != nil {
return Config{}, errors.WithStack(err)
}
if config.MinEdwardVersion != "" && edwardVersion != "" {
// Check that this config is supported by this version
minVersion, err1 := version.NewVersion(config.MinEdwardVersion)
if err1 != nil {
return Config{}, errors.WithStack(err1)
}
currentVersion, err2 := version.NewVersion(edwardVersion)
if err2 != nil {
return Config{}, errors.WithStack(err2)
}
if currentVersion.LessThan(minVersion) {
return Config{}, errors.New("this config requires at least version " + config.MinEdwardVersion)
}
}
err = config.initMaps()
log.Printf("Config loaded with: %d groups and %d services\n", len(config.GroupMap), len(config.ServiceMap))
return config, errors.WithStack(err)
}
// loadConfigContents loads configuration from an io.Reader with the working directory explicitly specified
func loadConfigContents(reader io.Reader, workingDir string) (Config, error) {
log.Printf("Loading config with working dir %v.\n", workingDir)
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(reader)
if err != nil {
return Config{}, errors.Wrap(err, "could not read config")
}
data := buf.Bytes()
var config Config
err = json.Unmarshal(data, &config)
if err != nil {
if syntax, ok := err.(*json.SyntaxError); ok && syntax.Offset != 0 {
start := strings.LastIndex(string(data[:syntax.Offset]), "\n") + 1
line, pos := strings.Count(string(data[:start]), "\n")+1, int(syntax.Offset)-start-1
return Config{}, errors.Wrapf(err, "could not parse config file (line %v, char %v)", line, pos)
}
return Config{}, errors.Wrap(err, "could not parse config file")
}
config.workingDir = workingDir
err = config.loadImports()
if err != nil {
return Config{}, errors.WithStack(err)
}
return config, nil
}
// Save saves config to an io.Writer
func (c Config) Save(writer io.Writer) error {
log.Printf("Saving config")
content, err := json.MarshalIndent(c, "", " ")
if err != nil {
return errors.WithStack(err)
}
_, err = writer.Write(content)
return errors.WithStack(err)
}
// NewConfig creates a Config from slices of services and groups
func NewConfig(newServices []services.ServiceConfig, newGroups []services.ServiceGroupConfig) Config {
log.Printf("Creating new config with %d services and %d groups.\n", len(newServices), len(newGroups))
// Find Env settings common to all services
var allEnvSlices [][]string
for _, s := range newServices {
allEnvSlices = append(allEnvSlices, s.Env)
}
env := stringSliceIntersect(allEnvSlices)
// Remove common settings from services
var svcs []services.ServiceConfig
for _, s := range newServices {
s.Env = stringSliceRemoveCommon(env, s.Env)
svcs = append(svcs, s)
}
cfg := Config{
Env: env,
Services: svcs,
Groups: []GroupDef{},
}
cfg.AddGroups(newGroups)
log.Printf("Config created: %v", cfg)
return cfg
}
// EmptyConfig creates a Config with no services or groups
func EmptyConfig(workingDir string) Config {
log.Printf("Creating empty config\n")
cfg := Config{
workingDir: workingDir,
}
cfg.ServiceMap = make(map[string]*services.ServiceConfig)
cfg.GroupMap = make(map[string]*services.ServiceGroupConfig)
return cfg
}
// NormalizeServicePaths will modify the Paths for each of the provided services
// to be relative to the working directory of this config file
func (c *Config) NormalizeServicePaths(searchPath string, newServices []*services.ServiceConfig) ([]*services.ServiceConfig, error) {
log.Printf("Normalizing paths for %d services.\n", len(newServices))
var outServices []*services.ServiceConfig
for _, s := range newServices {
curService := *s
fullPath := filepath.Join(searchPath, *curService.Path)
relPath, err := filepath.Rel(c.workingDir, fullPath)
if err != nil {
return outServices, errors.WithStack(err)
}
curService.Path = &relPath
outServices = append(outServices, &curService)
}
return outServices, nil
}
// AppendServices adds services to an existing config without replacing existing services
func (c *Config) AppendServices(newServices []*services.ServiceConfig) error {
log.Printf("Appending %d services.\n", len(newServices))
if c.ServiceMap == nil {
c.ServiceMap = make(map[string]*services.ServiceConfig)
}
for _, s := range newServices {
if _, found := c.ServiceMap[s.Name]; !found {
c.ServiceMap[s.Name] = s
c.Services = append(c.Services, *s)
}
}
return nil
}
// AppendGroups adds groups to an existing config without replacing existing groups
func (c *Config) AppendGroups(groups []*services.ServiceGroupConfig) error {
var groupsDereferenced []services.ServiceGroupConfig
for _, group := range groups {
groupsDereferenced = append(groupsDereferenced, *group)
}
return errors.WithStack(c.AddGroups(groupsDereferenced))
}
func (c *Config) RemoveGroup(name string) error {
if _, ok := c.GroupMap[name]; !ok {
return errors.New("Group not found")
}
delete(c.GroupMap, name)
existingGroupDefs := c.Groups
c.Groups = make([]GroupDef, 0, len(existingGroupDefs))
for _, group := range existingGroupDefs {
if group.Name != name {
c.Groups = append(c.Groups, group)
}
}
return nil
}
// AddGroups adds a slice of groups to the Config
func (c *Config) | (groups []services.ServiceGroupConfig) error {
log.Printf("Adding %d groups.\n", len(groups))
for _, group := range groups {
grp := GroupDef{
Name: group.Name,
Aliases: group.Aliases,
Description: group.Description,
Children: []string{},
Env: group.Env,
}
for _, cg := range group.Groups {
if cg != nil {
grp.Children = append(grp.Children, cg.Name)
}
}
for _, cs := range group.Services {
if cs != nil {
grp.Children = append(grp.Children, cs.Name)
}
}
c.Groups = append(c.Groups, grp)
}
return nil
}
func (c *Config) loadImports() error {
log.Printf("Loading imports\n")
for _, i := range c.Imports {
var cPath string
if filepath.IsAbs(i) {
cPath = i
} else {
cPath = filepath.Join(c.workingDir, i)
}
log.Printf("Loading: %v\n", cPath)
r, err := os.Open(cPath)
if err != nil {
return errors.WithStack(err)
}
cfg, err := loadConfigContents(r, filepath.Dir(cPath))
if err != nil {
return errors.WithMessage(err, i)
}
err = c.importConfig(cfg)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (c *Config) importConfig(second Config) error {
for _, service := range append(second.Services, second.ImportedServices...) {
c.ImportedServices = append(c.ImportedServices, service)
}
for _, group := range append(second.Groups, second.ImportedGroups...) {
c.ImportedGroups = append(c.ImportedGroups, group)
}
return nil
}
func (c *Config) combinePath(path string) *string {
if filepath.IsAbs(path) || strings.HasPrefix(path, "$") {
return &path
}
fullPath := filepath.Join(c.workingDir, path)
return &fullPath
}
func addToMap(m map[string]struct{}, values ...string) {
for _, v := range values {
m[v] = struct{}{}
}
}
func intersect(m map[string]struct{}, values ...string) []string {
var out []string
for _, v := range values {
if _, ok := m[v]; ok {
out = append(out, v)
}
}
sort.Strings(out)
return out
}
func (c *Config) initMaps() error {
var err error
var svcs = make(map[string]*services.ServiceConfig)
var servicesSkipped = make(map[string]struct{})
var namesInUse = make(map[string]struct{})
for _, s := range append(c.Services, c.ImportedServices...) {
sc := s
sc.Env = append(sc.Env, c.Env...)
sc.ConfigFile, err = filepath.Abs(c.FilePath)
if err != nil {
return errors.WithStack(err)
}
if sc.MatchesPlatform() {
if i := intersect(namesInUse, append(sc.Aliases, sc.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
svcs[sc.Name] = &sc
addToMap(namesInUse, append(sc.Aliases, sc.Name)...)
} else {
servicesSkipped[sc.Name] = struct{}{}
}
}
var groups = make(map[string]*services.ServiceGroupConfig)
// First pass: Services
var orphanNames = make(map[string]struct{})
for _, g := range append(c.Groups, c.ImportedGroups...) {
var childServices []*services.ServiceConfig
for _, name := range g.Children {
if s, ok := svcs[name]; ok {
if s.Path != nil {
s.Path = c.combinePath(*s.Path)
}
childServices = append(childServices, s)
} else if _, skipped := servicesSkipped[name]; !skipped {
orphanNames[name] = struct{}{}
}
}
if i := intersect(namesInUse, append(g.Aliases, g.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
groups[g.Name] = &services.ServiceGroupConfig{
Name: g.Name,
Aliases: g.Aliases,
Description: g.Description,
Services: childServices,
Groups: []*services.ServiceGroupConfig{},
Env: g.Env,
ChildOrder: g.Children,
}
addToMap(namesInUse, append(g.Aliases, g.Name)...)
}
// Second pass: Groups
for _, g := range append(c.Groups, c.ImportedGroups...) {
childGroups := []*services.ServiceGroupConfig{}
for _, name := range g.Children {
if gr, ok := groups[name]; ok {
delete(orphanNames, name)
childGroups = append(childGroups, gr)
}
if hasChildCycle(groups[g.Name], childGroups) {
return errors.New("group cycle: " + g.Name)
}
}
groups[g.Name].Groups = childGroups
}
if len(orphanNames) > 0 {
var keys []string
for k := range orphanNames {
keys = append(keys, k)
}
return errors.New("A service or group could not be found for the following names: " + strings.Join(keys, ", "))
}
c.ServiceMap = svcs
c.GroupMap = groups
return nil
}
func hasChildCycle(parent *services.ServiceGroupConfig, children []*services.ServiceGroupConfig) bool {
for _, sg := range children {
if parent == sg {
return true
}
if hasChildCycle(parent, sg.Groups) {
return true
}
}
return false
}
func stringSliceIntersect(slices [][]string) []string {
var counts = make(map[string]int)
for _, s := range slices {
for _, v := range s {
counts[v]++
}
}
var outSlice []string
for v, count := range counts {
if count == len(slices) {
outSlice = append(outSlice, v)
}
}
return outSlice
}
func stringSliceRemoveCommon(common []string, original []string) []string {
var commonMap = make(map[string]interface{})
for _, s := range common {
commonMap[s] = struct{}{}
}
var outSlice []string
for _, s := range original {
if _, ok := commonMap[s]; !ok {
outSlice = append(outSlice, s)
}
}
return outSlice
}
| AddGroups | identifier_name |
config.go | package config
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
version "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/yext/edward/services"
)
// Config defines the structure for the Edward project configuration file
type Config struct {
workingDir string
TelemetryScript string `json:"telemetryScript,omitempty"`
MinEdwardVersion string `json:"edwardVersion,omitempty"`
Imports []string `json:"imports,omitempty"`
ImportedGroups []GroupDef `json:"-"`
ImportedServices []services.ServiceConfig `json:"-"`
Env []string `json:"env,omitempty"`
Groups []GroupDef `json:"groups,omitempty"`
Services []services.ServiceConfig `json:"services"`
ServiceMap map[string]*services.ServiceConfig `json:"-"`
GroupMap map[string]*services.ServiceGroupConfig `json:"-"`
FilePath string `json:"-"`
}
// GroupDef defines a group based on a list of children specified by name
type GroupDef struct {
Name string `json:"name"`
Aliases []string `json:"aliases,omitempty"`
Description string `json:"description,omitempty"`
Children []string `json:"children"`
Env []string `json:"env,omitempty"`
}
// LoadConfig loads configuration from the file at the given path, checking any minimum Edward version requirement
func LoadConfig(filePath string, edwardVersion string) (Config, error) {
reader, err := os.Open(filePath)
if err != nil {
return Config{}, errors.WithStack(err)
}
workingDir := path.Dir(filePath)
config, err := loadConfigContents(reader, workingDir)
config.FilePath = filePath
if err != nil {
return Config{}, errors.WithStack(err)
}
if config.MinEdwardVersion != "" && edwardVersion != "" {
// Check that this config is supported by this version
minVersion, err1 := version.NewVersion(config.MinEdwardVersion)
if err1 != nil {
return Config{}, errors.WithStack(err1)
}
currentVersion, err2 := version.NewVersion(edwardVersion)
if err2 != nil {
return Config{}, errors.WithStack(err2)
}
if currentVersion.LessThan(minVersion) {
return Config{}, errors.New("this config requires at least version " + config.MinEdwardVersion)
}
}
err = config.initMaps()
log.Printf("Config loaded with: %d groups and %d services\n", len(config.GroupMap), len(config.ServiceMap))
return config, errors.WithStack(err)
}
// loadConfigContents loads configuration from an io.Reader with the working directory explicitly specified
func loadConfigContents(reader io.Reader, workingDir string) (Config, error) {
log.Printf("Loading config with working dir %v.\n", workingDir)
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(reader)
if err != nil {
return Config{}, errors.Wrap(err, "could not read config")
}
data := buf.Bytes()
var config Config
err = json.Unmarshal(data, &config)
if err != nil {
if syntax, ok := err.(*json.SyntaxError); ok && syntax.Offset != 0 {
start := strings.LastIndex(string(data[:syntax.Offset]), "\n") + 1
line, pos := strings.Count(string(data[:start]), "\n")+1, int(syntax.Offset)-start-1
return Config{}, errors.Wrapf(err, "could not parse config file (line %v, char %v)", line, pos)
}
return Config{}, errors.Wrap(err, "could not parse config file")
}
config.workingDir = workingDir
err = config.loadImports()
if err != nil {
return Config{}, errors.WithStack(err)
}
return config, nil
}
// Save saves config to an io.Writer
func (c Config) Save(writer io.Writer) error {
log.Printf("Saving config")
content, err := json.MarshalIndent(c, "", " ")
if err != nil {
return errors.WithStack(err)
}
_, err = writer.Write(content)
return errors.WithStack(err)
}
// NewConfig creates a Config from slices of services and groups
func NewConfig(newServices []services.ServiceConfig, newGroups []services.ServiceGroupConfig) Config {
log.Printf("Creating new config with %d services and %d groups.\n", len(newServices), len(newGroups))
// Find Env settings common to all services
var allEnvSlices [][]string
for _, s := range newServices {
allEnvSlices = append(allEnvSlices, s.Env)
}
env := stringSliceIntersect(allEnvSlices)
// Remove common settings from services
var svcs []services.ServiceConfig
for _, s := range newServices {
s.Env = stringSliceRemoveCommon(env, s.Env)
svcs = append(svcs, s)
}
cfg := Config{
Env: env,
Services: svcs,
Groups: []GroupDef{},
}
cfg.AddGroups(newGroups)
log.Printf("Config created: %v", cfg)
return cfg
}
// EmptyConfig creates a Config with no services or groups
func EmptyConfig(workingDir string) Config {
log.Printf("Creating empty config\n")
cfg := Config{
workingDir: workingDir,
}
cfg.ServiceMap = make(map[string]*services.ServiceConfig)
cfg.GroupMap = make(map[string]*services.ServiceGroupConfig)
return cfg
}
// NormalizeServicePaths will modify the Paths for each of the provided services
// to be relative to the working directory of this config file
func (c *Config) NormalizeServicePaths(searchPath string, newServices []*services.ServiceConfig) ([]*services.ServiceConfig, error) {
log.Printf("Normalizing paths for %d services.\n", len(newServices))
var outServices []*services.ServiceConfig
for _, s := range newServices {
curService := *s
fullPath := filepath.Join(searchPath, *curService.Path)
relPath, err := filepath.Rel(c.workingDir, fullPath)
if err != nil {
return outServices, errors.WithStack(err)
}
curService.Path = &relPath
outServices = append(outServices, &curService)
}
return outServices, nil
}
// AppendServices adds services to an existing config without replacing existing services
func (c *Config) AppendServices(newServices []*services.ServiceConfig) error {
log.Printf("Appending %d services.\n", len(newServices))
if c.ServiceMap == nil {
c.ServiceMap = make(map[string]*services.ServiceConfig)
}
for _, s := range newServices {
if _, found := c.ServiceMap[s.Name]; !found {
c.ServiceMap[s.Name] = s
c.Services = append(c.Services, *s)
}
}
return nil
}
// AppendGroups adds groups to an existing config without replacing existing groups
func (c *Config) AppendGroups(groups []*services.ServiceGroupConfig) error {
var groupsDereferenced []services.ServiceGroupConfig
for _, group := range groups {
groupsDereferenced = append(groupsDereferenced, *group)
}
return errors.WithStack(c.AddGroups(groupsDereferenced))
}
func (c *Config) RemoveGroup(name string) error {
if _, ok := c.GroupMap[name]; !ok {
return errors.New("Group not found")
}
delete(c.GroupMap, name)
existingGroupDefs := c.Groups
c.Groups = make([]GroupDef, 0, len(existingGroupDefs))
for _, group := range existingGroupDefs {
if group.Name != name {
c.Groups = append(c.Groups, group)
}
}
return nil
}
// AddGroups adds a slice of groups to the Config
func (c *Config) AddGroups(groups []services.ServiceGroupConfig) error {
log.Printf("Adding %d groups.\n", len(groups))
for _, group := range groups {
grp := GroupDef{
Name: group.Name,
Aliases: group.Aliases,
Description: group.Description,
Children: []string{},
Env: group.Env,
}
for _, cg := range group.Groups {
if cg != nil {
grp.Children = append(grp.Children, cg.Name)
}
}
for _, cs := range group.Services {
if cs != nil {
grp.Children = append(grp.Children, cs.Name)
}
}
c.Groups = append(c.Groups, grp)
}
return nil
}
func (c *Config) loadImports() error {
log.Printf("Loading imports\n")
for _, i := range c.Imports {
var cPath string
if filepath.IsAbs(i) {
cPath = i
} else {
cPath = filepath.Join(c.workingDir, i)
}
log.Printf("Loading: %v\n", cPath)
r, err := os.Open(cPath)
if err != nil {
return errors.WithStack(err)
}
cfg, err := loadConfigContents(r, filepath.Dir(cPath))
if err != nil {
return errors.WithMessage(err, i)
}
err = c.importConfig(cfg)
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (c *Config) importConfig(second Config) error {
for _, service := range append(second.Services, second.ImportedServices...) {
c.ImportedServices = append(c.ImportedServices, service)
}
for _, group := range append(second.Groups, second.ImportedGroups...) {
c.ImportedGroups = append(c.ImportedGroups, group)
}
return nil
}
func (c *Config) combinePath(path string) *string {
if filepath.IsAbs(path) || strings.HasPrefix(path, "$") {
return &path
}
fullPath := filepath.Join(c.workingDir, path)
return &fullPath
}
func addToMap(m map[string]struct{}, values ...string) {
for _, v := range values {
m[v] = struct{}{}
}
}
func intersect(m map[string]struct{}, values ...string) []string {
var out []string
for _, v := range values {
if _, ok := m[v]; ok {
out = append(out, v)
}
}
sort.Strings(out)
return out
}
func (c *Config) initMaps() error {
var err error
var svcs = make(map[string]*services.ServiceConfig)
var servicesSkipped = make(map[string]struct{})
var namesInUse = make(map[string]struct{})
for _, s := range append(c.Services, c.ImportedServices...) {
sc := s
sc.Env = append(sc.Env, c.Env...)
sc.ConfigFile, err = filepath.Abs(c.FilePath)
if err != nil {
return errors.WithStack(err)
}
if sc.MatchesPlatform() {
if i := intersect(namesInUse, append(sc.Aliases, sc.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
svcs[sc.Name] = &sc
addToMap(namesInUse, append(sc.Aliases, sc.Name)...)
} else {
servicesSkipped[sc.Name] = struct{}{}
}
}
var groups = make(map[string]*services.ServiceGroupConfig)
// First pass: Services
var orphanNames = make(map[string]struct{})
for _, g := range append(c.Groups, c.ImportedGroups...) {
var childServices []*services.ServiceConfig
for _, name := range g.Children {
if s, ok := svcs[name]; ok {
if s.Path != nil {
s.Path = c.combinePath(*s.Path)
}
childServices = append(childServices, s)
} else if _, skipped := servicesSkipped[name]; !skipped {
orphanNames[name] = struct{}{}
}
}
if i := intersect(namesInUse, append(g.Aliases, g.Name)...); len(i) > 0 {
return fmt.Errorf("Duplicate name or alias: %v", strings.Join(i, ", "))
}
groups[g.Name] = &services.ServiceGroupConfig{
Name: g.Name,
Aliases: g.Aliases,
Description: g.Description,
Services: childServices,
Groups: []*services.ServiceGroupConfig{},
Env: g.Env,
ChildOrder: g.Children,
}
addToMap(namesInUse, append(g.Aliases, g.Name)...)
}
// Second pass: Groups
for _, g := range append(c.Groups, c.ImportedGroups...) {
childGroups := []*services.ServiceGroupConfig{}
for _, name := range g.Children {
if gr, ok := groups[name]; ok {
delete(orphanNames, name)
childGroups = append(childGroups, gr)
}
if hasChildCycle(groups[g.Name], childGroups) {
return errors.New("group cycle: " + g.Name)
}
}
groups[g.Name].Groups = childGroups
}
if len(orphanNames) > 0 |
c.ServiceMap = svcs
c.GroupMap = groups
return nil
}
func hasChildCycle(parent *services.ServiceGroupConfig, children []*services.ServiceGroupConfig) bool {
for _, sg := range children {
if parent == sg {
return true
}
if hasChildCycle(parent, sg.Groups) {
return true
}
}
return false
}
func stringSliceIntersect(slices [][]string) []string {
var counts = make(map[string]int)
for _, s := range slices {
for _, v := range s {
counts[v]++
}
}
var outSlice []string
for v, count := range counts {
if count == len(slices) {
outSlice = append(outSlice, v)
}
}
return outSlice
}
func stringSliceRemoveCommon(common []string, original []string) []string {
var commonMap = make(map[string]interface{})
for _, s := range common {
commonMap[s] = struct{}{}
}
var outSlice []string
for _, s := range original {
if _, ok := commonMap[s]; !ok {
outSlice = append(outSlice, s)
}
}
return outSlice
}
| {
var keys []string
for k := range orphanNames {
keys = append(keys, k)
}
return errors.New("A service or group could not be found for the following names: " + strings.Join(keys, ", "))
} | conditional_block |
scriptAppoint.js | // Setup the calendar with the current date
$(document).ready(function () {
var date = new Date(); // Date() holds the current date and time.
var today = date.getDate(); /* returns the day of the month, e.g. 4 */
var mes = date.getMonth(); /* gets the zero-based month number */
var anio = date.getFullYear();
var new_year = anio;
var fechaCita = date; /* will hold the date chosen for the appointment */
var ban2 = 0;
var ban = 0; // flag to check whether a date has been selected
// Set up click handlers for DOM elements
$(".right-button").click({ date: date, anio }, next_year); /* moves to the next year */
$(".left-button").click({ date: date, anio }, prev_year); /* moves back to earlier years' calendars */
$(".month").click({ date: date, mes, anio }, month_click); /* fires when one of the months is clicked */
$("#add-button").click({ date: date }, new_event); /* the button click starts a new event */
// Mark the current month as active
$(".months-row").children().eq(date.getMonth()).addClass("active-month"); /* highlights the current month, selected via getMonth */
init_calendar(date); /* initializes the calendar from the given date */
/* check_events looks up the stored events: it takes today's day of the month,
   the one-based month number (getMonth() + 1), and the current year from getFullYear() */
/* e.g. in September 2019, getFullYear() returns 2019 and getMonth() returns 8, so getMonth() + 1 is 9 */
var events = check_events(today, date.getMonth() + 1, date.getFullYear());
/* display the events found for today */
show_events(events, months[date.getMonth()], today);
});
// Initializes the calendar by adding the HTML dates
function init_calendar(date) {
ban2 = 0;
ban = 0;
$(".events-container").hide(250);
$(".tbody").empty(); // clear the table body so new rows can be stored
$(".events-container").empty(); /* clear the events container */
var calendar_days = $(".tbody"); // cache the table body element
var month = date.getMonth(); // month number in the range 0-11
var year = date.getFullYear(); // four-digit year
var day_count = days_in_month(month, year); // number of days in this month
var row = $("<tr class='table-row'></tr>"); // start a table row
var today = date.getDate(); // current day of the month
// Set the date to 1 to find the first day of the month.
date.setDate(1); // assign day 1 to the date
var first_day = date.getDay(); // weekday of the 1st: Sunday is 0, Saturday is 6
/* 35 + first_day is the number of date cells added to the table:
   35 covers (7 days in a week) * (up to 5 rows of dates in a month),
   and first_day accounts for the leading blank cells */
for (var i = 0; i < 35 + first_day; i++) {
/*
Since some of the cells will be blank,
the real date must be computed from the index */
var day = i - first_day + 1;
// If it is Sunday, start a new row
if (i % 7 === 0) {
calendar_days.append(row); // append the finished row to calendar_days
row = $("<tr class='table-row'></tr>");
}
// If the current index is not a day of this month, leave the cell blank
if (i < first_day || day > day_count) {
var curr_date = $("<td class='table-date nil'>" + "</td>");
row.append(curr_date); // add the blank cell to the row
}
else {
var curr_date = $("<td class='table-date'>" + day + "</td>");
var events = check_events(day, month + 1, year);
if (today === day && $(".active-date").length === 0) {
curr_date.addClass("active-date");
show_events(events, months[month], day);
}
// If this date has any events, style it with .event-date
if (events.length !== 0) {
curr_date.addClass("event-date");
}
// Set up the onClick handler for clicking a date
curr_date.click({ events: events, month: months[month], day: day }, date_click);
row.append(curr_date);
}
}
// Append the last row and set the current year
calendar_days.append(row);
$(".year").text(year);
}
// Get the number of days in a given month/year
function days_in_month(month, year) {
var monthStart = new Date(year, month, 1); // start of the month
var monthEnd = new Date(year, month + 1, 1); // start of the following month
return (monthEnd - monthStart) / (1000 * 60 * 60 * 24); /* difference in whole days */
}
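// Example (illustrative): the month argument is zero-based, so
// days_in_month(1, 2020) === 29 (February in a leap year) and
// days_in_month(0, 2021) === 31 (January).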
// Event handler for when a date is clicked
function date_click(event) {
//document.getElementById('horaCita').innerHTML = ""; // clear the hour field (added by evelyn)
ban = 1;
$("#dialog").hide(250); // hide the appointment dialog
$(".events-container").hide(250); // hide the events container
var datee = new Date(); // current date and time
var today = datee.getDate(); // day of the month
var anio = datee.getFullYear();
var mes = datee.getMonth(); // zero-based month number
var m = 0;
switch (event.data.month){
case 'January':
m = 0;
break;
case 'February':
m = 1;
break;
case 'March':
m = 2;
break;
case 'April':
m = 3;
break;
case 'May':
m = 4;
break;
case 'June':
m = 5;
break;
case 'July':
m = 6;
break;
case 'August':
m = 7;
break;
case 'September':
m=8;
break;
case 'October':
m = 9;
break;
case 'November':
m = 10;
break;
case 'December':
m = 11;
break;
}
d = event.data.day;
$(".events-container").empty(); // clear its contents
var fech = anio + "-" + (m + 1) + "-" + d;
if (m >= mes && m <= (mes + 2)) {
if (event.data.day < today && mes == m) {
window.alert("Invalid date");
$(".horarios-container").hide(250);
} else {
$(".active-date").removeClass("active-date");
$(this).addClass("active-date");
$(".horarios-container").show(250); // show the schedule section
$(".events-container").show(250); // show the events container
}
}
};
// Event handler for when a month is clicked
function month_click(event) {
$(".events-container").hide(250);
$("#dialog").hide(250);
var date = event.data.date; // currently displayed date
var mesAc = event.data.mes; // current month as a number
var new_month = $(".month").index(this); // number of the selected month
var ahioSel = date.getFullYear();
var ahioAc = event.data.anio;
if (mesAc == 11 && ahioSel !== ahioAc) {
if (new_month == 0 || new_month == 1) {
$(".active-month").removeClass("active-month"); // remove the highlight
$(this).addClass("active-month"); // highlight the clicked month
date.setMonth(new_month); // switch the date to the new month
init_calendar(date); // re-render the calendar with the chosen date
} else {
window.alert("Month not available");
}
} else if (new_month >= mesAc && new_month <= (mesAc + 2) && ahioSel == ahioAc) {
$(".active-month").removeClass("active-month"); // remove the highlight
$(this).addClass("active-month"); // highlight the clicked month
date.setMonth(new_month); // switch the date to the new month
init_calendar(date); // re-render the calendar with the chosen date
}
else {
window.alert("Month not available");
}
}
// Event handler for when the year's right arrow is clicked
function next_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // currently displayed date
new_year = date.getFullYear() + 1; // displayed year plus 1 for the next year
if ((event.data.anio + 1) == new_year) {
$(".year").html(new_year); // update the year label
date.setFullYear(new_year); // move the date to the next year
init_calendar(date); // re-render the calendar
}
else {
window.alert("Year not available");
}
}
// Event handler for when the year's left arrow is clicked
function prev_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // currently displayed date
new_year = date.getFullYear() - 1; // displayed year minus 1 for the previous year
if (event.data.anio == new_year)
{
$(".year").html(new_year); // update the year label
date.setFullYear(new_year); // move the date to the previous year
init_calendar(date); // re-render the calendar
}
}
// Event handler for clicking the new event button
function new_event(event) {
if (ban == 0) {
window.alert("Please choose a date for your appointment first.");
return false;
}
/*if (ban2 == 0) {
window.alert("Please choose a time for your appointment first.");
return false;
}*/
if (ban == 1) {
var date = event.data.date;
var a = date.getFullYear();
var m = date.getMonth();
$(".horarios-container").hide(250);
$(".events-container").hide(250);
$(".dialog").show(250); // show the container used to fill in the appointment details
switch (m) {
case 0:
m = "01";
break;
case 1:
m = "02";
break;
case 2:
m = "03";
break;
case 3:
m = "04";
break;
case 4:
m = "05";
break;
case 5:
m = "06";
break;
case 6:
m = "07";
break;
case 7:
m = "08";
break;
case 8:
m = "09";
break;
case 9:
m = "10";
break;
case 10:
m = "11";
break;
case 11:
m = "12";
break;
}
fechaCita = a + "/" + m + "/" + d; // year/month/day format (added by evelyn)
document.getElementById('fechaId').value = fechaCita;
//document.getElementById('lider').value = fechaCita;
return false;
} else {
window.alert("Please check whether you have already chosen an appointment date and time");
return false;
}
return true;
}
// Show all events for the selected date as card views
function show_events(events, month, day) {
// clear the containers of stale data
$(".events-container").empty();
// $(".events-container").show(250);
console.log(event_data["events"]);
// var date = event.data.date;
// If there are no events for this date, notify the user
if (events.length === 0) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>There are no events planned for " + month + " " + day + ".</div>");
$(event_card).css({ "border-left": "10px solid #FF1744" });
$(event_card).append(event_name);
$(".events-container").append(event_card);
}
else {
// Go through and add each event as a card to the events container
for (var i = 0; i < events.length; i++) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>" + events[i]["occasion"] + ":</div>");
var event_count = $("<div class='event-count'>" + events[i]["invited_count"] + " Invited</div>");
if (events[i]["cancelled"] === true) {
$(event_card).css({
"border-left": "10px solid #FF1744"
});
event_count = $("<div class='event-cancelled'>Cancelled</div>");
}
$(event_card).append(event_name).append(event_count);
$(".events-container").append(event_card);
}
}
}
// Check whether a specific date has any events
function check_events(day, month, year) {
var events = [];
for (var i = 0; i < event_data["events"].length; i++) {
var event = event_data["events"][i];
if (event["day"] === day &&
event["month"] === month &&
event["year"] === year) {
events.push(event);
}
}
return events;
}
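// Example (illustrative, using the sample event_data defined below):
// check_events(10, 5, 2017) returns the ten May 10, 2017 entries, while
// check_events(1, 1, 2000) returns an empty array.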
// Sample event data in JSON format
var event_data = {
"events": [
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Test Event",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 11
}
]
};
const months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
// Function to capture the selected appointment hour
function obtenerHor(id){
switch(id){
case "9":
document.getElementById('horaId').value = '09-10'; /* store the chosen time slot in the hidden field */
ban2 = 1;
break;
case "10":
document.getElementById('horaId').value = '10-11';
ban2 = 1;
break;
case "11":
document.getElementById('horaId').value = '11-12';
ban2 = 1;
break;
case "12":
document.getElementById('horaId').value = "12-13";
ban2 = 1;
break
case "13":
document.getElementById('horaId').value = "13-14";
ban2 = 1;
break;
case "16":
document.getElementById('horaId').value = "16-17";
ban2 = 1;
break;
case "17":
document.getElementById('horaId').value = "17-18";
ban2 = 1;
break;
case "18":
document.getElementById('horaId').value = "18-19";
ban2 = 1;
break;
}
}
/* Validate the form fields */
function validacionForm() {
var reason = "";
var nom = document.getElementById("name");
var mot = document.getElementById("cita");
var tel = document.getElementById("numer");
reason += validateName(nom);
reason += validateCita(mot);
reason += validatePhone(tel);
if (reason != "") {
window.alert("Algunos de los campos necesitan corrección\n" + reason);
return false;
}
return true;
}
/* Validate the name field */
function validateName(nombre) {
var error = "";
// var illegalChars = /\W/; // allows letters, digits and underscores
if (nombre.value == "" || nombre.value == null || (/^\s+$/.test(nombre.value))) {
nombre.style.background = 'red';
error="La caja para nombre no contiene nada...\n";
nombre.focus();
}else if ((nombre.value.length < 3) || (nombre.value.length > 30)) { // compare the value's length, not the element's
nombre.style.background = 'red';
error = "El nombre tiene una longitud incorrecta...\n";
nombre.focus();
}else {
nombre.style.background = 'White';
}
// } else if (illegalChars.test(nombre.value)) {
// nombre.style.background = 'red';
// error = "El nombre ingresado contiene caracteres ilegales.\n";
// nombre.focus();
// }
return error;
}
function validateCita(cita){
var error = "";
if (cita.value == 0 || cita.value.length < 5 || cita.value.length > 30 ) {
cita.style.background = 'red';
err |
}
return error;
}
function validatePhone(tel) {
var error = "";
var stripped = tel.value.replace(/[\(\)\.\-\ ]/g, '');
if (tel.value == "" || tel.value==null) {
error = "No ingresó un número de teléfono..\n";
tel.style.background = 'red';
} else if (!/^\d+$/.test(stripped)) { // parseInt would accept input such as "12a", so require digits only
error = "El número de teléfono contiene caracteres ilegales..\n";
tel.style.background = 'red';
} else if (stripped.length != 10) {
error = "El número de teléfono tiene la longitud incorrecta. Asegúrese de incluir un código de área.\n";
tel.style.background = 'red';
}
return error;
}
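// Worked example: for tel.value = "(55) 1234-5678", stripped becomes "5512345678" (10 digits), so no error is returned.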
// Get a reference to the buttons
// Remember: the dot . selects classes
const botones = document.querySelectorAll(".botonC");
// Define the handler as a named function rather than an anonymous one
const cuandoSeHaceClick = function (evento) {
// Remember, this is the clicked element
// window.alert("El texto que tiene es: ", this.innerText);
ban2=1;
}
// botones is a NodeList, so iterate over it
botones.forEach(boton => {
// Attach the listener
boton.addEventListener("click", cuandoSeHaceClick);
});
function botonCancelar(){
$("#dialog").hide(250);
$(".horarios-container").show(250);
//$(".horariosss").show(250);
ban=0;
ban2=0;
// These are form inputs, so reset .value (innerHTML does not clear an input's contents)
document.getElementById('fechaId').value = "";
document.getElementById('horaId').value = "";
document.getElementById('name').value = "";
document.getElementById('cita').value = "";
document.getElementById('numer').value = "";
}
| or = "Verifique el campo cita, antes de seguir\n"
} else {
cita.style.background = 'White'; | conditional_block |
scriptAppoint.js | // Setup the calendar with the current date
$(document).ready(function () {
var date = new Date(); // Date() gives the current date and time.
var today = date.getDate(); /* day of the month, e.g. 4 */
var mes = date.getMonth(); /* numeric month */
var anio= date.getFullYear();
var new_year = anio;
var fechaCita = date; /* will hold the date chosen for the appointment */
var ban2 = 0;
var ban = 0; // flag for whether a date has been selected
// note: init_calendar and the click handlers assign global ban/ban2/fechaCita; these locals only shadow them
// Set click handlers on DOM elements
$(".right-button").click({ date: date, anio}, next_year); /* go to the next year */
$(".left-button").click({ date: date, anio}, prev_year); /* go back to earlier years */
$(".month").click({ date: date, mes, anio}, month_click);/* clicking one of the months */
$("#add-button").click({ date: date }, new_event); /* the button click creates a new event */
// Mark the current month as active
$(".months-row").children().eq(date.getMonth()).addClass("active-month");/* highlight the month we are in */
init_calendar(date);/* initialise the calendar */
/* check_events looks up the events for today: it receives the day of the month,
the 1-based month (date.getMonth() + 1) and the current year (date.getFullYear()) */
var events = check_events(today, date.getMonth() + 1, date.getFullYear());
/* display the events that were found */
show_events(events, months[date.getMonth()], today );
});
// Initialise the calendar by adding the HTML dates
function init_calendar(date) {
ban2 = 0;
ban = 0;
$(".events-container").hide(250);
$(".tbody").empty();// empty the tbody so new data can be stored
$(".events-container").empty(); /* empty the events-container as well */
var calendar_days = $(".tbody");
var month = date.getMonth(); // month number, 0-11
var year = date.getFullYear();// the year as a number
var day_count = days_in_month(month, year); // number of days in this month
var row = $("<tr class='table-row'></tr>"); // a table row
var today = date.getDate(); // day of the month
// Set the date to 1 to find the first day of the month.
date.setDate(1);
var first_day = date.getDay(); // weekday of the first day (0 = Sunday, 6 = Saturday)
/* 35 + first_day is the number of date cells added to the table:
35 covers (7 days per week) * (up to 5 rows of dates per month),
and first_day accounts for the leading blank cells */
for (var i = 0; i < 35 + first_day; i++) {
/* Some of the cells are blank,
so the real date has to be computed from the index */
var day = i - first_day+1;
// If it is Sunday, start a new row
if (i % 7 === 0) {
calendar_days.append(row); // append this row to calendar_days
row = $("<tr class='table-row'></tr>");
}
// If the current index is not a day of this month, leave it blank
if (i < first_day || day > day_count) {
var curr_date = $("<td class='table-date nil'>" + "</td>");
row.append(curr_date); // append the cell to the row
}
else {
var curr_date = $("<td class='table-date'>" + day + "</td>");
var events = check_events(day, month + 1, year);
if (today === day && $(".active-date").length === 0) {
curr_date.addClass("active-date");
show_events(events, months[month], day);
}
// If this date has any events, style it with .event-date
if (events.length !== 0) {
curr_date.addClass("event-date");
}
// Set the onClick handler for clicking a date
curr_date.click({ events: events, month: months[month], day: day }, date_click);
row.append(curr_date);
}
}
// Append the last row and set the current year
calendar_days.append(row);
$(".year").text(year);
}
// Get the number of days in a given month/year
function days_in_month(month, year) {
var monthStart = new Date(year, month, 1);// start of this month
var monthEnd = new Date(year, month + 1, 1); // start of the following month
return (monthEnd - monthStart) / (1000 * 60 * 60 * 24); /* millisecond difference converted to days */
}
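// Worked example: days_in_month(1, 2020) computes (Mar 1 2020 - Feb 1 2020) = 2,505,600,000 ms,
// and 2,505,600,000 / 86,400,000 = 29 days, the leap-year February.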
//Event handler for clicking a date
function date_click(event) {
//document.getElementById('horaCita').innerHTML = "";// clear the time, added by evelyn
ban=1;
$("#dialog").hide(250); // hide the appointment dialog
$(".events-container").hide(250); // hide the contents
var datee = new Date(); // the current date and time
var today = datee.getDate(); // day of the month
var anio= datee.getFullYear();
var mes = datee.getMonth(); // current month number
// Map the month name back to its 0-based number (replaces a 12-case switch over the names)
var m = months.indexOf(event.data.month);
d = event.data.day;
$(".events-container").empty(); // clear the contents
var fech = anio + "-" + (m+1) + "-" + d;
if (m >= mes && m <= (mes + 2)) {
if (event.data.day < today && mes == m) {
window.alert("Fecha inválida");
$(".horarios-container").hide(250);
} else {
$(".active-date").removeClass("active-date");
$(this).addClass("active-date");
$(".horarios-container").show(250); // show the time-slot panel
$(".events-container").show(250); // show the contents
}
}
};
// Event handler for clicking a month
function month_click(event) {
$(".events-container").hide(250);
$("#dialog").hide(250);
var date = event.data.date; // date of the last day of the previous month
var mesAc = event.data.mes;// current month as a number
var new_month = $(".month").index(this); // number of the selected month
var ahioSel = date.getFullYear();
var ahioAc= event.data.anio;
if (mesAc==11 && ahioSel!==ahioAc){
if(new_month==0 || new_month==1){
$(".active-month").removeClass("active-month"); // remove the old class
$(this).addClass("active-month"); // add the new one
date.setMonth(new_month); // move the date to the new month
init_calendar(date); // rebuild the calendar for that date
}else{
window.alert("Mes no disponible");
}
} else if (new_month >= mesAc && new_month<= (mesAc + 2) && ahioSel==ahioAc) {
$(".active-month").removeClass("active-month"); // remove the old class
$(this).addClass("active-month"); // add the new one
date.setMonth(new_month); // move the date to the new month
init_calendar(date); // rebuild the calendar for that date
}
else {
window.alert("Mes no disponible");
}
}
//Event handler for clicking the year's right (next) button
function next_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // date of the last day of the previous month
new_year = date.getFullYear() + 1; // current year plus 1 gives the next year
if ((event.data.anio + 1) == new_year) {
$(".year").html(new_year); // update the year label
date.setFullYear(new_year); // move the date to the next year
init_calendar(date);// rebuild the calendar
}
else{
window.alert("Año no disponible");
}
}
// Event handler for clicking the year's left (previous) button
function prev_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // date of the last day of the previous month
new_year = date.getFullYear() - 1;// current year minus 1 gives the previous year
if (event.data.anio == new_year)
{
$(".year").html(new_year);// update the year label (the class selector was missing its dot)
date.setFullYear(new_year);// move the date to the previous year
init_calendar(date);// rebuild the calendar
}
}
// Event handler for clicking the new-event button
function new_event(event) {
if (ban==0){
window.alert("Antes, escoge una fecha para tu cita.");
return false;
}
/*if (ban2 == 0) {
window.alert("Antes, escoge una hora para tu cita.");
return false;
}*/
if(ban==1){
var date = event.data.date;
var a = date.getFullYear();
var m = date.getMonth();
$(".horarios-container").hide(250);
$(".events-container").hide(250);
$(".dialog").show(250); // show the container for filling in the appointment
// Zero-pad the 1-based month (replaces a 12-case switch; padStart assumes ES2017)
m = String(m + 1).padStart(2, "0");
fechaCita = a + "/" + m + "/" + d; // year/month/day format, added by evelyn
document.getElementById('fechaId').value = fechaCita;
//document.getElementById('lider').value = fechaCita;
return false;
}else {
window.alert("Por favor, verifica si ya escogiste fecha de cita y horario");
return false;
}
}
// Show all the events for the selected date as cards
function show_events(events, month, day) {
// clear the two containers
$(".events-container").empty();
// $(".events-container").show(250);
console.log(event_data["events"]);
// var date = event.data.date;
// If there are no events for this date, notify the user
if (events.length === 0) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>There are no events planned for " + month + " " + day + ".</div>");
$(event_card).css({ "border-left": "10px solid #FF1744" });
$(event_card).append(event_name);
$(".events-container").append(event_card);
}
else {
// Go through and add each event as a card to the events container
for (var i = 0; i < events.length; i++) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>" + events[i]["occasion"] + ":</div>");
var event_count = $("<div class='event-count'>" + events[i]["invited_count"] + " Invited</div>");
if (events[i]["cancelled"] === true) {
$(event_card).css({
"border-left": "10px solid #FF1744"
});
event_count = $("<div class='event-cancelled'>Cancelled</div>");
}
$(event_card).append(event_name).append(event_count);
$(".events-container").append(event_card);
}
}
}
// Check whether a specific date has any events
function check_events(day, month, year) {
var events = [];
for (var i = 0; i < event_data["events"].length; i++) {
var event = event_data["events"][i];
if (event["day"] === day &&
event["month"] === month &&
event["year"] === year) {
events.push(event);
}
}
return events;
}
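// Worked example: check_events(10, 5, 2017) scans event_data below and returns the ten
// "Repeated Test Event" entries dated 2017-05-10; check_events(11, 5, 2017) returns only "Test Event".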
// Sample event data in JSON format
var event_data = {
"events": [
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
| "day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Test Event",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 11
}
]
};
const months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
// Function to get the selected time slot
function obtenerHor(id){
switch(id){
case "9":
document.getElementById('horaId').value = '09-10'; /* assign the slot to the input */
ban2 = 1; // mark a time as chosen (this flag was missing only in this case)
break;
case "10":
document.getElementById('horaId').value = '10-11';
ban2 = 1;
break;
case "11":
document.getElementById('horaId').value = '11-12';
ban2 = 1;
break;
case "12":
document.getElementById('horaId').value = "12-13";
ban2 = 1;
break;
case "13":
document.getElementById('horaId').value = "13-14";
ban2 = 1;
break;
case "16":
document.getElementById('horaId').value = "16-17";
ban2 = 1;
break;
case "17":
document.getElementById('horaId').value = "17-18";
ban2 = 1;
break;
case "18":
document.getElementById('horaId').value = "18-19";
ban2 = 1;
break;
}
}
/* Validate the form fields */
function validacionForm() {
var reason = "";
var nom = document.getElementById("name");
var mot = document.getElementById("cita");
var tel = document.getElementById("numer");
reason += validateName(nom);
reason += validateCita(mot);
reason += validatePhone(tel);
if (reason != "") {
window.alert("Algunos de los campos necesitan corrección\n" + reason);
return false;
}
return true;
}
/* Validate the name field */
function validateName(nombre) {
var error = "";
// var illegalChars = /\W/; // allows letters, digits and underscores
if (nombre.value == "" || nombre.value == null || (/^\s+$/.test(nombre.value))) {
nombre.style.background = 'red';
error="La caja para nombre no contiene nada...\n";
nombre.focus();
}else if ((nombre.value.length < 3) || (nombre.value.length > 30)) { // compare the value's length, not the element's
nombre.style.background = 'red';
error = "El nombre tiene una longitud incorrecta...\n";
nombre.focus();
}else {
nombre.style.background = 'White';
}
// } else if (illegalChars.test(nombre.value)) {
// nombre.style.background = 'red';
// error = "El nombre ingresado contiene caracteres ilegales.\n";
// nombre.focus();
// }
return error;
}
function validateCita(cita){
var error = "";
if (cita.value == 0 || cita.value.length < 5 || cita.value.length > 30 ) {
cita.style.background = 'red';
error = "Verifique el campo cita, antes de seguir\n";
} else {
cita.style.background = 'White';
}
return error;
}
function validatePhone(tel) {
var error = "";
var stripped = tel.value.replace(/[\(\)\.\-\ ]/g, '');
if (tel.value == "" || tel.value==null) {
error = "No ingresó un número de teléfono..\n";
tel.style.background = 'red';
} else if (!/^\d+$/.test(stripped)) { // parseInt would accept input such as "12a", so require digits only
error = "El número de teléfono contiene caracteres ilegales..\n";
tel.style.background = 'red';
} else if (stripped.length != 10) {
error = "El número de teléfono tiene la longitud incorrecta. Asegúrese de incluir un código de área.\n";
tel.style.background = 'red';
}
return error;
}
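// Worked example: for tel.value = "(55) 1234-5678", stripped becomes "5512345678" (10 digits), so no error is returned.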
// Get a reference to the buttons
// Remember: the dot . selects classes
const botones = document.querySelectorAll(".botonC");
// Define the handler as a named function rather than an anonymous one
const cuandoSeHaceClick = function (evento) {
// Remember, this is the clicked element
// window.alert("El texto que tiene es: ", this.innerText);
ban2=1;
}
// botones is a NodeList, so iterate over it
botones.forEach(boton => {
// Attach the listener
boton.addEventListener("click", cuandoSeHaceClick);
});
function botonCancelar(){
$("#dialog").hide(250);
$(".horarios-container").show(250);
//$(".horariosss").show(250);
ban=0;
ban2=0;
// These are form inputs, so reset .value (innerHTML does not clear an input's contents)
document.getElementById('fechaId').value = "";
document.getElementById('horaId').value = "";
document.getElementById('name').value = "";
document.getElementById('cita').value = "";
document.getElementById('numer').value = "";
} | "occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
| random_line_split |
scriptAppoint.js | // Setup the calendar with the current date
$(document).ready(function () {
var date = new Date(); // Date() gives the current date and time.
var today = date.getDate(); /* day of the month, e.g. 4 */
var mes = date.getMonth(); /* numeric month */
var anio= date.getFullYear();
var new_year = anio;
var fechaCita = date; /* will hold the date chosen for the appointment */
var ban2 = 0;
var ban = 0; // flag for whether a date has been selected
// note: init_calendar and the click handlers assign global ban/ban2/fechaCita; these locals only shadow them
// Set click handlers on DOM elements
$(".right-button").click({ date: date, anio}, next_year); /* go to the next year */
$(".left-button").click({ date: date, anio}, prev_year); /* go back to earlier years */
$(".month").click({ date: date, mes, anio}, month_click);/* clicking one of the months */
$("#add-button").click({ date: date }, new_event); /* the button click creates a new event */
// Mark the current month as active
$(".months-row").children().eq(date.getMonth()).addClass("active-month");/* highlight the month we are in */
init_calendar(date);/* initialise the calendar */
/* check_events looks up the events for today: it receives the day of the month,
the 1-based month (date.getMonth() + 1) and the current year (date.getFullYear()) */
var events = check_events(today, date.getMonth() + 1, date.getFullYear());
/* display the events that were found */
show_events(events, months[date.getMonth()], today );
});
// Initialise the calendar by adding the HTML dates
function init_calendar(date) {
ban2 = 0;
ban = 0;
$(".events-container").hide(250);
$(".tbody").empty();// empty the tbody so new data can be stored
$(".events-container").empty(); /* empty the events-container as well */
var calendar_days = $(".tbody");
var month = date.getMonth(); // month number, 0-11
var year = date.getFullYear();// the year as a number
var day_count = days_in_month(month, year); // number of days in this month
var row = $("<tr class='table-row'></tr>"); // a table row
var today = date.getDate(); // day of the month
// Set the date to 1 to find the first day of the month.
date.setDate(1);
var first_day = date.getDay(); // weekday of the first day (0 = Sunday, 6 = Saturday)
/* 35 + first_day is the number of date cells added to the table:
35 covers (7 days per week) * (up to 5 rows of dates per month),
and first_day accounts for the leading blank cells */
for (var i = 0; i < 35 + first_day; i++) {
/* Some of the cells are blank,
so the real date has to be computed from the index */
var day = i - first_day+1;
// If it is Sunday, start a new row
if (i % 7 === 0) {
calendar_days.append(row); // append this row to calendar_days
row = $("<tr class='table-row'></tr>");
}
// If the current index is not a day of this month, leave it blank
if (i < first_day || day > day_count) {
var curr_date = $("<td class='table-date nil'>" + "</td>");
row.append(curr_date); // append the cell to the row
}
else {
var curr_date = $("<td class='table-date'>" + day + "</td>");
var events = check_events(day, month + 1, year);
if (today === day && $(".active-date").length === 0) {
curr_date.addClass("active-date");
show_events(events, months[month], day);
}
// If this date has any events, style it with .event-date
if (events.length !== 0) {
curr_date.addClass("event-date");
}
// Set the onClick handler for clicking a date
curr_date.click({ events: events, month: months[month], day: day }, date_click);
row.append(curr_date);
}
}
// Append the last row and set the current year
calendar_days.append(row);
$(".year").text(year);
}
// Get the number of days in a given month/year
function days_in_month(month, year) {
var monthStart = new Date(year, month, 1);// start of this month
var monthEnd = new Date(year, month + 1, 1); // start of the following month
return (monthEnd - monthStart) / (1000 * 60 * 60 * 24); /* millisecond difference converted to days */
}
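// Worked example: days_in_month(1, 2020) computes (Mar 1 2020 - Feb 1 2020) = 2,505,600,000 ms,
// and 2,505,600,000 / 86,400,000 = 29 days, the leap-year February.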
//Event handler for clicking a date
function date_click(event) {
//document.getElementById('horaCita').innerHTML = "";// clear the time, added by evelyn
ban=1;
$("#dialog").hide(250); // hide the appointment dialog
$(".events-container").hide(250); // hide the contents
var datee = new Date(); // the current date and time
var today = datee.getDate(); // day of the month
var anio= datee.getFullYear();
var mes = datee.getMonth(); // current month number
// Map the month name back to its 0-based number (replaces a 12-case switch over the names)
var m = months.indexOf(event.data.month);
d = event.data.day;
$(".events-container").empty(); // clear the contents
var fech = anio + "-" + (m+1) + "-" + d;
if (m >= mes && m <= (mes + 2)) {
if (event.data.day < today && mes == m) {
window.alert("Fecha inválida");
$(".horarios-container").hide(250);
} else {
$(".active-date").removeClass("active-date");
$(this).addClass("active-date");
$(".horarios-container").show(250); // show the time-slot panel
$(".events-container").show(250); // show the contents
}
}
};
// Event handler for clicking a month
function month_click(event) {
$(".events-container").hide(250);
$("#dialog").hide(250);
var date = event.data.date; // date of the last day of the previous month
var mesAc = event.data.mes;// current month as a number
var new_month = $(".month").index(this); // number of the selected month
var ahioSel = date.getFullYear();
var ahioAc= event.data.anio;
if (mesAc==11 && ahioSel!==ahioAc){
if(new_month==0 || new_month==1){
$(".active-month").removeClass("active-month"); // remove the old class
$(this).addClass("active-month"); // add the new one
date.setMonth(new_month); // move the date to the new month
init_calendar(date); // rebuild the calendar for that date
}else{
window.alert("Mes no disponible");
}
} else if (new_month >= mesAc && new_month<= (mesAc + 2) && ahioSel==ahioAc) {
$(".active-month").removeClass("active-month"); // remove the old class
$(this).addClass("active-month"); // add the new one
date.setMonth(new_month); // move the date to the new month
init_calendar(date); // rebuild the calendar for that date
}
else {
window.alert("Mes no disponible");
}
}
//Event handler for clicking the year's right (next) button
function next_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // date of the last day of the previous month
new_year = date.getFullYear() + 1; // current year plus 1 gives the next year
if ((event.data.anio + 1) == new_year) {
$(".year").html(new_year); // update the year label
date.setFullYear(new_year); // move the date to the next year
init_calendar(date);// rebuild the calendar
}
else{
window.alert("Año no disponible");
}
}
// Event handler for clicking the year's left (previous) button
function prev_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // date of the last day of the previous month
new_year = date.getFullYear() - 1;// current year minus 1 gives the previous year
if (event.data.anio == new_year)
{
$(".year").html(new_year);// update the year label (the class selector was missing its dot)
date.setFullYear(new_year);// move the date to the previous year
init_calendar(date);// rebuild the calendar
}
}
// Event handler for clicking the new-event button
function new_event(event) {
if (ban==0){
window.alert("Antes, escoge una fecha para tu cita.");
return false;
}
/*if (ban2 == 0) {
window.alert("Antes, escoge una hora para tu cita.");
return false;
}*/
if(ban==1){
var date = event.data.date;
var a = date.getFullYear();
var m = date.getMonth();
$(".horarios-container").hide(250);
$(".events-container").hide(250);
$(".dialog").show(250); // show the container for filling in the appointment
// Zero-pad the 1-based month (replaces a 12-case switch; padStart assumes ES2017)
m = String(m + 1).padStart(2, "0");
fechaCita = a + "/" + m + "/" + d; // year/month/day format, added by evelyn
document.getElementById('fechaId').value = fechaCita;
//document.getElementById('lider').value = fechaCita;
return false;
}else {
window.alert("Por favor, verifica si ya escogiste fecha de cita y horario");
return false;
}
}
// Show all the events for the selected date as cards
function show_events(events, month, day) {
// clear the two containers
$(".events-container").empty();
// $(".events-container").show(250);
console.log(event_data["events"]);
// var date = event.data.date;
// If there are no events for this date, notify the user
if (events.length === 0) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>There are no events planned for " + month + " " + day + ".</div>");
$(event_card).css({ "border-left": "10px solid #FF1744" });
$(event_card).append(event_name);
$(".events-container").append(event_card);
}
else {
// Go through and add each event as a card to the events container
for (var i = 0; i < events.length; i++) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>" + events[i]["occasion"] + ":</div>");
var event_count = $("<div class='event-count'>" + events[i]["invited_count"] + " Invited</div>");
if (events[i]["cancelled"] === true) {
$(event_card).css({
"border-left": "10px solid #FF1744"
});
event_count = $("<div class='event-cancelled'>Cancelled</div>");
}
$(event_card).append(event_name).append(event_count);
$(".events-container").append(event_card);
}
}
}
// Check whether a specific date has any events
function check_events(day, month, year) {
var events = [];
for (var i = 0; i < event_data["events"].length; i++) {
var event = event_data["events"][i];
if (event["day"] === day &&
event["month"] === month &&
event["year"] === year) {
events.push(event);
}
}
return events;
}
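// Worked example: check_events(10, 5, 2017) scans event_data below and returns the ten
// "Repeated Test Event" entries dated 2017-05-10; check_events(11, 5, 2017) returns only "Test Event".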
// Sample event data in JSON format
var event_data = {
"events": [
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Test Event",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 11
}
]
};
const months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
// Function to get the selected time slot
function obtenerHor(id){
switch(id){
case "9":
document.getElementById('horaId').value = '09-10'; /* assign the slot to the input */
ban2 = 1; // mark a time as chosen (this flag was missing only in this case)
break;
case "10":
document.getElementById('horaId').value = '10-11';
ban2 = 1;
break;
case "11":
document.getElementById('horaId').value = '11-12';
ban2 = 1;
break;
case "12":
document.getElementById('horaId').value = "12-13";
ban2 = 1;
break;
case "13":
document.getElementById('horaId').value = "13-14";
ban2 = 1;
break;
case "16":
document.getElementById('horaId').value = "16-17";
ban2 = 1;
break;
case "17":
document.getElementById('horaId').value = "17-18";
ban2 = 1;
break;
case "18":
document.getElementById('horaId').value = "18-19";
ban2 = 1;
break;
}
}
/* Validate the form fields */
function validacionForm() {
var reason = "";
var nom = document.getElementById("name");
var mot = document.getElementById("cita");
var tel = document.getElementById("numer");
reason += validateName(nom);
reason += validateCita(mot);
reason += validatePhone(tel);
if (reason != "") {
window.alert("Algunos de los campos necesitan corrección\n" + reason);
return false;
}
return true;
}
/* Validate the name field */
function validateName(nombre) {
var error = "";
// var illegalChars = /\W/; // allows letters, digits and underscores
if (nombre.value == "" || nombre.value == null || (/^\s+$/.test(nombre.value))) {
nombre.style.background = 'red';
error="La caja para nombre no contiene nada...\n";
nombre.focus();
}else if ((nombre.value.length < 3) || (nombre.value.length > 30)) { // compare the value's length, not the element's
nombre.style.background = 'red';
error = "El nombre tiene una longitud incorrecta...\n";
nombre.focus();
}else {
nombre.style.background = 'White';
}
// } else if (illegalChars.test(nombre.value)) {
// nombre.style.background = 'red';
// error = "El nombre ingresado contiene caracteres ilegales.\n";
// nombre.focus();
// }
return error;
}
function validateCita(cita){
var error = "";
if (cita.value == 0 || cita.value.length < 5 || cita.value.length > 30 ) {
cita.style.background = 'red';
error = "Verifique el campo cita, antes de seguir\n";
} else {
cita.style.background = 'White';
}
return error;
}
function validatePhone(tel) {
var error = "";
var stripped = tel.value.replace(/[\(\)\.\-\ ]/g, '');
if (tel.value == "" || tel.value==null) {
error = "No ingresó un número de teléfono..\n";
tel.style.background = 'red';
} else if (!/^\d+$/.test(stripped)) { // parseInt would accept input such as "12a", so require digits only
error = "El número de teléfono contiene caracteres ilegales..\n";
tel.style.background = 'red';
} else if (stripped.length != 10) {
error = "El número de teléfono tiene la longitud incorrecta. Asegúrese de incluir un código de área.\n";
tel.style.background = 'red';
}
return error;
}
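// Worked example: for tel.value = "(55) 1234-5678", stripped becomes "5512345678" (10 digits), so no error is returned.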
// Get a reference to the buttons
// Remember: the dot . selects classes
const botones = document.querySelectorAll(".botonC");
// Define the handler as a named function rather than an anonymous one
const cuandoSeHaceClick = function (evento) {
// Remember, this is the clicked element
// window.alert("El texto que tiene es: ", this.innerText);
ban2=1;
}
// botones is a NodeList, so iterate over it
botones.forEach(boton => {
// Attach the listener
boton.addEventListener("click", cuandoSeHaceClick);
});
function botonCancelar(){
$("#dialog").hide(250);
$(".horarios-container").s | how(250);
//$(".horariosss").show(250);
//$(".horariosss").show(250);
ban=0;
ban2=0;
document.getElementById('fechaId').innerHTML = "";
document.getElementById('horaId').innerHTML = "";
document.getElementById('name').innerHTML = "";
document.getElementById('cita').innerHTML = "";
document.getElementById('numer').innerHTML = "";
}
| identifier_body | |
scriptAppoint.js | // Setup the calendar with the current date
$(document).ready(function () {
var date = new Date(); // Date() gives the current date and time.
var today = date.getDate(); /* day of the month, e.g. 4 */
var mes = date.getMonth(); /* numeric month */
var anio= date.getFullYear();
var new_year = anio;
var fechaCita = date; /* will hold the date chosen for the appointment */
var ban2 = 0;
var ban = 0; // flag for whether a date has been selected
// note: init_calendar and the click handlers assign global ban/ban2/fechaCita; these locals only shadow them
// Set click handlers on DOM elements
$(".right-button").click({ date: date, anio}, next_year); /* go to the next year */
$(".left-button").click({ date: date, anio}, prev_year); /* go back to earlier years */
$(".month").click({ date: date, mes, anio}, month_click);/* clicking one of the months */
$("#add-button").click({ date: date }, new_event); /* the button click creates a new event */
// Mark the current month as active
$(".months-row").children().eq(date.getMonth()).addClass("active-month");/* highlight the month we are in */
init_calendar(date);/* initialise the calendar */
/* check_events looks up the events for today: it receives the day of the month,
the 1-based month (date.getMonth() + 1) and the current year (date.getFullYear()) */
var events = check_events(today, date.getMonth() + 1, date.getFullYear());
/* display the events that were found */
show_events(events, months[date.getMonth()], today );
});
// Initialise the calendar by adding the HTML dates
function init_calendar(date) {
ban2 = 0;
ban = 0;
$(".events-container").hide(250);
$(".tbody").empty();// empty the tbody so new data can be stored
$(".events-container").empty(); /* empty the events-container as well */
var calendar_days = $(".tbody");
var month = date.getMonth(); // month number, 0-11
var year = date.getFullYear();// the year as a number
var day_count = days_in_month(month, year); // number of days in this month
var row = $("<tr class='table-row'></tr>"); // a table row
var today = date.getDate(); // day of the month
// Set the date to 1 to find the first day of the month.
date.setDate(1);
var first_day = date.getDay(); // weekday of the first day (0 = Sunday, 6 = Saturday)
/* 35 + first_day is the number of date cells added to the table:
35 covers (7 days per week) * (up to 5 rows of dates per month),
and first_day accounts for the leading blank cells */
for (var i = 0; i < 35 + first_day; i++) {
/* Some of the cells are blank,
so the real date has to be computed from the index */
var day = i - first_day+1;
// If it is Sunday, start a new row
if (i % 7 === 0) {
calendar_days.append(row); // append this row to calendar_days
row = $("<tr class='table-row'></tr>");
}
// If the current index is not a day of this month, leave it blank
if (i < first_day || day > day_count) {
var curr_date = $("<td class='table-date nil'>" + "</td>");
row.append(curr_date); // append the cell to the row
}
else {
var curr_date = $("<td class='table-date'>" + day + "</td>");
var events = check_events(day, month + 1, year);
if (today === day && $(".active-date").length === 0) {
curr_date.addClass("active-date");
show_events(events, months[month], day);
}
// If this date has any events, style it with .event-date
if (events.length !== 0) {
curr_date.addClass("event-date");
}
// Set the onClick handler for clicking a date
curr_date.click({ events: events, month: months[month], day: day }, date_click);
row.append(curr_date);
}
}
// Append the last row and set the current year
calendar_days.append(row);
$(".year").text(year);
}
// Get the number of days in a given month/year
function days_in_month(month, year) {
var monthStart = new Date(year, month, 1);// start of this month
var monthEnd = new Date(year, month + 1, 1); // start of the following month
return (monthEnd - monthStart) / (1000 * 60 * 60 * 24); /* millisecond difference converted to days */
}
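// Worked example: days_in_month(1, 2020) computes (Mar 1 2020 - Feb 1 2020) = 2,505,600,000 ms,
// and 2,505,600,000 / 86,400,000 = 29 days, the leap-year February.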
//Event handler for clicking a date
function date_click(event) {
//document.getElementById('horaCita').innerHTML = "";// clear the time, added by evelyn
ban=1;
$("#dialog").hide(250); // hide the appointment dialog
$(".events-container").hide(250); // hide the contents
var datee = new Date(); // the current date and time
var today = datee.getDate(); // day of the month
var anio= datee.getFullYear();
var mes = datee.getMonth(); // current month number
// Map the month name back to its 0-based number (replaces a 12-case switch over the names)
var m = months.indexOf(event.data.month);
d = event.data.day;
$(".events-container").empty(); // clear the contents
var fech = anio + "-" + (m+1) + "-" + d;
if (m >= mes && m <= (mes + 2)) {
if (event.data.day < today && mes == m) {
window.alert("Fecha inválida");
$(".horarios-container").hide(250);
} else {
$(".active-date").removeClass("active-date");
$(this).addClass("active-date");
$(".horarios-container").show(250); // show the time-slot panel
$(".events-container").show(250); // show the contents
}
}
};
// Event handler for clicking a month
function month_click(event) {
$(".events-container").hide(250);
$("#dialog").hide(250);
var date = event.data.date; // date of the last day of the previous month
var mesAc = event.data.mes;// current month as a number
var new_month = $(".month").index(this); // number of the selected month
var ahioSel = date.getFullYear();
var ahioAc= event.data.anio;
if (mesAc==11 && ahioSel!==ahioAc){
if(new_month==0 || new_month==1){
$(".active-month").removeClass("active-month"); // remove the old class
$(this).addClass("active-month"); // add the new one
date.setMonth(new_month); // move the date to the new month
init_calendar(date); // rebuild the calendar for that date
}else{
window.alert("Mes no disponible");
}
} else if (new_month >= mesAc && new_month<= (mesAc + 2) && ahioSel==ahioAc) {
$(".active-month").removeClass("active-month"); // remove the old class
$(this).addClass("active-month"); // add the new one
date.setMonth(new_month); // move the date to the new month
init_calendar(date); // rebuild the calendar for that date
}
else {
window.alert("Mes no disponible");
}
}
//Event handler for clicking the year's right (next) button
function next_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // date of the last day of the previous month
new_year = date.getFullYear() + 1; // current year plus 1 gives the next year
if ((event.data.anio + 1) == new_year) {
$(".year").html(new_year); // update the year label
date.setFullYear(new_year); // move the date to the next year
init_calendar(date);// rebuild the calendar
}
else{
window.alert("Año no disponible");
}
}
// Event handler for clicking the year's left (previous) button
function prev_year(event, anio) {
$("#dialog").hide(250);
$(".events-container").hide(250);
var date = event.data.date; // date of the last day of the previous month
new_year = date.getFullYear() - 1;// current year minus 1 gives the previous year
if (event.data.anio == new_year)
{
$(".year").html(new_year);// update the year label (the class selector was missing its dot)
date.setFullYear(new_year);// move the date to the previous year
init_calendar(date);// rebuild the calendar
}
}
// Event handler for clicking the new-event button
function new_event(event) {
if (ban==0){
window.alert("Antes, escoge una fecha para tu cita.");
return false;
}
/*if (ban2 == 0) {
window.alert("Antes, escoge una hora para tu cita.");
return false;
}*/
if(ban==1){
var date = event.data.date;
var a = date.getFullYear();
var m = date.getMonth();
$(".horarios-container").hide(250);
$(".events-container").hide(250);
$(".dialog").show(250); // show the container for filling in the appointment
// Zero-pad the 1-based month (replaces a 12-case switch; padStart assumes ES2017)
m = String(m + 1).padStart(2, "0");
fechaCita = a + "/" + m + "/" + d; // year/month/day format, added by evelyn
document.getElementById('fechaId').value = fechaCita;
//document.getElementById('lider').value = fechaCita;
return false;
}else {
window.alert("Por favor, verifica si ya escogiste fecha de cita y horario");
return false;
}
}
// Show all the events for the selected date as cards
function show_events(events, month, day) {
// clear the two containers
$(".events-container").empty();
// $(".events-container").show(250);
console.log(event_data["events"]);
// var date = event.data.date;
// If there are no events for this date, notify the user
if (events.length === 0) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>There are no events planned for " + month + " " + day + ".</div>");
$(event_card).css({ "border-left": "10px solid #FF1744" });
$(event_card).append(event_name);
$(".events-container").append(event_card);
}
else {
// Go through and add each event as a card to the events container
for (var i = 0; i < events.length; i++) {
var event_card = $("<div class='event-card'></div>");
var event_name = $("<div class='event-name'>" + events[i]["occasion"] + ":</div>");
var event_count = $("<div class='event-count'>" + events[i]["invited_count"] + " Invited</div>");
if (events[i]["cancelled"] === true) {
$(event_card).css({
"border-left": "10px solid #FF1744"
});
event_count = $("<div class='event-cancelled'>Cancelled</div>");
}
$(event_card).append(event_name).append(event_count);
$(".events-container").append(event_card);
}
}
}
// Check whether a specific date has any events
function check_events(day, month, year) {
var events = [];
for (var i = 0; i < event_data["events"].length; i++) {
var event = event_data["events"][i];
if (event["day"] === day &&
event["month"] === month &&
event["year"] === year) {
events.push(event);
}
}
return events;
}
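// Worked example: check_events(10, 5, 2017) scans event_data below and returns the ten
// "Repeated Test Event" entries dated 2017-05-10; check_events(11, 5, 2017) returns only "Test Event".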
// Sample event data in JSON format
var event_data = {
"events": [
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10,
"cancelled": true
},
{
"occasion": " Repeated Test Event ",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 10
},
{
"occasion": " Test Event",
"invited_count": 120,
"year": 2017,
"month": 5,
"day": 11
}
]
};
const months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
// Function to get the selected time slot
function obtenerHor(id){
switch(id){
case "9":
document.getElementById('horaId').value = '09-10'; /* assign the slot to the input */
ban2 = 1; // mark a time as chosen (this flag was missing only in this case)
break;
case "10":
document.getElementById('horaId').value = '10-11';
ban2 = 1;
break;
case "11":
document.getElementById('horaId').value = '11-12';
ban2 = 1;
break;
case "12":
document.getElementById('horaId').value = "12-13";
ban2 = 1;
break;
case "13":
document.getElementById('horaId').value = "13-14";
ban2 = 1;
break;
case "16":
document.getElementById('horaId').value = "16-17";
ban2 = 1;
break;
case "17":
document.getElementById('horaId').value = "17-18";
ban2 = 1;
break;
case "18":
document.getElementById('horaId').value = "18-19";
ban2 = 1;
break;
}
}
/* Validate the form fields */
function validacionForm() {
var reason = "";
var nom = document.getElementById("name");
var mot = document.getElementById("cita");
var tel = document.getElementById("numer");
reason += validateName(nom);
reason += validateCita(mot);
reason += validatePhone(tel);
if (reason != "") {
window.alert("Algunos de los campos necesitan corrección\n" + reason);
return false;
}
return true;
}
/* Validate the name field */
function validateName(nombre) {
var error = "" | llegalChars = /\W/; // permite letras, números y guiones bajos
if (nombre.value == "" || nombre.value == null || (/^\s+$/.test(nombre.value))) {
nombre.style.background = 'red';
error="La caja para nombre no contiene nada...\n";
nombre.focus();
}else if ((nombre.value.length < 3) || (nombre.value.length > 30)) { // compare the value's length, not the element's
nombre.style.background = 'red';
error = "El nombre tiene una longitud incorrecta...\n";
nombre.focus();
}else {
nombre.style.background = 'White';
}
// } else if (illegalChars.test(nombre.value)) {
// nombre.style.background = 'red';
// error = "El nombre ingresado contiene caracteres ilegales.\n";
// nombre.focus();
// }
return error;
}
function validateCita(cita){
var error = "";
if (cita.value == 0 || cita.value.length < 5 || cita.value.length > 30 ) {
cita.style.background = 'red';
error = "Verifique el campo cita, antes de seguir\n";
} else {
cita.style.background = 'White';
}
return error;
}
function validatePhone(tel) {
var error = "";
var stripped = tel.value.replace(/[\(\)\.\-\ ]/g, '');
if (tel.value == "" || tel.value==null) {
error = "No ingresó un número de teléfono..\n";
tel.style.background = 'red';
} else if (!/^\d+$/.test(stripped)) { // parseInt would accept input such as "12a", so require digits only
error = "El número de teléfono contiene caracteres ilegales..\n";
tel.style.background = 'red';
} else if (stripped.length != 10) {
error = "El número de teléfono tiene la longitud incorrecta. Asegúrese de incluir un código de área.\n";
tel.style.background = 'red';
}
return error;
}
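// Worked example: for tel.value = "(55) 1234-5678", stripped becomes "5512345678" (10 digits), so no error is returned.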
// Get a reference to the buttons
// Remember: the dot . selects classes
const botones = document.querySelectorAll(".botonC");
// Define the handler as a named function rather than an anonymous one
const cuandoSeHaceClick = function (evento) {
// Remember, this is the clicked element
// window.alert("El texto que tiene es: ", this.innerText);
ban2=1;
}
// botones is a NodeList, so iterate over it
botones.forEach(boton => {
// Attach the listener
boton.addEventListener("click", cuandoSeHaceClick);
});
function botonCancelar(){
$("#dialog").hide(250);
$(".horarios-container").show(250);
//$(".horariosss").show(250);
ban=0;
ban2=0;
// These are form inputs, so reset .value (innerHTML does not clear an input's contents)
document.getElementById('fechaId').value = "";
document.getElementById('horaId').value = "";
document.getElementById('name').value = "";
document.getElementById('cita').value = "";
document.getElementById('numer').value = "";
}
| ;
// var i | identifier_name |
example_all.py | import numpy as np
from functools import partial
from tqdm import tqdm
import nlopt
import matplotlib.pylab as plt
import bbq.models.rff as ff
from bbq.examples.utils import toy_functions
from bbq.examples.utils.train_model import train_model
from bbq.examples.utils.saveResults import ResultSaver
from bbq.utils.enums import QMC_KWARG, QMC_SCRAMBLING, QMC_SEQUENCE
from bbq.interpolate import InterpWithAsymptotes
from bbq.models.ABLR import ABLR
from bbq.utils import datasets
from bbq import parametrisations
from bbq.utils.metrics import mnll
tqdm.monitor_interval = 0
"""
Options
"""
# Dataset
# datasetName = "tread"
# datasetName = "pores"
# datasetName = "rubber"
# datasetName = "concrete"
# datasetName = "airfoil_noise"
# datasetName = "co2"
# datasetName = "airline"
# datasetName = "cosine"
# datasetName = "pattern"
datasetName = "steps"
# datasetName = "quadraticCos"
# datasetName = "harmonics"
# datasetName = "heaviside"
extrapolationDataset = True # Better plotting for extrapolation dataset
# BLR Model
M = 300 # Number of random fourier features in BLR
blrAlpha, blrBeta = 1, 10 # Precision params for BLR (weight prior, noise)
learnBeta = False # Learn noise precision
# Quantile type
# useARDkernel = True # To use an ARD kernel
useARDkernel = False # To use an isotropic kernel
basicRBF = False # To run basic RFF with RBF (opt lengthscale)
# quantileParametrisation = parametrisations.InterpPieceWise4points
quantileParametrisation = parametrisations.InterpSingleSpline
# quantileParametrisation = parametrisations.PeriodicSimple
# quantileParametrisation = parametrisations.InterpIncrementY6pts
# quantileParametrisation = parametrisations.BoundedQPoints
# quantileParametrisation = parametrisations.StairCase
# quantileParametrisation = parametrisations.InterpWeibull
# Score metric to use
scoreMetric = "NLML" # Negative log marginal likelihood
# scoreMetric = "RMSE" # Root mean square error
# Search strategy
# searchStrategy = "Gridsearch"
# searchStrategy = "BO" # Bayesian optimisation
searchStrategy = "NLopt" # Search using NLopt optimisation algorithms
# Grid-search parameters
gridRes = int(np.sqrt(1000)) # Grid-search resolution across each dimension
# BO parameters
nBOIter = 200 # Number of BO iterations
boKappa = 1 # BO Exploration-exploitation parameter
boModelLengthscale = 0.03 # RBF lengthscale of the approximate GP used in BO
boModelType = "BLR-RFF" # Approximate GP to be used in BO
# boModelType = "LocalGP"
# NLopt parameters
nlOptIter = 200
# NLopt Global algorithms (derivative free)
# nlOptAlgo = nlopt.GN_CRS
# nlOptAlgo = nlopt.GN_CRS2_LM
# nlOptAlgo = nlopt.GN_DIRECT
# nlOptAlgo = nlopt.GN_DIRECT_L
# nlOptAlgo = nlopt.GN_ISRES
# nlOptAlgo = nlopt.GN_ESCH
# NLopt Local algotithms (derivative free)
nlOptAlgo = nlopt.LN_COBYLA
# nlOptAlgo = nlopt.LN_BOBYQA
# nlOptAlgo = nlopt.LN_SBPLX
"""
Generate dataset
"""
if datasetName == "co2":
train_data, test_data = datasets.mauna_loa()
elif datasetName == "airline":
train_data, test_data = datasets.airline_passengers()
elif datasetName == "airfoil_noise":
train_data, test_data = datasets.airfoil_noise()
elif datasetName == "concrete":
train_data, test_data = datasets.concrete()
elif datasetName == "rubber" \
or datasetName == "pores" \
or datasetName == "tread":
train_data, test_data = datasets.textures_2D(texture_name=datasetName)
else:
if datasetName == "cosine":
objectiveFunction = toy_functions.cosine
elif datasetName == "harmonics":
objectiveFunction = toy_functions.harmonics
elif datasetName == "pattern":
objectiveFunction = toy_functions.pattern
elif datasetName == "heaviside":
objectiveFunction = toy_functions.heaviside
elif datasetName == "quadraticCos":
objectiveFunction = toy_functions.quadratic_cos
elif datasetName == "steps":
objectiveFunction = toy_functions.steps
else:
raise RuntimeError("Objective function was not defined")
train_x = np.sort(np.random.uniform(-0.3, 1.2, (100, 1)), axis=0)
train_y = objectiveFunction(train_x)
train_data = np.hstack([train_x, train_y])
test_x = np.linspace(-0.5, 1.5, 1000).reshape(-1, 1)
# test_x = np.sort(np.random.uniform(-1, 1, (100, 1)), axis=0)
test_y = objectiveFunction(test_x)
test_data = np.hstack([test_x, test_y])
train_x = train_data[:, :-1]
train_y = train_data[:, [-1]]
test_x = test_data[:, :-1]
test_y = test_data[:, [-1]]
N, D = train_x.shape
print("Dataset size: {} * {}".format(N, D))
if scoreMetric == "RMSE":
Nv = int(0.2 * N)
N -= Nv
val_x = train_x[-Nv:, :]
val_y = train_y[-Nv:].reshape(-1, 1)
train_x = train_x[:-Nv, :]
train_y = train_y[:-Nv].reshape(-1, 1)
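# e.g. with N = 100 training points, Nv = int(0.2 * 100) = 20 rows are held out for validation and N drops to 80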
vizData = False
if vizData:
plt.figure()
plt.plot(train_x, train_y, 'b-', label="train set")
if scoreMetric == "RMSE":
plt.plot(val_x, val_y, 'g-', label="validation set")
plt.plot(test_x, test_y, 'r-', label="test set")
plt.xlim(min(np.min(train_x), np.min(test_x)) - .2,
max(np.max(train_x), np.max(test_x)) + .2)
plt.ylim(min(np.min(train_y), np.min(test_y)) - .2,
max(np.max(train_y), np.max(test_y)) + .2)
plt.title("Data set")
plt.legend(loc=2)
plt.show()
exit()
"""
Quantile parametrisation
"""
if quantileParametrisation == parametrisations.BoundedQPoints:
qp = quantileParametrisation(6, p1=(0.03, -100), p2=(0.97, 100))
elif quantileParametrisation == parametrisations.StairCase:
qp = quantileParametrisation(9, p1=(0.03, -150), p2=(0.97, 150))
else:
qp = quantileParametrisation()
genParams = qp.gen_params
interpolator = qp.interpolator
paramLow = qp.paramLow
paramHigh = qp.paramHigh
scaleParams = qp.scale_params
if datasetName == "co2":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramHigh[0] = 2.3
paramLow[0] = 2
paramHigh[1] = 0.2
paramLow[1] = 0.01
elif datasetName == "airline":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramLow[0] = 2.3
paramHigh[0] = 2.8
paramLow[1] = 0.001
elif datasetName == "pores":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.45
elif datasetName == "rubber":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.25
elif datasetName == "tread":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.5
# paramLow[1] = 0.001
# paramHigh[1] = 0.01
if quantileParametrisation == parametrisations.InterpSingleSpline:
paramHigh[1] = 6
paramHigh[0] = 2.3
paramLow[0] = 1.5
"""
Kernel composition
"""
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
# composition = ["linear", "+", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
composition = ["bbq"]
if datasetName == "co2":
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
composition = ["linear_no_bias", "+", "bbq"]
elif datasetName == "airline":
composition = ["linear", "+", "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "linear_no_bias", "*", "bbq", "+", "bbq"]
if basicRBF:
# composition = ["rbf" if e == "bbq" else e for e in composition]
composition = ["rbf"]
qp.paramLow = np.array([-3])
qp.paramHigh = np.array([0])
qp.logScaleParams = np.array([True])
paramLow = qp.paramLow
paramHigh = qp.paramHigh
"""
Search/optimisation for best quantile
"""
if useARDkernel:
paramLow = np.array(D * list(paramLow))
paramHigh = np.array(D * list(paramHigh))
qp.logScaleParams = np.array(D * list(qp.logScaleParams))
if learnBeta:
paramLow = np.array([-3] + list(paramLow))
paramHigh = np.array([3] + list(paramHigh))
print("parameter space high: {}".format(paramHigh))
print("parameter space low: {}".format(paramLow))
score, allParams, bo = None, None, None
if searchStrategy == "Gridsearch":
assert len(paramLow) == 2, "Gridsearch only designed for 2 params"
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], gridRes),
np.linspace(paramLow[1], paramHigh[1], gridRes))
allParams = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
score = np.zeros((gridRes ** 2,))
for i in tqdm(range(allParams.shape[0])):
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
elif searchStrategy == "BO":
search_int = np.vstack([np.atleast_2d(paramLow),
np.atleast_2d(paramHigh)]).T
bo = None # BO.BO(search_int, acq_fun=BO.UCB(boKappa), opt_maxeval=20,
allParams = np.zeros((nBOIter, len(paramLow)))
score = np.zeros((nBOIter,))
for i in tqdm(range(nBOIter)):
allParams[i, :] = bo.next_sample()
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
bo.update(allParams[i, :].reshape(1, -1),
np.array(score[i]).reshape(-1, 1))
elif searchStrategy == "NLopt":
allParams = np.zeros((nlOptIter, len(paramLow)))
score = np.zeros((nlOptIter,))
pbar = tqdm(total=nlOptIter)
i = 0
# NLopt params
opt = nlopt.opt(nlOptAlgo, len(paramLow))
opt.set_lower_bounds(np.array(paramLow).reshape(-1))
opt.set_upper_bounds(np.array(paramHigh).reshape(-1))
opt.set_maxeval(nlOptIter)
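# NLopt calls the objective as f(x, grad); the derivative-free algorithms
# above pass an empty grad, so it can be ignored. The wrapper below records
# every evaluated point and its score so the best one can be recovered
# afterwards with np.argmax(score).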
def _fun_maximize(_x, grad):
|
opt.set_max_objective(_fun_maximize)
init_opt = np.random.uniform(0, 1, len(paramLow)) * \
(paramHigh - paramLow) + paramLow
opt.optimize(init_opt)
pbar.close()
"""
Saving
"""
# Compute best quantile
imax = np.argmax(score)
curParams = allParams[imax, :]
if learnBeta:
curParams = allParams[imax, 1:]
blrBeta = 10 ** allParams[imax, 0]
print("Best BLR beta: {}".format(blrBeta))
bestParam = scaleParams(curParams)
print("Best parameters: {}".format(bestParam))
bbq_qf = None
if not basicRBF:
# Interpolation
if useARDkernel:
bbq_qf = []
nprm = len(bestParam)
for j in range(D):
x, y, params = genParams(
bestParam[int(j * nprm / D):int((j + 1) * nprm / D)])
bbq_qf.append(InterpWithAsymptotes(x=x, y=y,
interpolator=interpolator,
params=params))
else:
x, y, params = genParams(bestParam)
bbq_qf = InterpWithAsymptotes(x=x, y=y, interpolator=interpolator,
params=params)
# Draw features with interpolated quantile
f_dict = {"linear_no_bias": ff.Linear(d=D, has_bias_term=False),
"linear": ff.Linear(d=D, has_bias_term=True)}
if basicRBF:
f_dict["rbf"] = ff.RFF_RBF(m=M, d=D, ls=bestParam)
else:
f_dict["bbq"] = ff.QMCF_BBQ(m=M, d=D,
sampler=partial(freqGenFn, bbq_qf=bbq_qf),
sequence_type=QMC_SEQUENCE.HALTON,
scramble_type=QMC_SCRAMBLING.GENERALISED,
qmc_kwargs={QMC_KWARG.PERM: None})
qmcf_bbq = ff.FComposition(f_dict=f_dict, composition=composition)
# Compute best data fit
blr = ABLR(k_phi=qmcf_bbq, d=D, d_out=1,
alpha=blrAlpha, beta=blrBeta, log_dir=None)
blr.learn_from_history(x_trn=train_x, y_trn=train_y)
prediction = blr.predict(x_tst=train_x, pred_var=True)
pred_train, predVar_train = prediction.mean, prediction.var
# Compute final score metrics
prediction = blr.predict(x_tst=test_x, pred_var=True)
pred_test, predVar_test = prediction.mean, prediction.var
finalRMSE = np.sqrt(np.mean((pred_test - test_y) ** 2))
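# mnll is assumed here to be the mean negative log-likelihood under the
# Gaussian predictive: mean of 0.5*log(2*pi*var) + (y - mu)**2 / (2*var).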
finalMNLL = mnll(test_y, pred_test, predVar_test)
print("Final model RMSE: {}".format(finalRMSE))
print("Final model MNLL: {}".format(finalMNLL))
settingsVars = {
"RMSE": finalRMSE,
"MNLL": finalMNLL,
"bestParam": bestParam.tolist(),
"datasetName": datasetName,
"nlOptIter": nlOptIter,
"nlOptAlgo": nlOptAlgo,
"gridRes": gridRes,
"paramLow": paramLow.tolist(),
"paramHigh": paramHigh.tolist(),
"M": M,
"N": N,
"D": D,
"blrAlpha": blrAlpha,
"blrBeta": blrBeta,
"useARDkernel": useARDkernel,
"basicRBF": basicRBF,
"quantileParametrisation": quantileParametrisation.__name__,
"scoreMetric": scoreMetric,
"searchStrategy": searchStrategy,
"composition": composition,
"interpolator": interpolator.__name__
}
rs = ResultSaver(settingsVars)
rs.save_params_and_loss(allParams, -score)
if not basicRBF:
rs.save_quantile(bbq_qf, x, y)
rs.save_pdf(bbq_qf)
if D != 2:
if D == 1:
rs.save_dataset(train_x, train_y, test_x, test_y)
rs.save_data_fit(train_x, pred_train, predVar_train,
test_x, pred_test, predVar_test)
else:
# Generate images
imSize = 130
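# Map inputs to integer pixel indices via round((x + 1) * (imSize - 1) / 2),
# assuming the texture coordinates lie in [-1, 1] along each axis.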
trainIdx = np.round((train_x + 1) * (imSize - 1) / 2.0).astype(int)
testIdx = np.round((test_x + 1) * (imSize - 1) / 2.0).astype(int)
fullImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, train_y):
fullImg[idx[0], idx[1]] = y
for idx, y in zip(testIdx, test_y):
fullImg[idx[0], idx[1]] = y
predImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, pred_train):
predImg[idx[0], idx[1]] = y
for idx, y in zip(testIdx, pred_test):
predImg[idx[0], idx[1]] = y
predImgPrcs = np.clip(predImg, -1, 1)
rs.save_pred_image(predImgPrcs)
if len(paramLow) - 1 * learnBeta == 2: # Plot score map
if searchStrategy == "BO" or searchStrategy == "NLopt":
plotRes = 64
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], plotRes),
np.linspace(paramLow[1], paramHigh[1], plotRes))
Z = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
scaledZ = scaleParams(Z)
scaledX = scaledZ[:, 0].reshape(X.shape)
scaledY = scaledZ[:, 1].reshape(Y.shape)
if searchStrategy == "Gridsearch":
score_surface = -score
elif searchStrategy == "BO":
score_surface = -bo.model.predictMean(Z)
if searchStrategy == "BO" or searchStrategy == "Gridsearch":
rs.save_loss_surface(scaledX, scaledY, score_surface.reshape(X.shape))
print("Finished.")
| global i
if i == nlOptIter:
print("Warning: maximum number of iterations reached.")
return float(np.min(score))
allParams[i, :] = _x
if learnBeta:
global blrBeta
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
curScore = float(-train_model(scaledParams, blrBeta))
# Keep track of previous tries
score[i] = curScore
i += 1
pbar.update(1)
return curScore | identifier_body |
example_all.py | import numpy as np
from functools import partial
from tqdm import tqdm
import nlopt
import matplotlib.pylab as plt
import bbq.models.rff as ff
from bbq.examples.utils import toy_functions
from bbq.examples.utils.train_model import train_model
from bbq.examples.utils.saveResults import ResultSaver
from bbq.utils.enums import QMC_KWARG, QMC_SCRAMBLING, QMC_SEQUENCE
from bbq.interpolate import InterpWithAsymptotes
from bbq.models.ABLR import ABLR
from bbq.utils import datasets
from bbq import parametrisations
from bbq.utils.metrics import mnll
tqdm.monitor_interval = 0
"""
Options
"""
# Dataset
# datasetName = "tread"
# datasetName = "pores"
# datasetName = "rubber"
# datasetName = "concrete"
# datasetName = "airfoil_noise"
# datasetName = "co2"
# datasetName = "airline"
# datasetName = "cosine"
# datasetName = "pattern"
datasetName = "steps"
# datasetName = "quadraticCos"
# datasetName = "harmonics"
# datasetName = "heaviside"
extrapolationDataset = True # Better plotting for extrapolation dataset
# BLR Model
M = 300 # Number of random Fourier features in BLR
blrAlpha, blrBeta = 1, 10 # Precision params for BLR (weight prior, noise)
learnBeta = False # Learn noise precision
# Quantile type
# useARDkernel = True # To use an ARD kernel
useARDkernel = False # To use an isotropic kernel
basicRBF = False # To run basic RFF with RBF (opt lengthscale)
# quantileParametrisation = parametrisations.InterpPieceWise4points
quantileParametrisation = parametrisations.InterpSingleSpline
# quantileParametrisation = parametrisations.PeriodicSimple
# quantileParametrisation = parametrisations.InterpIncrementY6pts
# quantileParametrisation = parametrisations.BoundedQPoints
# quantileParametrisation = parametrisations.StairCase
# quantileParametrisation = parametrisations.InterpWeibull
# Score metric to use
scoreMetric = "NLML" # Negative log marginal likelihood
# scoreMetric = "RMSE" # Root mean square error
# Search strategy
# searchStrategy = "Gridsearch"
# searchStrategy = "BO" # Bayesian optimisation
searchStrategy = "NLopt" # Search using NLopt optimisation algorithms
# Grid-search parameters
gridRes = int(np.sqrt(1000)) # Grid-search resolution across each dimension
# BO parameters
nBOIter = 200 # Number of BO iterations
boKappa = 1 # BO exploration-exploitation parameter
boModelLengthscale = 0.03 # RBF lengthscale of the approximate GP used in BO
boModelType = "BLR-RFF" # Approximate GP to be used in BO
# boModelType = "LocalGP"
# NLopt parameters
nlOptIter = 200
# NLopt Global algorithms (derivative-free)
# nlOptAlgo = nlopt.GN_CRS
# nlOptAlgo = nlopt.GN_CRS2_LM
# nlOptAlgo = nlopt.GN_DIRECT
# nlOptAlgo = nlopt.GN_DIRECT_L
# nlOptAlgo = nlopt.GN_ISRES
# nlOptAlgo = nlopt.GN_ESCH
# NLopt Local algorithms (derivative-free)
nlOptAlgo = nlopt.LN_COBYLA
# nlOptAlgo = nlopt.LN_BOBYQA
# nlOptAlgo = nlopt.LN_SBPLX
"""
Generate dataset
"""
if datasetName == "co2":
train_data, test_data = datasets.mauna_loa()
elif datasetName == "airline":
train_data, test_data = datasets.airline_passengers()
elif datasetName == "airfoil_noise":
train_data, test_data = datasets.airfoil_noise()
elif datasetName == "concrete":
train_data, test_data = datasets.concrete()
elif datasetName == "rubber" \
or datasetName == "pores" \
or datasetName == "tread":
train_data, test_data = datasets.textures_2D(texture_name=datasetName)
else:
if datasetName == "cosine":
objectiveFunction = toy_functions.cosine
elif datasetName == "harmonics":
objectiveFunction = toy_functions.harmonics
elif datasetName == "pattern":
objectiveFunction = toy_functions.pattern
elif datasetName == "heaviside":
objectiveFunction = toy_functions.heaviside
elif datasetName == "quadraticCos":
objectiveFunction = toy_functions.quadratic_cos
elif datasetName == "steps":
objectiveFunction = toy_functions.steps
else:
raise RuntimeError("Objective function was not defined")
train_x = np.sort(np.random.uniform(-0.3, 1.2, (100, 1)), axis=0)
train_y = objectiveFunction(train_x)
train_data = np.hstack([train_x, train_y])
test_x = np.linspace(-0.5, 1.5, 1000).reshape(-1, 1)
# test_x = np.sort(np.random.uniform(-1, 1, (100, 1)), axis=0)
test_y = objectiveFunction(test_x)
test_data = np.hstack([test_x, test_y])
train_x = train_data[:, :-1]
train_y = train_data[:, [-1]]
test_x = test_data[:, :-1]
test_y = test_data[:, [-1]]
N, D = train_x.shape
print("Dataset size: {} * {}".format(N, D))
if scoreMetric == "RMSE":
Nv = int(0.2 * N)
N -= Nv
val_x = train_x[-Nv:, :]
val_y = train_y[-Nv:].reshape(-1, 1)
train_x = train_x[:-Nv, :]
train_y = train_y[:-Nv].reshape(-1, 1)
vizData = False
if vizData:
plt.figure()
plt.plot(train_x, train_y, 'b-', label="train set")
if scoreMetric == "RMSE":
plt.plot(val_x, val_y, 'g-', label="validation set")
plt.plot(test_x, test_y, 'r-', label="test set")
plt.xlim(min(np.min(train_x), np.min(test_x)) - .2,
max(np.max(train_x), np.max(test_x)) + .2)
plt.ylim(min(np.min(train_y), np.min(test_y)) - .2,
max(np.max(train_y), np.max(test_y)) + .2)
plt.title("Data set")
plt.legend(loc=2)
plt.show()
exit()
"""
Quantile parametrisation
"""
if quantileParametrisation == parametrisations.BoundedQPoints:
qp = quantileParametrisation(6, p1=(0.03, -100), p2=(0.97, 100))
elif quantileParametrisation == parametrisations.StairCase:
qp = quantileParametrisation(9, p1=(0.03, -150), p2=(0.97, 150))
else:
qp = quantileParametrisation()
genParams = qp.gen_params
interpolator = qp.interpolator
paramLow = qp.paramLow
paramHigh = qp.paramHigh
scaleParams = qp.scale_params
if datasetName == "co2":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramHigh[0] = 2.3
paramLow[0] = 2
paramHigh[1] = 0.2
paramLow[1] = 0.01
elif datasetName == "airline":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramLow[0] = 2.3
paramHigh[0] = 2.8
paramLow[1] = 0.001
elif datasetName == "pores":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.45
elif datasetName == "rubber":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.25
elif datasetName == "tread":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.5
# paramLow[1] = 0.001
# paramHigh[1] = 0.01
if quantileParametrisation == parametrisations.InterpSingleSpline:
paramHigh[1] = 6
paramHigh[0] = 2.3
paramLow[0] = 1.5
"""
Kernel composition
"""
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
# composition = ["linear", "+", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
composition = ["bbq"]
if datasetName == "co2":
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
composition = ["linear_no_bias", "+", "bbq"]
elif datasetName == "airline":
composition = ["linear", "+", "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "linear_no_bias", "*", "bbq", "+", "bbq"]
if basicRBF:
# composition = ["rbf" if e == "bbq" else e for e in composition]
composition = ["rbf"]
qp.paramLow = np.array([-3])
qp.paramHigh = np.array([0])
qp.logScaleParams = np.array([True])
paramLow = qp.paramLow
paramHigh = qp.paramHigh
"""
Search/optimisation for best quantile
"""
if useARDkernel:
paramLow = np.array(D * list(paramLow))
paramHigh = np.array(D * list(paramHigh))
qp.logScaleParams = np.array(D * list(qp.logScaleParams))
if learnBeta:
paramLow = np.array([-3] + list(paramLow))
paramHigh = np.array([3] + list(paramHigh))
print("parameter space high: {}".format(paramHigh))
print("parameter space low: {}".format(paramLow))
score, allParams, bo = None, None, None
if searchStrategy == "Gridsearch":
assert len(paramLow) == 2, "Gridsearch only designed for 2 params"
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], gridRes),
np.linspace(paramLow[1], paramHigh[1], gridRes))
allParams = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
score = np.zeros((gridRes ** 2,))
for i in tqdm(range(allParams.shape[0])):
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
elif searchStrategy == "BO":
search_int = np.vstack([np.atleast_2d(paramLow),
np.atleast_2d(paramHigh)]).T
bo = None # BO.BO(search_int, acq_fun=BO.UCB(boKappa), opt_maxeval=20,
allParams = np.zeros((nBOIter, len(paramLow)))
score = np.zeros((nBOIter,))
for i in tqdm(range(nBOIter)):
allParams[i, :] = bo.next_sample()
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
bo.update(allParams[i, :].reshape(1, -1),
np.array(score[i]).reshape(-1, 1))
elif searchStrategy == "NLopt":
allParams = np.zeros((nlOptIter, len(paramLow)))
score = np.zeros((nlOptIter,))
pbar = tqdm(total=nlOptIter)
i = 0
# NLopt params
opt = nlopt.opt(nlOptAlgo, len(paramLow))
opt.set_lower_bounds(np.array(paramLow).reshape(-1))
opt.set_upper_bounds(np.array(paramHigh).reshape(-1))
opt.set_maxeval(nlOptIter)
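# NLopt calls the objective as f(x, grad); the derivative-free algorithms
# above pass an empty grad, so it can be ignored. The wrapper below records
# every evaluated point and its score so the best one can be recovered
# afterwards with np.argmax(score).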
def | (_x, grad):
global i
if i == nlOptIter:
print("Warning: maximum number of iterations reached.")
return float(np.min(score))
allParams[i, :] = _x
if learnBeta:
global blrBeta
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
curScore = float(-train_model(scaledParams, blrBeta))
# Keep track of previous tries
score[i] = curScore
i += 1
pbar.update(1)
return curScore
opt.set_max_objective(_fun_maximize)
init_opt = np.random.uniform(0, 1, len(paramLow)) * \
(paramHigh - paramLow) + paramLow
opt.optimize(init_opt)
pbar.close()
"""
Saving
"""
# Compute best quantile
imax = np.argmax(score)
curParams = allParams[imax, :]
if learnBeta:
curParams = allParams[imax, 1:]
blrBeta = 10 ** allParams[imax, 0]
print("Best BLR beta: {}".format(blrBeta))
bestParam = scaleParams(curParams)
print("Best parameters: {}".format(bestParam))
bbq_qf = None
if not basicRBF:
# Interpolation
if useARDkernel:
bbq_qf = []
nprm = len(bestParam)
for j in range(D):
x, y, params = genParams(
bestParam[int(j * nprm / D):int((j + 1) * nprm / D)])
bbq_qf.append(InterpWithAsymptotes(x=x, y=y,
interpolator=interpolator,
params=params))
else:
x, y, params = genParams(bestParam)
bbq_qf = InterpWithAsymptotes(x=x, y=y, interpolator=interpolator,
params=params)
# Draw features with interpolated quantile
f_dict = {"linear_no_bias": ff.Linear(d=D, has_bias_term=False),
"linear": ff.Linear(d=D, has_bias_term=True)}
if basicRBF:
f_dict["rbf"] = ff.RFF_RBF(m=M, d=D, ls=bestParam)
else:
f_dict["bbq"] = ff.QMCF_BBQ(m=M, d=D,
sampler=partial(freqGenFn, bbq_qf=bbq_qf),
sequence_type=QMC_SEQUENCE.HALTON,
scramble_type=QMC_SCRAMBLING.GENERALISED,
qmc_kwargs={QMC_KWARG.PERM: None})
qmcf_bbq = ff.FComposition(f_dict=f_dict, composition=composition)
# Compute best data fit
blr = ABLR(k_phi=qmcf_bbq, d=D, d_out=1,
alpha=blrAlpha, beta=blrBeta, log_dir=None)
blr.learn_from_history(x_trn=train_x, y_trn=train_y)
prediction = blr.predict(x_tst=train_x, pred_var=True)
pred_train, predVar_train = prediction.mean, prediction.var
# Compute final score metrics
prediction = blr.predict(x_tst=test_x, pred_var=True)
pred_test, predVar_test = prediction.mean, prediction.var
finalRMSE = np.sqrt(np.mean((pred_test - test_y) ** 2))
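# mnll is assumed here to be the mean negative log-likelihood under the
# Gaussian predictive: mean of 0.5*log(2*pi*var) + (y - mu)**2 / (2*var).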
finalMNLL = mnll(test_y, pred_test, predVar_test)
print("Final model RMSE: {}".format(finalRMSE))
print("Final model MNLL: {}".format(finalMNLL))
settingsVars = {
"RMSE": finalRMSE,
"MNLL": finalMNLL,
"bestParam": bestParam.tolist(),
"datasetName": datasetName,
"nlOptIter": nlOptIter,
"nlOptAlgo": nlOptAlgo,
"gridRes": gridRes,
"paramLow": paramLow.tolist(),
"paramHigh": paramHigh.tolist(),
"M": M,
"N": N,
"D": D,
"blrAlpha": blrAlpha,
"blrBeta": blrBeta,
"useARDkernel": useARDkernel,
"basicRBF": basicRBF,
"quantileParametrisation": quantileParametrisation.__name__,
"scoreMetric": scoreMetric,
"searchStrategy": searchStrategy,
"composition": composition,
"interpolator": interpolator.__name__
}
rs = ResultSaver(settingsVars)
rs.save_params_and_loss(allParams, -score)
if not basicRBF:
rs.save_quantile(bbq_qf, x, y)
rs.save_pdf(bbq_qf)
if D != 2:
if D == 1:
rs.save_dataset(train_x, train_y, test_x, test_y)
rs.save_data_fit(train_x, pred_train, predVar_train,
test_x, pred_test, predVar_test)
else:
# Generate images
imSize = 130
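# Map inputs to integer pixel indices via round((x + 1) * (imSize - 1) / 2),
# assuming the texture coordinates lie in [-1, 1] along each axis.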
trainIdx = np.round((train_x + 1) * (imSize - 1) / 2.0).astype(int)
testIdx = np.round((test_x + 1) * (imSize - 1) / 2.0).astype(int)
fullImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, train_y):
fullImg[idx[0], idx[1]] = y
for idx, y in zip(testIdx, test_y):
fullImg[idx[0], idx[1]] = y
predImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, pred_train):
predImg[idx[0], idx[1]] = y
for idx, y in zip(testIdx, pred_test):
predImg[idx[0], idx[1]] = y
predImgPrcs = np.clip(predImg, -1, 1)
rs.save_pred_image(predImgPrcs)
if len(paramLow) - 1 * learnBeta == 2: # Plot score map
if searchStrategy == "BO" or searchStrategy == "NLopt":
plotRes = 64
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], plotRes),
np.linspace(paramLow[1], paramHigh[1], plotRes))
Z = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
scaledZ = scaleParams(Z)
scaledX = scaledZ[:, 0].reshape(X.shape)
scaledY = scaledZ[:, 1].reshape(Y.shape)
if searchStrategy == "Gridsearch":
score_surface = -score
elif searchStrategy == "BO":
score_surface = -bo.model.predictMean(Z)
if searchStrategy == "BO" or searchStrategy == "Gridsearch":
rs.save_loss_surface(scaledX, scaledY, score_surface.reshape(X.shape))
print("Finished.")
| _fun_maximize | identifier_name |
example_all.py | import numpy as np
from functools import partial
from tqdm import tqdm
import nlopt
import matplotlib.pylab as plt
import bbq.models.rff as ff
from bbq.examples.utils import toy_functions
from bbq.examples.utils.train_model import train_model
from bbq.examples.utils.saveResults import ResultSaver
from bbq.utils.enums import QMC_KWARG, QMC_SCRAMBLING, QMC_SEQUENCE
from bbq.interpolate import InterpWithAsymptotes
from bbq.models.ABLR import ABLR
from bbq.utils import datasets
from bbq import parametrisations
from bbq.utils.metrics import mnll
tqdm.monitor_interval = 0
"""
Options
"""
# Dataset
# datasetName = "tread"
# datasetName = "pores"
# datasetName = "rubber"
# datasetName = "concrete"
# datasetName = "airfoil_noise"
# datasetName = "co2"
# datasetName = "airline"
# datasetName = "cosine"
# datasetName = "pattern"
datasetName = "steps"
# datasetName = "quadraticCos"
# datasetName = "harmonics"
# datasetName = "heaviside"
extrapolationDataset = True # Better plotting for extrapolation dataset
# BLR Model
M = 300 # Number of random Fourier features in BLR
blrAlpha, blrBeta = 1, 10 # Precision params for BLR (weight prior, noise)
learnBeta = False # Learn noise precision
# Quantile type
# useARDkernel = True # To use an ARD kernel
useARDkernel = False # To use an isotropic kernel
basicRBF = False # To run basic RFF with RBF (opt lengthscale)
# quantileParametrisation = parametrisations.InterpPieceWise4points
quantileParametrisation = parametrisations.InterpSingleSpline
# quantileParametrisation = parametrisations.PeriodicSimple
# quantileParametrisation = parametrisations.InterpIncrementY6pts
# quantileParametrisation = parametrisations.BoundedQPoints
# quantileParametrisation = parametrisations.StairCase
# quantileParametrisation = parametrisations.InterpWeibull
# Score metric to use
scoreMetric = "NLML" # Negative log marginal likelihood
# scoreMetric = "RMSE" # Root mean square error
# Search strategy
# searchStrategy = "Gridsearch"
# searchStrategy = "BO" # Bayesian optimisation
searchStrategy = "NLopt" # Search using NLopt optimisation algorithms
# Grid-search parameters
gridRes = int(np.sqrt(1000)) # Grid-search resolution across each dimension
# BO parameters
nBOIter = 200 # Number of BO iterations
boKappa = 1 # BO exploration-exploitation parameter
boModelLengthscale = 0.03 # RBF lengthscale of the approximate GP used in BO
boModelType = "BLR-RFF" # Approximate GP to be used in BO
# boModelType = "LocalGP"
# NLopt parameters
nlOptIter = 200
# NLopt Global algorithms (derivative-free)
# nlOptAlgo = nlopt.GN_CRS
# nlOptAlgo = nlopt.GN_CRS2_LM
# nlOptAlgo = nlopt.GN_DIRECT
# nlOptAlgo = nlopt.GN_DIRECT_L
# nlOptAlgo = nlopt.GN_ISRES
# nlOptAlgo = nlopt.GN_ESCH
# NLopt Local algorithms (derivative-free)
nlOptAlgo = nlopt.LN_COBYLA
# nlOptAlgo = nlopt.LN_BOBYQA
# nlOptAlgo = nlopt.LN_SBPLX
"""
Generate dataset
"""
if datasetName == "co2":
train_data, test_data = datasets.mauna_loa()
elif datasetName == "airline":
train_data, test_data = datasets.airline_passengers()
elif datasetName == "airfoil_noise":
train_data, test_data = datasets.airfoil_noise()
elif datasetName == "concrete":
train_data, test_data = datasets.concrete()
elif datasetName == "rubber" \
or datasetName == "pores" \
or datasetName == "tread":
train_data, test_data = datasets.textures_2D(texture_name=datasetName)
else:
if datasetName == "cosine":
objectiveFunction = toy_functions.cosine
elif datasetName == "harmonics":
objectiveFunction = toy_functions.harmonics
elif datasetName == "pattern":
objectiveFunction = toy_functions.pattern
elif datasetName == "heaviside":
objectiveFunction = toy_functions.heaviside
elif datasetName == "quadraticCos":
objectiveFunction = toy_functions.quadratic_cos
elif datasetName == "steps":
objectiveFunction = toy_functions.steps
else:
raise RuntimeError("Objective function was not defined")
train_x = np.sort(np.random.uniform(-0.3, 1.2, (100, 1)), axis=0)
train_y = objectiveFunction(train_x)
train_data = np.hstack([train_x, train_y])
test_x = np.linspace(-0.5, 1.5, 1000).reshape(-1, 1)
# test_x = np.sort(np.random.uniform(-1, 1, (100, 1)), axis=0)
test_y = objectiveFunction(test_x)
test_data = np.hstack([test_x, test_y])
train_x = train_data[:, :-1]
train_y = train_data[:, [-1]]
test_x = test_data[:, :-1]
test_y = test_data[:, [-1]]
N, D = train_x.shape
print("Dataset size: {} * {}".format(N, D))
if scoreMetric == "RMSE":
Nv = int(0.2 * N)
N -= Nv
val_x = train_x[-Nv:, :]
val_y = train_y[-Nv:].reshape(-1, 1)
train_x = train_x[:-Nv, :]
train_y = train_y[:-Nv].reshape(-1, 1)
vizData = False
if vizData:
plt.figure()
plt.plot(train_x, train_y, 'b-', label="train set")
if scoreMetric == "RMSE":
plt.plot(val_x, val_y, 'g-', label="validation set")
plt.plot(test_x, test_y, 'r-', label="test set")
plt.xlim(min(np.min(train_x), np.min(test_x)) - .2,
max(np.max(train_x), np.max(test_x)) + .2)
plt.ylim(min(np.min(train_y), np.min(test_y)) - .2,
max(np.max(train_y), np.max(test_y)) + .2)
plt.title("Data set")
plt.legend(loc=2)
plt.show()
exit()
"""
Quantile parametrisation
"""
if quantileParametrisation == parametrisations.BoundedQPoints:
qp = quantileParametrisation(6, p1=(0.03, -100), p2=(0.97, 100))
elif quantileParametrisation == parametrisations.StairCase:
qp = quantileParametrisation(9, p1=(0.03, -150), p2=(0.97, 150))
else:
qp = quantileParametrisation()
genParams = qp.gen_params
interpolator = qp.interpolator
paramLow = qp.paramLow
paramHigh = qp.paramHigh
scaleParams = qp.scale_params
if datasetName == "co2":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramHigh[0] = 2.3
paramLow[0] = 2
paramHigh[1] = 0.2
paramLow[1] = 0.01
elif datasetName == "airline":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramLow[0] = 2.3
paramHigh[0] = 2.8
paramLow[1] = 0.001
elif datasetName == "pores":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.45
elif datasetName == "rubber":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.25
elif datasetName == "tread":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.5
# paramLow[1] = 0.001
# paramHigh[1] = 0.01
if quantileParametrisation == parametrisations.InterpSingleSpline:
paramHigh[1] = 6
paramHigh[0] = 2.3
paramLow[0] = 1.5
"""
Kernel composition
"""
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
# composition = ["linear", "+", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
composition = ["bbq"]
if datasetName == "co2":
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
composition = ["linear_no_bias", "+", "bbq"]
elif datasetName == "airline":
composition = ["linear", "+", "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "linear_no_bias", "*", "bbq", "+", "bbq"]
if basicRBF:
# composition = ["rbf" if e == "bbq" else e for e in composition]
composition = ["rbf"]
qp.paramLow = np.array([-3])
qp.paramHigh = np.array([0])
qp.logScaleParams = np.array([True])
paramLow = qp.paramLow
paramHigh = qp.paramHigh
"""
Search/optimisation for best quantile
"""
if useARDkernel:
paramLow = np.array(D * list(paramLow))
paramHigh = np.array(D * list(paramHigh))
qp.logScaleParams = np.array(D * list(qp.logScaleParams))
if learnBeta:
paramLow = np.array([-3] + list(paramLow))
paramHigh = np.array([3] + list(paramHigh))
print("parameter space high: {}".format(paramHigh))
print("parameter space low: {}".format(paramLow))
score, allParams, bo = None, None, None
if searchStrategy == "Gridsearch":
assert len(paramLow) == 2, "Gridsearch only designed for 2 params"
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], gridRes),
np.linspace(paramLow[1], paramHigh[1], gridRes))
allParams = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
score = np.zeros((gridRes ** 2,))
for i in tqdm(range(allParams.shape[0])):
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
elif searchStrategy == "BO":
search_int = np.vstack([np.atleast_2d(paramLow),
np.atleast_2d(paramHigh)]).T
bo = None # BO.BO(search_int, acq_fun=BO.UCB(boKappa), opt_maxeval=20,
allParams = np.zeros((nBOIter, len(paramLow)))
score = np.zeros((nBOIter,))
for i in tqdm(range(nBOIter)):
allParams[i, :] = bo.next_sample()
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
bo.update(allParams[i, :].reshape(1, -1),
np.array(score[i]).reshape(-1, 1))
elif searchStrategy == "NLopt":
allParams = np.zeros((nlOptIter, len(paramLow)))
score = np.zeros((nlOptIter,))
pbar = tqdm(total=nlOptIter)
i = 0
# NLopt params
opt = nlopt.opt(nlOptAlgo, len(paramLow))
opt.set_lower_bounds(np.array(paramLow).reshape(-1))
opt.set_upper_bounds(np.array(paramHigh).reshape(-1))
opt.set_maxeval(nlOptIter)
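# NLopt calls the objective as f(x, grad); the derivative-free algorithms
# above pass an empty grad, so it can be ignored. The wrapper below records
# every evaluated point and its score so the best one can be recovered
# afterwards with np.argmax(score).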
def _fun_maximize(_x, grad):
global i
if i == nlOptIter:
print("Warning: maximum number of iterations reached.")
return float(np.min(score))
allParams[i, :] = _x
if learnBeta:
global blrBeta
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
curScore = float(-train_model(scaledParams, blrBeta))
# Keep track of previous tries
score[i] = curScore
i += 1
pbar.update(1)
return curScore
opt.set_max_objective(_fun_maximize)
init_opt = np.random.uniform(0, 1, len(paramLow)) * \
(paramHigh - paramLow) + paramLow
opt.optimize(init_opt)
pbar.close()
"""
Saving
"""
# Compute best quantile
imax = np.argmax(score)
curParams = allParams[imax, :]
if learnBeta:
curParams = allParams[imax, 1:]
blrBeta = 10 ** allParams[imax, 0]
print("Best BLR beta: {}".format(blrBeta))
bestParam = scaleParams(curParams)
print("Best parameters: {}".format(bestParam))
bbq_qf = None
if not basicRBF:
# Interpolation
if useARDkernel:
bbq_qf = []
nprm = len(bestParam)
for j in range(D):
x, y, params = genParams(
bestParam[int(j * nprm / D):int((j + 1) * nprm / D)])
bbq_qf.append(InterpWithAsymptotes(x=x, y=y,
interpolator=interpolator,
params=params))
else:
x, y, params = genParams(bestParam)
bbq_qf = InterpWithAsymptotes(x=x, y=y, interpolator=interpolator,
params=params)
# Draw features with interpolated quantile
f_dict = {"linear_no_bias": ff.Linear(d=D, has_bias_term=False),
"linear": ff.Linear(d=D, has_bias_term=True)}
if basicRBF:
f_dict["rbf"] = ff.RFF_RBF(m=M, d=D, ls=bestParam)
else:
f_dict["bbq"] = ff.QMCF_BBQ(m=M, d=D,
sampler=partial(freqGenFn, bbq_qf=bbq_qf),
sequence_type=QMC_SEQUENCE.HALTON,
scramble_type=QMC_SCRAMBLING.GENERALISED,
qmc_kwargs={QMC_KWARG.PERM: None})
qmcf_bbq = ff.FComposition(f_dict=f_dict, composition=composition)
# Compute best data fit
blr = ABLR(k_phi=qmcf_bbq, d=D, d_out=1,
alpha=blrAlpha, beta=blrBeta, log_dir=None)
blr.learn_from_history(x_trn=train_x, y_trn=train_y)
prediction = blr.predict(x_tst=train_x, pred_var=True)
pred_train, predVar_train = prediction.mean, prediction.var
# Compute final score metrics
prediction = blr.predict(x_tst=test_x, pred_var=True)
pred_test, predVar_test = prediction.mean, prediction.var
finalRMSE = np.sqrt(np.mean((pred_test - test_y) ** 2))
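# mnll is assumed here to be the mean negative log-likelihood under the
# Gaussian predictive: mean of 0.5*log(2*pi*var) + (y - mu)**2 / (2*var).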
finalMNLL = mnll(test_y, pred_test, predVar_test)
print("Final model RMSE: {}".format(finalRMSE))
print("Final model MNLL: {}".format(finalMNLL))
settingsVars = {
"RMSE": finalRMSE,
"MNLL": finalMNLL,
"bestParam": bestParam.tolist(),
"datasetName": datasetName,
"nlOptIter": nlOptIter,
"nlOptAlgo": nlOptAlgo,
"gridRes": gridRes,
"paramLow": paramLow.tolist(),
"paramHigh": paramHigh.tolist(),
"M": M,
"N": N,
"D": D,
"blrAlpha": blrAlpha,
"blrBeta": blrBeta,
"useARDkernel": useARDkernel,
"basicRBF": basicRBF,
"quantileParametrisation": quantileParametrisation.__name__,
"scoreMetric": scoreMetric,
"searchStrategy": searchStrategy,
"composition": composition,
"interpolator": interpolator.__name__
}
rs = ResultSaver(settingsVars)
rs.save_params_and_loss(allParams, -score)
if not basicRBF:
rs.save_quantile(bbq_qf, x, y)
rs.save_pdf(bbq_qf)
if D != 2:
if D == 1:
rs.save_dataset(train_x, train_y, test_x, test_y)
rs.save_data_fit(train_x, pred_train, predVar_train,
test_x, pred_test, predVar_test)
else:
# Generate images
imSize = 130
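# Map inputs to integer pixel indices via round((x + 1) * (imSize - 1) / 2),
# assuming the texture coordinates lie in [-1, 1] along each axis.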
trainIdx = np.round((train_x + 1) * (imSize - 1) / 2.0).astype(int)
testIdx = np.round((test_x + 1) * (imSize - 1) / 2.0).astype(int)
fullImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, train_y):
fullImg[idx[0], idx[1]] = y
for idx, y in zip(testIdx, test_y):
fullImg[idx[0], idx[1]] = y
predImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, pred_train):
|
for idx, y in zip(testIdx, pred_test):
predImg[idx[0], idx[1]] = y
predImgPrcs = np.clip(predImg, -1, 1)
rs.save_pred_image(predImgPrcs)
if len(paramLow) - 1 * learnBeta == 2: # Plot score map
if searchStrategy == "BO" or searchStrategy == "NLopt":
plotRes = 64
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], plotRes),
np.linspace(paramLow[1], paramHigh[1], plotRes))
Z = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
scaledZ = scaleParams(Z)
scaledX = scaledZ[:, 0].reshape(X.shape)
scaledY = scaledZ[:, 1].reshape(Y.shape)
if searchStrategy == "Gridsearch":
score_surface = -score
elif searchStrategy == "BO":
score_surface = -bo.model.predictMean(Z)
if searchStrategy == "BO" or searchStrategy == "Gridsearch":
rs.save_loss_surface(scaledX, scaledY, score_surface.reshape(X.shape))
print("Finished.")
| predImg[idx[0], idx[1]] = y | conditional_block |
example_all.py | import numpy as np
from functools import partial
from tqdm import tqdm
import nlopt
import matplotlib.pylab as plt
import bbq.models.rff as ff
from bbq.examples.utils import toy_functions
from bbq.examples.utils.train_model import train_model
from bbq.examples.utils.saveResults import ResultSaver
from bbq.utils.enums import QMC_KWARG, QMC_SCRAMBLING, QMC_SEQUENCE
from bbq.interpolate import InterpWithAsymptotes
from bbq.models.ABLR import ABLR
from bbq.utils import datasets
from bbq import parametrisations
from bbq.utils.metrics import mnll
tqdm.monitor_interval = 0
"""
Options
"""
# Dataset
# datasetName = "tread"
# datasetName = "pores"
# datasetName = "rubber"
# datasetName = "concrete"
# datasetName = "airfoil_noise"
# datasetName = "co2"
# datasetName = "airline"
# datasetName = "cosine"
# datasetName = "pattern"
datasetName = "steps"
# datasetName = "quadraticCos"
# datasetName = "harmonics"
# datasetName = "heaviside"
extrapolationDataset = True # Better plotting for extrapolation dataset
# BLR Model
M = 300 # Number of random Fourier features in BLR
blrAlpha, blrBeta = 1, 10 # Precision params for BLR (weight prior, noise)
learnBeta = False # Learn noise precision
# Quantile type
# useARDkernel = True # To use an ARD kernel
useARDkernel = False # To use an isotropic kernel
basicRBF = False # To run basic RFF with RBF (opt lengthscale)
# quantileParametrisation = parametrisations.InterpPieceWise4points
quantileParametrisation = parametrisations.InterpSingleSpline
# quantileParametrisation = parametrisations.PeriodicSimple
# quantileParametrisation = parametrisations.InterpIncrementY6pts
# quantileParametrisation = parametrisations.BoundedQPoints
# quantileParametrisation = parametrisations.StairCase
# quantileParametrisation = parametrisations.InterpWeibull
# Score metric to use
scoreMetric = "NLML" # Negative log marginal likelihood
# scoreMetric = "RMSE" # Root mean square error
# Search strategy
# searchStrategy = "Gridsearch"
# searchStrategy = "BO" # Bayesian optimisation
searchStrategy = "NLopt" # Search using NLopt optimisation algorithms
# Grid-search parameters
gridRes = int(np.sqrt(1000)) # Grid-search resolution across each dimension
# BO parameters
nBOIter = 200 # Number of BO iterations
boKappa = 1 # BO exploration-exploitation parameter
boModelLengthscale = 0.03 # RBF lengthscale of the approximate GP used in BO
boModelType = "BLR-RFF" # Approximate GP to be used in BO
# boModelType = "LocalGP"
# NLopt parameters
nlOptIter = 200
# NLopt Global algorithms (derivative free)
# nlOptAlgo = nlopt.GN_CRS
# nlOptAlgo = nlopt.GN_CRS2_LM
# nlOptAlgo = nlopt.GN_DIRECT
# nlOptAlgo = nlopt.GN_DIRECT_L
# nlOptAlgo = nlopt.GN_ISRES
# nlOptAlgo = nlopt.GN_ESCH
# NLopt Local algorithms (derivative-free)
nlOptAlgo = nlopt.LN_COBYLA
# nlOptAlgo = nlopt.LN_BOBYQA
# nlOptAlgo = nlopt.LN_SBPLX
"""
Generate dataset
"""
if datasetName == "co2":
train_data, test_data = datasets.mauna_loa()
elif datasetName == "airline":
train_data, test_data = datasets.airline_passengers()
elif datasetName == "airfoil_noise":
train_data, test_data = datasets.airfoil_noise()
elif datasetName == "concrete":
train_data, test_data = datasets.concrete()
elif datasetName == "rubber" \
or datasetName == "pores" \
or datasetName == "tread":
train_data, test_data = datasets.textures_2D(texture_name=datasetName)
else:
if datasetName == "cosine":
objectiveFunction = toy_functions.cosine
elif datasetName == "harmonics":
objectiveFunction = toy_functions.harmonics
elif datasetName == "pattern":
objectiveFunction = toy_functions.pattern
elif datasetName == "heaviside":
objectiveFunction = toy_functions.heaviside
elif datasetName == "quadraticCos":
objectiveFunction = toy_functions.quadratic_cos
elif datasetName == "steps":
objectiveFunction = toy_functions.steps
else:
raise RuntimeError("Objective function was not defined")
train_x = np.sort(np.random.uniform(-0.3, 1.2, (100, 1)), axis=0)
train_y = objectiveFunction(train_x)
train_data = np.hstack([train_x, train_y])
test_x = np.linspace(-0.5, 1.5, 1000).reshape(-1, 1)
# test_x = np.sort(np.random.uniform(-1, 1, (100, 1)), axis=0)
test_y = objectiveFunction(test_x)
test_data = np.hstack([test_x, test_y])
train_x = train_data[:, :-1]
train_y = train_data[:, [-1]]
test_x = test_data[:, :-1]
test_y = test_data[:, [-1]]
N, D = train_x.shape
print("Dataset size: {} * {}".format(N, D))
if scoreMetric == "RMSE":
Nv = int(0.2 * N)
N -= Nv
val_x = train_x[-Nv:, :]
val_y = train_y[-Nv:].reshape(-1, 1)
train_x = train_x[:-Nv, :]
train_y = train_y[:-Nv].reshape(-1, 1)
vizData = False
if vizData:
plt.figure()
plt.plot(train_x, train_y, 'b-', label="train set")
if scoreMetric == "RMSE":
plt.plot(val_x, val_y, 'g-', label="validation set")
plt.plot(test_x, test_y, 'r-', label="test set")
plt.xlim(min(np.min(train_x), np.min(test_x)) - .2,
max(np.max(train_x), np.max(test_x)) + .2)
plt.ylim(min(np.min(train_y), np.min(test_y)) - .2,
max(np.max(train_y), np.max(test_y)) + .2)
plt.title("Data set")
plt.legend(loc=2)
plt.show()
exit()
"""
Quantile parametrisation
"""
if quantileParametrisation == parametrisations.BoundedQPoints:
qp = quantileParametrisation(6, p1=(0.03, -100), p2=(0.97, 100))
elif quantileParametrisation == parametrisations.StairCase:
qp = quantileParametrisation(9, p1=(0.03, -150), p2=(0.97, 150))
else:
qp = quantileParametrisation()
genParams = qp.gen_params
interpolator = qp.interpolator
paramLow = qp.paramLow
paramHigh = qp.paramHigh
scaleParams = qp.scale_params
if datasetName == "co2":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramHigh[0] = 2.3
paramLow[0] = 2
paramHigh[1] = 0.2
paramLow[1] = 0.01
elif datasetName == "airline":
if quantileParametrisation == parametrisations.PeriodicSimple:
paramLow[0] = 2.3
paramHigh[0] = 2.8
paramLow[1] = 0.001
elif datasetName == "pores":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.45
elif datasetName == "rubber":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.8
# paramLow[1] = 0.01
# paramHigh[1] = 0.25
elif datasetName == "tread":
pass
# paramLow[0] = 1.0
# paramHigh[0] = 2.5
# paramLow[1] = 0.001
# paramHigh[1] = 0.01
if quantileParametrisation == parametrisations.InterpSingleSpline:
paramHigh[1] = 6
paramHigh[0] = 2.3
paramLow[0] = 1.5
"""
Kernel composition
"""
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
# composition = ["linear", "+", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
# composition = ["linear", "+", "linear_no_bias", "*", "bbq"]
composition = ["bbq"]
if datasetName == "co2":
# composition = ["linear", "*", "linear_no_bias", "+", "bbq"]
composition = ["linear_no_bias", "+", "bbq"]
elif datasetName == "airline":
composition = ["linear", "+", "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "bbq"]
# composition = ["linear", "*", "linear_no_bias", "+",
# "linear", "*", "linear_no_bias", "*", "bbq", "+", "bbq"]
if basicRBF:
# composition = ["rbf" if e == "bbq" else e for e in composition]
composition = ["rbf"]
qp.paramLow = np.array([-3])
qp.paramHigh = np.array([0])
qp.logScaleParams = np.array([True]) | """
Search/optimisation for best quantile
"""
if useARDkernel:
paramLow = np.array(D * list(paramLow))
paramHigh = np.array(D * list(paramHigh))
qp.logScaleParams = np.array(D * list(qp.logScaleParams))
if learnBeta:
paramLow = np.array([-3] + list(paramLow))
paramHigh = np.array([3] + list(paramHigh))
print("parameter space high: {}".format(paramHigh))
print("parameter space low: {}".format(paramLow))
score, allParams, bo = None, None, None
if searchStrategy == "Gridsearch":
assert len(paramLow) == 2, "Gridsearch only designed for 2 params"
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], gridRes),
np.linspace(paramLow[1], paramHigh[1], gridRes))
allParams = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
score = np.zeros((gridRes ** 2,))
for i in tqdm(range(allParams.shape[0])):
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
elif searchStrategy == "BO":
search_int = np.vstack([np.atleast_2d(paramLow),
np.atleast_2d(paramHigh)]).T
bo = None # BO.BO(search_int, acq_fun=BO.UCB(boKappa), opt_maxeval=20,
allParams = np.zeros((nBOIter, len(paramLow)))
score = np.zeros((nBOIter,))
for i in tqdm(range(nBOIter)):
allParams[i, :] = bo.next_sample()
if learnBeta:
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
score[i] = -train_model(scaledParams, blrBeta)
bo.update(allParams[i, :].reshape(1, -1),
np.array(score[i]).reshape(-1, 1))
elif searchStrategy == "NLopt":
allParams = np.zeros((nlOptIter, len(paramLow)))
score = np.zeros((nlOptIter,))
pbar = tqdm(total=nlOptIter)
i = 0
# NLopt params
opt = nlopt.opt(nlOptAlgo, len(paramLow))
opt.set_lower_bounds(np.array(paramLow).reshape(-1))
opt.set_upper_bounds(np.array(paramHigh).reshape(-1))
opt.set_maxeval(nlOptIter)
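# NLopt calls the objective as f(x, grad); the derivative-free algorithms
# above pass an empty grad, so it can be ignored. The wrapper below records
# every evaluated point and its score so the best one can be recovered
# afterwards with np.argmax(score).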
def _fun_maximize(_x, grad):
global i
if i == nlOptIter:
print("Warning: maximum number of iterations reached.")
return float(np.min(score))
allParams[i, :] = _x
if learnBeta:
global blrBeta
blrBeta = 10 ** (allParams[i, 0])
curParams = allParams[i, 1:]
else:
curParams = allParams[i, :]
scaledParams = scaleParams(curParams)
curScore = float(-train_model(scaledParams, blrBeta))
# Keep track of previous tries
score[i] = curScore
i += 1
pbar.update(1)
return curScore
opt.set_max_objective(_fun_maximize)
init_opt = np.random.uniform(0, 1, len(paramLow)) * \
(paramHigh - paramLow) + paramLow
opt.optimize(init_opt)
pbar.close()
"""
Saving
"""
# Compute best quantile
imax = np.argmax(score)
curParams = allParams[imax, :]
if learnBeta:
curParams = allParams[imax, 1:]
blrBeta = 10 ** allParams[imax, 0]
print("Best BLR beta: {}".format(blrBeta))
bestParam = scaleParams(curParams)
print("Best parameters: {}".format(bestParam))
bbq_qf = None
if not basicRBF:
# Interpolation
if useARDkernel:
bbq_qf = []
nprm = len(bestParam)
for j in range(D):
x, y, params = genParams(
bestParam[int(j * nprm / D):int((j + 1) * nprm / D)])
bbq_qf.append(InterpWithAsymptotes(x=x, y=y,
interpolator=interpolator,
params=params))
else:
x, y, params = genParams(bestParam)
bbq_qf = InterpWithAsymptotes(x=x, y=y, interpolator=interpolator,
params=params)
# Draw features with interpolated quantile
f_dict = {"linear_no_bias": ff.Linear(d=D, has_bias_term=False),
"linear": ff.Linear(d=D, has_bias_term=True)}
if basicRBF:
f_dict["rbf"] = ff.RFF_RBF(m=M, d=D, ls=bestParam)
else:
f_dict["bbq"] = ff.QMCF_BBQ(m=M, d=D,
sampler=partial(freqGenFn, bbq_qf=bbq_qf),
sequence_type=QMC_SEQUENCE.HALTON,
scramble_type=QMC_SCRAMBLING.GENERALISED,
qmc_kwargs={QMC_KWARG.PERM: None})
qmcf_bbq = ff.FComposition(f_dict=f_dict, composition=composition)
# Compute best data fit
blr = ABLR(k_phi=qmcf_bbq, d=D, d_out=1,
alpha=blrAlpha, beta=blrBeta, log_dir=None)
blr.learn_from_history(x_trn=train_x, y_trn=train_y)
prediction = blr.predict(x_tst=train_x, pred_var=True)
pred_train, predVar_train = prediction.mean, prediction.var
# Compute final score metrics
prediction = blr.predict(x_tst=test_x, pred_var=True)
pred_test, predVar_test = prediction.mean, prediction.var
finalRMSE = np.sqrt(np.mean((pred_test - test_y) ** 2))
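# mnll is assumed here to be the mean negative log-likelihood under the
# Gaussian predictive: mean of 0.5*log(2*pi*var) + (y - mu)**2 / (2*var).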
finalMNLL = mnll(test_y, pred_test, predVar_test)
print("Final model RMSE: {}".format(finalRMSE))
print("Final model MNLL: {}".format(finalMNLL))
settingsVars = {
"RMSE": finalRMSE,
"MNLL": finalMNLL,
"bestParam": bestParam.tolist(),
"datasetName": datasetName,
"nlOptIter": nlOptIter,
"nlOptAlgo": nlOptAlgo,
"gridRes": gridRes,
"paramLow": paramLow.tolist(),
"paramHigh": paramHigh.tolist(),
"M": M,
"N": N,
"D": D,
"blrAlpha": blrAlpha,
"blrBeta": blrBeta,
"useARDkernel": useARDkernel,
"basicRBF": basicRBF,
"quantileParametrisation": quantileParametrisation.__name__,
"scoreMetric": scoreMetric,
"searchStrategy": searchStrategy,
"composition": composition,
"interpolator": interpolator.__name__
}
rs = ResultSaver(settingsVars)
rs.save_params_and_loss(allParams, -score)
if not basicRBF:
rs.save_quantile(bbq_qf, x, y)
rs.save_pdf(bbq_qf)
if D != 2:
if D == 1:
rs.save_dataset(train_x, train_y, test_x, test_y)
rs.save_data_fit(train_x, pred_train, predVar_train,
test_x, pred_test, predVar_test)
else:
# Generate images
imSize = 130
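# Map inputs to integer pixel indices via round((x + 1) * (imSize - 1) / 2),
# assuming the texture coordinates lie in [-1, 1] along each axis.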
trainIdx = np.round((train_x + 1) * (imSize - 1) / 2.0).astype(int)
testIdx = np.round((test_x + 1) * (imSize - 1) / 2.0).astype(int)
fullImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, train_y):
fullImg[idx[0], idx[1]] = y
for idx, y in zip(testIdx, test_y):
fullImg[idx[0], idx[1]] = y
predImg = np.zeros((imSize, imSize))
for idx, y in zip(trainIdx, pred_train):
predImg[idx[0], idx[1]] = y
for idx, y in zip(testIdx, pred_test):
predImg[idx[0], idx[1]] = y
predImgPrcs = np.clip(predImg, -1, 1)
rs.save_pred_image(predImgPrcs)
if len(paramLow) - 1 * learnBeta == 2: # Plot score map
if searchStrategy == "BO" or searchStrategy == "NLopt":
plotRes = 64
X, Y = np.meshgrid(np.linspace(paramLow[0], paramHigh[0], plotRes),
np.linspace(paramLow[1], paramHigh[1], plotRes))
Z = np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])
scaledZ = scaleParams(Z)
scaledX = scaledZ[:, 0].reshape(X.shape)
scaledY = scaledZ[:, 1].reshape(Y.shape)
if searchStrategy == "Gridsearch":
score_surface = -score
elif searchStrategy == "BO":
score_surface = -bo.model.predictMean(Z)
if searchStrategy == "BO" or searchStrategy == "Gridsearch":
rs.save_loss_surface(scaledX, scaledY, score_surface.reshape(X.shape))
print("Finished.") | paramLow = qp.paramLow
paramHigh = qp.paramHigh
| random_line_split |
lib.rs | //! This is a platform-agnostic Rust driver for the ADS1013, ADS1014, ADS1015,
//! ADS1113, ADS1114, and ADS1115 ultra-small, low-power
//! analog-to-digital converters (ADC), based on the [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! This driver allows you to:
//! - Set the operating mode to one-shot or continuous. See: [`into_continuous()`].
//! - Make a measurement in one-shot mode. See: [`read()`][read_os].
//! - Start continuous conversion mode. See: [`start()`].
//! - Read the last measurement made in continuous conversion mode. See: [`read()`][read_cont].
//! - Set the data rate. See: [`set_data_rate()`].
//! - Set the full-scale range (gain amplifier). See [`set_full_scale_range()`].
//! - Read whether a measurement is in progress. See: [`is_measurement_in_progress()`].
//! - Set the ALERT/RDY pin to be used as conversion-ready pin. See: [`use_alert_rdy_pin_as_ready()`].
//! - Comparator:
//! - Set the low and high thresholds. See: [`set_high_threshold_raw()`].
//! - Set the comparator mode. See: [`set_comparator_mode()`].
//! - Set the comparator polarity. See: [`set_comparator_polarity()`].
//! - Set the comparator latching. See: [`set_comparator_latching()`].
//! - Set the comparator queue. See: [`set_comparator_queue()`].
//! - Disable the comparator. See: [`disable_comparator()`].
//!
//! [`into_continuous()`]: struct.Ads1x1x.html#method.into_continuous
//! [read_os]: struct.Ads1x1x.html#method.read
//! [`start()`]: struct.Ads1x1x.html#method.start
//! [read_cont]: struct.Ads1x1x.html#impl-OneShot%3CAds1x1x%3CDI%2C%20IC%2C%20CONV%2C%20OneShot%3E%2C%20i16%2C%20CH%3E
//! [`set_data_rate()`]: struct.Ads1x1x.html#method.set_data_rate
//! [`set_full_scale_range()`]: struct.Ads1x1x.html#method.set_full_scale_range
//! [`is_measurement_in_progress()`]: struct.Ads1x1x.html#method.is_measurement_in_progress
//! [`set_high_threshold_raw()`]: struct.Ads1x1x.html#method.set_high_threshold_raw
//! [`set_comparator_mode()`]: struct.Ads1x1x.html#method.set_comparator_mode
//! [`set_comparator_polarity()`]: struct.Ads1x1x.html#method.set_comparator_polarity
//! [`set_comparator_latching()`]: struct.Ads1x1x.html#method.set_comparator_latching
//! [`set_comparator_queue()`]: struct.Ads1x1x.html#method.set_comparator_queue
//! [`disable_comparator()`]: struct.Ads1x1x.html#method.disable_comparator
//! [`use_alert_rdy_pin_as_ready()`]: struct.Ads1x1x.html#method.use_alert_rdy_pin_as_ready
//!
//! ## The devices
//!
//! The devices are precision, low power, 12/16-bit analog-to-digital
//! converters (ADC) that provide all features necessary to measure the most
//! common sensor signals in an ultra-small package. Depending on the device,
//! these integrate a programmable gain amplifier (PGA), voltage reference,
//! oscillator and high-accuracy temperature sensor.
//!
//! The devices can perform conversions at data rates up to 3300 samples per
//! second (SPS). The PGA offers input ranges from ±256 mV to ±6.144 V,
//! allowing both large and small signals to be measured with high resolution.
//! An input multiplexer (MUX) allows to measure two differential or four
//! single-ended inputs. The high-accuracy temperature sensor can be used for
//! system-level temperature monitoring or cold-junction compensation for
//! thermocouples.
//!
//! The devices operate either in continuous-conversion mode, or in a
//! single-shot mode that automatically powers down after a conversion.
//! Single-shot mode significantly reduces current consumption during idle
//! periods. Data is transferred through I2C.
//!
//! Here is a comparison of the characteristics of the devices:
//!
//! | Device | Resolution | Sample Rate | Channels | Multi-channel | Features |
//! |---------|------------|--------------|----------|---------------|-----------------|
//! | ADS1013 | 12-bit | Max 3300 SPS | 1 | N/A | |
//! | ADS1014 | 12-bit | Max 3300 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1015 | 12-bit | Max 3300 SPS | 4 | Multiplexed | Comparator, PGA |
//! | ADS1113 | 16-bit | Max 860 SPS | 1 | N/A | |
//! | ADS1114 | 16-bit | Max 860 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1115 | 16-bit | Max 860 SPS | 4 | Multiplexed | Comparator, PGA |
//!
//! Datasheets:
//! - [ADS101x](http://www.ti.com/lit/ds/symlink/ads1015.pdf)
//! - [ADS111x](http://www.ti.com/lit/ds/symlink/ads1115.pdf)
//!
//! ## Usage examples (see also examples folder)
//!
//! To use this driver, import this crate and an `embedded_hal` implementation,
//! then instantiate the appropriate device.
//! In the following examples an instance of the ADS1013 will be created.
//! Other devices can be created with similar methods, e.g.
//! `Ads1x1x::new_ads1114(...)`.
//!
//! Please find additional examples using hardware in this repository: [driver-examples]
//!
//! [driver-examples]: https://github.com/eldruin/driver-examples
//!
//! ### Create a driver instance for the ADS1013
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! // do something...
//!
//! // get the I2C device back
//! let dev = adc.destroy_ads1013();
//! ```
//!
//! ### Create a driver instance for the ADS1013 with an alternative address (method 1)
//! | //! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let (bit1, bit0) = (true, false); // last two bits of address
//! let address = SlaveAddr::Alternative(bit1, bit0);
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//! ### Create a driver instance for the ADS1013 with an alternative address (method 2)
//!
//! Using a helper `SlaveAddr` creation method that matches the connection
//! of the `ADDR` pin.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! // `ADDR` pin connected to SDA results in the 0x4A effective address
//! let address = SlaveAddr::new_sda();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//!
//! ### Make a one-shot measurement
//! ```no_run
//! use ads1x1x::{channel, Ads1x1x, SlaveAddr};
//! use embedded_hal::adc::OneShot;
//! use linux_embedded_hal::I2cdev;
//! use nb::block;
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let mut adc = Ads1x1x::new_ads1013(dev, SlaveAddr::default());
//! let measurement = block!(adc.read(&mut channel::DifferentialA0A1)).unwrap();
//! println!("Measurement: {}", measurement);
//! let _dev = adc.destroy_ads1013(); // get I2C device back
//! ```
//!
//! ### Change into continuous conversion mode and read the last measurement
//!
//! Changing the mode may fail if there is a communication error.
//! In that case, you can retrieve the unchanged device from the error type.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, ModeChangeError, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! match adc.into_continuous() {
//! Err(ModeChangeError::I2C(e, adc)) => /* mode change failed handling */ panic!(),
//! Ok(mut adc) => {
//! let measurement = adc.read().unwrap();
//! // ...
//! }
//! }
//! ```
//!
//!
//! ### Set the data rate
//! For 12-bit devices, the available data rates are given by `DataRate12Bit`.
//! For 16-bit devices, the available data rates are given by `DataRate16Bit`.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, DataRate16Bit, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1115(dev, address);
//! adc.set_data_rate(DataRate16Bit::Sps860).unwrap();
//! ```
//!
//! ### Configure the comparator
//! Configure the comparator to assert when the voltage drops below -1.5 V
//! or rises above 1.5 V in at least two consecutive conversions. The
//! ALERT/RDY pin is then set high and stays high until the
//! measurement is read or an appropriate SMBus alert response is sent by
//! the master.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{
//! Ads1x1x, SlaveAddr, ComparatorQueue, ComparatorPolarity,
//! ComparatorMode, ComparatorLatching, FullScaleRange
//! };
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1015(dev, address);
//! adc.set_comparator_queue(ComparatorQueue::Two).unwrap();
//! adc.set_comparator_polarity(ComparatorPolarity::ActiveHigh).unwrap();
//! adc.set_comparator_mode(ComparatorMode::Window).unwrap();
//! adc.set_full_scale_range(FullScaleRange::Within2_048V).unwrap();
//! adc.set_low_threshold_raw(-1500).unwrap();
//! adc.set_high_threshold_raw(1500).unwrap();
//! adc.set_comparator_latching(ComparatorLatching::Latching).unwrap();
//! ```
#![doc(html_root_url = "https://docs.rs/ads1x1x/0.2.2")]
#![deny(unsafe_code)]
#![deny(missing_docs)]
#![no_std]
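// Default 7-bit I2C address (0x48); this corresponds to the ADDR pin tied to GND.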
const DEVICE_BASE_ADDRESS: u8 = 0b100_1000;
struct Register;
impl Register {
const CONVERSION: u8 = 0x00;
const CONFIG: u8 = 0x01;
const LOW_TH: u8 = 0x02;
const HIGH_TH: u8 = 0x03;
}
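// Bit positions inside the 16-bit CONFIG register, MSB first: OS, MUX[2:0],
// PGA[2:0], MODE, DR[2:0], COMP_MODE, COMP_POL, COMP_LAT, COMP_QUE[1:0]
// (see the configuration-register table in the datasheets linked above).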
struct BitFlags;
impl BitFlags {
const OS: u16 = 0b1000_0000_0000_0000;
const MUX2: u16 = 0b0100_0000_0000_0000;
const MUX1: u16 = 0b0010_0000_0000_0000;
const MUX0: u16 = 0b0001_0000_0000_0000;
const PGA2: u16 = 0b0000_1000_0000_0000;
const PGA1: u16 = 0b0000_0100_0000_0000;
const PGA0: u16 = 0b0000_0010_0000_0000;
const OP_MODE: u16 = 0b0000_0001_0000_0000;
const DR2: u16 = 0b0000_0000_1000_0000;
const DR1: u16 = 0b0000_0000_0100_0000;
const DR0: u16 = 0b0000_0000_0010_0000;
const COMP_MODE: u16 = 0b0000_0000_0001_0000;
const COMP_POL: u16 = 0b0000_0000_0000_1000;
const COMP_LAT: u16 = 0b0000_0000_0000_0100;
const COMP_QUE1: u16 = 0b0000_0000_0000_0010;
const COMP_QUE0: u16 = 0b0000_0000_0000_0001;
}
mod channels;
pub use crate::channels::{channel, ChannelSelection};
mod construction;
mod conversion;
pub use crate::conversion::{ConvertMeasurement, ConvertThreshold};
mod devices;
#[doc(hidden)]
pub mod ic;
#[doc(hidden)]
pub mod interface;
mod types;
use crate::types::Config;
pub use crate::types::{
mode, Ads1x1x, ComparatorLatching, ComparatorMode, ComparatorPolarity, ComparatorQueue,
DataRate12Bit, DataRate16Bit, DynamicOneShot, Error, FullScaleRange, ModeChangeError,
SlaveAddr,
};
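// Sealed-trait pattern: public traits are bounded by this private trait so
// downstream crates cannot implement them, keeping the set of supported ICs closed.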
mod private {
use super::{ic, interface, Ads1x1x};
pub trait Sealed {}
impl<I2C> Sealed for interface::I2cInterface<I2C> {}
impl<DI, IC, CONV, MODE> Sealed for Ads1x1x<DI, IC, CONV, MODE> {}
impl Sealed for ic::Resolution12Bit {}
impl Sealed for ic::Resolution16Bit {}
impl Sealed for ic::Ads1013 {}
impl Sealed for ic::Ads1113 {}
impl Sealed for ic::Ads1014 {}
impl Sealed for ic::Ads1114 {}
impl Sealed for ic::Ads1015 {}
impl Sealed for ic::Ads1115 {}
} | random_line_split | |
lib.rs | //! This is a platform-agnostic Rust driver for the ADS1013, ADS1014, ADS1015,
//! ADS1113, ADS1114, and ADS1115 ultra-small, low-power
//! analog-to-digital converters (ADC), based on the [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! This driver allows you to:
//! - Set the operating mode to one-shot or continuous. See: [`into_continuous()`].
//! - Make a measurement in one-shot mode. See: [`read()`][read_os].
//! - Start continuous conversion mode. See: [`start()`].
//! - Read the last measurement made in continuous conversion mode. See: [`read()`][read_cont].
//! - Set the data rate. See: [`set_data_rate()`].
//! - Set the full-scale range (gain amplifier). See [`set_full_scale_range()`].
//! - Read whether a measurement is in progress. See: [`is_measurement_in_progress()`].
//! - Set the ALERT/RDY pin to be used as conversion-ready pin. See: [`use_alert_rdy_pin_as_ready()`].
//! - Comparator:
//! - Set the low and high thresholds. See: [`set_high_threshold_raw()`].
//! - Set the comparator mode. See: [`set_comparator_mode()`].
//! - Set the comparator polarity. See: [`set_comparator_polarity()`].
//! - Set the comparator latching. See: [`set_comparator_latching()`].
//! - Set the comparator queue. See: [`set_comparator_queue()`].
//! - Disable the comparator. See: [`disable_comparator()`].
//!
//! [`into_continuous()`]: struct.Ads1x1x.html#method.into_continuous
//! [read_os]: struct.Ads1x1x.html#method.read
//! [`start()`]: struct.Ads1x1x.html#method.start
//! [read_cont]: struct.Ads1x1x.html#impl-OneShot%3CAds1x1x%3CDI%2C%20IC%2C%20CONV%2C%20OneShot%3E%2C%20i16%2C%20CH%3E
//! [`set_data_rate()`]: struct.Ads1x1x.html#method.set_data_rate
//! [`set_full_scale_range()`]: struct.Ads1x1x.html#method.set_full_scale_range
//! [`is_measurement_in_progress()`]: struct.Ads1x1x.html#method.is_measurement_in_progress
//! [`set_high_threshold_raw()`]: struct.Ads1x1x.html#method.set_high_threshold_raw
//! [`set_comparator_mode()`]: struct.Ads1x1x.html#method.set_comparator_mode
//! [`set_comparator_polarity()`]: struct.Ads1x1x.html#method.set_comparator_polarity
//! [`set_comparator_latching()`]: struct.Ads1x1x.html#method.set_comparator_latching
//! [`set_comparator_queue()`]: struct.Ads1x1x.html#method.set_comparator_queue
//! [`disable_comparator()`]: struct.Ads1x1x.html#method.disable_comparator
//! [`use_alert_rdy_pin_as_ready()`]: struct.Ads1x1x.html#method.use_alert_rdy_pin_as_ready
//!
//! ## The devices
//!
//! The devices are precision, low power, 12/16-bit analog-to-digital
//! converters (ADC) that provide all features necessary to measure the most
//! common sensor signals in an ultra-small package. Depending on the device,
//! these integrate a programmable gain amplifier (PGA), voltage reference,
//! oscillator and high-accuracy temperature sensor.
//!
//! The devices can perform conversions at data rates up to 3300 samples per
//! second (SPS). The PGA offers input ranges from ±256 mV to ±6.144 V,
//! allowing both large and small signals to be measured with high resolution.
//! An input multiplexer (MUX) allows to measure two differential or four
//! single-ended inputs. The high-accuracy temperature sensor can be used for
//! system-level temperature monitoring or cold-junction compensation for
//! thermocouples.
//!
//! The devices operate either in continuous-conversion mode, or in a
//! single-shot mode that automatically powers down after a conversion.
//! Single-shot mode significantly reduces current consumption during idle
//! periods. Data is transferred through I2C.
//!
//! Here is a comparison of the characteristics of the devices:
//!
//! | Device | Resolution | Sample Rate | Channels | Multi-channel | Features |
//! |---------|------------|--------------|----------|---------------|-----------------|
//! | ADS1013 | 12-bit | Max 3300 SPS | 1 | N/A | |
//! | ADS1014 | 12-bit | Max 3300 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1015 | 12-bit | Max 3300 SPS | 4 | Multiplexed | Comparator, PGA |
//! | ADS1113 | 16-bit | Max 860 SPS | 1 | N/A | |
//! | ADS1114 | 16-bit | Max 860 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1115 | 16-bit | Max 860 SPS | 4 | Multiplexed | Comparator, PGA |
//!
//! Datasheets:
//! - [ADS101x](http://www.ti.com/lit/ds/symlink/ads1015.pdf)
//! - [ADS111x](http://www.ti.com/lit/ds/symlink/ads1115.pdf)
//!
//! ## Usage examples (see also examples folder)
//!
//! To use this driver, import this crate and an `embedded_hal` implementation,
//! then instantiate the appropriate device.
//! In the following examples an instance of the device ADS1013 will be created
//! as an example. Other devices can be created with similar methods like:
//! `Ads1x1x::new_ads1114(...)`.
//!
//! Please find additional examples using hardware in this repository: [driver-examples]
//!
//! [driver-examples]: https://github.com/eldruin/driver-examples
//!
//! ### Create a driver instance for the ADS1013
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! // do something...
//!
//! // get the I2C device back
//! let dev = adc.destroy_ads1013();
//! ```
//!
//! ### Create a driver instance for the ADS1013 with an alternative address (method 1)
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let (bit1, bit0) = (true, false); // last two bits of address
//! let address = SlaveAddr::Alternative(bit1, bit0);
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//! ### Create a driver instance for the ADS1013 with an alternative address (method 2)
//!
//! Using a helper `SlaveAddr` creation method that depends on the connection of
//! the `ADDR` pin.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! // `ADDR` pin connected to SDA results in the 0x4A effective address
//! let address = SlaveAddr::new_sda();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//!
//! ### Make a one-shot measurement
//! ```no_run
//! use ads1x1x::{channel, Ads1x1x, SlaveAddr};
//! use embedded_hal::adc::OneShot;
//! use linux_embedded_hal::I2cdev;
//! use nb::block;
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let mut adc = Ads1x1x::new_ads1013(dev, SlaveAddr::default());
//! let measurement = block!(adc.read(&mut channel::DifferentialA0A1)).unwrap();
//! println!("Measurement: {}", measurement);
//! let _dev = adc.destroy_ads1013(); // get I2C device back
//! ```
//!
//! ### Change into continuous conversion mode and read the last measurement
//!
//! Changing the mode may fail if there is a communication error.
//! In that case, you can retrieve the unchanged device from the error type.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, ModeChangeError, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! match adc.into_continuous() {
//! Err(ModeChangeError::I2C(e, adc)) => /* mode change failed handling */ panic!(),
//! Ok(mut adc) => {
//! let measurement = adc.read().unwrap();
//! // ...
//! }
//! }
//! ```
//!
//!
//! ### Set the data rate
//! For 12-bit devices, the available data rates are given by `DataRate12Bit`.
//! For 16-bit devices, the available data rates are given by `DataRate16Bit`.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, DataRate16Bit, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1115(dev, address);
//! adc.set_data_rate(DataRate16Bit::Sps860).unwrap();
//! ```
//!
//! ### Configure the comparator
//! Configure the comparator to assert when the voltage drops below -1.5 V
//! or rises above 1.5 V in at least two consecutive conversions. The
//! ALERT/RDY pin is then set high and stays high until the
//! measurement is read or an appropriate SMBus alert response is sent by
//! the master.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{
//! Ads1x1x, SlaveAddr, ComparatorQueue, ComparatorPolarity,
//! ComparatorMode, ComparatorLatching, FullScaleRange
//! };
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1015(dev, address);
//! adc.set_comparator_queue(ComparatorQueue::Two).unwrap();
//! adc.set_comparator_polarity(ComparatorPolarity::ActiveHigh).unwrap();
//! adc.set_comparator_mode(ComparatorMode::Window).unwrap();
//! adc.set_full_scale_range(FullScaleRange::Within2_048V).unwrap();
//! adc.set_low_threshold_raw(-1500).unwrap();
//! adc.set_high_threshold_raw(1500).unwrap();
//! adc.set_comparator_latching(ComparatorLatching::Latching).unwrap();
//! ```
#![doc(html_root_url = "https://docs.rs/ads1x1x/0.2.2")]
#![deny(unsafe_code)]
#![deny(missing_docs)]
#![no_std]
const DEVICE_BASE_ADDRESS: u8 = 0b100_1000;
struct Re | impl Register {
const CONVERSION: u8 = 0x00;
const CONFIG: u8 = 0x01;
const LOW_TH: u8 = 0x02;
const HIGH_TH: u8 = 0x03;
}
struct BitFlags;
impl BitFlags {
const OS: u16 = 0b1000_0000_0000_0000;
const MUX2: u16 = 0b0100_0000_0000_0000;
const MUX1: u16 = 0b0010_0000_0000_0000;
const MUX0: u16 = 0b0001_0000_0000_0000;
const PGA2: u16 = 0b0000_1000_0000_0000;
const PGA1: u16 = 0b0000_0100_0000_0000;
const PGA0: u16 = 0b0000_0010_0000_0000;
const OP_MODE: u16 = 0b0000_0001_0000_0000;
const DR2: u16 = 0b0000_0000_1000_0000;
const DR1: u16 = 0b0000_0000_0100_0000;
const DR0: u16 = 0b0000_0000_0010_0000;
const COMP_MODE: u16 = 0b0000_0000_0001_0000;
const COMP_POL: u16 = 0b0000_0000_0000_1000;
const COMP_LAT: u16 = 0b0000_0000_0000_0100;
const COMP_QUE1: u16 = 0b0000_0000_0000_0010;
const COMP_QUE0: u16 = 0b0000_0000_0000_0001;
}
mod channels;
pub use crate::channels::{channel, ChannelSelection};
mod construction;
mod conversion;
pub use crate::conversion::{ConvertMeasurement, ConvertThreshold};
mod devices;
#[doc(hidden)]
pub mod ic;
#[doc(hidden)]
pub mod interface;
mod types;
use crate::types::Config;
pub use crate::types::{
mode, Ads1x1x, ComparatorLatching, ComparatorMode, ComparatorPolarity, ComparatorQueue,
DataRate12Bit, DataRate16Bit, DynamicOneShot, Error, FullScaleRange, ModeChangeError,
SlaveAddr,
};
mod private {
use super::{ic, interface, Ads1x1x};
pub trait Sealed {}
impl<I2C> Sealed for interface::I2cInterface<I2C> {}
impl<DI, IC, CONV, MODE> Sealed for Ads1x1x<DI, IC, CONV, MODE> {}
impl Sealed for ic::Resolution12Bit {}
impl Sealed for ic::Resolution16Bit {}
impl Sealed for ic::Ads1013 {}
impl Sealed for ic::Ads1113 {}
impl Sealed for ic::Ads1014 {}
impl Sealed for ic::Ads1114 {}
impl Sealed for ic::Ads1015 {}
impl Sealed for ic::Ads1115 {}
}
| gister;
| identifier_name |
shear_calculation_withoutplot_vasp.py | #
###################################
#2018-04-11
#Jin Zhang@Stony Brook University
######################################
###For VASP
###----------------------------------Readme------------------------------------------------------------
###This script calculates the tensile strength and the shear strength using VASP.
###1. To calculate the tensile strength. ###
###To calculate the tensile strength with this script, first add
###" FCELL(1,1)=0.0 " (right after the REAL(q) FCELL(3,3) declaration) to constr_cell_relax.F
###in VASP. This modification means that only the y and z axes are optimized; the x axis is not
###optimized. Then recompile VASP. After that, this script can be used to calculate the tensile
###strength.
###For the tensile strength, this script first rotates the tensile axis of interest onto the
###[1, 0, 0] axis. The reason for rotating onto [1, 0, 0] is that we set "FCELL(1,1)=0.0" in
###constr_cell_relax.F in VASP. E.g., to calculate the tensile strength along the [1, 1, 1] direction
###of diamond, the [1, 1, 1] direction must be rotated onto the [1, 0, 0] axis.
###2. To calculate the shear strength. ###
###To calculate the shear strength with this script, first add
###" FCELL(1,3)=0.0 " and " FCELL(3,1)=0.0 " (again right after the REAL(q) FCELL(3,3)
###declaration) to constr_cell_relax.F in VASP. This modification means that all cell
###components are optimized except the xz and zx ones. Then recompile VASP.
###After that, this script can be used to calculate the shear strength.
###For the shear strength, this script first rotates the normal of the shear plane onto the
###[0, 0, 1] axis, and then rotates the shear direction onto the [1, 0, 0] axis. The reason is that
###we set "FCELL(1,3)=0.0" and "FCELL(3,1)=0.0" in constr_cell_relax.F in VASP. E.g., to
###calculate the shear strength on the (1, 1, 1) shear plane along the [1, 1, -2] shear direction
###for diamond, the [1, 1, 1] direction must be rotated onto the [0, 0, 1] axis and the
###[1, 1, -2] direction onto the [1, 0, 0] axis.
###--------------------------------------------------------------------------------------------------------
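#------------------------------------------------------------------------------------
###Illustrative aside (a minimal sketch, not called by the workflow below): both
###rotations in this script are instances of the Rodrigues rotation formula. The
###helper below (the name rotation_between is ours, purely illustrative) builds the
###same kind of matrix for the row-vector convention v_rot = v @ R used in this script.
def rotation_between(p, q):
    """Return R such that (p @ R) is parallel to q (Rodrigues formula, row vectors)."""
    import numpy as _np
    p = _np.asarray(p, dtype=float)
    q = _np.asarray(q, dtype=float)
    p = p / _np.linalg.norm(p)
    q = q / _np.linalg.norm(q)
    n = _np.cross(p, q)            # rotation axis (unnormalized)
    s = _np.linalg.norm(n)         # sin(theta)
    c = float(_np.dot(p, q))       # cos(theta)
    if s < 1e-12:
        # parallel vectors: identity; the anti-parallel case (c < 0) would need a
        # 180-degree rotation about any axis perpendicular to p (not handled here)
        return _np.eye(3)
    n = n / s
    K = _np.array([[0.0, -n[2], n[1]],
                   [n[2], 0.0, -n[0]],
                   [-n[1], n[0], 0.0]])
    # column-vector Rodrigues matrix, transposed for the row-vector convention
    return (_np.eye(3) + s * K + (1.0 - c) * (K @ K)).T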
import numpy as np
import math
import os
#plotting-related imports removed: this "withoutplot" variant never draws figures
#fractions.gcd was removed in Python 3.9; a small Euclidean gcd (which also accepts
#the integer-valued floats produced by the cross products below) replaces it
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a
#------------------------------------------------------------------------------------
###start to read the input file
stra=0.02 ######provide the strain
times=9
Sh_pla=[int(x) for x in input("What is the shear plane? e.g. 1 1 1 \n").split()]
if len(Sh_pla)==3:
Sh_pla=Sh_pla
elif len(Sh_pla)==4:
h=Sh_pla[0]
k=Sh_pla[1]
l=Sh_pla[3]
#hkl_gcd=gcd(gcd(h,k),l)
Sh_pla=np.array([h, k, l])
Sh_pla_point1=np.array([1/Sh_pla[0], 0, 0])
Sh_pla_point2=np.array([0, 1/Sh_pla[1], 0])
Sh_pla_point3=np.array([0, 0, 1/Sh_pla[2]])
Sh_pla_vec1=np.array([Sh_pla_point1[0]-Sh_pla_point2[0], Sh_pla_point1[1]-Sh_pla_point2[1], Sh_pla_point1[2]-Sh_pla_point2[2]])
Sh_pla_vec2=np.array([Sh_pla_point3[0]-Sh_pla_point2[0], Sh_pla_point3[1]-Sh_pla_point2[1], Sh_pla_point3[2]-Sh_pla_point2[2]])
Sh_pla_normal_x=Sh_pla_vec1[1]*Sh_pla_vec2[2]-Sh_pla_vec2[1]*Sh_pla_vec1[2]
Sh_pla_normal_y=-(Sh_pla_vec1[0]*Sh_pla_vec2[2]-Sh_pla_vec2[0]*Sh_pla_vec1[2])
Sh_pla_normal_z=Sh_pla_vec1[0]*Sh_pla_vec2[1]-Sh_pla_vec2[0]*Sh_pla_vec1[1]
#Sh_pla_normal=Sh_pla # for cubic lattices the normal of the shear plane equals the plane indices
Sh_pla_normal_gcd=gcd(gcd(Sh_pla_normal_x,Sh_pla_normal_y),Sh_pla_normal_z)
P0=np.array([Sh_pla_normal_x/Sh_pla_normal_gcd, Sh_pla_normal_y/Sh_pla_normal_gcd, Sh_pla_normal_z/Sh_pla_normal_gcd])#P0 is the normal of the shear plane, divided by the greatest common divisor
Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
Sh_dir=[int(x) for x in input("What is the shear direction? e.g. 1 1 -2 \n").split()]
if len(Sh_dir)==3:
Sh_dir=Sh_dir
elif len(Sh_dir)==4:
U=Sh_dir[1]+2*Sh_dir[0]
V=Sh_dir[0]+2*Sh_dir[1]
W=Sh_dir[3]
UVW_gcd=gcd(gcd(U,V),W)
Sh_dir=np.array([U/UVW_gcd, V/UVW_gcd, W/UVW_gcd])
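#e.g. the four-index direction [2 -1 -1 0] gives U=2*2+(-1)=3, V=2+2*(-1)=0, W=0,
#i.e. [1 0 0] after dividing by gcd=3 (U=2u+v, V=u+2v, W=w follow from t=-(u+v))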
L0=Sh_dir
K0=np.array([0, 0, 1]) #after rotation: K0 is the z axis, the target of the second rotation
#P0=np.array([1, 1, 1]) #provide the axis before rotation.. P0 is the tensile direction we want to calculate.
###finish to read the input file
#Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
#------------------------------------------------------------------------------------
###open POSCAR
t=[]
t1=[]
with open ("POSCAR") as poscar0:
pos0=poscar0.readlines()
length=len(pos0)
for i in range(2,5):
j=pos0[i]
j=j.split()
t.extend(j)
print(t)
for i in range(len(t)):
t1.extend([float(t[i])])
pos_array=np.array(t1).reshape((3,3)) ####read basis vector from POSCAR
print(pos_array)
oa=np.array([pos_array[0][0], pos_array[0][1], pos_array[0][2]])
ob=np.array([pos_array[1][0], pos_array[1][1], pos_array[1][2]])
oc=np.array([pos_array[2][0], pos_array[2][1], pos_array[2][2]])
#------------------------------------------------------------------------------------
###start to calculate the structure data before rotation######
##a1_len=(float(pos_array[0][0])**2+float(pos_array[0][1])**2+float(pos_array[0][2])**2)**0.5
##a2_len=(float(pos_array[1][0])**2+float(pos_array[1][1])**2+float(pos_array[1][2])**2)**0.5
##a3_len=(float(pos_array[2][0])**2+float(pos_array[2][1])**2+float(pos_array[2][2])**2)**0.5
##
##P=np.array([P0[0]*a1_len,P0[1]*a2_len,P0[2]*a3_len])
##Q=np.array([Q0[0]*a1_len,Q0[1]*a2_len,Q0[2]*a3_len])
##
##XYZ_a=np.array([[a1_len, 0, 0],
## [0, a2_len, 0],
## [0, 0, a3_len]])
#----------------------------------------
basis=np.array([oa, ob, oc])
P0_abc=np.array([[P0[0], 0, 0],
[0, P0[1], 0],
[0, 0, P0[2]]])
P0_abc_basis=np.dot(P0_abc,basis)
P=P0_abc_basis[0]+P0_abc_basis[1]+P0_abc_basis[2] # the position of the normal of shear plane before rotation
Q0_abc=np.array([[Q0[0], 0, 0],
[0, Q0[1], 0],
[0, 0, Q0[2]]])
Q0_abc_basis=np.dot(Q0_abc,basis)
Q=Q0_abc_basis[0]+Q0_abc_basis[1]+Q0_abc_basis[2] # the position of the normal of shear plane after rotation
#----------------------------------------
#basis=np.array([oa, ob, oc])
L0_abc=np.array([[L0[0], 0, 0],
[0, L0[1], 0],
[0, 0, L0[2]]])
L0_abc_basis=np.dot(L0_abc,basis)
L=L0_abc_basis[0]+L0_abc_basis[1]+L0_abc_basis[2] # the position of shear direction before rotation
K0_abc=np.array([[K0[0], 0, 0],
[0, K0[1], 0],
[0, 0, K0[2]]])
K0_abc_basis=np.dot(K0_abc,basis)
K=K0_abc_basis[0]+K0_abc_basis[1]+K0_abc_basis[2] # the position of shear direction after rotation
#------------------------------------------------------------------------------------
###start to calculate the structure data after rotation######
############## the first rotation begin##############
######1.get the angle between two vectors########
P_norm=(P[0]**2+P[1]**2+P[2]**2)**0.5 #the norm of the vector before rotation
Q_norm=(Q[0]**2+Q[1]**2+Q[2]**2)**0.5 #the norm of the vector after rotation
PQ_dot=np.dot(P,Q)
theta=math.acos(PQ_dot/(P_norm*Q_norm))##obtain theta; the unit is radians instead of degrees
theta_cos=PQ_dot/(P_norm*Q_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
M=np.array([P[1]*Q[2]-P[2]*Q[1], P[2]*Q[0]-P[0]*Q[2], P[0]*Q[1]-P[1]*Q[0]])
M_norm=(M[0]**2+M[1]**2+M[2]**2)**0.5 #the norm of the M vector
M_unit=np.array([M[0]/M_norm, M[1]/M_norm, M[2]/M_norm])
######3.get the rotation matrix###########
Nx=M_unit[0]
Ny=M_unit[1]
Nz=M_unit[2]
PQ_rot=np.array([[Nx*Nx*(1-theta_cos)+theta_cos, Nx*Ny*(1-theta_cos)+Nz*theta_sin, Nx*Nz*(1-theta_cos)-Ny*theta_sin],
[Nx*Ny*(1-theta_cos)-Nz*theta_sin, Ny*Ny*(1-theta_cos)+theta_cos, Ny*Nz*(1-theta_cos)+Nx*theta_sin],
[Nx*Nz*(1-theta_cos)+Ny*theta_sin, Ny*Nz*(1-theta_cos)-Nx*theta_sin, Nz*Nz*(1-theta_cos)+theta_cos]])
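# illustrative sanity check (assumes the row-vector convention v_rot = v @ R used below):
# the rotated plane normal P should now point along Q
if not np.allclose(np.dot(P, PQ_rot)/P_norm, Q/Q_norm, atol=1e-6):
    print("WARNING: PQ_rot does not rotate P onto Q; check the rotation setup")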
###### #####
a_PQ_rot=np.dot(pos_array,PQ_rot) #three basis vector after rotation
a_PQ_rot=a_PQ_rot.tolist()
L_rot=np.dot(L,PQ_rot)
############## the second rotation begin##############
######1.get the angle between two vectors########
L_norm=(L_rot[0]**2+L_rot[1]**2+L_rot[2]**2)**0.5 #the norm of the vector before rotation
K_norm=(K[0]**2+K[1]**2+K[2]**2)**0.5 #the norm of the vector after rotation
LK_dot=np.dot(L_rot,K)
theta=math.acos(LK_dot/(L_norm*K_norm))##obtain theta; the unit is radians instead of degrees
theta_cos=LK_dot/(L_norm*K_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
H=np.array([L_rot[1]*K[2]-L_rot[2]*K[1], L_rot[2]*K[0]-L_rot[0]*K[2], L_rot[0]*K[1]-L_rot[1]*K[0]])
H_norm=(H[0]**2+H[1]**2+H[2]**2)**0.5 #the norm of the H vector
H_unit=np.array([H[0]/H_norm, H[1]/H_norm, H[2]/H_norm])
######3.get the rotation matrix###########
Ex=H_unit[0]
Ey=H_unit[1]
Ez=H_unit[2]
LK_rot=np.array([[Ex*Ex*(1-theta_cos)+theta_cos, Ex*Ey*(1-theta_cos)+Ez*theta_sin, Ex*Ez*(1-theta_cos)-Ey*theta_sin],
[Ex*Ey*(1-theta_cos)-Ez*theta_sin, Ey*Ey*(1-theta_cos)+theta_cos, Ey*Ez*(1-theta_cos)+Ex*theta_sin],
[Ex*Ez*(1-theta_cos)+Ey*theta_sin, Ey*Ez*(1-theta_cos)-Ex*theta_sin, Ez*Ez*(1-theta_cos)+theta_cos]])
###### #####
a_LK_rot=np.dot(a_PQ_rot,LK_rot) #three basis vector after rotation
a_LK_rot=a_LK_rot.tolist()
os.system("cp POSCAR POSCAR_original") #copy original POSCAR
r=[]
with open ("POSCAR") as poscar1:
for line in poscar1:
r.append(line)
f=open("POSCAR_rota","w") #write POSCAR after rotation as POSCAR_rota
for i in range(0,2):
f.write(r[i])
for j in range(len(a_LK_rot)):
f.write(str(a_LK_rot[j][0])+' ')
f.write(str(a_LK_rot[j][1])+' ')
f.write(str(a_LK_rot[j][2]))
f.write('\n')
for x in range(5,len(r)):
f.write(r[x])
f.close()
os.system("cp POSCAR_rota POSCAR") ## copy POSCAR_rota as POSCAR
###### Thirdly, apply the strain to the rotated basis vectors
def postrain(poscar):
|
###### Fourthly, perform the VASP calculations
for i in np.arange(stra,stra*(times+1),stra):
print("****************************")
i=round(i,2)
print(i)
postrain("POSCAR")
os.system("cp POSCAR_stra POSCAR")
os.system("cp POSCAR_stra POSCAR_"+str(i))####
#os.system("bsub < Job_qsh.sh")
os.system("srun -n 20 /scratch/jin.zhang3_397857/Software/vasp.5.4.4/bin/vasp_std > vasp.out")
chek=os.popen("grep Voluntary OUTCAR").read()
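# poll OUTCAR until its final "Voluntary context switches" line appears,
# i.e. until the VASP run has finished writing its output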
while "Voluntary" not in chek:
chek=os.popen("grep Voluntary OUTCAR").read()
if "Voluntary" in chek:
break
os.system("cp OUTCAR OUTCAR_"+str(i))####
os.system("cp CONTCAR CONTCAR_"+str(i))####
os.system("grep 'in kB' OUTCAR > kB")
fin=os.popen("tail -1 kB").read()
fin=fin.split()
#os.system("cp CONTCAR POSCAR")
#os.system("bsub < Job_qsh.sh")
#os.system("srun -n 20 ~/Software/VASP/VASP5.4.4/vasp.5.4.4/bin/vasp_std > vasp.out")
if (Q0==np.array([1, 0, 0])).all():
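# VASP reports stress in kBar; dividing by 10 converts to GPa, and the sign flip
# turns VASP's convention into a positive shear stress (fin[7] is the ZX component)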
stress=(float(fin[7])/10)*(-1)
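# each cycle applies the same incremental shear to the already-relaxed cell, so after
# n = i/stra cycles the accumulated strain is (1+stra)**n - 1; the factor of 2 converts
# the tensor component eps_xz into the engineering shear strain gamma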
strain=((1.0+float(stra))**(float(i)/float(stra))-1.0)*2 ##output strain
u=open("strain_shearStress.dat","a")
u.write(str(strain)+' ')
u.write(str(stress))
u.write('\n')
os.system("cp CONTCAR POSCAR")
#print(i)
u.close()
os.system("rm POSCAR_stra")
| t2=[]
t3=[]
with open (poscar) as poscar2:
pos2=poscar2.readlines()
length=len(pos2)
for i in range(2,5):
j=pos2[i]
j=j.split()
t2.extend(j)
for i in range(len(t2)):
t3.extend([float(t2[i])])
pos_array3=np.array(t3).reshape((3,3))
if (Q0==np.array([1, 0, 0])).all():
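# simple-shear increment: the off-diagonal xz and zx entries add a strain of
# `stra` per cycle to the lattice vectors (treated as row vectors)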
stra_matr=np.array([[1,0,stra],
[0,1,0],
[stra,0,1]]) # strain(xz) and strain(zx)
a_stra=np.dot(pos_array3,stra_matr)
with open (poscar) as poscar:
s=[]
for line in poscar:
s.append(line)
f=open("POSCAR_stra","w")
for i in range(0,2):
f.write(s[i])
for j in range(len(a_stra)):
f.write(str(a_stra[j][0])+' ')
f.write(str(a_stra[j][1])+' ')
f.write(str(a_stra[j][2]))
f.write('\n')
for x in range(5,len(s)):
f.write(s[x])
f.close() | identifier_body |
shear_calculation_withoutplot_vasp.py | #
###################################
#2018-04-11
#Jin Zhang@Stony Brook University
######################################
###For VASP
###----------------------------------Readme------------------------------------------------------------
###This script calculates the tensile strength and the shear strength using VASP.
###1. To calculate the tensile strength. ###
###To calculate the tensile strength with this script, first add
###" FCELL(1,1)=0.0 " (right after the REAL(q) FCELL(3,3) declaration) to constr_cell_relax.F
###in VASP. This modification means that only the y and z axes are optimized; the x axis is not
###optimized. Then recompile VASP. After that, this script can be used to calculate the tensile
###strength.
###For the tensile strength, this script first rotates the tensile axis of interest onto the
###[1, 0, 0] axis. The reason for rotating onto [1, 0, 0] is that we set "FCELL(1,1)=0.0" in
###constr_cell_relax.F in VASP. E.g., to calculate the tensile strength along the [1, 1, 1] direction
###of diamond, the [1, 1, 1] direction must be rotated onto the [1, 0, 0] axis.
###2. To calculate the shear strength. ###
###To calculate the shear strength with this script, first add
###" FCELL(1,3)=0.0 " and " FCELL(3,1)=0.0 " (again right after the REAL(q) FCELL(3,3)
###declaration) to constr_cell_relax.F in VASP. This modification means that all cell
###components are optimized except the xz and zx ones. Then recompile VASP.
###After that, this script can be used to calculate the shear strength.
###For the shear strength, this script first rotates the normal of the shear plane onto the
###[0, 0, 1] axis, and then rotates the shear direction onto the [1, 0, 0] axis. The reason is that
###we set "FCELL(1,3)=0.0" and "FCELL(3,1)=0.0" in constr_cell_relax.F in VASP. E.g., to
###calculate the shear strength on the (1, 1, 1) shear plane along the [1, 1, -2] shear direction
###for diamond, the [1, 1, 1] direction must be rotated onto the [0, 0, 1] axis and the
###[1, 1, -2] direction onto the [1, 0, 0] axis.
###--------------------------------------------------------------------------------------------------------
import numpy as np
import math
import os
#plotting-related imports removed: this "withoutplot" variant never draws figures
#fractions.gcd was removed in Python 3.9; a small Euclidean gcd (which also accepts
#the integer-valued floats produced by the cross products below) replaces it
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a
#------------------------------------------------------------------------------------
###start to read the input file
stra=0.02 ######provide the strain
times=9
Sh_pla=[int(x) for x in input("What is the shear plane? e.g. 1 1 1 \n").split()]
if len(Sh_pla)==3:
Sh_pla=Sh_pla
elif len(Sh_pla)==4:
h=Sh_pla[0]
k=Sh_pla[1]
l=Sh_pla[3]
#hkl_gcd=gcd(gcd(h,k),l)
Sh_pla=np.array([h, k, l])
Sh_pla_point1=np.array([1/Sh_pla[0], 0, 0])
Sh_pla_point2=np.array([0, 1/Sh_pla[1], 0])
Sh_pla_point3=np.array([0, 0, 1/Sh_pla[2]])
Sh_pla_vec1=np.array([Sh_pla_point1[0]-Sh_pla_point2[0], Sh_pla_point1[1]-Sh_pla_point2[1], Sh_pla_point1[2]-Sh_pla_point2[2]])
Sh_pla_vec2=np.array([Sh_pla_point3[0]-Sh_pla_point2[0], Sh_pla_point3[1]-Sh_pla_point2[1], Sh_pla_point3[2]-Sh_pla_point2[2]])
Sh_pla_normal_x=Sh_pla_vec1[1]*Sh_pla_vec2[2]-Sh_pla_vec2[1]*Sh_pla_vec1[2]
Sh_pla_normal_y=-(Sh_pla_vec1[0]*Sh_pla_vec2[2]-Sh_pla_vec2[0]*Sh_pla_vec1[2])
Sh_pla_normal_z=Sh_pla_vec1[0]*Sh_pla_vec2[1]-Sh_pla_vec2[0]*Sh_pla_vec1[1]
#Sh_pla_normal=Sh_pla # for cubic lattices the normal of the shear plane equals the plane indices
Sh_pla_normal_gcd=gcd(gcd(Sh_pla_normal_x,Sh_pla_normal_y),Sh_pla_normal_z)
P0=np.array([Sh_pla_normal_x/Sh_pla_normal_gcd, Sh_pla_normal_y/Sh_pla_normal_gcd, Sh_pla_normal_z/Sh_pla_normal_gcd])#P0 is the normal of the shear plane, divided by the greatest common divisor
Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
Sh_dir=[int(x) for x in input("What is the shear direction? e.g. 1 1 -2 \n").split()]
if len(Sh_dir)==3:
Sh_dir=Sh_dir
elif len(Sh_dir)==4:
U=Sh_dir[1]+2*Sh_dir[0]
V=Sh_dir[0]+2*Sh_dir[1]
W=Sh_dir[3]
UVW_gcd=gcd(gcd(U,V),W)
Sh_dir=np.array([U/UVW_gcd, V/UVW_gcd, W/UVW_gcd])
L0=Sh_dir
K0=np.array([0, 0, 1]) #after rotation: K0 is the z axis, the target of the second rotation
#P0=np.array([1, 1, 1]) #provide the axis before rotation.. P0 is the tensile direction we want to calculate.
###finish to read the input file
#Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
#------------------------------------------------------------------------------------
###open POSCAR
t=[]
t1=[]
with open ("POSCAR") as poscar0:
pos0=poscar0.readlines()
length=len(pos0)
for i in range(2,5):
j=pos0[i]
j=j.split()
t.extend(j)
print(t)
for i in range(len(t)):
t1.extend([float(t[i])])
pos_array=np.array(t1).reshape((3,3)) ####read basis vector from POSCAR
print(pos_array)
oa=np.array([pos_array[0][0], pos_array[0][1], pos_array[0][2]])
ob=np.array([pos_array[1][0], pos_array[1][1], pos_array[1][2]])
oc=np.array([pos_array[2][0], pos_array[2][1], pos_array[2][2]])
#------------------------------------------------------------------------------------
###start to calculate the structure data before rotation######
##a1_len=(float(pos_array[0][0])**2+float(pos_array[0][1])**2+float(pos_array[0][2])**2)**0.5
##a2_len=(float(pos_array[1][0])**2+float(pos_array[1][1])**2+float(pos_array[1][2])**2)**0.5
##a3_len=(float(pos_array[2][0])**2+float(pos_array[2][1])**2+float(pos_array[2][2])**2)**0.5
##
##P=np.array([P0[0]*a1_len,P0[1]*a2_len,P0[2]*a3_len])
##Q=np.array([Q0[0]*a1_len,Q0[1]*a2_len,Q0[2]*a3_len])
##
##XYZ_a=np.array([[a1_len, 0, 0],
## [0, a2_len, 0],
## [0, 0, a3_len]])
#----------------------------------------
basis=np.array([oa, ob, oc])
P0_abc=np.array([[P0[0], 0, 0],
[0, P0[1], 0],
[0, 0, P0[2]]])
P0_abc_basis=np.dot(P0_abc,basis)
P=P0_abc_basis[0]+P0_abc_basis[1]+P0_abc_basis[2] # the position of the normal of shear plane before rotation
Q0_abc=np.array([[Q0[0], 0, 0],
[0, Q0[1], 0],
[0, 0, Q0[2]]])
Q0_abc_basis=np.dot(Q0_abc,basis)
Q=Q0_abc_basis[0]+Q0_abc_basis[1]+Q0_abc_basis[2] # the position of the normal of shear plane after rotation
#----------------------------------------
#basis=np.array([oa, ob, oc])
L0_abc=np.array([[L0[0], 0, 0],
[0, L0[1], 0],
[0, 0, L0[2]]])
L0_abc_basis=np.dot(L0_abc,basis)
L=L0_abc_basis[0]+L0_abc_basis[1]+L0_abc_basis[2] # the position of shear direction before rotation
K0_abc=np.array([[K0[0], 0, 0],
[0, K0[1], 0],
[0, 0, K0[2]]])
K0_abc_basis=np.dot(K0_abc,basis)
K=K0_abc_basis[0]+K0_abc_basis[1]+K0_abc_basis[2] # the position of shear direction after rotation
#------------------------------------------------------------------------------------
###start to calculate the structure data after rotation######
############## the first rotation begin##############
######1.get the angle between two vectors########
P_norm=(P[0]**2+P[1]**2+P[2]**2)**0.5 #the norm of the vector before rotation
Q_norm=(Q[0]**2+Q[1]**2+Q[2]**2)**0.5 #the norm of the vector after rotation
PQ_dot=np.dot(P,Q)
theta=math.acos(PQ_dot/(P_norm*Q_norm))##obtain theta; the unit is radians instead of degrees
theta_cos=PQ_dot/(P_norm*Q_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
M=np.array([P[1]*Q[2]-P[2]*Q[1], P[2]*Q[0]-P[0]*Q[2], P[0]*Q[1]-P[1]*Q[0]])
M_norm=(M[0]**2+M[1]**2+M[2]**2)**0.5 #the norm of the M vector
M_unit=np.array([M[0]/M_norm, M[1]/M_norm, M[2]/M_norm])
######3.get the rotation matrix###########
Nx=M_unit[0]
Ny=M_unit[1]
Nz=M_unit[2]
PQ_rot=np.array([[Nx*Nx*(1-theta_cos)+theta_cos, Nx*Ny*(1-theta_cos)+Nz*theta_sin, Nx*Nz*(1-theta_cos)-Ny*theta_sin],
[Nx*Ny*(1-theta_cos)-Nz*theta_sin, Ny*Ny*(1-theta_cos)+theta_cos, Ny*Nz*(1-theta_cos)+Nx*theta_sin],
[Nx*Nz*(1-theta_cos)+Ny*theta_sin, Ny*Nz*(1-theta_cos)-Nx*theta_sin, Nz*Nz*(1-theta_cos)+theta_cos]])
###### #####
a_PQ_rot=np.dot(pos_array,PQ_rot) #three basis vector after rotation
a_PQ_rot=a_PQ_rot.tolist()
L_rot=np.dot(L,PQ_rot)
############## the second rotation begin##############
######1.get the angle between two vectors########
L_norm=(L_rot[0]**2+L_rot[1]**2+L_rot[2]**2)**0.5 #the norm of the vector before rotation
K_norm=(K[0]**2+K[1]**2+K[2]**2)**0.5 #the norm of the vector after rotation
LK_dot=np.dot(L_rot,K)
theta=math.acos(LK_dot/(L_norm*K_norm))##obtain theta; the unit is radians instead of degrees
theta_cos=LK_dot/(L_norm*K_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
H=np.array([L_rot[1]*K[2]-L_rot[2]*K[1], L_rot[2]*K[0]-L_rot[0]*K[2], L_rot[0]*K[1]-L_rot[1]*K[0]])
H_norm=(H[0]**2+H[1]**2+H[2]**2)**0.5 #the norm of the H vector
H_unit=np.array([H[0]/H_norm, H[1]/H_norm, H[2]/H_norm])
######3.get the rotation matrix###########
Ex=H_unit[0]
Ey=H_unit[1]
Ez=H_unit[2]
LK_rot=np.array([[Ex*Ex*(1-theta_cos)+theta_cos, Ex*Ey*(1-theta_cos)+Ez*theta_sin, Ex*Ez*(1-theta_cos)-Ey*theta_sin],
[Ex*Ey*(1-theta_cos)-Ez*theta_sin, Ey*Ey*(1-theta_cos)+theta_cos, Ey*Ez*(1-theta_cos)+Ex*theta_sin],
[Ex*Ez*(1-theta_cos)+Ey*theta_sin, Ey*Ez*(1-theta_cos)-Ex*theta_sin, Ez*Ez*(1-theta_cos)+theta_cos]])
###### #####
a_LK_rot=np.dot(a_PQ_rot,LK_rot) #three basis vector after rotation
a_LK_rot=a_LK_rot.tolist()
os.system("cp POSCAR POSCAR_original") #copy original POSCAR
r=[]
with open ("POSCAR") as poscar1:
for line in poscar1:
r.append(line)
f=open("POSCAR_rota","w") #write POSCAR after rotation as POSCAR_rota
for i in range(0,2):
f.write(r[i])
for j in range(len(a_LK_rot)):
f.write(str(a_LK_rot[j][0])+' ')
f.write(str(a_LK_rot[j][1])+' ')
f.write(str(a_LK_rot[j][2]))
f.write('\n')
for x in range(5,len(r)):
f.write(r[x])
f.close()
os.system("cp POSCAR_rota POSCAR") ## copy POSCAR_rota as POSCAR
###### Thirdly, apply the strain to the rotated basis vectors
def | (poscar):
t2=[]
t3=[]
with open (poscar) as poscar2:
pos2=poscar2.readlines()
length=len(pos2)
for i in range(2,5):
j=pos2[i]
j=j.split()
t2.extend(j)
for i in range(len(t2)):
t3.extend([float(t2[i])])
pos_array3=np.array(t3).reshape((3,3))
if (Q0==np.array([1, 0, 0])).all():
stra_matr=np.array([[1,0,stra],
[0,1,0],
[stra,0,1]]) # strain(xz) and strain(zx)
a_stra=np.dot(pos_array3,stra_matr)
with open (poscar) as poscar:
s=[]
for line in poscar:
s.append(line)
f=open("POSCAR_stra","w")
for i in range(0,2):
f.write(s[i])
for j in range(len(a_stra)):
f.write(str(a_stra[j][0])+' ')
f.write(str(a_stra[j][1])+' ')
f.write(str(a_stra[j][2]))
f.write('\n')
for x in range(5,len(s)):
f.write(s[x])
f.close()
###### Fourthly, perform the VASP calculations
for i in np.arange(stra,stra*(times+1),stra):
print("****************************")
i=round(i,2)
print(i)
postrain("POSCAR")
os.system("cp POSCAR_stra POSCAR")
os.system("cp POSCAR_stra POSCAR_"+str(i))####
#os.system("bsub < Job_qsh.sh")
os.system("srun -n 20 /scratch/jin.zhang3_397857/Software/vasp.5.4.4/bin/vasp_std > vasp.out")
chek=os.popen("grep Voluntary OUTCAR").read()
while "Voluntary" not in chek:
chek=os.popen("grep Voluntary OUTCAR").read()
if "Voluntary" in chek:
break
os.system("cp OUTCAR OUTCAR_"+str(i))####
os.system("cp CONTCAR CONTCAR_"+str(i))####
os.system("grep 'in kB' OUTCAR > kB")
fin=os.popen("tail -1 kB").read()
fin=fin.split()
#os.system("cp CONTCAR POSCAR")
#os.system("bsub < Job_qsh.sh")
#os.system("srun -n 20 ~/Software/VASP/VASP5.4.4/vasp.5.4.4/bin/vasp_std > vasp.out")
if (Q0==np.array([1, 0, 0])).all():
stress=(float(fin[7])/10)*(-1)
strain=((1.0+float(stra))**(float(i)/float(stra))-1.0)*2 ##output strain
u=open("strain_shearStress.dat","a")
u.write(str(strain)+' ')
u.write(str(stress))
u.write('\n')
os.system("cp CONTCAR POSCAR")
#print(i)
u.close()
os.system("rm POSCAR_stra")
| postrain | identifier_name |
shear_calculation_withoutplot_vasp.py | #
###################################
#2018-04-11
#Jin Zhang@Stony Brook University
######################################
###For VASP
###----------------------------------Readme------------------------------------------------------------
###This script calculates the tensile strength and the shear strength using VASP.
###1. To calculate the tensile strength. ###
###To calculate the tensile strength with this script, first add
###" FCELL(1,1)=0.0 " (right after the REAL(q) FCELL(3,3) declaration) to constr_cell_relax.F
###in VASP. This modification means that only the y and z axes are optimized; the x axis is not
###optimized. Then recompile VASP. After that, this script can be used to calculate the tensile
###strength.
###For the tensile strength, this script first rotates the tensile axis of interest onto the
###[1, 0, 0] axis. The reason for rotating onto [1, 0, 0] is that we set "FCELL(1,1)=0.0" in
###constr_cell_relax.F in VASP. E.g., to calculate the tensile strength along the [1, 1, 1] direction
###of diamond, the [1, 1, 1] direction must be rotated onto the [1, 0, 0] axis.
###2. To calculate the shear strength. ###
###To calculate the shear strength with this script, first add
###" FCELL(1,3)=0.0 " and " FCELL(3,1)=0.0 " (again right after the REAL(q) FCELL(3,3)
###declaration) to constr_cell_relax.F in VASP. This modification means that all cell
###components are optimized except the xz and zx ones. Then recompile VASP.
###After that, this script can be used to calculate the shear strength.
###For the shear strength, this script first rotates the normal of the shear plane onto the
###[0, 0, 1] axis, and then rotates the shear direction onto the [1, 0, 0] axis. The reason is that
###we set "FCELL(1,3)=0.0" and "FCELL(3,1)=0.0" in constr_cell_relax.F in VASP. E.g., to
###calculate the shear strength on the (1, 1, 1) shear plane along the [1, 1, -2] shear direction
###for diamond, the [1, 1, 1] direction must be rotated onto the [0, 0, 1] axis and the
###[1, 1, -2] direction onto the [1, 0, 0] axis.
###--------------------------------------------------------------------------------------------------------
import numpy as np
import math
import os
#plotting-related imports removed: this "withoutplot" variant never draws figures
#fractions.gcd was removed in Python 3.9; a small Euclidean gcd (which also accepts
#the integer-valued floats produced by the cross products below) replaces it
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a
#------------------------------------------------------------------------------------
###start to read the input file
stra=0.02 ######provide the strain
times=9
Sh_pla=[int(x) for x in input("What is the shear plane? e.g. 1 1 1 \n").split()]
if len(Sh_pla)==3:
Sh_pla=Sh_pla
elif len(Sh_pla)==4:
h=Sh_pla[0]
k=Sh_pla[1]
l=Sh_pla[3]
#hkl_gcd=gcd(gcd(h,k),l)
Sh_pla=np.array([h, k, l])
Sh_pla_point1=np.array([1/Sh_pla[0], 0, 0])
Sh_pla_point2=np.array([0, 1/Sh_pla[1], 0])
Sh_pla_point3=np.array([0, 0, 1/Sh_pla[2]])
Sh_pla_vec1=np.array([Sh_pla_point1[0]-Sh_pla_point2[0], Sh_pla_point1[1]-Sh_pla_point2[1], Sh_pla_point1[2]-Sh_pla_point2[2]])
Sh_pla_vec2=np.array([Sh_pla_point3[0]-Sh_pla_point2[0], Sh_pla_point3[1]-Sh_pla_point2[1], Sh_pla_point3[2]-Sh_pla_point2[2]])
Sh_pla_normal_x=Sh_pla_vec1[1]*Sh_pla_vec2[2]-Sh_pla_vec2[1]*Sh_pla_vec1[2]
Sh_pla_normal_y=-(Sh_pla_vec1[0]*Sh_pla_vec2[2]-Sh_pla_vec2[0]*Sh_pla_vec1[2])
Sh_pla_normal_z=Sh_pla_vec1[0]*Sh_pla_vec2[1]-Sh_pla_vec2[0]*Sh_pla_vec1[1]
#Sh_pla_normal=Sh_pla # for cubic lattices the normal of the shear plane equals the plane indices
Sh_pla_normal_gcd=gcd(gcd(Sh_pla_normal_x,Sh_pla_normal_y),Sh_pla_normal_z)
P0=np.array([Sh_pla_normal_x/Sh_pla_normal_gcd, Sh_pla_normal_y/Sh_pla_normal_gcd, Sh_pla_normal_z/Sh_pla_normal_gcd])#P0 is the normal of the shear plane, divided by the greatest common divisor
Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
Sh_dir=[int(x) for x in input("What is the shear direction? e.g. 1 1 -2 \n").split()]
if len(Sh_dir)==3:
Sh_dir=Sh_dir
elif len(Sh_dir)==4:
U=Sh_dir[1]+2*Sh_dir[0]
V=Sh_dir[0]+2*Sh_dir[1]
W=Sh_dir[3]
UVW_gcd=gcd(gcd(U,V),W)
Sh_dir=np.array([U/UVW_gcd, V/UVW_gcd, W/UVW_gcd])
L0=Sh_dir
K0=np.array([0, 0, 1]) #after rotation: K0 is the z axis, the target of the second rotation
#P0=np.array([1, 1, 1]) #provide the axis before rotation.. P0 is the tensile direction we want to calculate.
###finish to read the input file
#Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
#------------------------------------------------------------------------------------
###open POSCAR
t=[]
t1=[]
with open ("POSCAR") as poscar0:
pos0=poscar0.readlines()
length=len(pos0)
for i in range(2,5):
j=pos0[i]
j=j.split()
t.extend(j)
print(t)
for i in range(len(t)):
t1.extend([float(t[i])])
pos_array=np.array(t1).reshape((3,3)) ####read basis vector from POSCAR
print(pos_array)
oa=np.array([pos_array[0][0], pos_array[0][1], pos_array[0][2]])
ob=np.array([pos_array[1][0], pos_array[1][1], pos_array[1][2]])
oc=np.array([pos_array[2][0], pos_array[2][1], pos_array[2][2]])
#------------------------------------------------------------------------------------
###start to calculate the structure data before rotation######
##a1_len=(float(pos_array[0][0])**2+float(pos_array[0][1])**2+float(pos_array[0][2])**2)**0.5
##a2_len=(float(pos_array[1][0])**2+float(pos_array[1][1])**2+float(pos_array[1][2])**2)**0.5
##a3_len=(float(pos_array[2][0])**2+float(pos_array[2][1])**2+float(pos_array[2][2])**2)**0.5
##
##P=np.array([P0[0]*a1_len,P0[1]*a2_len,P0[2]*a3_len])
##Q=np.array([Q0[0]*a1_len,Q0[1]*a2_len,Q0[2]*a3_len])
##
##XYZ_a=np.array([[a1_len, 0, 0],
## [0, a2_len, 0],
## [0, 0, a3_len]])
#----------------------------------------
basis=np.array([oa, ob, oc])
P0_abc=np.array([[P0[0], 0, 0],
[0, P0[1], 0],
[0, 0, P0[2]]])
P0_abc_basis=np.dot(P0_abc,basis)
P=P0_abc_basis[0]+P0_abc_basis[1]+P0_abc_basis[2] # the position of the normal of shear plane before rotation
Q0_abc=np.array([[Q0[0], 0, 0],
[0, Q0[1], 0],
[0, 0, Q0[2]]])
Q0_abc_basis=np.dot(Q0_abc,basis)
Q=Q0_abc_basis[0]+Q0_abc_basis[1]+Q0_abc_basis[2] # the position of the normal of shear plane after rotation
#----------------------------------------
#basis=np.array([oa, ob, oc])
L0_abc=np.array([[L0[0], 0, 0],
[0, L0[1], 0],
[0, 0, L0[2]]])
L0_abc_basis=np.dot(L0_abc,basis)
L=L0_abc_basis[0]+L0_abc_basis[1]+L0_abc_basis[2] # the position of shear direction before rotation
K0_abc=np.array([[K0[0], 0, 0],
[0, K0[1], 0],
[0, 0, K0[2]]])
K0_abc_basis=np.dot(K0_abc,basis)
K=K0_abc_basis[0]+K0_abc_basis[1]+K0_abc_basis[2] # the position of shear direction after rotation
#------------------------------------------------------------------------------------
###start to calculate the structure data after rotation######
############## the first rotation begin##############
######1.get the angle between two vectors########
P_norm=(P[0]**2+P[1]**2+P[2]**2)**0.5 #the norm of the vector before rotation
Q_norm=(Q[0]**2+Q[1]**2+Q[2]**2)**0.5 #the norm of the vector after rotation
PQ_dot=np.dot(P,Q)
theta=math.acos(PQ_dot/(P_norm*Q_norm))##obtain theta; the unit is radians instead of degrees
theta_cos=PQ_dot/(P_norm*Q_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
M=np.array([P[1]*Q[2]-P[2]*Q[1], P[2]*Q[0]-P[0]*Q[2], P[0]*Q[1]-P[1]*Q[0]])
M_norm=(M[0]**2+M[1]**2+M[2]**2)**0.5 #the norm of the M vector
M_unit=np.array([M[0]/M_norm, M[1]/M_norm, M[2]/M_norm])
######3.get the rotation matrix###########
Nx=M_unit[0]
Ny=M_unit[1]
Nz=M_unit[2]
PQ_rot=np.array([[Nx*Nx*(1-theta_cos)+theta_cos, Nx*Ny*(1-theta_cos)+Nz*theta_sin, Nx*Nz*(1-theta_cos)-Ny*theta_sin],
[Nx*Ny*(1-theta_cos)-Nz*theta_sin, Ny*Ny*(1-theta_cos)+theta_cos, Ny*Nz*(1-theta_cos)+Nx*theta_sin],
[Nx*Nz*(1-theta_cos)+Ny*theta_sin, Ny*Nz*(1-theta_cos)-Nx*theta_sin, Nz*Nz*(1-theta_cos)+theta_cos]])
###### #####
a_PQ_rot=np.dot(pos_array,PQ_rot) #three basis vector after rotation
a_PQ_rot=a_PQ_rot.tolist()
L_rot=np.dot(L,PQ_rot)
############## the second rotation begin##############
######1.get the angle between two vectors########
L_norm=(L_rot[0]**2+L_rot[1]**2+L_rot[2]**2)**0.5 #the norm of the vector before rotation
K_norm=(K[0]**2+K[1]**2+K[2]**2)**0.5 #the norm of the vector after rotation
LK_dot=np.dot(L_rot,K)
theta=math.acos(LK_dot/(L_norm*K_norm))##obtain theta; the unit is radians instead of degrees
theta_cos=LK_dot/(L_norm*K_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
H=np.array([L_rot[1]*K[2]-L_rot[2]*K[1], L_rot[2]*K[0]-L_rot[0]*K[2], L_rot[0]*K[1]-L_rot[1]*K[0]])
H_norm=(H[0]**2+H[1]**2+H[2]**2)**0.5 #the norm of the H vector
H_unit=np.array([H[0]/H_norm, H[1]/H_norm, H[2]/H_norm])
######3.get the rotation matrix###########
Ex=H_unit[0]
Ey=H_unit[1]
Ez=H_unit[2]
LK_rot=np.array([[Ex*Ex*(1-theta_cos)+theta_cos, Ex*Ey*(1-theta_cos)+Ez*theta_sin, Ex*Ez*(1-theta_cos)-Ey*theta_sin],
[Ex*Ey*(1-theta_cos)-Ez*theta_sin, Ey*Ey*(1-theta_cos)+theta_cos, Ey*Ez*(1-theta_cos)+Ex*theta_sin],
[Ex*Ez*(1-theta_cos)+Ey*theta_sin, Ey*Ez*(1-theta_cos)-Ex*theta_sin, Ez*Ez*(1-theta_cos)+theta_cos]])
###### #####
a_LK_rot=np.dot(a_PQ_rot,LK_rot) #three basis vector after rotation
a_LK_rot=a_LK_rot.tolist()
os.system("cp POSCAR POSCAR_original") #copy original POSCAR
r=[]
with open ("POSCAR") as poscar1:
for line in poscar1:
r.append(line)
f=open("POSCAR_rota","w") #write POSCAR after rotation as POSCAR_rota
for i in range(0,2):
f.write(r[i])
for j in range(len(a_LK_rot)):
f.write(str(a_LK_rot[j][0])+' ')
f.write(str(a_LK_rot[j][1])+' ')
f.write(str(a_LK_rot[j][2]))
f.write('\n')
for x in range(5,len(r)):
f.write(r[x])
f.close()
os.system("cp POSCAR_rota POSCAR") ## copy POSCAR_rota as POSCAR
###### Thirdly, apply the strain to the rotated basis vectors
def postrain(poscar):
t2=[]
t3=[]
with open (poscar) as poscar2:
pos2=poscar2.readlines()
length=len(pos2)
for i in range(2,5):
j=pos2[i]
j=j.split()
t2.extend(j)
for i in range(len(t2)):
t3.extend([float(t2[i])])
pos_array3=np.array(t3).reshape((3,3))
if (Q0==np.array([1, 0, 0])).all():
stra_matr=np.array([[1,0,stra],
[0,1,0],
[stra,0,1]]) # strain(xz) and strain(zx)
a_stra=np.dot(pos_array3,stra_matr)
with open (poscar) as poscar:
s=[]
for line in poscar:
s.append(line)
f=open("POSCAR_stra","w")
for i in range(0,2):
f.write(s[i])
for j in range(len(a_stra)):
f.write(str(a_stra[j][0])+' ')
f.write(str(a_stra[j][1])+' ')
f.write(str(a_stra[j][2]))
f.write('\n')
for x in range(5,len(s)):
f.write(s[x])
f.close()
###### Fourthly, perform the VASP calculations
for i in np.arange(stra,stra*(times+1),stra):
|
u.close()
os.system("rm POSCAR_stra")
| print("****************************")
i=round(i,2)
print(i)
postrain("POSCAR")
os.system("cp POSCAR_stra POSCAR")
os.system("cp POSCAR_stra POSCAR_"+str(i))####
#os.system("bsub < Job_qsh.sh")
os.system("srun -n 20 /scratch/jin.zhang3_397857/Software/vasp.5.4.4/bin/vasp_std > vasp.out")
chek=os.popen("grep Voluntary OUTCAR").read()
while "Voluntary" not in chek:
chek=os.popen("grep Voluntary OUTCAR").read()
if "Voluntary" in chek:
break
os.system("cp OUTCAR OUTCAR_"+str(i))####
os.system("cp CONTCAR CONTCAR_"+str(i))####
os.system("grep 'in kB' OUTCAR > kB")
fin=os.popen("tail -1 kB").read()
fin=fin.split()
#os.system("cp CONTCAR POSCAR")
#os.system("bsub < Job_qsh.sh")
#os.system("srun -n 20 ~/Software/VASP/VASP5.4.4/vasp.5.4.4/bin/vasp_std > vasp.out")
if (Q0==np.array([1, 0, 0])).all():
stress=(float(fin[7])/10)*(-1)
strain=((1.0+float(stra))**(float(i)/float(stra))-1.0)*2 ##output strain
u=open("strain_shearStress.dat","a")
u.write(str(strain)+' ')
u.write(str(stress))
u.write('\n')
os.system("cp CONTCAR POSCAR")
#print(i) | conditional_block |
shear_calculation_withoutplot_vasp.py | #
###################################
#2018-04-11
#Jin Zhang@Stony Brook University
######################################
###For VASP
###----------------------------------Readme------------------------------------------------------------
###This script calculates the tensile strength and the shear strength using VASP.
###1. To calculate the tensile strength. ###
###To calculate the tensile strength with this script, first add
###" FCELL(1,1)=0.0 " (right after the REAL(q) FCELL(3,3) declaration) to constr_cell_relax.F
###in VASP. This modification means that only the y and z axes are optimized; the x axis is not
###optimized. Then recompile VASP. After that, this script can be used to calculate the tensile
###strength.
###For the tensile strength, this script first rotates the tensile axis of interest onto the
###[1, 0, 0] axis. The reason for rotating onto [1, 0, 0] is that we set "FCELL(1,1)=0.0" in
###constr_cell_relax.F in VASP. E.g., to calculate the tensile strength along the [1, 1, 1] direction
###of diamond, the [1, 1, 1] direction must be rotated onto the [1, 0, 0] axis.
###2. To calculate the shear strength. ###
###To calculate the shear strength with this script, first add
###" FCELL(1,3)=0.0 " and " FCELL(3,1)=0.0 " (again right after the REAL(q) FCELL(3,3)
###declaration) to constr_cell_relax.F in VASP. This modification means that all cell
###components are optimized except the xz and zx ones. Then recompile VASP.
###After that, this script can be used to calculate the shear strength.
###For the shear strength, this script first rotates the normal of the shear plane onto the
###[0, 0, 1] axis, and then rotates the shear direction onto the [1, 0, 0] axis. The reason is that
###we set "FCELL(1,3)=0.0" and "FCELL(3,1)=0.0" in constr_cell_relax.F in VASP. E.g., to
###calculate the shear strength on the (1, 1, 1) shear plane along the [1, 1, -2] shear direction
###for diamond, the [1, 1, 1] direction must be rotated onto the [0, 0, 1] axis and the
###[1, 1, -2] direction onto the [1, 0, 0] axis.
###--------------------------------------------------------------------------------------------------------
import numpy as np
import math
import os
#plotting-related imports removed: this "withoutplot" variant never draws figures
#fractions.gcd was removed in Python 3.9; a small Euclidean gcd (which also accepts
#the integer-valued floats produced by the cross products below) replaces it
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a
#------------------------------------------------------------------------------------
###start to read the input file
stra=0.02 ######provide the strain
times=9
Sh_pla=[int(x) for x in input("What is the shear plane? e.g. 1 1 1 \n").split()]
if len(Sh_pla)==3:
Sh_pla=Sh_pla
elif len(Sh_pla)==4:
h=Sh_pla[0]
k=Sh_pla[1]
l=Sh_pla[3]
#hkl_gcd=gcd(gcd(h,k),l)
Sh_pla=np.array([h, k, l])
Sh_pla_point1=np.array([1/Sh_pla[0], 0, 0])
Sh_pla_point2=np.array([0, 1/Sh_pla[1], 0])
Sh_pla_point3=np.array([0, 0, 1/Sh_pla[2]])
Sh_pla_vec1=np.array([Sh_pla_point1[0]-Sh_pla_point2[0], Sh_pla_point1[1]-Sh_pla_point2[1], Sh_pla_point1[2]-Sh_pla_point2[2]])
Sh_pla_vec2=np.array([Sh_pla_point3[0]-Sh_pla_point2[0], Sh_pla_point3[1]-Sh_pla_point2[1], Sh_pla_point3[2]-Sh_pla_point2[2]])
Sh_pla_normal_x=Sh_pla_vec1[1]*Sh_pla_vec2[2]-Sh_pla_vec2[1]*Sh_pla_vec1[2]
Sh_pla_normal_y=-(Sh_pla_vec1[0]*Sh_pla_vec2[2]-Sh_pla_vec2[0]*Sh_pla_vec1[2])
Sh_pla_normal_z=Sh_pla_vec1[0]*Sh_pla_vec2[1]-Sh_pla_vec2[0]*Sh_pla_vec1[1]
#Sh_pla_normal=Sh_pla # for cubic lattices the normal of the shear plane equals the plane indices
Sh_pla_normal_gcd=gcd(gcd(Sh_pla_normal_x,Sh_pla_normal_y),Sh_pla_normal_z)
P0=np.array([Sh_pla_normal_x/Sh_pla_normal_gcd, Sh_pla_normal_y/Sh_pla_normal_gcd, Sh_pla_normal_z/Sh_pla_normal_gcd])#P0 is the normal of the shear plane, divided by the greatest common divisor
Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
Sh_dir=[int(x) for x in input("What is the shear direction? e.g. 1 1 -2 \n").split()]
if len(Sh_dir)==3:
Sh_dir=Sh_dir
elif len(Sh_dir)==4:
U=Sh_dir[1]+2*Sh_dir[0]
V=Sh_dir[0]+2*Sh_dir[1]
W=Sh_dir[3]
UVW_gcd=gcd(gcd(U,V),W)
Sh_dir=np.array([U/UVW_gcd, V/UVW_gcd, W/UVW_gcd])
L0=Sh_dir
K0=np.array([0, 0, 1]) #after rotation: K0 is the z axis, the target of the second rotation
#P0=np.array([1, 1, 1]) #provide the axis before rotation.. P0 is the tensile direction we want to calculate.
###finish to read the input file
#Q0=np.array([1, 0, 0]) #after rotation. Q0 is the x axis. Since the x axis will not be optimized as we set in VASP, so Q0 is [1, 0, 0]
#------------------------------------------------------------------------------------
###open POSCAR
t=[]
t1=[]
with open ("POSCAR") as poscar0:
pos0=poscar0.readlines()
length=len(pos0)
for i in range(2,5):
j=pos0[i]
j=j.split()
t.extend(j)
print(t)
for i in range(len(t)):
t1.extend([float(t[i])])
pos_array=np.array(t1).reshape((3,3)) ####read basis vector from POSCAR
print(pos_array)
oa=np.array([pos_array[0][0], pos_array[0][1], pos_array[0][2]])
ob=np.array([pos_array[1][0], pos_array[1][1], pos_array[1][2]])
oc=np.array([pos_array[2][0], pos_array[2][1], pos_array[2][2]])
#------------------------------------------------------------------------------------
###start to calculate the structure data before rotation######
##a1_len=(float(pos_array[0][0])**2+float(pos_array[0][1])**2+float(pos_array[0][2])**2)**0.5
##a2_len=(float(pos_array[1][0])**2+float(pos_array[1][1])**2+float(pos_array[1][2])**2)**0.5
##a3_len=(float(pos_array[2][0])**2+float(pos_array[2][1])**2+float(pos_array[2][2])**2)**0.5
##
##P=np.array([P0[0]*a1_len,P0[1]*a2_len,P0[2]*a3_len])
##Q=np.array([Q0[0]*a1_len,Q0[1]*a2_len,Q0[2]*a3_len])
##
##XYZ_a=np.array([[a1_len, 0, 0],
## [0, a2_len, 0],
## [0, 0, a3_len]])
#----------------------------------------
basis=np.array([oa, ob, oc])
P0_abc=np.array([[P0[0], 0, 0],
[0, P0[1], 0],
[0, 0, P0[2]]])
P0_abc_basis=np.dot(P0_abc,basis)
P=P0_abc_basis[0]+P0_abc_basis[1]+P0_abc_basis[2] # the position of the normal of shear plane before rotation
Q0_abc=np.array([[Q0[0], 0, 0],
[0, Q0[1], 0],
[0, 0, Q0[2]]])
Q0_abc_basis=np.dot(Q0_abc,basis)
Q=Q0_abc_basis[0]+Q0_abc_basis[1]+Q0_abc_basis[2] # the position of the normal of shear plane after rotation
#----------------------------------------
#basis=np.array([oa, ob, oc])
L0_abc=np.array([[L0[0], 0, 0],
[0, L0[1], 0],
[0, 0, L0[2]]])
L0_abc_basis=np.dot(L0_abc,basis)
L=L0_abc_basis[0]+L0_abc_basis[1]+L0_abc_basis[2] # the position of shear direction before rotation
K0_abc=np.array([[K0[0], 0, 0],
[0, K0[1], 0],
[0, 0, K0[2]]])
K0_abc_basis=np.dot(K0_abc,basis)
K=K0_abc_basis[0]+K0_abc_basis[1]+K0_abc_basis[2] # the position of shear direction after rotation
#------------------------------------------------------------------------------------
###start to calculate the structure data after rotation######
############## the first rotation begin##############
######1. get the angle between the two vectors########
P_norm=(P[0]**2+P[1]**2+P[2]**2)**0.5 # norm of the vector before rotation
Q_norm=(Q[0]**2+Q[1]**2+Q[2]**2)**0.5 # norm of the vector after rotation
PQ_dot=np.dot(P,Q)
theta=math.acos(PQ_dot/(P_norm*Q_norm)) ## theta in radians, not degrees
theta_cos=PQ_dot/(P_norm*Q_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
M=np.array([P[1]*Q[2]-P[2]*Q[1], P[2]*Q[0]-P[0]*Q[2], P[0]*Q[1]-P[1]*Q[0]])
M_norm=(M[0]**2+M[1]**2+M[2]**2)**0.5 # norm of the M vector
M_unit=np.array([M[0]/M_norm, M[1]/M_norm, M[2]/M_norm])
######3.get the rotation matrix###########
Nx=M_unit[0]
Ny=M_unit[1]
Nz=M_unit[2]
PQ_rot=np.array([[Nx*Nx*(1-theta_cos)+theta_cos, Nx*Ny*(1-theta_cos)+Nz*theta_sin, Nx*Nz*(1-theta_cos)-Ny*theta_sin],
[Nx*Ny*(1-theta_cos)-Nz*theta_sin, Ny*Ny*(1-theta_cos)+theta_cos, Ny*Nz*(1-theta_cos)+Nx*theta_sin],
[Nx*Nz*(1-theta_cos)+Ny*theta_sin, Ny*Nz*(1-theta_cos)-Nx*theta_sin, Nz*Nz*(1-theta_cos)+theta_cos]])
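# optional sanity check (added as a sketch): with the row-vector convention
# used in this script (v_rot = np.dot(v, R)), the plane normal P rotated by
# PQ_rot must be parallel to its target Q, so their cross product vanishes
# (this assumes P and Q were not parallel to begin with)
assert np.allclose(np.cross(np.dot(P, PQ_rot), Q), 0, atol=1e-6), "PQ_rot does not align the plane normal with its target"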
###### #####
a_PQ_rot=np.dot(pos_array,PQ_rot) #three basis vector after rotation
a_PQ_rot=a_PQ_rot.tolist()
L_rot=np.dot(L,PQ_rot)
############## the second rotation begin##############
######1. get the angle between the two vectors########
L_norm=(L_rot[0]**2+L_rot[1]**2+L_rot[2]**2)**0.5 # norm of the vector before rotation
K_norm=(K[0]**2+K[1]**2+K[2]**2)**0.5 # norm of the vector after rotation
LK_dot=np.dot(L_rot,K)
theta=math.acos(LK_dot/(L_norm*K_norm)) ## theta in radians, not degrees
theta_cos=LK_dot/(L_norm*K_norm)
theta_sin=math.sin(theta)
######2.get the rotation axis##############
H=np.array([L_rot[1]*K[2]-L_rot[2]*K[1], L_rot[2]*K[0]-L_rot[0]*K[2], L_rot[0]*K[1]-L_rot[1]*K[0]])
H_norm=(H[0]**2+H[1]**2+H[2]**2)**0.5 # norm of the H vector
H_unit=np.array([H[0]/H_norm, H[1]/H_norm, H[2]/H_norm])
######3.get the rotation matrix###########
Ex=H_unit[0]
Ey=H_unit[1]
Ez=H_unit[2]
LK_rot=np.array([[Ex*Ex*(1-theta_cos)+theta_cos, Ex*Ey*(1-theta_cos)+Ez*theta_sin, Ex*Ez*(1-theta_cos)-Ey*theta_sin],
[Ex*Ey*(1-theta_cos)-Ez*theta_sin, Ey*Ey*(1-theta_cos)+theta_cos, Ey*Ez*(1-theta_cos)+Ex*theta_sin],
[Ex*Ez*(1-theta_cos)+Ey*theta_sin, Ey*Ez*(1-theta_cos)-Ex*theta_sin, Ez*Ez*(1-theta_cos)+theta_cos]])
###### #####
a_LK_rot=np.dot(a_PQ_rot,LK_rot) #three basis vector after rotation
a_LK_rot=a_LK_rot.tolist()
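# optional sanity check for the second rotation (same convention as above):
# the rotated shear direction must now be parallel to its target K
assert np.allclose(np.cross(np.dot(L_rot, LK_rot), K), 0, atol=1e-6), "LK_rot does not align the shear direction with its target"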
| for line in poscar1:
r.append(line)
f=open("POSCAR_rota","w") #write POSCAR after rotation as POSCAR_rota
for i in range(0,2):
f.write(r[i])
for j in range(len(a_LK_rot)):
f.write(str(a_LK_rot[j][0])+' ')
f.write(str(a_LK_rot[j][1])+' ')
f.write(str(a_LK_rot[j][2]))
f.write('\n')
for x in range(5,len(r)):
f.write(r[x])
f.close()
os.system("cp POSCAR_rota POSCAR") ## copy POSCAR_rota as POSCAR
###### Thirdly, apply the shear strain to the rotated basis vectors
def postrain(poscar):
t2=[]
t3=[]
with open (poscar) as poscar2:
pos2=poscar2.readlines()
length=len(pos2)
for i in range(2,5):
j=pos2[i]
j=j.split()
t2.extend(j)
for i in range(len(t2)):
t3.extend([float(t2[i])])
pos_array3=np.array(t3).reshape((3,3))
if (Q0==np.array([1, 0, 0])).all():
stra_matr=np.array([[1,0,stra],
[0,1,0],
[stra,0,1]]) # strain(xz) and strain(zx)
a_stra=np.dot(pos_array3,stra_matr)
with open (poscar) as poscar:
s=[]
for line in poscar:
s.append(line)
f=open("POSCAR_stra","w")
for i in range(0,2):
f.write(s[i])
for j in range(len(a_stra)):
f.write(str(a_stra[j][0])+' ')
f.write(str(a_stra[j][1])+' ')
f.write(str(a_stra[j][2]))
f.write('\n')
for x in range(5,len(s)):
f.write(s[x])
f.close()
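# worked example (hypothetical numbers): with stra=0.02, postrain applied to a
# cubic cell [[a,0,0],[0,a,0],[0,0,a]] writes [[a,0,0.02a],[0,a,0],[0.02a,0,a]]
# to POSCAR_stra, i.e. a symmetric xz/zx shear of the basis vectors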
###### Fourthly, perform the VASP calculations
for i in np.arange(stra,stra*(times+1),stra):
print("****************************")
i=round(i,2)
print(i)
postrain("POSCAR")
os.system("cp POSCAR_stra POSCAR")
os.system("cp POSCAR_stra POSCAR_"+str(i))####
#os.system("bsub < Job_qsh.sh")
os.system("srun -n 20 /scratch/jin.zhang3_397857/Software/vasp.5.4.4/bin/vasp_std > vasp.out")
chek=os.popen("grep Voluntary OUTCAR").read()
while "Voluntary" not in chek:
chek=os.popen("grep Voluntary OUTCAR").read()
if "Voluntary" in chek:
break
os.system("cp OUTCAR OUTCAR_"+str(i))####
os.system("cp CONTCAR CONTCAR_"+str(i))####
os.system("grep 'in kB' OUTCAR > kB")
fin=os.popen("tail -1 kB").read()
fin=fin.split()
#os.system("cp CONTCAR POSCAR")
#os.system("bsub < Job_qsh.sh")
#os.system("srun -n 20 ~/Software/VASP/VASP5.4.4/vasp.5.4.4/bin/vasp_std > vasp.out")
if (Q0==np.array([1, 0, 0])).all():
stress=(float(fin[7])/10)*(-1)
strain=((1.0+float(stra))**(float(i)/float(stra))-1.0)*2 ## accumulated shear strain: each step shears the relaxed cell by stra, so steps compound as (1+stra)**n; the factor 2 gives the engineering shear strain
u=open("strain_shearStress.dat","a")
u.write(str(strain)+' ')
u.write(str(stress))
u.write('\n')
os.system("cp CONTCAR POSCAR")
#print(i)
u.close()
os.system("rm POSCAR_stra") | os.system("cp POSCAR POSCAR_original") #copy original POSCAR
r=[]
with open ("POSCAR") as poscar1: | random_line_split |
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
/// This will alter `FreeBalance` and `ReservedBalance` in storage. It will
/// also decrease the total issuance of the system (`TotalIssuance`).
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
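// Worked example (illustrative numbers; assumes the usual linear schedule
// inside `VestingSchedule::locked_at`): with `locked = 800`, `per_block = 8`
// and `starting_block = 10`, a query at block 60 reports
// `800 - 8 * (60 - 10) = 400` still vesting, capped by the free balance.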
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
<ReservedBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
/// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
// Commented out for now - but consider it instructive.
// assert!(!Self::total_balance(who).is_zero());
<FreeBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
}
// wrapping these imbalances in a private module is necessary to ensure absolute privacy
// of the inner member.
mod imbalances {
use rstd::mem;
use crate::{result, Imbalance, Saturating, StorageValue, Trait, Zero};
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
pub struct PositiveImbalance<T: Trait>(T::Balance);
impl<T: Trait> PositiveImbalance<T> {
/// Create a new positive imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
PositiveImbalance(amount)
}
}
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
#[must_use]
pub struct | <T: Trait>(T::Balance);
impl<T: Trait> NegativeImbalance<T> {
/// Create a new negative imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
NegativeImbalance(amount)
}
}
impl<T: Trait> Imbalance<T::Balance> for PositiveImbalance<T> {
type Opposite = NegativeImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(NegativeImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Imbalance<T::Balance> for NegativeImbalance<T> {
type Opposite = PositiveImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(PositiveImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Drop for PositiveImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_add(self.0));
}
}
impl<T: Trait> Drop for NegativeImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_sub(self.0));
}
}
}
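// Illustrative note: because the two `Drop` impls above settle
// `TotalIssuance`, simply letting an imbalance fall out of scope squares the
// books. That is also why `split`, `merge`, `subsume` and `offset` call
// `mem::forget` on the values they consume: skipping `drop` there prevents
// the same amount from being counted twice once it is re-wrapped.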
impl<T: Trait> Currency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Balance = T::Balance;
type PositiveImbalance = PositiveImbalance<T>;
type NegativeImbalance = NegativeImbalance<T>;
fn total_balance(who: &T::AccountId) -> Self::Balance {
Self::free_balance(who) + Self::reserved_balance(who)
}
fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool {
Self::free_balance(who) >= value
}
fn total_issuance() -> Self::Balance {
<TotalIssuance<T>>::get()
}
fn minimum_balance() -> Self::Balance {
Zero::zero()
}
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_sub(&amount).unwrap_or_else(|| {
amount = *issued;
Zero::zero()
});
});
PositiveImbalance::new(amount)
}
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_add(&amount).unwrap_or_else(|| {
amount = Self::Balance::max_value() - *issued;
Self::Balance::max_value()
})
});
NegativeImbalance::new(amount)
}
fn free_balance(who: &T::AccountId) -> Self::Balance {
<FreeBalance<T>>::get(who)
}
// # <weight>
// Despite iterating over a list of locks, they are limited by the number of
// lock IDs, which means the number of runtime modules that intend to use and create locks.
// # </weight>
fn ensure_can_withdraw(
who: &T::AccountId,
_amount: T::Balance,
reasons: WithdrawReasons,
new_balance: T::Balance,
) -> Result {
if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer)
&& Self::vesting_balance(who) > new_balance
{
return Err("vesting balance too high to send value");
}
let locks = Self::locks(who);
if locks.is_empty() {
return Ok(());
}
let now = <timestamp::Module<T>>::now();
if locks
.into_iter()
.all(|l| l.withdraw_lock.can_withdraw(now, new_balance) || !l.reasons.intersects(reasons))
{
Ok(())
} else {
Err("account liquidity restrictions prevent withdrawal")
}
}
fn transfer(
transactor: &T::AccountId,
dest: &T::AccountId,
value: Self::Balance,
_existence_requirement: ExistenceRequirement,
) -> Result {
let fee = <T as ring::Trait>::TransferFee::get();
let new_from_ring = <ring::FreeBalance<T>>::get(transactor)
.checked_sub(&fee)
.ok_or("Transfer Fee - NOT ENOUGH RING")?;
<ring::Module<T>>::ensure_can_withdraw(transactor, fee, WithdrawReason::Fee.into(), new_from_ring)?;
let new_from_kton = Self::free_balance(transactor)
.checked_sub(&value)
.ok_or("balance too low to send value")?;
Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer.into(), new_from_kton)?;
let new_to_kton = Self::free_balance(dest)
.checked_add(&value)
.ok_or("destination balance too high to receive value")?;
if transactor != dest {
if new_from_ring < <ring::Module<T> as Currency<<T as system::Trait>::AccountId>>::minimum_balance() {
return Err("transfer would kill account");
}
<ring::Module<T>>::set_free_balance(transactor, new_from_ring);
Self::set_free_balance(transactor, new_from_kton);
Self::set_free_balance(dest, new_to_kton);
<T as ring::Trait>::TransferPayment::on_unbalanced(NegativeImbalanceRing::new(fee));
Self::deposit_event(RawEvent::Transfer(
transactor.to_owned(),
dest.to_owned(),
value.saturated_into(),
fee.saturated_into(),
));
}
Ok(())
}
fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
let free_balance = Self::free_balance(who);
let free_slash = cmp::min(free_balance, value);
Self::set_free_balance(who, free_balance - free_slash);
let remaining_slash = value - free_slash;
// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having
// to draw from reserved funds, however we err on the side of punishment if things are inconsistent
// or `can_slash` wasn't used appropriately.
if !remaining_slash.is_zero() {
let reserved_balance = Self::reserved_balance(who);
let reserved_slash = cmp::min(reserved_balance, remaining_slash);
Self::set_reserved_balance(who, reserved_balance - reserved_slash);
(
NegativeImbalance::new(free_slash + reserved_slash),
remaining_slash - reserved_slash,
)
} else {
(NegativeImbalance::new(value), Zero::zero())
}
}
fn deposit_into_existing(
who: &T::AccountId,
value: Self::Balance,
) -> result::Result<Self::PositiveImbalance, &'static str> {
if Self::total_balance(who).is_zero() {
return Err("beneficiary account must pre-exist");
}
Self::set_free_balance(who, Self::free_balance(who) + value);
Ok(PositiveImbalance::new(value))
}
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
if let SignedImbalance::Positive(p) = imbalance {
p
} else {
// Impossible, but be defensive.
Self::PositiveImbalance::zero()
}
}
fn withdraw(
who: &T::AccountId,
value: Self::Balance,
reasons: WithdrawReasons,
_liveness: ExistenceRequirement,
) -> result::Result<Self::NegativeImbalance, &'static str> {
let old_balance = Self::free_balance(who);
if let Some(new_balance) = old_balance.checked_sub(&value) {
Self::ensure_can_withdraw(who, value, reasons, new_balance)?;
Self::set_free_balance(who, new_balance);
Ok(NegativeImbalance::new(value))
} else {
Err("too few free funds in account")
}
}
fn make_free_balance_be(
who: &T::AccountId,
balance: Self::Balance,
) -> (
SignedImbalance<Self::Balance, Self::PositiveImbalance>,
UpdateBalanceOutcome,
) {
let original = Self::free_balance(who);
let imbalance = if original <= balance {
SignedImbalance::Positive(PositiveImbalance::new(balance - original))
} else {
SignedImbalance::Negative(NegativeImbalance::new(original - balance))
};
let outcome = {
Self::set_free_balance(who, balance);
UpdateBalanceOutcome::Updated
};
(imbalance, outcome)
}
}
impl<T: Trait> LockableCurrency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Moment = T::Moment;
fn set_lock(
id: LockIdentifier,
who: &T::AccountId,
withdraw_lock: WithdrawLock<Self::Balance, Self::Moment>,
reasons: WithdrawReasons,
) {
let mut new_lock = Some(BalanceLock {
id,
withdraw_lock,
reasons,
});
let mut locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) })
.collect::<Vec<_>>();
if let Some(lock) = new_lock {
locks.push(lock)
}
<Locks<T>>::insert(who, locks);
}
fn remove_lock(id: LockIdentifier, who: &T::AccountId) {
let locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id != id { Some(l) } else { None })
.collect::<Vec<_>>();
<Locks<T>>::insert(who, locks);
}
}
| NegativeImbalance | identifier_name |
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
/// This will alter `FreeBalance` and `ReservedBalance` in storage. It will
/// also decrease the total issuance of the system (`TotalIssuance`).
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
<ReservedBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
/// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
// Commented out for now - but consider it instructive.
// assert!(!Self::total_balance(who).is_zero());
<FreeBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
}
// wrapping these imbalances in a private module is necessary to ensure absolute privacy
// of the inner member.
mod imbalances {
use rstd::mem;
use crate::{result, Imbalance, Saturating, StorageValue, Trait, Zero};
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
pub struct PositiveImbalance<T: Trait>(T::Balance);
impl<T: Trait> PositiveImbalance<T> {
/// Create a new positive imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
PositiveImbalance(amount)
}
}
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
#[must_use]
pub struct NegativeImbalance<T: Trait>(T::Balance);
impl<T: Trait> NegativeImbalance<T> {
/// Create a new negative imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
NegativeImbalance(amount)
}
}
impl<T: Trait> Imbalance<T::Balance> for PositiveImbalance<T> {
type Opposite = NegativeImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(NegativeImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Imbalance<T::Balance> for NegativeImbalance<T> {
type Opposite = PositiveImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(PositiveImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
} |
impl<T: Trait> Drop for PositiveImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_add(self.0));
}
}
impl<T: Trait> Drop for NegativeImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_sub(self.0));
}
}
}
impl<T: Trait> Currency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Balance = T::Balance;
type PositiveImbalance = PositiveImbalance<T>;
type NegativeImbalance = NegativeImbalance<T>;
fn total_balance(who: &T::AccountId) -> Self::Balance {
Self::free_balance(who) + Self::reserved_balance(who)
}
fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool {
Self::free_balance(who) >= value
}
fn total_issuance() -> Self::Balance {
<TotalIssuance<T>>::get()
}
fn minimum_balance() -> Self::Balance {
Zero::zero()
}
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_sub(&amount).unwrap_or_else(|| {
amount = *issued;
Zero::zero()
});
});
PositiveImbalance::new(amount)
}
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_add(&amount).unwrap_or_else(|| {
amount = Self::Balance::max_value() - *issued;
Self::Balance::max_value()
})
});
NegativeImbalance::new(amount)
}
fn free_balance(who: &T::AccountId) -> Self::Balance {
<FreeBalance<T>>::get(who)
}
// # <weight>
// Despite iterating over a list of locks, they are limited by the number of
// lock IDs, which means the number of runtime modules that intend to use and create locks.
// # </weight>
fn ensure_can_withdraw(
who: &T::AccountId,
_amount: T::Balance,
reasons: WithdrawReasons,
new_balance: T::Balance,
) -> Result {
if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer)
&& Self::vesting_balance(who) > new_balance
{
return Err("vesting balance too high to send value");
}
let locks = Self::locks(who);
if locks.is_empty() {
return Ok(());
}
let now = <timestamp::Module<T>>::now();
if locks
.into_iter()
.all(|l| l.withdraw_lock.can_withdraw(now, new_balance) || !l.reasons.intersects(reasons))
{
Ok(())
} else {
Err("account liquidity restrictions prevent withdrawal")
}
}
fn transfer(
transactor: &T::AccountId,
dest: &T::AccountId,
value: Self::Balance,
_existence_requirement: ExistenceRequirement,
) -> Result {
let fee = <T as ring::Trait>::TransferFee::get();
let new_from_ring = <ring::FreeBalance<T>>::get(transactor)
.checked_sub(&fee)
.ok_or("Transfer Fee - NOT ENOUGH RING")?;
<ring::Module<T>>::ensure_can_withdraw(transactor, fee, WithdrawReason::Fee.into(), new_from_ring)?;
let new_from_kton = Self::free_balance(transactor)
.checked_sub(&value)
.ok_or("balance too low to send value")?;
Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer.into(), new_from_kton)?;
let new_to_kton = Self::free_balance(dest)
.checked_add(&value)
.ok_or("destination balance too high to receive value")?;
if transactor != dest {
if new_from_ring < <ring::Module<T> as Currency<<T as system::Trait>::AccountId>>::minimum_balance() {
return Err("transfer would kill account");
}
<ring::Module<T>>::set_free_balance(transactor, new_from_ring);
Self::set_free_balance(transactor, new_from_kton);
Self::set_free_balance(dest, new_to_kton);
<T as ring::Trait>::TransferPayment::on_unbalanced(NegativeImbalanceRing::new(fee));
Self::deposit_event(RawEvent::Transfer(
transactor.to_owned(),
dest.to_owned(),
value.saturated_into(),
fee.saturated_into(),
));
}
Ok(())
}
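// Design note (as implemented above): the fee for a KTON transfer is charged
// in RING, so the sender must also hold at least `TransferFee` of RING and
// remain above the RING existential minimum for the transfer to succeed.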
fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
let free_balance = Self::free_balance(who);
let free_slash = cmp::min(free_balance, value);
Self::set_free_balance(who, free_balance - free_slash);
let remaining_slash = value - free_slash;
// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having
// to draw from reserved funds, however we err on the side of punishment if things are inconsistent
// or `can_slash` wasn't used appropriately.
if !remaining_slash.is_zero() {
let reserved_balance = Self::reserved_balance(who);
let reserved_slash = cmp::min(reserved_balance, remaining_slash);
Self::set_reserved_balance(who, reserved_balance - reserved_slash);
(
NegativeImbalance::new(free_slash + reserved_slash),
remaining_slash - reserved_slash,
)
} else {
(NegativeImbalance::new(value), Zero::zero())
}
}
fn deposit_into_existing(
who: &T::AccountId,
value: Self::Balance,
) -> result::Result<Self::PositiveImbalance, &'static str> {
if Self::total_balance(who).is_zero() {
return Err("beneficiary account must pre-exist");
}
Self::set_free_balance(who, Self::free_balance(who) + value);
Ok(PositiveImbalance::new(value))
}
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
if let SignedImbalance::Positive(p) = imbalance {
p
} else {
// Impossible, but be defensive.
Self::PositiveImbalance::zero()
}
}
fn withdraw(
who: &T::AccountId,
value: Self::Balance,
reasons: WithdrawReasons,
_liveness: ExistenceRequirement,
) -> result::Result<Self::NegativeImbalance, &'static str> {
let old_balance = Self::free_balance(who);
if let Some(new_balance) = old_balance.checked_sub(&value) {
Self::ensure_can_withdraw(who, value, reasons, new_balance)?;
Self::set_free_balance(who, new_balance);
Ok(NegativeImbalance::new(value))
} else {
Err("too few free funds in account")
}
}
fn make_free_balance_be(
who: &T::AccountId,
balance: Self::Balance,
) -> (
SignedImbalance<Self::Balance, Self::PositiveImbalance>,
UpdateBalanceOutcome,
) {
let original = Self::free_balance(who);
let imbalance = if original <= balance {
SignedImbalance::Positive(PositiveImbalance::new(balance - original))
} else {
SignedImbalance::Negative(NegativeImbalance::new(original - balance))
};
let outcome = {
Self::set_free_balance(who, balance);
UpdateBalanceOutcome::Updated
};
(imbalance, outcome)
}
}
impl<T: Trait> LockableCurrency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Moment = T::Moment;
fn set_lock(
id: LockIdentifier,
who: &T::AccountId,
withdraw_lock: WithdrawLock<Self::Balance, Self::Moment>,
reasons: WithdrawReasons,
) {
let mut new_lock = Some(BalanceLock {
id,
withdraw_lock,
reasons,
});
let mut locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) })
.collect::<Vec<_>>();
if let Some(lock) = new_lock {
locks.push(lock)
}
<Locks<T>>::insert(who, locks);
}
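// Sketch of the replace-or-push behaviour above (illustrative id): calling
// `set_lock` twice with the same `LockIdentifier`, say `*b"vesting "`, leaves
// one lock carrying the most recent parameters rather than two stacked
// entries; `remove_lock` below filters that id out again.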
fn remove_lock(id: LockIdentifier, who: &T::AccountId) {
let locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id != id { Some(l) } else { None })
.collect::<Vec<_>>();
<Locks<T>>::insert(who, locks);
}
} | random_line_split | |
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
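// Example genesis values (illustrative only): `balances: vec![(alice, 1_000)]`
// together with `vesting: vec![(alice, 10, 100, 200)]` makes the `build`
// closure above compute `locked = 1_000 - 200 = 800` and
// `per_block = 800 / 100 = 8`, i.e. the account unlocks 8 units per block
// starting at block 10.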
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
/// This will alter `FreeBalance` and `ReservedBalance` in storage. It will
/// also decrease the total issuance of the system (`TotalIssuance`).
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
lib.rs
#![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
/// This will alter `FreeBalance` and `ReservedBalance` in storage. It will
/// also adjust the total issuance of the system (`TotalIssuance`) accordingly.
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
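// Dispatch sketch (hypothetical, mock-runtime-style test code; `Origin`, the account
// values and the lookups are assumptions, not part of this file):
// Kton::transfer(Origin::signed(alice), bob_lookup, 42)?;
// Kton::set_balance(Origin::ROOT, bob_lookup, 100, 0)?;
// Kton::force_transfer(Origin::ROOT, alice_lookup, bob_lookup, 42)?;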
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
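// Sketch: the freely transferable part of an account is therefore
// `free_balance(who) - vesting_balance(who)`; this is what `ensure_can_withdraw`
// below enforces for `Transfer`/`Reserve` withdrawals.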
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
<ReservedBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
/// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
// Commented out for now - but consider it instructive.
// assert!(!Self::total_balance(who).is_zero());
<FreeBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
}
// wrapping these imbalances in a private module is necessary to ensure absolute privacy
// of the inner member.
mod imbalances {
use rstd::mem;
use crate::{result, Imbalance, Saturating, StorageValue, Trait, Zero};
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
pub struct PositiveImbalance<T: Trait>(T::Balance);
impl<T: Trait> PositiveImbalance<T> {
/// Create a new positive imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
PositiveImbalance(amount)
}
}
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
#[must_use]
pub struct NegativeImbalance<T: Trait>(T::Balance);
impl<T: Trait> NegativeImbalance<T> {
/// Create a new negative imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
NegativeImbalance(amount)
}
}
impl<T: Trait> Imbalance<T::Balance> for PositiveImbalance<T> {
type Opposite = NegativeImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(NegativeImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Imbalance<T::Balance> for NegativeImbalance<T> {
type Opposite = PositiveImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(PositiveImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Drop for PositiveImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_add(self.0));
}
}
impl<T: Trait> Drop for NegativeImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_sub(self.0));
}
}
}
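// Settlement sketch (hypothetical caller): because of the `Drop` impls above, an
// imbalance that is never offset, merged, or passed to an `OnUnbalanced` handler
// squares itself against `TotalIssuance` when it goes out of scope:
// let minted = <Module<T> as Currency<T::AccountId>>::issue(amount); // issuance += amount
// drop(minted); // NegativeImbalance::drop subtracts `amount` back out, net zero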
impl<T: Trait> Currency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Balance = T::Balance;
type PositiveImbalance = PositiveImbalance<T>;
type NegativeImbalance = NegativeImbalance<T>;
fn total_balance(who: &T::AccountId) -> Self::Balance {
Self::free_balance(who) + Self::reserved_balance(who)
}
fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool {
Self::free_balance(who) >= value
}
fn total_issuance() -> Self::Balance {
<TotalIssuance<T>>::get()
}
fn minimum_balance() -> Self::Balance {
Zero::zero()
}
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_sub(&amount).unwrap_or_else(|| {
amount = *issued;
Zero::zero()
});
});
PositiveImbalance::new(amount)
}
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_add(&amount).unwrap_or_else(|| {
amount = Self::Balance::max_value() - *issued;
Self::Balance::max_value()
})
});
NegativeImbalance::new(amount)
}
fn free_balance(who: &T::AccountId) -> Self::Balance {
<FreeBalance<T>>::get(who)
}
// # <weight>
// Despite iterating over a list of locks, the number of locks is bounded by the number of
// lock IDs, i.e. by the number of runtime modules that intend to use and create locks.
// # </weight>
fn ensure_can_withdraw(
who: &T::AccountId,
_amount: T::Balance,
reasons: WithdrawReasons,
new_balance: T::Balance,
) -> Result {
if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer)
&& Self::vesting_balance(who) > new_balance
{
return Err("vesting balance too high to send value");
}
let locks = Self::locks(who);
if locks.is_empty() {
return Ok(());
}
let now = <timestamp::Module<T>>::now();
if locks
.into_iter()
.all(|l| l.withdraw_lock.can_withdraw(now, new_balance) || !l.reasons.intersects(reasons))
{
Ok(())
} else {
Err("account liquidity restrictions prevent withdrawal")
}
}
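// Usage sketch (hypothetical): a lock blocks a withdrawal only when its `reasons`
// overlap the requested reasons AND its `withdraw_lock` rejects `new_balance` now:
// Self::ensure_can_withdraw(&who, value, WithdrawReason::Transfer.into(), new_balance)?;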
fn transfer(
transactor: &T::AccountId,
dest: &T::AccountId,
value: Self::Balance,
_existence_requirement: ExistenceRequirement,
) -> Result {
let fee = <T as ring::Trait>::TransferFee::get();
let new_from_ring = <ring::FreeBalance<T>>::get(transactor)
.checked_sub(&fee)
.ok_or("Transfer Fee - NOT ENOUGH RING")?;
<ring::Module<T>>::ensure_can_withdraw(transactor, fee, WithdrawReason::Fee.into(), new_from_ring)?;
let new_from_kton = Self::free_balance(transactor)
.checked_sub(&value)
.ok_or("balance too low to send value")?;
Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer.into(), new_from_kton)?;
let new_to_kton = Self::free_balance(dest)
.checked_add(&value)
.ok_or("destination balance too high to receive value")?;
if transactor != dest {
if new_from_ring < <ring::Module<T> as Currency<<T as system::Trait>::AccountId>>::minimum_balance() {
return Err("transfer would kill account");
}
<ring::Module<T>>::set_free_balance(transactor, new_from_ring);
Self::set_free_balance(transactor, new_from_kton);
Self::set_free_balance(dest, new_to_kton);
<T as ring::Trait>::TransferPayment::on_unbalanced(NegativeImbalanceRing::new(fee));
Self::deposit_event(RawEvent::Transfer(
transactor.to_owned(),
dest.to_owned(),
value.saturated_into(),
fee.saturated_into(),
));
}
Ok(())
}
fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
let free_balance = Self::free_balance(who);
let free_slash = cmp::min(free_balance, value);
Self::set_free_balance(who, free_balance - free_slash);
let remaining_slash = value - free_slash;
// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having
// to draw from reserved funds, however we err on the side of punishment if things are inconsistent
// or `can_slash` wasn't used appropriately.
if !remaining_slash.is_zero() {
let reserved_balance = Self::reserved_balance(who);
let reserved_slash = cmp::min(reserved_balance, remaining_slash);
Self::set_reserved_balance(who, reserved_balance - reserved_slash);
(
NegativeImbalance::new(free_slash + reserved_slash),
remaining_slash - reserved_slash,
)
} else {
(NegativeImbalance::new(value), Zero::zero())
}
}
fn deposit_into_existing(
who: &T::AccountId,
value: Self::Balance,
) -> result::Result<Self::PositiveImbalance, &'static str> {
if Self::total_balance(who).is_zero() {
return Err("beneficiary account must pre-exist");
}
Self::set_free_balance(who, Self::free_balance(who) + value);
Ok(PositiveImbalance::new(value))
}
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
if let SignedImbalance::Positive(p) = imbalance {
p
} else {
// Impossible, but be defensive.
Self::PositiveImbalance::zero()
}
}
fn withdraw(
who: &T::AccountId,
value: Self::Balance,
reasons: WithdrawReasons,
_liveness: ExistenceRequirement,
) -> result::Result<Self::NegativeImbalance, &'static str> {
let old_balance = Self::free_balance(who);
if let Some(new_balance) = old_balance.checked_sub(&value) {
Self::ensure_can_withdraw(who, value, reasons, new_balance)?;
Self::set_free_balance(who, new_balance);
Ok(NegativeImbalance::new(value))
} else {
Err("too few free funds in account")
}
}
fn make_free_balance_be(
who: &T::AccountId,
balance: Self::Balance,
) -> (
SignedImbalance<Self::Balance, Self::PositiveImbalance>,
UpdateBalanceOutcome,
) {
let original = Self::free_balance(who);
let imbalance = if original <= balance {
SignedImbalance::Positive(PositiveImbalance::new(balance - original))
} else {
SignedImbalance::Negative(NegativeImbalance::new(original - balance))
};
let outcome = {
Self::set_free_balance(who, balance);
UpdateBalanceOutcome::Updated
};
(imbalance, outcome)
}
}
impl<T: Trait> LockableCurrency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Moment = T::Moment;
fn set_lock(
id: LockIdentifier,
who: &T::AccountId,
withdraw_lock: WithdrawLock<Self::Balance, Self::Moment>,
reasons: WithdrawReasons,
) {
let mut new_lock = Some(BalanceLock {
id,
withdraw_lock,
reasons,
});
let mut locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) })
.collect::<Vec<_>>();
if let Some(lock) = new_lock {
locks.push(lock)
}
<Locks<T>>::insert(who, locks);
}
fn remove_lock(id: LockIdentifier, who: &T::AccountId) {
let locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id != id { Some(l) } else { None })
.collect::<Vec<_>>();
<Locks<T>>::insert(who, locks);
}
}
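// Usage sketch for the lock API above (hypothetical caller; the `WithdrawLock`
// construction depends on darwinia_support and is elided here):
// const SOME_LOCK_ID: LockIdentifier = *b"kton lck"; // a LockIdentifier is typically 8 bytes
// <Module<T> as LockableCurrency<_>>::set_lock(SOME_LOCK_ID, &who, withdraw_lock, WithdrawReasons::all());
// <Module<T> as LockableCurrency<_>>::remove_lock(SOME_LOCK_ID, &who);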
query.py
import requests
import json
import csv
import datetime
import time
from collections import defaultdict
import sys
import os
import glob
import logging
import pandas as pd
json_query = '''
{
"query": {
"bool": {
"filter": [
{
"terms": {
"metricset.name": [
"POP_tunnel_monitor"
]
}
},
{
"terms": {
"metricset.module": [
"business"
]
}
},
{
"range": {
"timestamp": {
"gte": 1563349200000,
"lt": 1563349260000
}
}
},
{
"nested": {
"path": "pairs",
"query": {
"bool": {
"filter": [
{"term": {"pairs.name": "ns_id"}},
{"term": {"pairs.value.string": "$nsid"}}
]
}
}
}
},
{
"nested": {
"path": "pairs",
"query": {
"bool": {
"filter": [
{"term": {"pairs.name": "tun_oip"}},
{"term": {"pairs.value.string": "$tunoip"}}
]
}
}
}
}
]
}
},
"size": 10000
}
'''
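# The $nsid / $tunoip placeholders in the template above are filled in with plain
# string replacement before the body is POSTed (see query() below), e.g.:
# body = json_query.replace('$nsid', '500850').replace('$tunoip', '10.0.1.1->10.0.1.2')
# (the ns_id and tun_oip values here are made up for illustration)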
url = 'http://rpc.dsp.chinanetcenter.com:10200/api/console/proxy?path=*metricelf*%2F_search&method=POST'
headers = {
'Content-Type': 'application/json; charset=utf-8',
'Cookie': '__lcl=zh_CN; cluster=dashboard',
'DNT': '1',
'Host': 'rpc.dsp.chinanetcenter.com:10200',
'kbn-version': '6.1.1',
'Origin': 'http://rpc.dsp.chinanetcenter.com:10200',
'Referer': 'http://rpc.dsp.chinanetcenter.com:10200/app/kibana',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36',
}
keys = [
"timestamp",
"pop_proname", ##项目名称
"pop_proid", ##oms上项目ID
"ns_id", ##项目的enterpriseID
"tun_uip", ##underlay链路ip
"tun_oip", ##overlay链路ip
"pop_ip", ##POP服务器管理ip
"tun_cost", ##链路权重
"tun_status", ##链路状态,0表示正常,1表示异常
"tun_cost_status", ##链路是否主备线路,0表示备线路,1表示主线路
"over_drop", ##overlay非云网链路丢包率
"over_dropth", ##overlay非云网链路丢包率阈值
"over_sl_drop", ##overlay云网链路丢包率
"over_sl_dropth", ##overlay云网链路丢包率阈值
"over_delay", ##overlay链路时延
"over_shakedelay", ##overlay链路抖动
"over_mindelay",
"over_maxdelay",
"over_mdev",
"under_drop", ##underlay非云网链路丢包率
"under_dropth", ##underlay非云网链路丢包率阈值
"under_sl_drop", ##underlay云网链路丢包率
"under_sl_dropth", ##underlay云网链路丢包率阈值
"under_delay", ##underlay链路时延
"under_shakedelay", ##underlay链路抖动
"under_mindelay",
"under_maxdelay",
"under_mdev",
"tags",
#"metricset_name"
]
key2index = {k : i for i, k in enumerate(keys)}
def request_data(sess, postdata):
postdata = json.dumps(postdata)
response = sess.post(url, data=postdata)
res = response.text
data = json.loads(res)
samples = []
for i, a in enumerate(data['hits']['hits']):
pairs = a['_source']['pairs']
pair_dict = defaultdict(str)
pair_dict['timestamp'] = datetime.datetime.fromtimestamp(float(a['_source']['timestamp'])/1000)
if len(a['_source']['tags']) > 0:
pair_dict['tags'] = a['_source']['tags'][0]['value']
if len(a['_source']['tags']) > 1:
print('Warning:', a['_source']['tags'])
pair_dict['metricset_name'] = a['_source']['metricset']['name']
for p in pairs:
pair_dict[p['name']] = list(p['value'].values())[0]
samples.append([pair_dict[key] for key in keys])
return sorted(samples, key=lambda x:x[0]), data['hits']['total']
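# request_data() assumes an Elasticsearch-style response shaped roughly like this
# (sketch; the values are illustrative, not real data):
# {"hits": {"total": 2, "hits": [{"_source": {
#     "timestamp": 1563349200000,
#     "tags": [{"value": "..."}],
#     "metricset": {"name": "POP_tunnel_monitor"},
#     "pairs": [{"name": "ns_id", "value": {"string": "500850"}}]}}]}}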
def get_list():
postdata = json.loads(json_query)
del postdata['query']['bool']['filter'][3:]
timestamp_range = postdata['query']['bool']['filter'][2]['range']['timestamp']
ta = datetime.datetime.now() - datetime.timedelta(hours=1)
tb = ta + datetime.timedelta(minutes=2)
timestamp_range['gte'] = ta.timestamp()*1000
timestamp_range['lt'] = tb.timestamp()*1000
logging.info('Request data in [{}, {})'.format(ta, tb))
sess = requests.Session()
sess.headers.update(headers)
samples, tot = [], 0 # fallback so the code below still works if every retry fails
for _ in range(3):
try:
samples, tot = request_data(sess, postdata)
logging.info('Hists: {} / {}'.format(len(samples), tot))
break
except Exception as e:
logging.warning(e)
logging.info('Retry')
time.sleep(3)
df = pd.DataFrame(samples, columns=keys)
tunoip = {}
proname = {}
for nsid in df['ns_id'].unique():
if nsid=='': continue
t = df[df.ns_id==nsid]
tunoip[nsid] = sorted(list(t.tun_oip.unique()))
proname[nsid] = t.pop_proname.iloc[-1]
return tunoip, proname
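# get_list() returns two dicts keyed by ns_id, roughly (sketch, made-up values):
# tunoip = {'500850': ['10.0.1.1->10.0.1.2', ...]}
# proname = {'500850': 'some-project-name'}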
def query(nsid, tunoip, begin_time=None, end_time=None, data_dir='./data'):
""" Query the ping data of specifical ns_id and tun_oip between specifical time range.
Time is datetime type or a string with format '%Y-%m-%d %H:%M:%S', e.g. '2019-10-13 10:09:00'
nsid: ns_id value.
tunoip: tun_oip value.
begin_time: The begining time to query. Default None means the time before 30 days.
end_time: The ending time to query. Default None means the current time.
"""
if type(begin_time) is str: begin_time = datetime.datetime.fromisoformat(begin_time)
if type(end_time) is str: end_time = datetime.datetime.fromisoformat(end_time)
nowtime = datetime.datetime.now()
pretime = nowtime - datetime.timedelta(days=30)
if end_time is None: end_time = nowtime
if begin_time is None: begin_time = pretime
q_begin_time, q_end_time = begin_time, end_time
if not os.path.exists(data_dir): os.mkdir(data_dir)
nsid_dir = os.path.join(data_dir, nsid)
if not os.path.exists(nsid_dir): os.mkdir(nsid_dir)
file_prefix = tunoip.replace('>', '')
# timestamp_path = os.path.join(nsid_dir, '%s.ts'%file_prefix)
csv_path = os.path.join(nsid_dir, '%s.csv'%file_prefix)
logging.info('Query: nsid={}, tunoip={}, timerange=[{}, {})'.format(nsid, tunoip, begin_time, end_time))
if os.path.exists(csv_path):
logging.info('Read data from %s'%csv_path)
df = pd.read_csv(csv_path)
logging.info('Time range: [{}, {}]'.format(df.iloc[0]['timestamp'], df.iloc[-1]['timestamp']))
begin_time = datetime.datetime.fromisoformat(df.iloc[-1]['timestamp']) + datetime.timedelta(minutes=1)
if begin_time < pretime:
begin_time = pretime
df.index = pd.to_datetime(df['timestamp'])
p_num = len(df)
if df.index[0] < pretime:
df = df.loc[pretime:]
logging.info('Forget %d records'%(p_num-len(df)))
else:
df = pd.DataFrame(columns=keys)
begin_time = pretime
# if nowtime - end_time < datetime.timedelta(hours=1):
# tmp_end_time = end_time - datetime.timedelta(hours=1)
# else:
# tmp_end_time = end_time
tmp_end_time = end_time
if q_end_time < begin_time:
df = df.loc[q_begin_time:q_end_time]
logging.info('Query: {} items'.format(len(df)))
logging.info('done.')
return df
s = json_query.replace('$nsid', nsid).replace('$tunoip', tunoip)
postdata = json.loads(s)
timestamp_range = postdata['query']['bool']['filter'][2]['range']['timestamp']
t_begin_time, t_end_time = int(begin_time.timestamp()), int(end_time.timestamp())
t_tmp_end_time = int(tmp_end_time.timestamp())
postdata['size'] = 10000
delta_time = 3600*24*3 # 3 days
sess = requests.Session()
sess.headers.update(headers)
def get_data(t, et):
if t >= et: return []
timestamp_range['gte'] = t*1000
timestamp_range['lt'] = et*1000
logging.info('Request data in [{}, {})'.format(datetime.datetime.fromtimestamp(t), datetime.datetime.fromtimestamp(et)))
for _ in range(5):
try:
samples, tot = request_data(sess, postdata)
logging.info('Hists: {} / {}'.format(len(samples), tot))
return samples
except Exception as e:
logging.warning(e)
logging.info('Retry')
time.sleep(3)
return [] # all retries failed: return an empty batch instead of None
data = []
for t in range(t_begin_time, t_tmp_end_time, delta_time):
et = min(t+delta_time, t_tmp_end_time)
data.extend(get_data(t, et))
df = df.append(pd.DataFrame(data, columns=keys), ignore_index=True)
# samples = get_data(t_tmp_end_time, t_end_time)
logging.info('Total Hists: {}'.format(len(data)))
if len(data)>0:
logging.info('Save data to %s'%csv_path)
df.to_csv(csv_path, index=False)
# df = df.append(pd.DataFrame(samples, columns=keys), ignore_index=True)
df.index = pd.to_datetime(df['timestamp'])
df = df.loc[q_begin_time:q_end_time]
logging.info('Query: {} items'.format(len(df)))
logging.info('done.')
df.fillna(0, inplace=True)
return df
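# Example usage (hypothetical ns_id / tun_oip values):
# df = query('500850', '10.0.1.214->10.0.1.213',
#            begin_time='2019-10-30 13:00:00', end_time='2019-10-30 14:05:00')
# print(df['over_drop'].astype(float).describe())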
def cache():
tunoip, proname = get_list()
nsids = ['500850', '504608', '502902']
for nsid in nsids:
for lk in tunoip[nsid]:
query(nsid, lk, data_dir='/home/monitor/data')
# data_dir = '/home/monitor/data'
# fn = os.path.join(data_dir, 'collect.log')
fn = '/dev/stdout'
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if __name__ == "__main__":
cache()
nsid = '1'
tunoip = '10.0.1.214->10.0.1.213'
# ta = '2019-10-30 13:00:00'
# tb = '2019-10-30 14:05:00'
# ta = None
# df = query(nsid, tunoip, data_dir='/home/monitor/data')
# print(proname)
# print(data['over_drop'])
# print(data['over_drop'].astype(float))
# a = df['tun_uip'].unique()
# b = df['tun_oip'].unique()
# print(b,a)
query.py |
import requests
import json
import csv
import datetime
import time
from collections import defaultdict
import sys
import os
import glob
import logging
import pandas as pd
json_query = '''
{
"query": {
"bool": {
"filter": [
{
"terms": {
"metricset.name": [
"POP_tunnel_monitor"
]
}
},
{
"terms": {
"metricset.module": [
"business"
]
}
},
{
"range": {
"timestamp": {
"gte": 1563349200000,
"lt": 1563349260000
}
}
},
{
"nested": {
"path": "pairs",
"query": {
"bool": {
"filter": [
{"term": {"pairs.name": "ns_id"}},
{"term": {"pairs.value.string": "$nsid"}}
]
}
}
}
},
{
"nested": {
"path": "pairs",
"query": {
"bool": {
"filter": [
{"term": {"pairs.name": "tun_oip"}},
{"term": {"pairs.value.string": "$tunoip"}}
]
}
}
}
}
]
}
},
"size": 10000
}
'''
url = 'http://rpc.dsp.chinanetcenter.com:10200/api/console/proxy?path=*metricelf*%2F_search&method=POST'
headers = {
'Content-Type': 'application/json; charset=utf-8',
'Cookie': '__lcl=zh_CN; cluster=dashboard',
'DNT': '1',
'Host': 'rpc.dsp.chinanetcenter.com:10200',
'kbn-version': '6.1.1',
'Origin': 'http://rpc.dsp.chinanetcenter.com:10200',
'Referer': 'http://rpc.dsp.chinanetcenter.com:10200/app/kibana',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36',
}
keys = [
"timestamp",
"pop_proname", ##项目名称
"pop_proid", ##oms上项目ID
"ns_id", ##项目的enterpriseID
"tun_uip", ##underlay链路ip
"tun_oip", ##overlay链路ip
"pop_ip", ##POP服务器管理ip
"tun_cost", ##链路权重
"tun_status", ##链路状态,0表示正常,1表示异常
"tun_cost_status", ##链路是否主备线路,0表示备线路,1表示主线路
"over_drop", ##overlay非云网链路丢包率
"over_dropth", ##overlay非云网链路丢包率阈值
"over_sl_drop", ##overlay云网链路丢包率
"over_sl_dropth", ##overlay云网链路丢包率阈值
"over_delay", ##overlay链路时延
"over_shakedelay", ##overlay链路抖动
"over_mindelay",
"over_maxdelay",
"over_mdev",
"under_drop", ##underlay非云网链路丢包率
"under_dropth", ##underlay非云网链路丢包率阈值
"under_sl_drop", ##underlay云网链路丢包率
"under_sl_dropth", ##underlay云网链路丢包率阈值
"under_delay", ##underlay链路时延
"under_shakedelay", ##underlay链路抖动
"under_mindelay",
"under_maxdelay",
"under_mdev",
"tags",
#"metricset_name"
]
key2index = {k : i for i, k in enumerate(keys)}
def request_data(sess, postdata):
postdata = json.dumps(postdata)
response = sess.post(url, data=postdata)
res = response.text
data = json.loads(res)
samples = []
for i, a in enumerate(data['hits']['hits']):
pairs = a['_source']['pairs']
| defaultdict(str)
pair_dict['timestamp'] = datetime.datetime.fromtimestamp(float(a['_source']['timestamp'])/1000)
if len(a['_source']['tags']) > 0:
pair_dict['tags'] = a['_source']['tags'][0]['value']
if len(a['_source']['tags']) > 1:
print('Warning:', a['_source']['tags'])
pair_dict['metricset_name'] = a['_source']['metricset']['name']
for p in pairs:
pair_dict[p['name']] = list(p['value'].values())[0]
samples.append([pair_dict[key] for key in keys])
return sorted(samples, key=lambda x:x[0]), data['hits']['total']
def get_list():
postdata = json.loads(json_query)
del postdata['query']['bool']['filter'][3:]
timestamp_range = postdata['query']['bool']['filter'][2]['range']['timestamp']
ta = datetime.datetime.now() - datetime.timedelta(hours=1)
tb = ta + datetime.timedelta(minutes=2)
timestamp_range['gte'] = ta.timestamp()*1000
timestamp_range['lt'] = tb.timestamp()*1000
logging.info('Request data in [{}, {})'.format(ta, tb))
sess = requests.Session()
sess.headers.update(headers)
for _ in range(3):
try:
samples, tot = request_data(sess, postdata)
logging.info('Hists: {} / {}'.format(len(samples), tot))
break
except Exception as e:
logging.warning(e)
logging.info('Retry')
time.sleep(3)
df = pd.DataFrame(samples, columns=keys)
tunoip = {}
proname = {}
for nsid in df['ns_id'].unique():
if nsid=='': continue
t = df[df.ns_id==nsid]
tunoip[nsid] = sorted(list(t.tun_oip.unique()))
proname[nsid] = t.pop_proname.iloc[-1]
return tunoip, proname
def query(nsid, tunoip, begin_time=None, end_time=None, data_dir='./data'):
""" Query the ping data of specifical ns_id and tun_oip between specifical time range.
Time is datetime type or a string with format '%Y-%m-%d %H:%M:%S', e.g. '2019-10-13 10:09:00'
nsid: ns_id value.
tunoip: tun_oip value.
begin_time: The begining time to query. Default None means the time before 30 days.
end_time: The ending time to query. Default None means the current time.
"""
if type(begin_time) is str: begin_time = datetime.datetime.fromisoformat(begin_time)
if type(end_time) is str: end_time = datetime.datetime.fromisoformat(end_time)
nowtime = datetime.datetime.now()
pretime = nowtime - datetime.timedelta(days=30)
if end_time is None: end_time = nowtime
if begin_time is None: begin_time = pretime
q_begin_time, q_end_time = begin_time, end_time
if not os.path.exists(data_dir): os.mkdir(data_dir)
nsid_dir = os.path.join(data_dir, nsid)
if not os.path.exists(nsid_dir): os.mkdir(nsid_dir)
file_prefix = tunoip.replace('>', '')
# timestamp_path = os.path.join(nsid_dir, '%s.ts'%file_prefix)
csv_path = os.path.join(nsid_dir, '%s.csv'%file_prefix)
logging.info('Query: nsid={}, tunoip={}, timerange=[{}, {})'.format(nsid, tunoip, begin_time, end_time))
if os.path.exists(csv_path):
logging.info('Read data from %s'%csv_path)
df = pd.read_csv(csv_path)
logging.info('Time range: [{}, {}]'.format(df.iloc[0]['timestamp'], df.iloc[-1]['timestamp']))
begin_time = datetime.datetime.fromisoformat(df.iloc[-1]['timestamp']) + datetime.timedelta(minutes=1)
if begin_time < pretime:
begin_time = pretime
df.index = pd.to_datetime(df['timestamp'])
p_num = len(df)
if df.index[0] < pretime:
df = df.loc[pretime:]
logging.info('Forget %d records'%(p_num-len(df)))
else:
df = pd.DataFrame(columns=keys)
begin_time = pretime
# if nowtime - end_time < datetime.timedelta(hours=1):
# tmp_end_time = end_time - datetime.timedelta(hours=1)
# else:
# tmp_end_time = end_time
tmp_end_time = end_time
if q_end_time < begin_time:
df = df.loc[q_begin_time:q_end_time]
logging.info('Query: {} items'.format(len(df)))
logging.info('done.')
return df
s = json_query.replace('$nsid', nsid).replace('$tunoip', tunoip)
postdata = json.loads(s)
timestamp_range = postdata['query']['bool']['filter'][2]['range']['timestamp']
t_begin_time, t_end_time = int(begin_time.timestamp()), int(end_time.timestamp())
t_tmp_end_time = int(tmp_end_time.timestamp())
postdata['size'] = 10000
delta_time = 3600*24*3 # 3-day request windows
sess = requests.Session()
sess.headers.update(headers)
def get_data(t, et):
if t >= et: return []
timestamp_range['gte'] = t*1000
timestamp_range['lt'] = et*1000
logging.info('Request data in [{}, {})'.format(datetime.datetime.fromtimestamp(t), datetime.datetime.fromtimestamp(et)))
for _ in range(5):
try:
samples, tot = request_data(sess, postdata)
logging.info('Hits: {} / {}'.format(len(samples), tot))
return samples
except Exception as e:
logging.warning(e)
logging.info('Retry')
time.sleep(3)
return [] # all retries failed; contribute nothing for this window
data = []
for t in range(t_begin_time, t_tmp_end_time, delta_time):
et = min(t+delta_time, t_tmp_end_time)
data.extend(get_data(t, et))
df = df.append(pd.DataFrame(data, columns=keys), ignore_index=True)
# samples = get_data(t_tmp_end_time, t_end_time)
logging.info('Total Hits: {}'.format(len(data)))
if len(data)>0:
logging.info('Save data to %s'%csv_path)
df.to_csv(csv_path, index=False)
# df = df.append(pd.DataFrame(samples, columns=keys), ignore_index=True)
df.index = pd.to_datetime(df['timestamp'])
df = df.loc[q_begin_time:q_end_time]
logging.info('Query: {} items'.format(len(df)))
logging.info('done.')
df.fillna(0, inplace=True)
return df
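# Aside: a self-contained sketch of the 3-day windowing used above (epoch seconds assumed),
# which keeps each Elasticsearch request under the 10000-document 'size' cap.
import datetime
_begin = int(datetime.datetime(2019, 10, 1).timestamp())
_end = int(datetime.datetime(2019, 10, 10).timestamp())
_window = 3600 * 24 * 3  # same chunk length as delta_time above
for _t in range(_begin, _end, _window):
    _et = min(_t + _window, _end)
    print(datetime.datetime.fromtimestamp(_t), '->', datetime.datetime.fromtimestamp(_et))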
def cache():
tunoip, proname = get_list()
nsids = ['500850', '504608', '502902']
for nsid in nsids:
for lk in tunoip[nsid]:
query(nsid, lk, data_dir='/home/monitor/data')
# data_dir = '/home/monitor/data'
# fn = os.path.join(data_dir, 'collect.log')
fn = '/dev/stdout'
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if __name__ == "__main__":
cache()
nsid = '1'
tunoip = '10.0.1.214->10.0.1.213'
# ta = '2019-10-30 13:00:00'
# tb = '2019-10-30 14:05:00'
# ta = None
# df = query(nsid, tunoip, data_dir='/home/monitor/data')
# print(proname)
# print(data['over_drop'])
# print(data['over_drop'].astype(float))
# a = df['tun_uip'].unique()
# b = df['tun_oip'].unique()
# print(b,a) | pair_dict = | identifier_name |
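Both retry loops in the sample above repeat the same try/except/sleep scaffolding; a minimal consolidation sketch (hypothetical helper, not part of the original script) could read:

import logging
import time

def with_retries(fn, attempts=3, delay=3, default=None):
    # Call fn(); on failure, log the error, wait, and retry up to `attempts` times.
    for _ in range(attempts):
        try:
            return fn()
        except Exception as e:
            logging.warning(e)
            time.sleep(delay)
    return default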
day.page.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { SyncService } from 'src/app/services/sync.service';
import { LoadingController, Platform } from '@ionic/angular';
import { DatePipe, getLocaleDateTimeFormat } from '@angular/common';
import { DatePicker } from '@ionic-native/date-picker/ngx';
import { Storage } from '@ionic/storage';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { File } from '@ionic-native/file/ngx';
import { DatabaseService} from 'src/app/services/database.service';
import pdfMake from 'pdfmake/build/pdfmake';
import pdfFonts from 'pdfmake/build/vfs_fonts';
import { FileOpener } from '@ionic-native/file-opener/ngx';
// import { lstat } from 'fs';
pdfMake.vfs = pdfFonts.pdfMake.vfs;
@Component({
selector: 'app-day',
templateUrl: './day.page.html',
styleUrls: ['./day.page.scss'],
})
export class DayPage implements OnInit {
style = 'bootstrap';
data = [];
items = [];
total = [];
day = new Date().toString();
start = new Date().setHours(0, 0, 0, 0).toString();
end = new Date().setHours(23, 59, 59, 999).toString();
i = 0;
currency = '';
pdfObj = null;
company = '';
displaystart = '';
displayend = '';
a = 0;
salesvalue = 0;
min = '2020';
max = '';
constructor(
private router: Router,
private syncService: SyncService,
private loadingCtrl: LoadingController,
private datePipe: DatePipe,
private datepicker: DatePicker,
private plt: Platform,
private socialSharing: SocialSharing,
private file: File,
private fileOpener: FileOpener,
private storage: Storage,
private databaseservice: DatabaseService
) |
ngOnInit() {
}
share() {
// this.getData();
this.createPdf();
}
back() {
this.router.navigate(['/menu/reports']);
}
onActivate(event) {
if (event.type === 'click') {
console.log(event.row);
}
}
async getData() {
let loading = await this.loadingCtrl.create();
await loading.present();
let now = new Date().toString();
const a = this.datePipe.transform(now, 'dd MMM yyyy h:mm a');
let start = new Date().setHours(0, 0, 0, 0).toString();
const b = this.datePipe.transform(start, 'dd MMM yyyy h:mm a');
// alert('start:' + this.start + '\ncurrent:' + this.end);
// alert('day:' + this.day)
this.syncService.getTodaysSales(this.start, this.end, this.currency).then((data) => {
this.total = data;
console.log(data);
for (let i = 0; i < this.total.length; i++) {
const data1 = {
day: this.total[i].Date,
sales: this.total[i].Total,
paid: this.total[i].Paid
};
this.data.push(data1);
}
this.data = [...this.data];
loading.dismiss();
// alert('y' + this.total[0].Total + 'p' + this.total[0].Paid);
});
}
selectDate() {
var options={
date: new Date(),
mode: 'date',
androidTheme: this.datepicker.ANDROID_THEMES.THEME_DEVICE_DEFAULT_LIGHT
};
this.datepicker.show(options).then((date) => {
this.day = this.datePipe.transform(date, 'dd MMM yyyy h:mm a');
this.data = [];
this.getData();
// console.log('selected:',this.myDate);
});
}
changeDate() {
//alert('yes' + this.data.length);
//alert('y' + this.start);
if ( this.i === 2 || (this.a === 0 && this.i === this.salesvalue)) {
this.i = 0;
this.a = 1;
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
// const s = new Date(this.day).setHours(0, 0, 0, 0).toString();
// const l = new Date(this.day).setHours(23, 59, 59, 999).toString();
this.start = first1;
this.end = last1;
// this.start = this.datePipe.transform(s, 'dd MMM yyyy h:mm a');
// this.end = this.datePipe.transform(l, 'dd MMM yyyy h:mm a');
// alert('s: ' + first1 + '\nL: ' + last1 );
this.data = [];
this.getData();
}
this.i = this.i + 1;
this.day = this.datePipe.transform(this.day, 'dd MMM yyyy h:mm a');
}
async createPdf() {
let a ;
let x ;
let y ;
let z ;
let left;
let right;
let items = [];
let loading = await this.loadingCtrl.create();
await loading.present();
const itemss = [];
for (let i = 0; i < this.data.length; i++) {
itemss.push(
[
{ text: this.data[i].day.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].sales.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].paid.toString(), fontSize: 18, color: '#000000' }
]
);
}
left = {
text: [
{ text: this.company, bold: true, fontSize: 20, alignment: 'left'},
]
};
right = {
text: [
{ text: this.datePipe.transform(new Date(), 'dd MMM yyyy') , color: '#000000' , fontSize: 18, alignment: 'right'},
]
};
var docDefinition = {
pageSize: 'A4',
pageMargins: [ 20, 20, 20, 20 ],
content: [
{ text: 'SALES BY DAY', bold: true, alignment: 'center', fontSize: 25, style: 'subheader'},
{ text: this.displaystart + ' - ' + this.displayend, bold: true, alignment: 'center', fontSize: 20, style: 'subheader'},
// { margin: [0, 10, 0, 0],
// text: 'CUSTOMER STATEMENT', style: 'header', fontSize: 25, alignment: 'left', color: '#ff0000' },
{
margin: [0, 10, 0, 0],
canvas:
[
{
type: 'line',
x1: 0, y1: 0,
x2: 555, y2: 0,
lineWidth: 3
},
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [ 300, 250 ],
body: [
[
left, right
],
],
},
layout: 'noBorders'
},
{
margin: [0, 20, 0, 0],
text: [
{ text: 'Day ' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Total' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Paid' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [220, 160, 200 ],
body: itemss
},
layout: 'noBorders'
},
],
styles: {
header: {
fontSize: 18,
bold: true,
},
subheader: {
fontSize: 10,
bold: true,
// margin: [0, 15, 0, 0],
color: '#000000'
},
story: {
italic: true,
alignment: 'center',
width: '50%',
},
backgroundcolor: ''
}
};
this.pdfObj = pdfMake.createPdf(docDefinition);
this.downloadPdf(loading);
// });
}
ionViewWillLeave() {
this.databaseservice.setSalesByDayValue(2);
}
downloadPdf(loading) {
debugger
if (this.plt.is('cordova')) {
this.pdfObj.getBuffer((buffer) => {
// var blob = new Blob([buffer], { type: 'application/pdf' });
var utf8 = new Uint8Array(buffer);
var binaryArray = utf8.buffer;
var blob = new Blob([binaryArray], { type: 'application/pdf' });
const a = new Date().getTime().toString();
this.file.writeFile(this.file.dataDirectory, a + '.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + a + '.pdf', 'application/pdf');
// loading.dismiss();
});
this.socialSharing.share("test", null, this.file.dataDirectory + a + '.pdf', null).then(() => {
loading.dismiss();
console.log('social share');
console.log(this.file.dataDirectory);
}).catch(() => {
});
debugger
// Save the PDF to the data Directory of our App
// this.file.writeFile(this.file.dataDirectory, 'Invoice4.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + 'Invoice.pdf', 'application/pdf');
// });
});
} else {
// On a browser simply use download!
this.pdfObj.download();
}
}
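// Aside (illustrative, not part of the original file): pdfmake's getBuffer can hand back a
// Node-style Buffer viewing a larger ArrayBuffer; wrapping it in a Uint8Array, as above,
// normalizes it before the Blob is built. A defensive variant that slices to the exact
// byte range would be:
//   const bytes = new Uint8Array(buffer);
//   const blob = new Blob(
//     [bytes.buffer.slice(bytes.byteOffset, bytes.byteOffset + bytes.byteLength)],
//     { type: 'application/pdf' });
// Note also that share() above fires before writeFile's promise resolves; chaining the
// share call inside writeFile's .then() would avoid racing the partially written file.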
}
| {
const last = new Date(new Date().getFullYear(), 11, 31);
this.max = this.datePipe.transform(last, 'yyyy');
this.storage.get('COM').then((val) => {
this.company = val;
});
storage.get('currency').then((val) => {
if (val !== null) {
this.currency = val.toString();
debugger
}
});
this.salesvalue = this.databaseservice.getSalesByDayValue();
this.day = this.datePipe.transform(this.day, 'MMM yyyy h:mm a');
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
this.start = first1;
this.end = last1;
// alert('start: ' + this.start + '\n' + this.end);
this.getData();
} | identifier_body |
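The constructor body filled in above leans on a JavaScript Date quirk: day 0 of month n + 1 resolves to the last day of month n. The same month-boundary computation as a small, self-contained sketch:

import calendar
import datetime

def month_bounds(d: datetime.date):
    # First and last calendar day of d's month.
    first = d.replace(day=1)
    last = d.replace(day=calendar.monthrange(d.year, d.month)[1])
    return first, last

# month_bounds(datetime.date(2020, 2, 15)) -> (2020-02-01, 2020-02-29)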
day.page.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { SyncService } from 'src/app/services/sync.service';
import { LoadingController, Platform } from '@ionic/angular';
import { DatePipe, getLocaleDateTimeFormat } from '@angular/common';
import { DatePicker } from '@ionic-native/date-picker/ngx';
import { Storage } from '@ionic/storage';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { File } from '@ionic-native/file/ngx';
import { DatabaseService} from 'src/app/services/database.service';
import pdfMake from 'pdfmake/build/pdfmake';
import pdfFonts from 'pdfmake/build/vfs_fonts';
import { FileOpener } from '@ionic-native/file-opener/ngx';
// import { lstat } from 'fs';
pdfMake.vfs = pdfFonts.pdfMake.vfs;
@Component({
selector: 'app-day',
templateUrl: './day.page.html',
styleUrls: ['./day.page.scss'],
})
export class DayPage implements OnInit {
style = 'bootstrap';
data = [];
items = [];
total = [];
day = new Date().toString();
start = new Date().setHours(0, 0, 0, 0).toString();
end = new Date().setHours(23, 59, 59, 999).toString();
i = 0;
currency = '';
pdfObj = null;
company = '';
displaystart = '';
displayend = '';
a = 0;
salesvalue = 0;
min = '2020';
max = '';
constructor(
private router: Router,
private syncService: SyncService,
private loadingCtrl: LoadingController,
private datePipe: DatePipe,
private datepicker: DatePicker,
private plt: Platform,
private socialSharing: SocialSharing,
private file: File,
private fileOpener: FileOpener,
private storage: Storage,
private databaseservice: DatabaseService
) {
const last = new Date(new Date().getFullYear(), 11, 31);
this.max = this.datePipe.transform(last, 'yyyy');
this.storage.get('COM').then((val) => {
this.company = val;
});
storage.get('currency').then((val) => {
if (val !== null) {
this.currency = val.toString();
debugger
}
});
this.salesvalue = this.databaseservice.getSalesByDayValue();
this.day = this.datePipe.transform(this.day, 'MMM yyyy h:mm a');
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
this.start = first1;
this.end = last1;
// alert('start: ' + this.start + '\n' + this.end);
this.getData();
}
ngOnInit() {
}
share() {
// this.getData();
this.createPdf();
}
back() {
this.router.navigate(['/menu/reports']);
}
onActivate(event) {
if (event.type === 'click') {
console.log(event.row);
}
}
async getData() {
let loading = await this.loadingCtrl.create();
await loading.present();
let now = new Date().toString();
const a = this.datePipe.transform(now, 'dd MMM yyyy h:mm a');
let start = new Date().setHours(0, 0, 0, 0).toString();
const b = this.datePipe.transform(start, 'dd MMM yyyy h:mm a');
// alert('start:' + this.start + '\ncurrent:' + this.end);
// alert('day:' + this.day)
this.syncService.getTodaysSales(this.start, this.end, this.currency).then((data) => {
this.total = data;
console.log(data);
for (let i = 0; i < this.total.length; i++) {
const data1 = {
day: this.total[i].Date,
sales: this.total[i].Total,
paid: this.total[i].Paid
};
this.data.push(data1);
}
this.data = [...this.data];
loading.dismiss();
// alert('y' + this.total[0].Total + 'p' + this.total[0].Paid);
});
}
selectDate() {
var options={
date: new Date(),
mode: 'date',
androidTheme: this.datepicker.ANDROID_THEMES.THEME_DEVICE_DEFAULT_LIGHT
};
this.datepicker.show(options).then((date) => {
this.day = this.datePipe.transform(date, 'dd MMM yyyy h:mm a');
this.data = [];
this.getData();
// console.log('selected:',this.myDate);
});
}
changeDate() {
//alert('yes' + this.data.length);
//alert('y' + this.start);
if ( this.i === 2 || (this.a === 0 && this.i === this.salesvalue)) {
this.i = 0;
this.a = 1;
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
// const s = new Date(this.day).setHours(0, 0, 0, 0).toString();
// const l = new Date(this.day).setHours(23, 59, 59, 999).toString();
this.start = first1;
this.end = last1;
// this.start = this.datePipe.transform(s, 'dd MMM yyyy h:mm a');
// this.end = this.datePipe.transform(l, 'dd MMM yyyy h:mm a');
// alert('s: ' + first1 + '\nL: ' + last1 );
this.data = [];
this.getData();
}
this.i = this.i + 1;
this.day = this.datePipe.transform(this.day, 'dd MMM yyyy h:mm a');
}
async createPdf() {
let a ;
let x ;
let y ;
let z ;
let left;
let right;
let items = [];
let loading = await this.loadingCtrl.create();
await loading.present();
const itemss = [];
for (let i = 0; i < this.data.length; i++) {
itemss.push(
[
{ text: this.data[i].day.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].sales.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].paid.toString(), fontSize: 18, color: '#000000' }
]
);
}
left = {
text: [
{ text: this.company, bold: true, fontSize: 20, alignment: 'left'},
]
};
right = {
text: [
{ text: this.datePipe.transform(new Date(), 'dd MMM yyyy') , color: '#000000' , fontSize: 18, alignment: 'right'},
]
};
var docDefinition = {
pageSize: 'A4',
pageMargins: [ 20, 20, 20, 20 ],
content: [
{ text: 'SALES BY DAY', bold: true, alignment: 'center', fontSize: 25, style: 'subheader'},
{ text: this.displaystart + ' - ' + this.displayend, bold: true, alignment: 'center', fontSize: 20, style: 'subheader'},
// { margin: [0, 10, 0, 0],
// text: 'CUSTOMER STATEMENT', style: 'header', fontSize: 25, alignment: 'left', color: '#ff0000' },
{
margin: [0, 10, 0, 0],
canvas:
[
{
type: 'line',
x1: 0, y1: 0,
x2: 555, y2: 0,
lineWidth: 3
},
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [ 300, 250 ],
body: [
[
left, right
],
],
},
layout: 'noBorders'
},
{
margin: [0, 20, 0, 0],
text: [
{ text: 'Day ' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Total' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Paid' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [220, 160, 200 ],
body: itemss
},
layout: 'noBorders'
},
],
styles: {
header: {
fontSize: 18,
bold: true,
},
subheader: {
fontSize: 10,
bold: true,
// margin: [0, 15, 0, 0],
color: '#000000'
},
story: {
italic: true,
alignment: 'center',
width: '50%',
},
backgroundcolor: ''
}
};
this.pdfObj = pdfMake.createPdf(docDefinition);
this.downloadPdf(loading);
// });
}
ionViewWillLeave() {
this.databaseservice.setSalesByDayValue(2);
}
downloadPdf(loading) {
debugger
if (this.plt.is('cordova')) {
this.pdfObj.getBuffer((buffer) => {
// var blob = new Blob([buffer], { type: 'application/pdf' });
var utf8 = new Uint8Array(buffer);
var binaryArray = utf8.buffer;
var blob = new Blob([binaryArray], { type: 'application/pdf' });
const a = new Date().getTime().toString();
this.file.writeFile(this.file.dataDirectory, a + '.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + a + '.pdf', 'application/pdf');
// loading.dismiss();
});
this.socialSharing.share("test", null, this.file.dataDirectory + a + '.pdf', null).then(() => {
loading.dismiss();
console.log('social share');
console.log(this.file.dataDirectory);
}).catch(() => {
});
debugger
// Save the PDF to the data Directory of our App
// this.file.writeFile(this.file.dataDirectory, 'Invoice4.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + 'Invoice.pdf', 'application/pdf');
// });
});
} else |
}
}
| {
// On a browser simply use download!
this.pdfObj.download();
} | conditional_block |
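Each sample in this dump reassembles as prefix + middle + suffix; for a conditional_block row like the one above, the middle is the branch body the model is asked to produce. A reconstruction sketch (column names assumed from the dump's header row):

def reassemble(row: dict) -> str:
    # Rebuild the original source file from one fill-in-the-middle sample.
    return row['prefix'] + row['middle'] + row['suffix']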
day.page.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { SyncService } from 'src/app/services/sync.service';
import { LoadingController, Platform } from '@ionic/angular';
import { DatePipe, getLocaleDateTimeFormat } from '@angular/common';
import { DatePicker } from '@ionic-native/date-picker/ngx';
import { Storage } from '@ionic/storage';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { File } from '@ionic-native/file/ngx';
import { DatabaseService} from 'src/app/services/database.service';
import pdfMake from 'pdfmake/build/pdfmake';
import pdfFonts from 'pdfmake/build/vfs_fonts';
import { FileOpener } from '@ionic-native/file-opener/ngx';
// import { lstat } from 'fs';
pdfMake.vfs = pdfFonts.pdfMake.vfs;
@Component({
selector: 'app-day',
templateUrl: './day.page.html',
styleUrls: ['./day.page.scss'],
})
export class DayPage implements OnInit {
style = 'bootstrap';
data = [];
items = [];
total = [];
day = new Date().toString();
start = new Date().setHours(0, 0, 0, 0).toString();
end = new Date().setHours(23, 59, 59, 999).toString();
i = 0;
currency = '';
pdfObj = null;
company = '';
displaystart = '';
displayend = '';
a = 0;
salesvalue = 0;
min = '2020';
max = '';
constructor(
private router: Router,
private syncService: SyncService,
private loadingCtrl: LoadingController,
private datePipe: DatePipe,
private datepicker: DatePicker,
private plt: Platform,
private socialSharing: SocialSharing,
private file: File,
private fileOpener: FileOpener,
private storage: Storage,
private databaseservice: DatabaseService
) {
const last = new Date(new Date().getFullYear(), 11, 31);
this.max = this.datePipe.transform(last, 'yyyy');
this.storage.get('COM').then((val) => {
this.company = val;
});
storage.get('currency').then((val) => {
if (val !== null) {
this.currency = val.toString();
debugger
}
});
this.salesvalue = this.databaseservice.getSalesByDayValue();
this.day = this.datePipe.transform(this.day, 'MMM yyyy h:mm a');
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
this.start = first1;
this.end = last1;
// alert('start: ' + this.start + '\n' + this.end);
this.getData();
}
ngOnInit() {
}
share() {
// this.getData();
this.createPdf();
}
back() {
this.router.navigate(['/menu/reports']);
}
onActivate(event) {
if (event.type === 'click') {
console.log(event.row);
}
}
async getData() {
let loading = await this.loadingCtrl.create();
await loading.present();
let now = new Date().toString();
const a = this.datePipe.transform(now, 'dd MMM yyyy h:mm a');
let start = new Date().setHours(0, 0, 0, 0).toString();
const b = this.datePipe.transform(start, 'dd MMM yyyy h:mm a');
// alert('start:' + this.start + '\ncurrent:' + this.end);
// alert('day:' + this.day)
this.syncService.getTodaysSales(this.start, this.end, this.currency).then((data) => {
this.total = data;
console.log(data);
for (let i = 0; i < this.total.length; i++) {
const data1 = {
day: this.total[i].Date,
sales: this.total[i].Total,
paid: this.total[i].Paid
};
this.data.push(data1);
}
this.data = [...this.data];
loading.dismiss();
// alert('y' + this.total[0].Total + 'p' + this.total[0].Paid);
});
}
selectDate() {
var options={
date: new Date(),
mode: 'date',
androidTheme: this.datepicker.ANDROID_THEMES.THEME_DEVICE_DEFAULT_LIGHT
};
this.datepicker.show(options).then((date) => {
this.day = this.datePipe.transform(date, 'dd MMM yyyy h:mm a');
this.data = [];
this.getData();
// console.log('selected:',this.myDate);
});
}
changeDate() {
//alert('yes' + this.data.length);
//alert('y' + this.start);
if ( this.i === 2 || (this.a === 0 && this.i === this.salesvalue)) {
this.i = 0;
this.a = 1;
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
// const s = new Date(this.day).setHours(0, 0, 0, 0).toString();
// const l = new Date(this.day).setHours(23, 59, 59, 999).toString();
this.start = first1;
this.end = last1;
// this.start = this.datePipe.transform(s, 'dd MMM yyyy h:mm a');
// this.end = this.datePipe.transform(l, 'dd MMM yyyy h:mm a');
// alert('s: ' + first1 + '\nL: ' + last1 );
this.data = [];
this.getData();
}
this.i = this.i + 1;
this.day = this.datePipe.transform(this.day, 'dd MMM yyyy h:mm a');
}
async createPdf() {
let a ;
let x ;
let y ;
let z ;
let left;
let right;
let items = [];
let loading = await this.loadingCtrl.create();
await loading.present();
const itemss = [];
for (let i = 0; i < this.data.length; i++) {
itemss.push(
[
{ text: this.data[i].day.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].sales.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].paid.toString(), fontSize: 18, color: '#000000' }
]
);
}
left = {
text: [
{ text: this.company, bold: true, fontSize: 20, alignment: 'left'},
]
};
right = {
text: [
{ text: this.datePipe.transform(new Date(), 'dd MMM yyyy') , color: '#000000' , fontSize: 18, alignment: 'right'},
]
};
var docDefinition = {
pageSize: 'A4',
pageMargins: [ 20, 20, 20, 20 ],
content: [
{ text: 'SALES BY DAY', bold: true, alignment: 'center', fontSize: 25, style: 'subheader'},
{ text: this.displaystart + ' - ' + this.displayend, bold: true, alignment: 'center', fontSize: 20, style: 'subheader'},
// { margin: [0, 10, 0, 0],
// text: 'CUSTOMER STATEMENT', style: 'header', fontSize: 25, alignment: 'left', color: '#ff0000' },
{
margin: [0, 10, 0, 0],
canvas:
[
{
type: 'line',
x1: 0, y1: 0,
x2: 555, y2: 0,
lineWidth: 3
},
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [ 300, 250 ],
body: [
[
left, right
],
],
},
layout: 'noBorders'
},
{
margin: [0, 20, 0, 0],
text: [
{ text: 'Day ' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Total' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Paid' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [220, 160, 200 ],
body: itemss
},
layout: 'noBorders'
},
],
styles: {
header: {
fontSize: 18,
bold: true,
},
subheader: {
fontSize: 10,
bold: true,
// margin: [0, 15, 0, 0],
color: '#000000'
},
story: {
italic: true,
alignment: 'center',
width: '50%',
},
backgroundcolor: ''
}
};
this.pdfObj = pdfMake.createPdf(docDefinition);
this.downloadPdf(loading);
// });
}
ionViewWillLeave() {
this.databaseservice.setSalesByDayValue(2);
}
downloadPdf(loading) {
debugger
if (this.plt.is('cordova')) {
this.pdfObj.getBuffer((buffer) => {
// var blob = new Blob([buffer], { type: 'application/pdf' });
var utf8 = new Uint8Array(buffer);
var binaryArray = utf8.buffer;
var blob = new Blob([binaryArray], { type: 'application/pdf' });
const a = new Date().getTime().toString();
this.file.writeFile(this.file.dataDirectory, a + '.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + a + '.pdf', 'application/pdf');
// loading.dismiss();
});
this.socialSharing.share("test", null, this.file.dataDirectory + a + '.pdf', null).then(() => {
loading.dismiss();
console.log('social share');
console.log(this.file.dataDirectory);
}).catch(() => { | // this.file.writeFile(this.file.dataDirectory, 'Invoice4.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + 'Invoice.pdf', 'application/pdf');
// });
});
} else {
// On a browser simply use download!
this.pdfObj.download();
}
}
} |
});
debugger
// Save the PDF to the data Directory of our App | random_line_split |
day.page.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { SyncService } from 'src/app/services/sync.service';
import { LoadingController, Platform } from '@ionic/angular';
import { DatePipe, getLocaleDateTimeFormat } from '@angular/common';
import { DatePicker } from '@ionic-native/date-picker/ngx';
import { Storage } from '@ionic/storage';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { File } from '@ionic-native/file/ngx';
import { DatabaseService} from 'src/app/services/database.service';
import pdfMake from 'pdfmake/build/pdfmake';
import pdfFonts from 'pdfmake/build/vfs_fonts';
import { FileOpener } from '@ionic-native/file-opener/ngx';
// import { lstat } from 'fs';
pdfMake.vfs = pdfFonts.pdfMake.vfs;
@Component({
selector: 'app-day',
templateUrl: './day.page.html',
styleUrls: ['./day.page.scss'],
})
export class | implements OnInit {
style = 'bootstrap';
data = [];
items = [];
total = [];
day = new Date().toString();
start = new Date().setHours(0, 0, 0, 0).toString();
end = new Date().setHours(23, 59, 59, 999).toString();
i = 0;
currency = '';
pdfObj = null;
company = '';
displaystart = '';
displayend = '';
a = 0;
salesvalue = 0;
min = '2020';
max = '';
constructor(
private router: Router,
private syncService: SyncService,
private loadingCtrl: LoadingController,
private datePipe: DatePipe,
private datepicker: DatePicker,
private plt: Platform,
private socialSharing: SocialSharing,
private file: File,
private fileOpener: FileOpener,
private storage: Storage,
private databaseservice: DatabaseService
) {
const last = new Date(new Date().getFullYear(), 11, 31);
this.max = this.datePipe.transform(last, 'yyyy');
this.storage.get('COM').then((val) => {
this.company = val;
});
storage.get('currency').then((val) => {
if (val !== null) {
this.currency = val.toString();
debugger
}
});
this.salesvalue = this.databaseservice.getSalesByDayValue();
this.day = this.datePipe.transform(this.day, 'MMM yyyy h:mm a');
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
this.start = first1;
this.end = last1;
// alert('start: ' + this.start + '\n' + this.end);
this.getData();
}
ngOnInit() {
}
share() {
// this.getData();
this.createPdf();
}
back() {
this.router.navigate(['/menu/reports']);
}
onActivate(event) {
if (event.type === 'click') {
console.log(event.row);
}
}
async getData() {
let loading = await this.loadingCtrl.create();
await loading.present();
let now = new Date().toString();
const a = this.datePipe.transform(now, 'dd MMM yyyy h:mm a');
let start = new Date().setHours(0, 0, 0, 0).toString();
const b = this.datePipe.transform(start, 'dd MMM yyyy h:mm a');
// alert('start:' + this.start + '\ncurrent:' + this.end);
// alert('day:' + this.day)
this.syncService.getTodaysSales(this.start, this.end, this.currency).then((data) => {
this.total = data;
console.log(data);
for (let i = 0; i < this.total.length; i++) {
const data1 = {
day: this.total[i].Date,
sales: this.total[i].Total,
paid: this.total[i].Paid
};
this.data.push(data1);
}
this.data = [...this.data];
loading.dismiss();
// alert('y' + this.total[0].Total + 'p' + this.total[0].Paid);
});
}
selectDate() {
var options={
date: new Date(),
mode: 'date',
androidTheme: this.datepicker.ANDROID_THEMES.THEME_DEVICE_DEFAULT_LIGHT
};
this.datepicker.show(options).then((date) => {
this.day = this.datePipe.transform(date, 'dd MMM yyyy h:mm a');
this.data = [];
this.getData();
// console.log('selected:',this.myDate);
});
}
changeDate() {
//alert('yes' + this.data.length);
//alert('y' + this.start);
if ( this.i === 2 || (this.a === 0 && this.i === this.salesvalue)) {
this.i = 0;
this.a = 1;
const firstdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth(), 1).setHours(0, 0, 0, 0);
const first1 = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy h:mm a');
const lastdayofmonth = new Date(new Date(this.day).getFullYear(), new Date(this.day).getMonth() + 1, 0).setHours(23, 59, 59, 999);
const last1 = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy h:mm a');
this.displaystart = this.datePipe.transform(firstdayofmonth, 'dd MMM yyyy');
this.displayend = this.datePipe.transform(lastdayofmonth, 'dd MMM yyyy');
// const s = new Date(this.day).setHours(0, 0, 0, 0).toString();
// const l = new Date(this.day).setHours(23, 59, 59, 999).toString();
this.start = first1;
this.end = last1;
// this.start = this.datePipe.transform(s, 'dd MMM yyyy h:mm a');
// this.end = this.datePipe.transform(l, 'dd MMM yyyy h:mm a');
// alert('s: ' + first1 + '\nL: ' + last1 );
this.data = [];
this.getData();
}
this.i = this.i + 1;
this.day = this.datePipe.transform(this.day, 'dd MMM yyyy h:mm a');
}
async createPdf() {
let a ;
let x ;
let y ;
let z ;
let left;
let right;
let items = [];
let loading = await this.loadingCtrl.create();
await loading.present();
const itemss = [];
for (let i = 0; i < this.data.length; i++) {
itemss.push(
[
{ text: this.data[i].day.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].sales.toString() , fontSize: 18, color: '#000000' },
{ text: this.data[i].paid.toString(), fontSize: 18, color: '#000000' }
]
);
}
left = {
text: [
{ text: this.company, bold: true, fontSize: 20, alignment: 'left'},
]
};
right = {
text: [
{ text: this.datePipe.transform(new Date(), 'dd MMM yyyy') , color: '#000000' , fontSize: 18, alignment: 'right'},
]
};
var docDefinition = {
pageSize: 'A4',
pageMargins: [ 20, 20, 20, 20 ],
content: [
{ text: 'SALES BY DAY', bold: true, alignment: 'center', fontSize: 25, style: 'subheader'},
{ text: this.displaystart + ' - ' + this.displayend, bold: true, alignment: 'center', fontSize: 20, style: 'subheader'},
// { margin: [0, 10, 0, 0],
// text: 'CUSTOMER STATEMENT', style: 'header', fontSize: 25, alignment: 'left', color: '#ff0000' },
{
margin: [0, 10, 0, 0],
canvas:
[
{
type: 'line',
x1: 0, y1: 0,
x2: 555, y2: 0,
lineWidth: 3
},
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [ 300, 250 ],
body: [
[
left, right
],
],
},
layout: 'noBorders'
},
{
margin: [0, 20, 0, 0],
text: [
{ text: 'Day ' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Total' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
{ text: ' Paid' , style: 'subheader', bold: true, fontSize: 20, color: '#000000' },
]
},
{
margin: [0, 5, 0, 0],
style: 'totalsTable',
table: {
widths: [220, 160, 200 ],
body: itemss
},
layout: 'noBorders'
},
],
styles: {
header: {
fontSize: 18,
bold: true,
},
subheader: {
fontSize: 10,
bold: true,
// margin: [0, 15, 0, 0],
color: '#000000'
},
story: {
italic: true,
alignment: 'center',
width: '50%',
},
backgroundcolor: ''
}
};
this.pdfObj = pdfMake.createPdf(docDefinition);
this.downloadPdf(loading);
// });
}
ionViewWillLeave() {
this.databaseservice.setSalesByDayValue(2);
}
downloadPdf(loading) {
debugger
if (this.plt.is('cordova')) {
this.pdfObj.getBuffer((buffer) => {
// var blob = new Blob([buffer], { type: 'application/pdf' });
var utf8 = new Uint8Array(buffer);
var binaryArray = utf8.buffer;
var blob = new Blob([binaryArray], { type: 'application/pdf' });
const a = new Date().getTime().toString();
this.file.writeFile(this.file.dataDirectory, a + '.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + a + '.pdf', 'application/pdf');
// loading.dismiss();
});
this.socialSharing.share("test", null, this.file.dataDirectory + a + '.pdf', null).then(() => {
loading.dismiss();
console.log('social share');
console.log(this.file.dataDirectory);
}).catch(() => {
});
debugger
// Save the PDF to the data Directory of our App
// this.file.writeFile(this.file.dataDirectory, 'Invoice4.pdf', blob).then(fileEntry => {
// this.fileOpener.open(this.file.dataDirectory + 'Invoice.pdf', 'application/pdf');
// });
});
} else {
// On a browser simply use download!
this.pdfObj.download();
}
}
}
| DayPage | identifier_name |
models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import yaml
import nn_utils
from sys_utils import _single_instance_logger as logger
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def autopad(kernel, padding=None): # kernel, padding
# Pad to 'same'
if padding is None:
padding = kernel // 2 if isinstance(kernel, int) else [x // 2 for x in kernel] # auto-pad
return padding
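# Sanity checks for the two helpers (values follow directly from the formulas):
assert autopad(3) == 1 and autopad(5) == 2  # 'same' padding for stride-1 convs
assert make_divisible(90, 8) == 96          # channel counts rounded up to a multiple of 8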
class Conv(nn.Module):
'''
Standard convolution layer: Conv2d + BatchNorm2d + LeakyReLU.
'''
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Conv, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, autopad(kernel_size, padding), groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_channel)
self.act = nn.LeakyReLU(0.1, inplace=True) if activation else nn.Identity()
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuse_forward(self, x):
# Forward pass after fusion: BatchNorm has been folded into the conv weights
return self.act(self.conv(x))
class Bottleneck(nn.Module):
'''
Standard bottleneck layer: 1x1 reduce, 3x3 conv, optional residual add.
'''
def __init__(self, in_channel, out_channel, shortcut=True, groups=1, expansion=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel, out_channel, 3, 1, groups=groups)
self.add = shortcut and in_channel == out_channel
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, in_channel, out_channel, repeats=1, shortcut=True, groups=1, expansion=0.5):
super(BottleneckCSP, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = nn.Conv2d(in_channel, hidden_channel, 1, 1, bias=False)
self.cv3 = nn.Conv2d(hidden_channel, hidden_channel, 1, 1, bias=False)
self.cv4 = Conv(2 * hidden_channel, out_channel, 1, 1)
self.bn = nn.BatchNorm2d(2 * hidden_channel) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(hidden_channel, hidden_channel, shortcut, groups, expansion=1.0) for _ in range(repeats)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, in_channel, out_channel, kernel_size_list=(5, 9, 13)):
super(SPP, self).__init__()
hidden_channel = in_channel // 2 # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel * (len(kernel_size_list) + 1), out_channel, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2) for kernel_size in kernel_size_list])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], dim=1))
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Focus, self).__init__()
self.conv = Conv(in_channel * 4, out_channel, kernel_size, stride, padding, groups, activation)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], dim=1))
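# Note: the four interleaved slices above perform space-to-depth -- (b, c, h, w) becomes
# (b, 4c, h/2, w/2) with no information loss, e.g. 3x640x640 -> 12x320x320 -- so the
# following convolution sees a half-resolution, channel-rich input.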
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, dim=self.d)
class Detect(nn.Module):
def __init__(self, num_classes, num_anchor, reference_channels):
super(Detect, self).__init__()
self.num_anchor = num_anchor
self.num_classes = num_classes
self.num_output = self.num_classes + 5
self.m = nn.ModuleList(nn.Conv2d(input_channel, self.num_output * self.num_anchor, 1) for input_channel in reference_channels)
self.init_weight()
def forward(self, x):
for ilevel, module in enumerate(self.m):
x[ilevel] = module(x[ilevel])
return x
def init_weight(self):
strides = [8, 16, 32]
for head, stride in zip(self.m, strides):
bias = head.bias.view(self.num_anchor, -1)
bias[:, 4] += math.log(8 / (640 / stride) ** 2)
bias[:, 5:] += math.log(0.6 / (self.num_classes - 0.99))
head.bias = nn.Parameter(bias.view(-1), requires_grad=True)
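# The bias shifts above seed detection priors in the RetinaNet style: the objectness
# logit assumes roughly 8 objects per 640x640 image spread over (640/stride)^2 cells,
# and each class logit starts near log(0.6 / (num_classes - 0.99)), so early training
# is not dominated by the overwhelming background cells.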
class Yolo(nn.Module):
def __init__(self, num_classes, config_file, rank=0):
super(Yolo, self).__init__()
self.num_classes = num_classes
self.rank = rank
self.strides = [8, 16, 32]
self.model, self.saved_index, anchors = self.build_model(config_file)
self.register_buffer("anchors", torch.FloatTensor(anchors).view(3, 3, 2) / torch.FloatTensor(self.strides).view(3, 1, 1))
self.apply(self.init_weight)
def set_new_anchors(self, anchors):
# Rescale the given anchors (in pixels) to feature-map units by dividing by stride
self.anchors[...] = anchors / torch.FloatTensor(self.strides).view(3, 1, 1)
def init_weight(self, m):
type_t = type(m)
if type_t is nn.Conv2d:
# pass init
pass
elif type_t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif type_t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def forward(self, x):
y = []
for module in self.model:
if module.from_index != -1:
if isinstance(module.from_index, int):
x = y[module.from_index]
else:
xout = []
for i in module.from_index:
if i == -1:
xval = x
else:
xval = y[i]
xout.append(xval)
x = xout
x = module(x)
y.append(x if module.layer_index in self.saved_index else None)
return x
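# Routing note: a module with from_index == -1 consumes the previous output x; any other
# index reads an earlier layer's output from y. y retains only outputs listed in
# saved_index (i.e. referenced by some later layer), keeping the cache small.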
def parse_string(self, value):
if value == "None":
return None
elif value == "True":
return True
elif value == "False":
return False
else:
return value
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ', end='')
for m in self.model.modules():
if type(m) is Conv:
m.conv = nn_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv
m.bn = None # remove batchnorm
m.forward = m.fuse_forward # update forward
return self
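# What fuse_conv_and_bn computes (standard conv+BN folding): with s = gamma / sqrt(var + eps),
# the fused kernel is W' = s * W (scaled per output channel) and the fused bias is
# b' = beta + s * (b - mean), so fuse_forward can skip BatchNorm at inference time.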
def build_model(self, config_file, input_channel=3):
with open(config_file) as f:
self.yaml = yaml.load(f, Loader=yaml.FullLoader)
all_layers_cfg_list = self.yaml["backbone"] + self.yaml["head"]
anchors, depth_multiple, width_multiple = [self.yaml[item] for item in ["anchors", "depth_multiple", "width_multiple"]]
num_classes = self.num_classes
num_anchor = len(anchors[0]) // 2
num_output = num_anchor * (num_classes + 5) | args = [self.parse_string(a) for a in args]
module_function = eval(module_name)
if repeat_count > 1:
repeat_count = max(round(repeat_count * depth_multiple), 1)
if module_function in [Conv, Bottleneck, SPP, Focus, BottleneckCSP]:
channel_input, channel_output = all_layers_channels[from_index], args[0]
if channel_output != num_output:
channel_output = make_divisible(channel_output * width_multiple, 8)
args = [channel_input, channel_output, *args[1:]]
if module_function in [BottleneckCSP]:
args.insert(2, repeat_count)
repeat_count = 1
elif module_function is Concat:
channel_output = sum([all_layers_channels[-1 if x == -1 else x + 1] for x in from_index])
elif module_function is Detect:
reference_channel = [all_layers_channels[x + 1] for x in from_index]
args = [num_classes, num_anchor, reference_channel]
else:
channel_output = all_layers_channels[from_index]
if repeat_count > 1:
module_instance = nn.ModuleList([
module_function(*args) for _ in range(repeat_count)
])
else:
module_instance = module_function(*args)
module_instance.from_index = from_index
module_instance.layer_index = layer_index
all_layers.append(module_instance)
all_layers_channels.append(channel_output)
if not isinstance(from_index, list):
from_index = [from_index]
saved_layer_index.extend(filter(lambda x: x!=-1, from_index))
num_params = sum([x.numel() for x in module_instance.parameters()])
if self.rank == 0:
align_format = "%6s %-15s %-7s %-10s %-18s %-30s"
if layer_index == 0:
logger.info(align_format % ("Index", "From", "Repeats", "Param", "Module", "Arguments"))
format_vals = (
"%d." % layer_index,
str(from_index),
str(repeat_count),
"%d" % num_params,
module_name,
str(args)
)
logger.info(align_format % format_vals)
return nn.Sequential(*all_layers), sorted(saved_layer_index), anchors
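# Illustrative (hypothetical) minimal config in the format this parser expects -- each row
# is [from_index, repeat_count, module_name, args]; Detect's args are rebuilt by the parser:
#
#   anchors: [[10,13, 16,30, 33,23], [30,61, 62,45, 59,119], [116,90, 156,198, 373,326]]
#   depth_multiple: 0.33
#   width_multiple: 0.50
#   backbone:
#     [[-1, 1, Focus, [64, 3]],
#      [-1, 1, Conv, [128, 3, 2]]]
#   head:
#     [[[-1], 1, Detect, [nc, anchors]]]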
if __name__ == "__main__":
import nn_utils
nn_utils.setup_seed(3)
device = "cuda:0"
model = Yolo(20, "/datav/wish/yolov5/models/yolov5s.yaml").to(device)
model.fuse()
checkpoint = torch.load("/datav/wish/yolov5-2.0/test.pt", map_location="cpu")
checkpoint['anchors'] = checkpoint['model.24.anchors']
del checkpoint['model.24.anchors']
del checkpoint['model.24.anchor_grid']
model.load_state_dict(checkpoint)
print("Done")
# weight = "/datav/wish/yolov5/weights/yolov5m.pt"
# import pickle
# with open(weight, "rb") as f:
# p = pickle.Unpickler(f, fix_imports=False)
# value = p.load()
# print(value)
#check_point = torch.load(weight, map_location="cpu", fix_imports=False)
#print(check_point)
#input = torch.zeros((1, 3, 640, 640))
#output = model(input)
#print(output[0].shape) | all_layers_channels = [input_channel]
all_layers = []
saved_layer_index = []
for layer_index, (from_index, repeat_count, module_name, args) in enumerate(all_layers_cfg_list): | random_line_split |
models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import yaml
import nn_utils
from sys_utils import _single_instance_logger as logger
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
|
def autopad(kernel, padding=None): # kernel, padding
# Pad to 'same'
if padding is None:
padding = kernel // 2 if isinstance(kernel, int) else [x // 2 for x in kernel] # auto-pad
return padding
class Conv(nn.Module):
'''
Standard convolution layer: Conv2d + BatchNorm2d + LeakyReLU.
'''
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Conv, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, autopad(kernel_size, padding), groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_channel)
self.act = nn.LeakyReLU(0.1, inplace=True) if activation else nn.Identity()
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuse_forward(self, x):
# Forward pass after fusion: BatchNorm has been folded into the conv weights
return self.act(self.conv(x))
class Bottleneck(nn.Module):
'''
Standard bottleneck layer: 1x1 reduce, 3x3 conv, optional residual add.
'''
def __init__(self, in_channel, out_channel, shortcut=True, groups=1, expansion=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel, out_channel, 3, 1, groups=groups)
self.add = shortcut and in_channel == out_channel
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, in_channel, out_channel, repeats=1, shortcut=True, groups=1, expansion=0.5):
super(BottleneckCSP, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = nn.Conv2d(in_channel, hidden_channel, 1, 1, bias=False)
self.cv3 = nn.Conv2d(hidden_channel, hidden_channel, 1, 1, bias=False)
self.cv4 = Conv(2 * hidden_channel, out_channel, 1, 1)
self.bn = nn.BatchNorm2d(2 * hidden_channel) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(hidden_channel, hidden_channel, shortcut, groups, expansion=1.0) for _ in range(repeats)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, in_channel, out_channel, kernel_size_list=(5, 9, 13)):
super(SPP, self).__init__()
hidden_channel = in_channel // 2 # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel * (len(kernel_size_list) + 1), out_channel, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2) for kernel_size in kernel_size_list])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], dim=1))
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Focus, self).__init__()
self.conv = Conv(in_channel * 4, out_channel, kernel_size, stride, padding, groups, activation)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], dim=1))
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, dim=self.d)
class Detect(nn.Module):
def __init__(self, num_classes, num_anchor, reference_channels):
super(Detect, self).__init__()
self.num_anchor = num_anchor
self.num_classes = num_classes
self.num_output = self.num_classes + 5
self.m = nn.ModuleList(nn.Conv2d(input_channel, self.num_output * self.num_anchor, 1) for input_channel in reference_channels)
self.init_weight()
def forward(self, x):
for ilevel, module in enumerate(self.m):
x[ilevel] = module(x[ilevel])
return x
def init_weight(self):
strides = [8, 16, 32]
for head, stride in zip(self.m, strides):
bias = head.bias.view(self.num_anchor, -1)
bias[:, 4] += math.log(8 / (640 / stride) ** 2)
bias[:, 5:] += math.log(0.6 / (self.num_classes - 0.99))
head.bias = nn.Parameter(bias.view(-1), requires_grad=True)
class Yolo(nn.Module):
def __init__(self, num_classes, config_file, rank=0):
super(Yolo, self).__init__()
self.num_classes = num_classes
self.rank = rank
self.strides = [8, 16, 32]
self.model, self.saved_index, anchors = self.build_model(config_file)
self.register_buffer("anchors", torch.FloatTensor(anchors).view(3, 3, 2) / torch.FloatTensor(self.strides).view(3, 1, 1))
self.apply(self.init_weight)
def set_new_anchors(self, anchors):
# Rescale the given anchors (in pixels) to feature-map units by dividing by stride
self.anchors[...] = anchors / torch.FloatTensor(self.strides).view(3, 1, 1)
def init_weight(self, m):
type_t = type(m)
if type_t is nn.Conv2d:
# pass init
pass
elif type_t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif type_t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def forward(self, x):
y = []
for module in self.model:
if module.from_index != -1:
if isinstance(module.from_index, int):
x = y[module.from_index]
else:
xout = []
for i in module.from_index:
if i == -1:
xval = x
else:
xval = y[i]
xout.append(xval)
x = xout
x = module(x)
y.append(x if module.layer_index in self.saved_index else None)
return x
def parse_string(self, value):
if value == "None":
return None
elif value == "True":
return True
elif value == "False":
return False
else:
return value
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ', end='')
for m in self.model.modules():
if type(m) is Conv:
m.conv = nn_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv
m.bn = None # remove batchnorm
m.forward = m.fuse_forward # update forward
return self
def build_model(self, config_file, input_channel=3):
with open(config_file) as f:
self.yaml = yaml.load(f, Loader=yaml.FullLoader)
all_layers_cfg_list = self.yaml["backbone"] + self.yaml["head"]
anchors, depth_multiple, width_multiple = [self.yaml[item] for item in ["anchors", "depth_multiple", "width_multiple"]]
num_classes = self.num_classes
num_anchor = len(anchors[0]) // 2
num_output = num_anchor * (num_classes + 5)
all_layers_channels = [input_channel]
all_layers = []
saved_layer_index = []
for layer_index, (from_index, repeat_count, module_name, args) in enumerate(all_layers_cfg_list):
args = [self.parse_string(a) for a in args]
module_function = eval(module_name)
if repeat_count > 1:
repeat_count = max(round(repeat_count * depth_multiple), 1)
if module_function in [Conv, Bottleneck, SPP, Focus, BottleneckCSP]:
channel_input, channel_output = all_layers_channels[from_index], args[0]
if channel_output != num_output:
channel_output = make_divisible(channel_output * width_multiple, 8)
args = [channel_input, channel_output, *args[1:]]
if module_function in [BottleneckCSP]:
args.insert(2, repeat_count)
repeat_count = 1
elif module_function is Concat:
channel_output = sum([all_layers_channels[-1 if x == -1 else x + 1] for x in from_index])
elif module_function is Detect:
reference_channel = [all_layers_channels[x + 1] for x in from_index]
args = [num_classes, num_anchor, reference_channel]
else:
channel_output = all_layers_channels[from_index]
if repeat_count > 1:
module_instance = nn.ModuleList([
module_function(*args) for _ in range(repeat_count)
])
else:
module_instance = module_function(*args)
module_instance.from_index = from_index
module_instance.layer_index = layer_index
all_layers.append(module_instance)
all_layers_channels.append(channel_output)
if not isinstance(from_index, list):
from_index = [from_index]
saved_layer_index.extend(filter(lambda x: x!=-1, from_index))
num_params = sum([x.numel() for x in module_instance.parameters()])
if self.rank == 0:
align_format = "%6s %-15s %-7s %-10s %-18s %-30s"
if layer_index == 0:
logger.info(align_format % ("Index", "From", "Repeats", "Param", "Module", "Arguments"))
format_vals = (
"%d." % layer_index,
str(from_index),
str(repeat_count),
"%d" % num_params,
module_name,
str(args)
)
logger.info(align_format % format_vals)
return nn.Sequential(*all_layers), sorted(saved_layer_index), anchors
if __name__ == "__main__":
import nn_utils
nn_utils.setup_seed(3)
device = "cuda:0"
model = Yolo(20, "/datav/wish/yolov5/models/yolov5s.yaml").to(device)
model.fuse()
checkpoint = torch.load("/datav/wish/yolov5-2.0/test.pt", map_location="cpu")
checkpoint['anchors'] = checkpoint['model.24.anchors']
del checkpoint['model.24.anchors']
del checkpoint['model.24.anchor_grid']
model.load_state_dict(checkpoint)
print("Done")
# weight = "/datav/wish/yolov5/weights/yolov5m.pt"
# import pickle
# with open(weight, "rb") as f:
# p = pickle.Unpickler(f, fix_imports=False)
# value = p.load()
# print(value)
#check_point = torch.load(weight, map_location="cpu", fix_imports=False)
#print(check_point)
#input = torch.zeros((1, 3, 640, 640))
#output = model(input)
#print(output[0].shape) | return math.ceil(x / divisor) * divisor | identifier_body |
models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import yaml
import nn_utils
from sys_utils import _single_instance_logger as logger
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def autopad(kernel, padding=None): # kernel, padding
# Pad to 'same'
if padding is None:
padding = kernel // 2 if isinstance(kernel, int) else [x // 2 for x in kernel] # auto-pad
return padding
class Conv(nn.Module):
'''
Standard convolution layer: Conv2d + BatchNorm2d + LeakyReLU.
'''
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Conv, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, autopad(kernel_size, padding), groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_channel)
self.act = nn.LeakyReLU(0.1, inplace=True) if activation else nn.Identity()
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuse_forward(self, x):
# Forward pass after fusion: BatchNorm has been folded into the conv weights
return self.act(self.conv(x))
class Bottleneck(nn.Module):
'''
Standard bottleneck layer: 1x1 reduce, 3x3 conv, optional residual add.
'''
def __init__(self, in_channel, out_channel, shortcut=True, groups=1, expansion=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel, out_channel, 3, 1, groups=groups)
self.add = shortcut and in_channel == out_channel
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, in_channel, out_channel, repeats=1, shortcut=True, groups=1, expansion=0.5):
super(BottleneckCSP, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = nn.Conv2d(in_channel, hidden_channel, 1, 1, bias=False)
self.cv3 = nn.Conv2d(hidden_channel, hidden_channel, 1, 1, bias=False)
self.cv4 = Conv(2 * hidden_channel, out_channel, 1, 1)
self.bn = nn.BatchNorm2d(2 * hidden_channel) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(hidden_channel, hidden_channel, shortcut, groups, expansion=1.0) for _ in range(repeats)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, in_channel, out_channel, kernel_size_list=(5, 9, 13)):
super(SPP, self).__init__()
hidden_channel = in_channel // 2 # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel * (len(kernel_size_list) + 1), out_channel, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2) for kernel_size in kernel_size_list])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], dim=1))
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Focus, self).__init__()
self.conv = Conv(in_channel * 4, out_channel, kernel_size, stride, padding, groups, activation)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], dim=1))
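    # Note (illustrative): the four strided slices above implement a
    # space-to-depth / pixel-unshuffle step, e.g. (1, 3, 640, 640) ->
    # (1, 12, 320, 320), so self.conv sees the whole image at half resolution.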
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, dim=self.d)
class Detect(nn.Module):
def __init__(self, num_classes, num_anchor, reference_channels):
super(Detect, self).__init__()
self.num_anchor = num_anchor
self.num_classes = num_classes
self.num_output = self.num_classes + 5
self.m = nn.ModuleList(nn.Conv2d(input_channel, self.num_output * self.num_anchor, 1) for input_channel in reference_channels)
self.init_weight()
def forward(self, x):
for ilevel, module in enumerate(self.m):
x[ilevel] = module(x[ilevel])
return x
| strides = [8, 16, 32]
for head, stride in zip(self.m, strides):
bias = head.bias.view(self.num_anchor, -1)
bias[:, 4] += math.log(8 / (640 / stride) ** 2)
bias[:, 5:] += math.log(0.6 / (self.num_classes - 0.99))
head.bias = nn.Parameter(bias.view(-1), requires_grad=True)
class Yolo(nn.Module):
def __init__(self, num_classes, config_file, rank=0):
super(Yolo, self).__init__()
self.num_classes = num_classes
self.rank = rank
self.strides = [8, 16, 32]
self.model, self.saved_index, anchors = self.build_model(config_file)
self.register_buffer("anchors", torch.FloatTensor(anchors).view(3, 3, 2) / torch.FloatTensor(self.strides).view(3, 1, 1))
self.apply(self.init_weight)
def set_new_anchors(self, anchors):
        # Scale the given anchors (input-pixel units) down to feature-map units
self.anchors[...] = anchors / torch.FloatTensor(self.strides).view(3, 1, 1)
def init_weight(self, m):
type_t = type(m)
if type_t is nn.Conv2d:
# pass init
pass
elif type_t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif type_t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def forward(self, x):
y = []
for module in self.model:
if module.from_index != -1:
if isinstance(module.from_index, int):
x = y[module.from_index]
else:
xout = []
for i in module.from_index:
if i == -1:
xval = x
else:
xval = y[i]
xout.append(xval)
x = xout
x = module(x)
y.append(x if module.layer_index in self.saved_index else None)
return x
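    # Routing example (hypothetical config entry): a Concat layer declared with
    # from_index=[-1, 6] receives [current output, saved output of layer 6],
    # which is why outputs of referenced layers are kept in y above.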
def parse_string(self, value):
if value == "None":
return None
elif value == "True":
return True
elif value == "False":
return False
else:
return value
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ', end='')
for m in self.model.modules():
if type(m) is Conv:
m.conv = nn_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv
m.bn = None # remove batchnorm
m.forward = m.fuse_forward # update forward
return self
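    # Sketch of what nn_utils.fuse_conv_and_bn is assumed to compute (the real
    # helper lives in nn_utils and may differ in detail):
    #   scale   = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    #   w_fused = conv.weight * scale.reshape(-1, 1, 1, 1)
    #   b_fused = bn.bias - bn.running_mean * scale
    # Folding BN into the conv this way leaves eval-time outputs unchanged.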
def build_model(self, config_file, input_channel=3):
with open(config_file) as f:
self.yaml = yaml.load(f, Loader=yaml.FullLoader)
all_layers_cfg_list = self.yaml["backbone"] + self.yaml["head"]
anchors, depth_multiple, width_multiple = [self.yaml[item] for item in ["anchors", "depth_multiple", "width_multiple"]]
num_classes = self.num_classes
num_anchor = len(anchors[0]) // 2
num_output = num_anchor * (num_classes + 5)
all_layers_channels = [input_channel]
all_layers = []
saved_layer_index = []
for layer_index, (from_index, repeat_count, module_name, args) in enumerate(all_layers_cfg_list):
args = [self.parse_string(a) for a in args]
module_function = eval(module_name)
if repeat_count > 1:
repeat_count = max(round(repeat_count * depth_multiple), 1)
if module_function in [Conv, Bottleneck, SPP, Focus, BottleneckCSP]:
channel_input, channel_output = all_layers_channels[from_index], args[0]
if channel_output != num_output:
channel_output = make_divisible(channel_output * width_multiple, 8)
args = [channel_input, channel_output, *args[1:]]
if module_function in [BottleneckCSP]:
args.insert(2, repeat_count)
repeat_count = 1
elif module_function is Concat:
channel_output = sum([all_layers_channels[-1 if x == -1 else x + 1] for x in from_index])
elif module_function is Detect:
reference_channel = [all_layers_channels[x + 1] for x in from_index]
args = [num_classes, num_anchor, reference_channel]
else:
channel_output = all_layers_channels[from_index]
if repeat_count > 1:
module_instance = nn.ModuleList([
module_function(*args) for _ in range(repeat_count)
])
else:
module_instance = module_function(*args)
module_instance.from_index = from_index
module_instance.layer_index = layer_index
all_layers.append(module_instance)
all_layers_channels.append(channel_output)
if not isinstance(from_index, list):
from_index = [from_index]
saved_layer_index.extend(filter(lambda x: x!=-1, from_index))
num_params = sum([x.numel() for x in module_instance.parameters()])
if self.rank == 0:
align_format = "%6s %-15s %-7s %-10s %-18s %-30s"
if layer_index == 0:
logger.info(align_format % ("Index", "From", "Repeats", "Param", "Module", "Arguments"))
format_vals = (
"%d." % layer_index,
str(from_index),
str(repeat_count),
"%d" % num_params,
module_name,
str(args)
)
logger.info(align_format % format_vals)
return nn.Sequential(*all_layers), sorted(saved_layer_index), anchors
if __name__ == "__main__":
import nn_utils
nn_utils.setup_seed(3)
device = "cuda:0"
model = Yolo(20, "/datav/wish/yolov5/models/yolov5s.yaml").to(device)
model.fuse()
checkpoint = torch.load("/datav/wish/yolov5-2.0/test.pt", map_location="cpu")
checkpoint['anchors'] = checkpoint['model.24.anchors']
del checkpoint['model.24.anchors']
del checkpoint['model.24.anchor_grid']
model.load_state_dict(checkpoint)
print("Done")
# weight = "/datav/wish/yolov5/weights/yolov5m.pt"
# import pickle
# with open(weight, "rb") as f:
# p = pickle.Unpickler(f, fix_imports=False)
# value = p.load()
# print(value)
#check_point = torch.load(weight, map_location="cpu", fix_imports=False)
#print(check_point)
#input = torch.zeros((1, 3, 640, 640))
#output = model(input)
#print(output[0].shape) | def init_weight(self):
| conditional_block |
models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import yaml
import nn_utils
from sys_utils import _single_instance_logger as logger
def make_divisible(x, divisor):
    # Returns x rounded up so that it is evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def autopad(kernel, padding=None): # kernel, padding
# Pad to 'same'
if padding is None:
padding = kernel // 2 if isinstance(kernel, int) else [x // 2 for x in kernel] # auto-pad
return padding
class Conv(nn.Module):
    '''
    Standard convolution layer: Conv2d + BatchNorm2d + activation.
    '''
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Conv, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, autopad(kernel_size, padding), groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_channel)
self.act = nn.LeakyReLU(0.1, inplace=True) if activation else nn.Identity()
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuse_forward(self, x):
        # Forward pass after fusion: the BatchNorm has been folded into the conv
return self.act(self.conv(x))
class Bottleneck(nn.Module):
    '''
    Standard bottleneck layer: 1x1 channel reduction followed by a 3x3 conv,
    with an optional residual shortcut when channel counts match.
    '''
def __init__(self, in_channel, out_channel, shortcut=True, groups=1, expansion=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel, out_channel, 3, 1, groups=groups)
self.add = shortcut and in_channel == out_channel
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, in_channel, out_channel, repeats=1, shortcut=True, groups=1, expansion=0.5):
super(BottleneckCSP, self).__init__()
hidden_channel = int(out_channel * expansion) # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = nn.Conv2d(in_channel, hidden_channel, 1, 1, bias=False)
self.cv3 = nn.Conv2d(hidden_channel, hidden_channel, 1, 1, bias=False)
self.cv4 = Conv(2 * hidden_channel, out_channel, 1, 1)
self.bn = nn.BatchNorm2d(2 * hidden_channel) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(hidden_channel, hidden_channel, shortcut, groups, expansion=1.0) for _ in range(repeats)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, in_channel, out_channel, kernel_size_list=(5, 9, 13)):
super(SPP, self).__init__()
hidden_channel = in_channel // 2 # hidden channels
self.cv1 = Conv(in_channel, hidden_channel, 1, 1)
self.cv2 = Conv(hidden_channel * (len(kernel_size_list) + 1), out_channel, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2) for kernel_size in kernel_size_list])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], dim=1))
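    # Channel bookkeeping (illustrative numbers): with in_channel=1024 and
    # kernels (5, 9, 13), cv1 yields 512 channels, the concat of x plus three
    # pooled copies yields 512 * 4 = 2048, which cv2 projects to out_channel.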
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, in_channel, out_channel, kernel_size=1, stride=1, padding=None, groups=1, activation=True):
super(Focus, self).__init__()
self.conv = Conv(in_channel * 4, out_channel, kernel_size, stride, padding, groups, activation)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], dim=1))
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, di | d)
class Detect(nn.Module):
def __init__(self, num_classes, num_anchor, reference_channels):
super(Detect, self).__init__()
self.num_anchor = num_anchor
self.num_classes = num_classes
self.num_output = self.num_classes + 5
self.m = nn.ModuleList(nn.Conv2d(input_channel, self.num_output * self.num_anchor, 1) for input_channel in reference_channels)
self.init_weight()
def forward(self, x):
for ilevel, module in enumerate(self.m):
x[ilevel] = module(x[ilevel])
return x
def init_weight(self):
strides = [8, 16, 32]
for head, stride in zip(self.m, strides):
bias = head.bias.view(self.num_anchor, -1)
bias[:, 4] += math.log(8 / (640 / stride) ** 2)
bias[:, 5:] += math.log(0.6 / (self.num_classes - 0.99))
head.bias = nn.Parameter(bias.view(-1), requires_grad=True)
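    # Interpretation (assumption, following the RetinaNet/YOLOv3 prior-bias
    # trick): the objectness bias log(8 / (640/stride)**2) encodes "expect
    # roughly 8 objects per 640x640 image at this scale", and the class bias
    # log(0.6 / (num_classes - 0.99)) spreads ~0.6 probability over classes.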
class Yolo(nn.Module):
def __init__(self, num_classes, config_file, rank=0):
super(Yolo, self).__init__()
self.num_classes = num_classes
self.rank = rank
self.strides = [8, 16, 32]
self.model, self.saved_index, anchors = self.build_model(config_file)
self.register_buffer("anchors", torch.FloatTensor(anchors).view(3, 3, 2) / torch.FloatTensor(self.strides).view(3, 1, 1))
self.apply(self.init_weight)
def set_new_anchors(self, anchors):
        # Scale the given anchors (input-pixel units) down to feature-map units
self.anchors[...] = anchors / torch.FloatTensor(self.strides).view(3, 1, 1)
def init_weight(self, m):
type_t = type(m)
if type_t is nn.Conv2d:
# pass init
pass
elif type_t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif type_t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def forward(self, x):
y = []
for module in self.model:
if module.from_index != -1:
if isinstance(module.from_index, int):
x = y[module.from_index]
else:
xout = []
for i in module.from_index:
if i == -1:
xval = x
else:
xval = y[i]
xout.append(xval)
x = xout
x = module(x)
y.append(x if module.layer_index in self.saved_index else None)
return x
def parse_string(self, value):
if value == "None":
return None
elif value == "True":
return True
elif value == "False":
return False
else:
return value
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ', end='')
for m in self.model.modules():
if type(m) is Conv:
m.conv = nn_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv
m.bn = None # remove batchnorm
m.forward = m.fuse_forward # update forward
return self
def build_model(self, config_file, input_channel=3):
with open(config_file) as f:
self.yaml = yaml.load(f, Loader=yaml.FullLoader)
all_layers_cfg_list = self.yaml["backbone"] + self.yaml["head"]
anchors, depth_multiple, width_multiple = [self.yaml[item] for item in ["anchors", "depth_multiple", "width_multiple"]]
num_classes = self.num_classes
num_anchor = len(anchors[0]) // 2
num_output = num_anchor * (num_classes + 5)
all_layers_channels = [input_channel]
all_layers = []
saved_layer_index = []
for layer_index, (from_index, repeat_count, module_name, args) in enumerate(all_layers_cfg_list):
args = [self.parse_string(a) for a in args]
module_function = eval(module_name)
if repeat_count > 1:
repeat_count = max(round(repeat_count * depth_multiple), 1)
if module_function in [Conv, Bottleneck, SPP, Focus, BottleneckCSP]:
channel_input, channel_output = all_layers_channels[from_index], args[0]
if channel_output != num_output:
channel_output = make_divisible(channel_output * width_multiple, 8)
args = [channel_input, channel_output, *args[1:]]
if module_function in [BottleneckCSP]:
args.insert(2, repeat_count)
repeat_count = 1
elif module_function is Concat:
channel_output = sum([all_layers_channels[-1 if x == -1 else x + 1] for x in from_index])
elif module_function is Detect:
reference_channel = [all_layers_channels[x + 1] for x in from_index]
args = [num_classes, num_anchor, reference_channel]
else:
channel_output = all_layers_channels[from_index]
if repeat_count > 1:
module_instance = nn.ModuleList([
module_function(*args) for _ in range(repeat_count)
])
else:
module_instance = module_function(*args)
module_instance.from_index = from_index
module_instance.layer_index = layer_index
all_layers.append(module_instance)
all_layers_channels.append(channel_output)
if not isinstance(from_index, list):
from_index = [from_index]
saved_layer_index.extend(filter(lambda x: x!=-1, from_index))
num_params = sum([x.numel() for x in module_instance.parameters()])
if self.rank == 0:
align_format = "%6s %-15s %-7s %-10s %-18s %-30s"
if layer_index == 0:
logger.info(align_format % ("Index", "From", "Repeats", "Param", "Module", "Arguments"))
format_vals = (
"%d." % layer_index,
str(from_index),
str(repeat_count),
"%d" % num_params,
module_name,
str(args)
)
logger.info(align_format % format_vals)
return nn.Sequential(*all_layers), sorted(saved_layer_index), anchors
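    # Scaling example (illustrative, yolov5s-style numbers): depth_multiple=0.33
    # turns a configured repeat_count of 9 into max(round(9 * 0.33), 1) = 3, and
    # width_multiple=0.50 turns 128 configured channels into
    # make_divisible(128 * 0.50, 8) = 64.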
if __name__ == "__main__":
import nn_utils
nn_utils.setup_seed(3)
device = "cuda:0"
model = Yolo(20, "/datav/wish/yolov5/models/yolov5s.yaml").to(device)
model.fuse()
checkpoint = torch.load("/datav/wish/yolov5-2.0/test.pt", map_location="cpu")
checkpoint['anchors'] = checkpoint['model.24.anchors']
del checkpoint['model.24.anchors']
del checkpoint['model.24.anchor_grid']
model.load_state_dict(checkpoint)
print("Done")
# weight = "/datav/wish/yolov5/weights/yolov5m.pt"
# import pickle
# with open(weight, "rb") as f:
# p = pickle.Unpickler(f, fix_imports=False)
# value = p.load()
# print(value)
#check_point = torch.load(weight, map_location="cpu", fix_imports=False)
#print(check_point)
#input = torch.zeros((1, 3, 640, 640))
#output = model(input)
#print(output[0].shape) | m=self. | identifier_name |
filetrace_test.go | package libbusyna
import (
"io/ioutil"
"os"
"reflect"
"regexp"
"strings"
"testing"
)
// TestStraceRun tests basic strace execution.
func TestStraceRun(t *testing.T) {
StraceRun("echo asdf > /dev/null", nil, "")
}
// Example strace file used to test parsers.
var straceout = []string{
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16820`,
`16819 stat64("/home/lpenz/bin/colorize", {st_mode=S_IFREG|0755, st_size=1483, ...}) = 0`,
`16819 clone( <unfinished ...>`,
`16820 execve("/usr/bin/unbuffer", ["unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */] <unfinished ...>`,
`16819 <... clone resumed> child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16821`,
`16820 <... execve resumed> ) = 0`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 execve("/home/lpenz/bin/colorize", ["colorize", "^\\(\\s\\+new:\\s\\|\\s\\+old:\\s\\|scons"...], [/* 34 vars */] <unfinished ...>`,
`16820 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC) = 3`,
`16821 <... execve resumed> ) = 0`,
`16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/libtinfo.so.5", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libdl.so.2", O_RDONLY|O_CLOEXEC) = 3`,
`16820 stat64("/home/lpenz/projs/lpenz.github.com", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 stat64(".", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 open("/usr/bin/unbuffer", O_RDONLY <unfinished ...>`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 execve("/home/lpenz/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 ????( <unavailable>)= ? <unavailable>`,
`16820 execve("/usr/local/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 --- SIGCHLD (Child exited) @ 0 (0) ---`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = 0`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = ? <unavailable>`,
`16820 ????(= ? <unavailable>`,
`16821 open("/dev/tty", O_RDWR|O_NONBLOCK|O_LARGEFILE) = 3`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... ???? resumed> ) = ? <unavailable>`,
`16821 open("/usr/lib/locale/locale-archive", O_RDONLY|O_LARGEFILE|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 open("/usr/lib\"/libtcl8.5.so.0", O_RDONLY|O_CLOEXEC) = 3`,
`16820 exit_group(0) = ?`,
}
// TestStraceParse1 tests strace level1 parser (joining) by counting and
// checking strings.
func TestStraceParse1(t *testing.T) {
// Count strings that will be parsed away by StraceParser1
n := len(straceout)
for _, l := range straceout {
if strings.Contains(l, "resumed") || strings.Contains(l, "--- SIG") {
n--
continue
}
m, err := regexp.MatchString("^[0-9]+ \\?.*", l)
if err != nil {
t.Error(err)
}
if m {
n--
continue
}
}
if n == len(straceout) {
t.Error("test string has no level 1 parser tokens")
}
// Parse, and check that they went away and that the count is right
parsed := make([]string, 0, len(straceout))
for l := range StraceParse1(ChanFromList(straceout)) {
if strings.Contains(l, "resumed") || strings.Contains(l, "finished") {
t.Error("found invalid string in parsed results: " + l)
}
parsed = append(parsed, l)
}
if len(parsed) != n {
t.Error("incorrect len of parsed strings")
}
}
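// Illustrative example of the joining StraceParse1 is expected to perform,
// built from the sample lines above (hypothetical reconstruction):
//
//	`16819 clone( <unfinished ...>`
//	`16819 <... clone resumed> child_stack=0, ...) = 16821`
//
// should come out as a single complete `16819 clone(child_stack=0, ...) = 16821`.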
// TestStraceParse2Basic tests strace level2 parser by counting parsed entities.
func TestStraceParse2Basic(t *testing.T) {
nopen := 0
nexec := 0
for _, l := range straceout {
if strings.Contains(l, " open(") {
nopen++
}
if strings.Contains(l, " execve(") {
nexec++
}
}
syscalls := map[string]int{}
for info := range StraceParse2(StraceParse1(ChanFromList(straceout))) {
syscalls[info.syscall]++
}
if nopen != syscalls["open"] {
t.Errorf("\"open\" count mismatch: %d != %d", nopen, syscalls["open"])
}
if nexec != syscalls["execve"] {
t.Errorf("\"execve\" count mismatch: %d != %d", nexec, syscalls["execve"])
}
}
// TestStraceParse2Args tests strace level2 argument splitting.
func TestStraceParse2Args(t *testing.T) {
tests := []struct {
str string
ans []string
}{
{"asdf", []string{"asdf"}},
{"as, df", []string{"as", "df"}},
{"a {s, d} f", []string{"a {s, d} f"}},
{"{as, df}", []string{"{as, df}"}},
{`"as, df"`, []string{`"as, df"`}},
{`"as, df", gh`, []string{`"as, df"`, "gh"}},
{`"as, df\", gh"`, []string{`"as, df\", gh"`}},
{`"as, df\""`, []string{`"as, df\""`}},
}
for _, tst := range tests {
a := StraceParse2Argsplit(tst.str)
if !reflect.DeepEqual(a, tst.ans) {
t.Error(a, "!=", tst.ans)
}
}
}
// TestStraceParse2Lines tests a specific line-parsing.
func TestStraceParse2Lines(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
}()
for info := range StraceParse2(c) {
tests := []struct {
ok bool
str string
}{
{info.pid == 16821, "pid mismatch"},
{info.syscall == "open", "syscall mismatch"},
{info.result == 3, "result mismatch"},
{info.body == `"/etc/ld.so.cache", O_RDONLY|O_CLOEXEC`, "body mismatch"},
{info.err == "", "error mismatch"},
}
for _, tst := range tests {
if !tst.ok {
t.Error(tst.str)
}
}
ans := []string{
`"/etc/ld.so.cache"`,
`O_RDONLY|O_CLOEXEC`,
}
if len(ans) != len(info.args) {
t.Errorf("args len mismatch: len(%s)=%d != len(%s)=%d", info.args, len(info.args), ans, len(ans))
}
for i := 0; i < len(info.args); i++ {
if ans[i] != info.args[i] {
t.Errorf("arg %d mismatch", i)
}
}
}
}
// TestStraceParse3 tests StraceParse3 and StraceParse2
func TestStraceParse3(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
c <- `16821 open("w", O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC) = 4`
c <- `16821 open("r", O_RDONLY|O_CLOEXEC) = 5`
c <- `16821 open("rw", O_RDWR|O_NONBLOCK) = 6`
c <- `16821 creat("c", 01) = 6`
}()
r, w := StraceParse3(StraceParse2(c), "")
rok := map[string]bool{
"/etc/ld.so.cache": true,
"r": true,
"rw": true,
}
if !reflect.DeepEqual(r, rok) {
t.Error(r, "!=", rok)
}
wok := map[string]bool{
"w": true,
"c": true,
"rw": true,
}
if !reflect.DeepEqual(w, wok) {
t.Error(w, "!=", wok)
}
}
// Test real applications:
// straceRbase has the base read files for the OS where the tests are run.
var straceRbase map[string]bool
// empty is an empty map
var empty = map[string]bool{}
// filetraceTest is the primitive test function that runs the provided command
// and checks if the set of files read and written match the ones provided.
func filetraceTest(t *testing.T, cmd string, dir string, rok map[string]bool, wok map[string]bool) {
if len(straceRbase) == 0 {
straceRbase, _ = FileTrace("", nil, "")
}
rt, wt := FileTrace(cmd, nil, dir)
rok2 := map[string]bool{}
for r := range rok {
rok2[r] = true
}
for r := range straceRbase {
rok2[r] = true
}
if !reflect.DeepEqual(rt, rok2) {
t.Error("r", rt, "!=", rok2)
}
if !reflect.DeepEqual(wt, wok) {
t.Error("w", wt, "!=", wok)
}
}
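// Hypothetical direct use of the function under test:
//
//	r, w := FileTrace("cat in > out", nil, "")
//
// would be expected to report "in" among the reads and "out" among the
// writes, on top of the base-OS reads captured in straceRbase.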
// TestFiletraceEchocat is the base test of read/write that runs an echo with the
// output redirected to a file, and a cat that reads that file.
func TestFiletraceEchocat(t *testing.T) {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t",
"",
empty,
map[string]bool{"t": true})
defer func() {
if err := os.Remove("t"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cat t > h",
"",
map[string]bool{"t": true},
map[string]bool{"h": true})
defer func() {
if err := os.Remove("h"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cp t j",
"",
map[string]bool{"t": true},
map[string]bool{"j": true})
defer func() {
if err := os.Remove("j"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceChdir tests directory changing.
func TestFiletraceChdir(t *testing.T) {
filetraceTest(t,
"mkdir d; cd d; echo asdf > t",
"",
empty,
map[string]bool{"d/t": true})
defer func() {
if err := os.Remove("d/t"); err != nil {
t.Error(err)
}
if err := os.Remove("d"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceEnv tests the environment argument.
func TestFiletraceEnv(t *testing.T) {
FileTrace("env > e.txt", map[string]string{"x": "y"}, "")
defer func() {
if err := os.Remove("e.txt"); err != nil |
}()
data, err := ioutil.ReadFile("e.txt")
if err != nil {
t.Fatal(err)
}
datastr := string(data)
if !strings.Contains(datastr, "x=y") {
t.Fatalf("environment x=y not found in %s", datastr)
}
}
// TestFiletraceDir tests the dir argument.
func TestFiletraceDir(t *testing.T) {
os.Mkdir("d", 0755)
filetraceTest(t,
"mkdir -p s/ss; cd s; cd ss; echo asdf > t; echo zxcv > z; rm z",
"d",
empty,
map[string]bool{"s/ss/t": true})
defer func() {
for _, f := range []string{"d/s/ss/t", "d/s/ss", "d/s", "d"} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
// TestFiletraceRename tests renaming
func TestFiletraceRename(t *testing.T) {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t; mv t v",
"",
empty,
map[string]bool{"v": true})
defer func() {
if err := os.Remove("v"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceChdirPid tests directory changing in different processes
func TestFiletraceChdirPid(t *testing.T) {
filetraceTest(t,
"(mkdir d; cd d; echo asdf > t); echo asdf > u",
"",
empty,
map[string]bool{"d/t": true, "u": true})
defer func() {
for _, f := range []string{`d/t`, `d`, `u`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
// TestFiletraceExec tests script execution
func TestFiletraceExec(t *testing.T) {
ioutil.WriteFile("tester.c",
[]byte("#include <stdio.h>\nint main(void)\n{\n\tFILE *fd = fopen(\"t\", \"w\");\nfprintf(fd, \"test\");\nreturn 0;\n}\n"),
0777)
filetraceTest(t,
"gcc -o tester tester.c",
"",
map[string]bool{"tester.c": true, "tester": true},
map[string]bool{"tester": true})
filetraceTest(t,
"./tester",
"",
map[string]bool{"tester": true},
map[string]bool{"t": true})
defer func() {
for _, f := range []string{`tester.c`, `tester`, `t`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
| {
t.Error(err)
} | conditional_block |
filetrace_test.go | package libbusyna
import (
"io/ioutil"
"os"
"reflect"
"regexp"
"strings"
"testing"
)
// TestStraceRun tests basic strace execution.
func TestStraceRun(t *testing.T) {
StraceRun("echo asdf > /dev/null", nil, "")
}
// Example strace file used to test parsers.
var straceout = []string{
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16820`,
`16819 stat64("/home/lpenz/bin/colorize", {st_mode=S_IFREG|0755, st_size=1483, ...}) = 0`,
`16819 clone( <unfinished ...>`,
`16820 execve("/usr/bin/unbuffer", ["unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */] <unfinished ...>`,
`16819 <... clone resumed> child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16821`,
`16820 <... execve resumed> ) = 0`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 execve("/home/lpenz/bin/colorize", ["colorize", "^\\(\\s\\+new:\\s\\|\\s\\+old:\\s\\|scons"...], [/* 34 vars */] <unfinished ...>`,
`16820 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC) = 3`,
`16821 <... execve resumed> ) = 0`,
`16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/libtinfo.so.5", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libdl.so.2", O_RDONLY|O_CLOEXEC) = 3`,
`16820 stat64("/home/lpenz/projs/lpenz.github.com", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 stat64(".", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 open("/usr/bin/unbuffer", O_RDONLY <unfinished ...>`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 execve("/home/lpenz/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 ????( <unavailable>)= ? <unavailable>`,
`16820 execve("/usr/local/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 --- SIGCHLD (Child exited) @ 0 (0) ---`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = 0`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = ? <unavailable>`,
`16820 ????(= ? <unavailable>`,
`16821 open("/dev/tty", O_RDWR|O_NONBLOCK|O_LARGEFILE) = 3`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... ???? resumed> ) = ? <unavailable>`,
`16821 open("/usr/lib/locale/locale-archive", O_RDONLY|O_LARGEFILE|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 open("/usr/lib\"/libtcl8.5.so.0", O_RDONLY|O_CLOEXEC) = 3`,
`16820 exit_group(0) = ?`,
}
// TestStraceParse1 tests strace level1 parser (joining) by counting and
// checking strings.
func TestStraceParse1(t *testing.T) {
// Count strings that will be parsed away by StraceParser1
n := len(straceout)
for _, l := range straceout {
if strings.Contains(l, "resumed") || strings.Contains(l, "--- SIG") {
n--
continue
}
m, err := regexp.MatchString("^[0-9]+ \\?.*", l)
if err != nil {
t.Error(err)
}
if m {
n--
continue
}
}
if n == len(straceout) {
t.Error("test string has no level 1 parser tokens")
}
// Parse, and check that they went away and that the count is right
parsed := make([]string, 0, len(straceout))
for l := range StraceParse1(ChanFromList(straceout)) {
if strings.Contains(l, "resumed") || strings.Contains(l, "finished") {
t.Error("found invalid string in parsed results: " + l)
}
parsed = append(parsed, l)
}
if len(parsed) != n {
t.Error("incorrect len of parsed strings")
}
}
// TestStraceParse2Basic tests strace level2 parser by counting parsed entities.
func TestStraceParse2Basic(t *testing.T) {
nopen := 0
nexec := 0
for _, l := range straceout {
if strings.Contains(l, " open(") {
nopen++
}
if strings.Contains(l, " execve(") {
nexec++
}
}
syscalls := map[string]int{}
for info := range StraceParse2(StraceParse1(ChanFromList(straceout))) {
syscalls[info.syscall]++
}
if nopen != syscalls["open"] {
t.Errorf("\"open\" count mismatch: %d != %d", nopen, syscalls["open"])
}
if nexec != syscalls["execve"] {
t.Errorf("\"execve\" count mismatch: %d != %d", nexec, syscalls["execve"])
}
}
// TestStraceParse2Args tests strace level2 argument splitting.
func TestStraceParse2Args(t *testing.T) {
tests := []struct {
str string
ans []string
}{
{"asdf", []string{"asdf"}},
{"as, df", []string{"as", "df"}},
{"a {s, d} f", []string{"a {s, d} f"}},
{"{as, df}", []string{"{as, df}"}},
{`"as, df"`, []string{`"as, df"`}},
{`"as, df", gh`, []string{`"as, df"`, "gh"}},
{`"as, df\", gh"`, []string{`"as, df\", gh"`}},
{`"as, df\""`, []string{`"as, df\""`}},
}
for _, tst := range tests {
a := StraceParse2Argsplit(tst.str)
if !reflect.DeepEqual(a, tst.ans) {
t.Error(a, "!=", tst.ans)
}
}
}
// TestStraceParse2Lines tests a specific line-parsing.
func TestStraceParse2Lines(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
}()
for info := range StraceParse2(c) {
tests := []struct {
ok bool
str string
}{
{info.pid == 16821, "pid mismatch"},
{info.syscall == "open", "syscall mismatch"},
{info.result == 3, "result mismatch"},
{info.body == `"/etc/ld.so.cache", O_RDONLY|O_CLOEXEC`, "body mismatch"},
{info.err == "", "error mismatch"},
}
for _, tst := range tests {
if !tst.ok {
t.Error(tst.str)
}
}
ans := []string{
`"/etc/ld.so.cache"`,
`O_RDONLY|O_CLOEXEC`,
}
if len(ans) != len(info.args) {
t.Errorf("args len mismatch: len(%s)=%d != len(%s)=%d", info.args, len(info.args), ans, len(ans))
}
for i := 0; i < len(info.args); i++ {
if ans[i] != info.args[i] {
t.Errorf("arg %d mismatch", i)
}
}
}
}
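// Decomposition assumed by the assertions above (illustrative): in
// `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`, pid is 16821,
// syscall is "open", body is everything inside the parentheses (then split
// into args), and result is the integer after "=".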
// TestStraceParse3 tests StraceParse3 and StraceParse2
func TestStraceParse3(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
c <- `16821 open("w", O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC) = 4`
c <- `16821 open("r", O_RDONLY|O_CLOEXEC) = 5`
c <- `16821 open("rw", O_RDWR|O_NONBLOCK) = 6`
c <- `16821 creat("c", 01) = 6`
}()
r, w := StraceParse3(StraceParse2(c), "")
rok := map[string]bool{
"/etc/ld.so.cache": true,
"r": true,
"rw": true,
}
if !reflect.DeepEqual(r, rok) {
t.Error(r, "!=", rok)
}
wok := map[string]bool{
"w": true,
"c": true,
"rw": true,
}
if !reflect.DeepEqual(w, wok) {
t.Error(w, "!=", wok)
}
}
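// Flag classification assumed by this test: O_RDONLY lands in the read set,
// O_WRONLY and creat() land in the write set, and O_RDWR lands in both,
// which is why "rw" shows up in r and w above. (Interpretation note.)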
// Test real applications:
// straceRbase has the base read files for the OS where the tests are run.
var straceRbase map[string]bool
// empty is an empty map
var empty = map[string]bool{}
// filetraceTest is the primitive test function that runs the provided command
// and checks if the set of files read and written match the ones provided.
func filetraceTest(t *testing.T, cmd string, dir string, rok map[string]bool, wok map[string]bool) {
if len(straceRbase) == 0 {
straceRbase, _ = FileTrace("", nil, "")
}
rt, wt := FileTrace(cmd, nil, dir)
rok2 := map[string]bool{}
for r := range rok {
rok2[r] = true
}
for r := range straceRbase {
rok2[r] = true
}
if !reflect.DeepEqual(rt, rok2) {
t.Error("r", rt, "!=", rok2)
}
if !reflect.DeepEqual(wt, wok) {
t.Error("w", wt, "!=", wok)
}
}
// TestFiletraceEchocat is the base test of read/write that runs an echo with the
// output redirected to a file, and a cat that reads that file.
func | (t *testing.T) {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t",
"",
empty,
map[string]bool{"t": true})
defer func() {
if err := os.Remove("t"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cat t > h",
"",
map[string]bool{"t": true},
map[string]bool{"h": true})
defer func() {
if err := os.Remove("h"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cp t j",
"",
map[string]bool{"t": true},
map[string]bool{"j": true})
defer func() {
if err := os.Remove("j"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceChdir tests directory changing.
func TestFiletraceChdir(t *testing.T) {
filetraceTest(t,
"mkdir d; cd d; echo asdf > t",
"",
empty,
map[string]bool{"d/t": true})
defer func() {
if err := os.Remove("d/t"); err != nil {
t.Error(err)
}
if err := os.Remove("d"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceEnv tests the environment argument.
func TestFiletraceEnv(t *testing.T) {
FileTrace("env > e.txt", map[string]string{"x": "y"}, "")
defer func() {
if err := os.Remove("e.txt"); err != nil {
t.Error(err)
}
}()
data, err := ioutil.ReadFile("e.txt")
if err != nil {
t.Fatal(err)
}
datastr := string(data)
if !strings.Contains(datastr, "x=y") {
t.Fatalf("environment x=y not found in %s", datastr)
}
}
// TestFiletraceDir tests the dir argument.
func TestFiletraceDir(t *testing.T) {
os.Mkdir("d", 0755)
filetraceTest(t,
"mkdir -p s/ss; cd s; cd ss; echo asdf > t; echo zxcv > z; rm z",
"d",
empty,
map[string]bool{"s/ss/t": true})
defer func() {
for _, f := range []string{"d/s/ss/t", "d/s/ss", "d/s", "d"} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
// TestFiletraceRename tests renaming
func TestFiletraceRename(t *testing.T) {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t; mv t v",
"",
empty,
map[string]bool{"v": true})
defer func() {
if err := os.Remove("v"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceChdirPid tests directory changing in different processes
func TestFiletraceChdirPid(t *testing.T) {
filetraceTest(t,
"(mkdir d; cd d; echo asdf > t); echo asdf > u",
"",
empty,
map[string]bool{"d/t": true, "u": true})
defer func() {
for _, f := range []string{`d/t`, `d`, `u`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
// TestFiletraceExec tests script execution
func TestFiletraceExec(t *testing.T) {
ioutil.WriteFile("tester.c",
[]byte("#include <stdio.h>\nint main(void)\n{\n\tFILE *fd = fopen(\"t\", \"w\");\nfprintf(fd, \"test\");\nreturn 0;\n}\n"),
0777)
filetraceTest(t,
"gcc -o tester tester.c",
"",
map[string]bool{"tester.c": true, "tester": true},
map[string]bool{"tester": true})
filetraceTest(t,
"./tester",
"",
map[string]bool{"tester": true},
map[string]bool{"t": true})
defer func() {
for _, f := range []string{`tester.c`, `tester`, `t`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
| TestFiletraceEchocat | identifier_name |
filetrace_test.go | package libbusyna
import (
"io/ioutil"
"os"
"reflect"
"regexp"
"strings"
"testing"
)
// TestStraceRun tests basic strace execution.
func TestStraceRun(t *testing.T) {
StraceRun("echo asdf > /dev/null", nil, "")
}
// Example strace file used to test parsers.
var straceout = []string{
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16820`,
`16819 stat64("/home/lpenz/bin/colorize", {st_mode=S_IFREG|0755, st_size=1483, ...}) = 0`,
`16819 clone( <unfinished ...>`,
`16820 execve("/usr/bin/unbuffer", ["unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */] <unfinished ...>`,
`16819 <... clone resumed> child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16821`,
`16820 <... execve resumed> ) = 0`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 execve("/home/lpenz/bin/colorize", ["colorize", "^\\(\\s\\+new:\\s\\|\\s\\+old:\\s\\|scons"...], [/* 34 vars */] <unfinished ...>`,
`16820 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC) = 3`,
`16821 <... execve resumed> ) = 0`,
`16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/libtinfo.so.5", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libdl.so.2", O_RDONLY|O_CLOEXEC) = 3`,
`16820 stat64("/home/lpenz/projs/lpenz.github.com", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 stat64(".", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 open("/usr/bin/unbuffer", O_RDONLY <unfinished ...>`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 execve("/home/lpenz/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 ????( <unavailable>)= ? <unavailable>`,
`16820 execve("/usr/local/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 --- SIGCHLD (Child exited) @ 0 (0) ---`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = 0`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = ? <unavailable>`,
`16820 ????(= ? <unavailable>`,
`16821 open("/dev/tty", O_RDWR|O_NONBLOCK|O_LARGEFILE) = 3`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... ???? resumed> ) = ? <unavailable>`,
`16821 open("/usr/lib/locale/locale-archive", O_RDONLY|O_LARGEFILE|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 open("/usr/lib\"/libtcl8.5.so.0", O_RDONLY|O_CLOEXEC) = 3`,
`16820 exit_group(0) = ?`,
}
// TestStraceParse1 tests strace level1 parser (joining) by counting and
// checking strings.
func TestStraceParse1(t *testing.T) {
// Count strings that will be parsed away by StraceParser1
n := len(straceout)
for _, l := range straceout {
if strings.Contains(l, "resumed") || strings.Contains(l, "--- SIG") {
n--
continue
}
m, err := regexp.MatchString("^[0-9]+ \\?.*", l)
if err != nil {
t.Error(err)
}
if m {
n--
continue
}
}
if n == len(straceout) {
t.Error("test string has no level 1 parser tokens")
}
// Parse, and check that they went away and that the count is right
parsed := make([]string, 0, len(straceout))
for l := range StraceParse1(ChanFromList(straceout)) {
if strings.Contains(l, "resumed") || strings.Contains(l, "finished") {
t.Error("found invalid string in parsed results: " + l)
}
parsed = append(parsed, l)
}
if len(parsed) != n {
t.Error("incorrect len of parsed strings")
}
}
// TestStraceParse2Basic tests strace level2 parser by counting parsed entities.
func TestStraceParse2Basic(t *testing.T) {
nopen := 0
nexec := 0
for _, l := range straceout {
if strings.Contains(l, " open(") {
nopen++
}
if strings.Contains(l, " execve(") {
nexec++
}
}
syscalls := map[string]int{}
for info := range StraceParse2(StraceParse1(ChanFromList(straceout))) {
syscalls[info.syscall]++
}
if nopen != syscalls["open"] {
t.Errorf("\"open\" count mismatch: %d != %d", nopen, syscalls["open"])
}
if nexec != syscalls["execve"] {
t.Errorf("\"execve\" count mismatch: %d != %d", nexec, syscalls["execve"])
}
}
// TestStraceParse2Args tests strace level2 argument splitting.
func TestStraceParse2Args(t *testing.T) {
tests := []struct {
str string
ans []string
}{
{"asdf", []string{"asdf"}},
{"as, df", []string{"as", "df"}},
{"a {s, d} f", []string{"a {s, d} f"}},
{"{as, df}", []string{"{as, df}"}},
{`"as, df"`, []string{`"as, df"`}},
{`"as, df", gh`, []string{`"as, df"`, "gh"}},
{`"as, df\", gh"`, []string{`"as, df\", gh"`}},
{`"as, df\""`, []string{`"as, df\""`}},
}
for _, tst := range tests {
a := StraceParse2Argsplit(tst.str)
if !reflect.DeepEqual(a, tst.ans) {
t.Error(a, "!=", tst.ans)
}
}
}
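// The splitter is expected to be quote- and brace-aware: in the table above,
// `"as, df\", gh"` stays a single argument because the comma sits inside a
// quoted (and escaped) region, while bare `as, df` splits on the comma.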
// TestStraceParse2Lines tests a specific line-parsing.
func TestStraceParse2Lines(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
}()
for info := range StraceParse2(c) {
tests := []struct {
ok bool
str string
}{
{info.pid == 16821, "pid mismatch"},
{info.syscall == "open", "syscall mismatch"},
{info.result == 3, "result mismatch"},
{info.body == `"/etc/ld.so.cache", O_RDONLY|O_CLOEXEC`, "body mismatch"},
{info.err == "", "error mismatch"},
}
for _, tst := range tests {
if !tst.ok {
t.Error(tst.str)
}
}
ans := []string{
`"/etc/ld.so.cache"`,
`O_RDONLY|O_CLOEXEC`,
}
if len(ans) != len(info.args) {
t.Errorf("args len mismatch: len(%s)=%d != len(%s)=%d", info.args, len(info.args), ans, len(ans))
}
for i := 0; i < len(info.args); i++ {
if ans[i] != info.args[i] {
t.Errorf("arg %d mismatch", i)
}
}
}
}
// TestStraceParse3 tests StraceParse3 and StraceParse2
func TestStraceParse3(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
c <- `16821 open("w", O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC) = 4`
c <- `16821 open("r", O_RDONLY|O_CLOEXEC) = 5`
c <- `16821 open("rw", O_RDWR|O_NONBLOCK) = 6`
c <- `16821 creat("c", 01) = 6`
}()
r, w := StraceParse3(StraceParse2(c), "")
rok := map[string]bool{
"/etc/ld.so.cache": true,
"r": true,
"rw": true,
}
if !reflect.DeepEqual(r, rok) {
t.Error(r, "!=", rok)
}
wok := map[string]bool{
"w": true,
"c": true,
"rw": true,
}
if !reflect.DeepEqual(w, wok) {
t.Error(w, "!=", wok)
}
}
// Test real applications:
// straceRbase has the base read files for the OS where the tests are run.
var straceRbase map[string]bool
// empty is an empty map
var empty = map[string]bool{}
// filetraceTest is the primitive test function that runs the provided command
// and checks if the set of files read and written match the ones provided.
func filetraceTest(t *testing.T, cmd string, dir string, rok map[string]bool, wok map[string]bool) {
if len(straceRbase) == 0 {
straceRbase, _ = FileTrace("", nil, "")
}
rt, wt := FileTrace(cmd, nil, dir)
rok2 := map[string]bool{}
for r := range rok {
rok2[r] = true
}
for r := range straceRbase {
rok2[r] = true
}
if !reflect.DeepEqual(rt, rok2) {
t.Error("r", rt, "!=", rok2)
}
if !reflect.DeepEqual(wt, wok) {
t.Error("w", wt, "!=", wok)
}
}
// TestFiletraceEchocat is the base test of read/write that runs an echo with the
// output redirected to a file, and a cat that reads that file.
func TestFiletraceEchocat(t *testing.T) {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t",
"",
empty,
map[string]bool{"t": true})
defer func() {
if err := os.Remove("t"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cat t > h",
"",
map[string]bool{"t": true},
map[string]bool{"h": true})
defer func() {
if err := os.Remove("h"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cp t j",
"",
map[string]bool{"t": true},
map[string]bool{"j": true})
defer func() {
if err := os.Remove("j"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceChdir tests directory changing.
func TestFiletraceChdir(t *testing.T) {
filetraceTest(t,
"mkdir d; cd d; echo asdf > t",
"",
empty,
map[string]bool{"d/t": true})
defer func() {
if err := os.Remove("d/t"); err != nil {
t.Error(err)
}
if err := os.Remove("d"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceEnv tests the environment argument.
func TestFiletraceEnv(t *testing.T) {
FileTrace("env > e.txt", map[string]string{"x": "y"}, "")
defer func() {
if err := os.Remove("e.txt"); err != nil {
t.Error(err)
}
}()
data, err := ioutil.ReadFile("e.txt")
if err != nil {
t.Fatal(err)
}
datastr := string(data)
if !strings.Contains(datastr, "x=y") {
t.Fatalf("environment x=y not found in %s", datastr)
}
}
// TestFiletraceDir tests the dir argument.
func TestFiletraceDir(t *testing.T) {
os.Mkdir("d", 0755)
filetraceTest(t,
"mkdir -p s/ss; cd s; cd ss; echo asdf > t; echo zxcv > z; rm z",
"d",
empty,
map[string]bool{"s/ss/t": true})
defer func() {
for _, f := range []string{"d/s/ss/t", "d/s/ss", "d/s", "d"} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
// TestFiletraceRename tests renaming
func TestFiletraceRename(t *testing.T) |
// TestFiletraceChdirPid tests directory changing in different processes
func TestFiletraceChdirPid(t *testing.T) {
filetraceTest(t,
"(mkdir d; cd d; echo asdf > t); echo asdf > u",
"",
empty,
map[string]bool{"d/t": true, "u": true})
defer func() {
for _, f := range []string{`d/t`, `d`, `u`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
// TestFiletraceExec tests script execution
func TestFiletraceExec(t *testing.T) {
ioutil.WriteFile("tester.c",
[]byte("#include <stdio.h>\nint main(void)\n{\n\tFILE *fd = fopen(\"t\", \"w\");\nfprintf(fd, \"test\");\nreturn 0;\n}\n"),
0777)
filetraceTest(t,
"gcc -o tester tester.c",
"",
map[string]bool{"tester.c": true, "tester": true},
map[string]bool{"tester": true})
filetraceTest(t,
"./tester",
"",
map[string]bool{"tester": true},
map[string]bool{"t": true})
defer func() {
for _, f := range []string{`tester.c`, `tester`, `t`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
| {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t; mv t v",
"",
empty,
map[string]bool{"v": true})
defer func() {
if err := os.Remove("v"); err != nil {
t.Error(err)
}
}()
} | identifier_body |
filetrace_test.go | package libbusyna
import (
"io/ioutil"
"os"
"reflect"
"regexp"
"strings"
"testing"
)
// TestStraceRun tests basic strace execution.
func TestStraceRun(t *testing.T) {
StraceRun("echo asdf > /dev/null", nil, "")
}
// Example strace file used to test parsers.
var straceout = []string{
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 stat64("/usr/bin/unbuffer", {st_mode=S_IFREG|0755, st_size=640, ...}) = 0`,
`16819 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16820`,
`16819 stat64("/home/lpenz/bin/colorize", {st_mode=S_IFREG|0755, st_size=1483, ...}) = 0`,
`16819 clone( <unfinished ...>`,
`16820 execve("/usr/bin/unbuffer", ["unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */] <unfinished ...>`,
`16819 <... clone resumed> child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xf74ee728) = 16821`,
`16820 <... execve resumed> ) = 0`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 execve("/home/lpenz/bin/colorize", ["colorize", "^\\(\\s\\+new:\\s\\|\\s\\+old:\\s\\|scons"...], [/* 34 vars */] <unfinished ...>`,
`16820 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC) = 3`,
`16821 <... execve resumed> ) = 0`,
`16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/libtinfo.so.5", O_RDONLY|O_CLOEXEC) = 3`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libdl.so.2", O_RDONLY|O_CLOEXEC) = 3`,
`16820 stat64("/home/lpenz/projs/lpenz.github.com", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 stat64(".", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0`,
`16820 open("/usr/bin/unbuffer", O_RDONLY <unfinished ...>`,
`16821 open("/lib/i386-linux-gnu/i686/cmov/libc.so.6", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 execve("/home/lpenz/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 ????( <unavailable>)= ? <unavailable>`,
`16820 execve("/usr/local/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = -1 ENOENT (No such file or directory)`,
`16820 --- SIGCHLD (Child exited) @ 0 (0) ---`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = 0`,
`16820 execve("/usr/bin/tclsh", ["tclsh", "/usr/bin/unbuffer", "scons", "--max-drift=1", "--implicit-cache", "--debug=explain"], [/* 34 vars */]) = ? <unavailable>`,
`16820 ????(= ? <unavailable>`,
`16821 open("/dev/tty", O_RDWR|O_NONBLOCK|O_LARGEFILE) = 3`,
`16820 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC <unfinished ...>`,
`16820 <... ???? resumed> ) = ? <unavailable>`,
`16821 open("/usr/lib/locale/locale-archive", O_RDONLY|O_LARGEFILE|O_CLOEXEC <unfinished ...>`,
`16820 <... open resumed> ) = 3`,
`16821 <... open resumed> ) = 3`,
`16820 open("/usr/lib\"/libtcl8.5.so.0", O_RDONLY|O_CLOEXEC) = 3`,
`16820 exit_group(0) = ?`,
}
// TestStraceParse1 tests strace level1 parser (joining) by counting and
// checking strings.
func TestStraceParse1(t *testing.T) {
// Count strings that will be parsed away by StraceParser1
n := len(straceout)
for _, l := range straceout {
if strings.Contains(l, "resumed") || strings.Contains(l, "--- SIG") {
n--
continue
}
m, err := regexp.MatchString("^[0-9]+ \\?.*", l)
if err != nil {
t.Error(err)
}
if m {
n--
continue
}
}
if n == len(straceout) {
t.Error("test string has no level 1 parser tokens")
}
// Parse, and check that they went away and that the count is right
parsed := make([]string, 0, len(straceout))
for l := range StraceParse1(ChanFromList(straceout)) {
if strings.Contains(l, "resumed") || strings.Contains(l, "finished") {
t.Error("found invalid string in parsed results: " + l)
}
parsed = append(parsed, l)
}
if len(parsed) != n {
t.Error("incorrect len of parsed strings")
}
}
// TestStraceParse2Basic tests strace level2 parser by counting parsed entities.
func TestStraceParse2Basic(t *testing.T) {
nopen := 0
nexec := 0
for _, l := range straceout {
if strings.Contains(l, " open(") {
nopen++
}
if strings.Contains(l, " execve(") {
nexec++
}
}
syscalls := map[string]int{}
for info := range StraceParse2(StraceParse1(ChanFromList(straceout))) {
syscalls[info.syscall]++
}
if nopen != syscalls["open"] {
t.Errorf("\"open\" count mismatch: %d != %d", nopen, syscalls["open"])
}
if nexec != syscalls["execve"] {
t.Errorf("\"execve\" count mismatch: %d != %d", nexec, syscalls["execve"])
}
}
// TestStraceParse2Args tests strace level2 argument splitting.
func TestStraceParse2Args(t *testing.T) {
tests := []struct {
str string
ans []string
}{
{"asdf", []string{"asdf"}},
{"as, df", []string{"as", "df"}},
{"a {s, d} f", []string{"a {s, d} f"}},
{"{as, df}", []string{"{as, df}"}},
{`"as, df"`, []string{`"as, df"`}},
{`"as, df", gh`, []string{`"as, df"`, "gh"}},
{`"as, df\", gh"`, []string{`"as, df\", gh"`}},
{`"as, df\""`, []string{`"as, df\""`}},
}
for _, tst := range tests {
a := StraceParse2Argsplit(tst.str)
if !reflect.DeepEqual(a, tst.ans) {
t.Error(a, "!=", tst.ans)
}
}
}
// TestStraceParse2Lines tests a specific line-parsing.
func TestStraceParse2Lines(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
}()
for info := range StraceParse2(c) {
tests := []struct {
ok bool
str string
}{
{info.pid == 16821, "pid mismatch"},
{info.syscall == "open", "syscall mismatch"},
{info.result == 3, "result mismatch"},
{info.body == `"/etc/ld.so.cache", O_RDONLY|O_CLOEXEC`, "body mismatch"},
{info.err == "", "error mismatch"},
}
for _, tst := range tests {
if !tst.ok {
t.Error(tst.str)
}
}
ans := []string{
`"/etc/ld.so.cache"`,
`O_RDONLY|O_CLOEXEC`,
}
if len(ans) != len(info.args) {
t.Errorf("args len mismatch: len(%s)=%d != len(%s)=%d", info.args, len(info.args), ans, len(ans))
}
for i := 0; i < len(info.args); i++ {
if ans[i] != info.args[i] {
t.Errorf("arg %d mismatch", i)
}
}
}
}
// TestStraceParse3 tests StraceParse3 and StraceParse2
func TestStraceParse3(t *testing.T) {
c := make(chan string)
go func() {
defer close(c)
c <- `16821 open("/etc/ld.so.cache", O_RDONLY|O_CLOEXEC) = 3`
c <- `16821 open("w", O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC) = 4`
c <- `16821 open("r", O_RDONLY|O_CLOEXEC) = 5`
c <- `16821 open("rw", O_RDWR|O_NONBLOCK) = 6`
c <- `16821 creat("c", 01) = 6`
}()
r, w := StraceParse3(StraceParse2(c), "")
rok := map[string]bool{
"/etc/ld.so.cache": true,
"r": true,
"rw": true,
}
if !reflect.DeepEqual(r, rok) {
t.Error(r, "!=", rok)
}
wok := map[string]bool{
"w": true,
"c": true,
"rw": true,
}
if !reflect.DeepEqual(w, wok) {
t.Error(w, "!=", wok)
}
}
// Test real applications:
// straceRbase has the base read files for the OS where the tests are run.
var straceRbase map[string]bool
// empty is an empty map
var empty = map[string]bool{}
// filetraceTest is the primitive test function that runs the provided command
// and checks if the set of files read and written match the ones provided.
func filetraceTest(t *testing.T, cmd string, dir string, rok map[string]bool, wok map[string]bool) {
if len(straceRbase) == 0 {
straceRbase, _ = FileTrace("", nil, "")
}
rt, wt := FileTrace(cmd, nil, dir)
rok2 := map[string]bool{}
for r := range rok {
rok2[r] = true
}
for r := range straceRbase {
rok2[r] = true
}
if !reflect.DeepEqual(rt, rok2) {
t.Error("r", rt, "!=", rok2)
}
if !reflect.DeepEqual(wt, wok) {
t.Error("w", wt, "!=", wok)
}
}
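// Note: straceRbase is populated once by tracing an empty command, so every
// expectation below means "rok plus whatever the OS/shell reads at startup".
// A hypothetical minimal call, FileTrace("", nil, ""), returns just that base set.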
// TestFiletraceEchocat is the base test of read/write that runs an echo with the
// output redirected to a file, and a cat that reads that file.
func TestFiletraceEchocat(t *testing.T) {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t",
"",
empty,
map[string]bool{"t": true})
defer func() {
if err := os.Remove("t"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cat t > h",
"",
map[string]bool{"t": true},
map[string]bool{"h": true})
defer func() {
if err := os.Remove("h"); err != nil {
t.Error(err)
}
}()
filetraceTest(t,
"cp t j",
"",
map[string]bool{"t": true},
map[string]bool{"j": true})
defer func() {
if err := os.Remove("j"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceChdir tests directory changing.
func TestFiletraceChdir(t *testing.T) {
filetraceTest(t,
"mkdir d; cd d; echo asdf > t",
"",
empty,
map[string]bool{"d/t": true})
defer func() {
if err := os.Remove("d/t"); err != nil {
t.Error(err)
}
if err := os.Remove("d"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceEnv tests the environment argument.
func TestFiletraceEnv(t *testing.T) {
FileTrace("env > e.txt", map[string]string{"x": "y"}, "")
defer func() {
if err := os.Remove("e.txt"); err != nil {
t.Error(err)
}
}()
data, err := ioutil.ReadFile("e.txt")
if err != nil {
t.Fatal(err)
}
datastr := string(data)
if !strings.Contains(datastr, "x=y") {
t.Fatalf("environment x=y not found in %s", datastr)
}
}
// TestFiletraceDir tests the dir argument.
func TestFiletraceDir(t *testing.T) {
os.Mkdir("d", 0755)
filetraceTest(t,
"mkdir -p s/ss; cd s; cd ss; echo asdf > t; echo zxcv > z; rm z",
"d",
empty,
map[string]bool{"s/ss/t": true})
defer func() {
for _, f := range []string{"d/s/ss/t", "d/s/ss", "d/s", "d"} { | }
}()
}
// TestFiletraceRename tests renaming.
func TestFiletraceRename(t *testing.T) {
empty := map[string]bool{}
filetraceTest(t,
"echo asdf > t; mv t v",
"",
empty,
map[string]bool{"v": true})
defer func() {
if err := os.Remove("v"); err != nil {
t.Error(err)
}
}()
}
// TestFiletraceChdirPid tests directory changing across different processes.
func TestFiletraceChdirPid(t *testing.T) {
filetraceTest(t,
"(mkdir d; cd d; echo asdf > t); echo asdf > u",
"",
empty,
map[string]bool{"d/t": true, "u": true})
defer func() {
for _, f := range []string{`d/t`, `d`, `u`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
}
// TestFiletraceExec tests execution of a compiled program.
func TestFiletraceExec(t *testing.T) {
ioutil.WriteFile("tester.c",
[]byte("#include <stdio.h>\nint main(void)\n{\n\tFILE *fd = fopen(\"t\", \"w\");\nfprintf(fd, \"test\");\nreturn 0;\n}\n"),
0777)
filetraceTest(t,
"gcc -o tester tester.c",
"",
map[string]bool{"tester.c": true, "tester": true},
map[string]bool{"tester": true})
filetraceTest(t,
"./tester",
"",
map[string]bool{"tester": true},
map[string]bool{"t": true})
defer func() {
for _, f := range []string{`tester.c`, `tester`, `t`} {
if err := os.Remove(f); err != nil {
t.Error(err)
}
}
}()
} | if err := os.Remove(f); err != nil {
t.Error(err)
} | random_line_split |
settings.py | """
Django settings for hauki project.
"""
import logging
import os
import subprocess
import environ
import sentry_sdk
from django.conf.global_settings import LANGUAGES as GLOBAL_LANGUAGES
from django.core.exceptions import ImproperlyConfigured
from sentry_sdk.integrations.django import DjangoIntegration
CONFIG_FILE_NAME = "config_dev.env"
# This will get default settings, as Django has not yet initialized
# logging when importing this file
logger = logging.getLogger(__name__)
def get_git_revision_hash() -> str:
|
root = environ.Path(__file__) - 2 # two levels back in hierarchy
env = environ.Env(
DEBUG=(bool, False),
DJANGO_LOG_LEVEL=(str, "INFO"),
CONN_MAX_AGE=(int, 0),
SYSTEM_DATA_SOURCE_ID=(str, "hauki"),
LANGUAGES=(list, ["fi", "sv", "en"]),
DATABASE_URL=(str, "postgres:///hauki"),
TEST_DATABASE_URL=(str, ""),
TOKEN_AUTH_ACCEPTED_AUDIENCE=(str, ""),
TOKEN_AUTH_SHARED_SECRET=(str, ""),
SECRET_KEY=(str, ""),
ALLOWED_HOSTS=(list, []),
ADMINS=(list, []),
SECURE_PROXY_SSL_HEADER=(tuple, None),
MEDIA_ROOT=(environ.Path(), root("media")),
STATIC_ROOT=(environ.Path(), root("static")),
MEDIA_URL=(str, "/media/"),
STATIC_URL=(str, "/static/"),
TRUST_X_FORWARDED_HOST=(bool, False),
SENTRY_DSN=(str, ""),
SENTRY_ENVIRONMENT=(str, "development"),
COOKIE_PREFIX=(str, "hauki"),
INTERNAL_IPS=(list, []),
INSTANCE_NAME=(str, "Hauki"),
EXTRA_INSTALLED_APPS=(list, []),
ENABLE_DJANGO_EXTENSIONS=(bool, False),
MAIL_MAILGUN_KEY=(str, ""),
MAIL_MAILGUN_DOMAIN=(str, ""),
MAIL_MAILGUN_API=(str, ""),
RESOURCE_DEFAULT_TIMEZONE=(str, None),
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = root()
# Django environ has a nasty habit of complaining at level
# WARN about the env file not being present. Here we pre-empt it.
env_file_path = os.path.join(BASE_DIR, CONFIG_FILE_NAME)
if os.path.exists(env_file_path):
# Logging configuration is not available at this point
print(f"Reading config from {env_file_path}")
environ.Env.read_env(env_file_path)
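# For illustration only: a config_dev.env might contain lines such as
# the following (hypothetical values, matching the env schema declared above):
#   DEBUG=True
#   DATABASE_URL=postgres://hauki:hauki@localhost/hauki
#   ALLOWED_HOSTS=localhost,127.0.0.1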
DEBUG = env("DEBUG")
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
ADMINS = env("ADMINS")
INTERNAL_IPS = env("INTERNAL_IPS", default=(["127.0.0.1"] if DEBUG else []))
DATABASES = {"default": env.db()}
DATABASES["default"]["CONN_MAX_AGE"] = env("CONN_MAX_AGE")
if env("TEST_DATABASE_URL"):
DATABASES["default"]["TEST"] = env.db("TEST_DATABASE_URL")
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
AUTH_USER_MODEL = "users.User"
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/v1/"
LOGOUT_REDIRECT_URL = "/v1/"
RESOURCE_DEFAULT_TIMEZONE = env("RESOURCE_DEFAULT_TIMEZONE")
DJANGO_ORGHIERARCHY_DATASOURCE_MODEL = "hours.DataSource"
SYSTEM_DATA_SOURCE_ID = env("SYSTEM_DATA_SOURCE_ID")
SITE_ID = 1
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {
"class": "logging.NullHandler",
},
},
"loggers": {
"": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
"django": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
# Application definition
INSTALLED_APPS = [
"helusers.apps.HelusersConfig",
"modeltranslation",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.humanize",
"simple_history",
# disable Django’s development server static file handling
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
"django_filters",
"django_orghierarchy",
"timezone_field",
"mptt",
# Apps within this repository
"users",
"hours",
# OpenAPI
"drf_spectacular",
] + env("EXTRA_INSTALLED_APPS")
if env("SENTRY_DSN"):
sentry_sdk.init(
dsn=env("SENTRY_DSN"),
environment=env("SENTRY_ENVIRONMENT"),
release=get_git_revision_hash(),
integrations=[DjangoIntegration()],
)
MIDDLEWARE = [
# CorsMiddleware should be placed as high as possible and above WhiteNoiseMiddleware
# in particular
"corsheaders.middleware.CorsMiddleware",
# Ditto for securitymiddleware
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"simple_history.middleware.HistoryRequestMiddleware",
]
# django-extensions is a set of developer friendly tools
if env("ENABLE_DJANGO_EXTENSIONS"):
INSTALLED_APPS.append("django_extensions")
ROOT_URLCONF = "hauki.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "hauki.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
# Map language codes to the (code, name) tuples used by Django
# We want to keep the ordering in LANGUAGES configuration variable,
# thus some gyrations
language_map = {x: y for x, y in GLOBAL_LANGUAGES}
try:
LANGUAGES = tuple((lang, language_map[lang]) for lang in env("LANGUAGES"))
except KeyError as e:
raise ImproperlyConfigured(f'unknown language code "{e.args[0]}"')
LANGUAGE_CODE = env("LANGUAGES")[0]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [root("locale")]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATIC_ROOT = env("STATIC_ROOT")
MEDIA_ROOT = env("MEDIA_ROOT")
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# e.g. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
# https://docs.djangoproject.com/en/3.0/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Specifies a header that is trusted to indicate that the request was using
# https while traversing over the Internet at large. This is used when
# a proxy terminates the TLS connection and forwards the request over
# a secure network. Specified using a tuple.
# https://docs.djangoproject.com/en/3.0/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = env("SECURE_PROXY_SSL_HEADER")
CORS_ORIGIN_ALLOW_ALL = True
CSRF_COOKIE_NAME = "%s-csrftoken" % env("COOKIE_PREFIX")
SESSION_COOKIE_NAME = "%s-sessionid" % env("COOKIE_PREFIX")
# DRF Settings
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"hours.renderers.BrowsableAPIRendererWithoutForms",
],
"DEFAULT_FILTER_BACKENDS": [
"rest_framework.filters.OrderingFilter",
"django_filters.rest_framework.DjangoFilterBackend",
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"hours.authentication.HaukiSignedAuthentication",
"hours.authentication.HaukiTokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticatedOrReadOnly",
],
"DEFAULT_METADATA_CLASS": "hours.metadata.TranslatedChoiceNamesMetadata",
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}
# shown in the browsable API
INSTANCE_NAME = env("INSTANCE_NAME")
#
# Anymail
#
if env("MAIL_MAILGUN_KEY"):
ANYMAIL = {
"MAILGUN_API_KEY": env("MAIL_MAILGUN_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAIL_MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAIL_MAILGUN_API"),
}
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
elif not env("MAIL_MAILGUN_KEY") and DEBUG is True:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
#
# Django spectacular (OpenAPI) settings
#
SPECTACULAR_SETTINGS = {
"TITLE": "Hauki API",
"DESCRIPTION": """
API for the City of Helsinki opening hours database
# Introduction
To do.
# Authentication methods
<SecurityDefinitions />
""",
"VERSION": "0.0.1",
"EXTERNAL_DOCS": {
"description": "Hauki API in GitHub",
"url": "https://github.com/City-of-Helsinki/hauki",
},
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
local_settings_path = os.path.join(BASE_DIR, "local_settings.py")
if os.path.exists(local_settings_path):
with open(local_settings_path) as fp:
code = compile(fp.read(), local_settings_path, "exec")
# Here, we execute local code on the server. Luckily, local_settings.py and BASE_DIR
# are hard-coded above, so this cannot be used to execute any other files.
exec(code, globals(), locals()) # nosec
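# For illustration only: a local_settings.py could override any name bound
# above (hypothetical example):
#   DEBUG = True
#   EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"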
# Django SECRET_KEY setting, used for password reset links and such
SECRET_KEY = env("SECRET_KEY")
if not DEBUG and not SECRET_KEY:
raise Exception("In production, SECRET_KEY must be provided in the environment.")
# If a secret key was not supplied elsewhere, generate a random one and print
# a warning (logging is not configured yet?). This means that any functionality
# expecting SECRET_KEY to stay same will break upon restart. Should not be a
# problem for development.
if not SECRET_KEY:
logger.warning(
"SECRET_KEY was not defined in configuration."
" Generating a temporary key for dev."
)
import random
system_random = random.SystemRandom()
SECRET_KEY = "".join(
[
system_random.choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
for i in range(64)
]
)
| """
Retrieve the git hash for the underlying git repository or die trying.
We need a way to retrieve the git revision hash for Sentry reports.
I assume that if we have a git repository available, we will
have git-the-command as well.
"""
try:
# We are not interested in git's complaints
git_hash = subprocess.check_output(
["git", "rev-parse", "HEAD"], stderr=subprocess.DEVNULL, encoding="utf8"
)
# i.e. "git" was not found
# should we return a more generic meta hash here?
# like "undefined"?
except FileNotFoundError:
git_hash = "git_not_available"
except subprocess.CalledProcessError:
# Ditto
git_hash = "no_repository"
return git_hash.rstrip() | identifier_body |
settings.py | """
Django settings for hauki project.
"""
import logging
import os
import subprocess
import environ
import sentry_sdk
from django.conf.global_settings import LANGUAGES as GLOBAL_LANGUAGES
from django.core.exceptions import ImproperlyConfigured
from sentry_sdk.integrations.django import DjangoIntegration
CONFIG_FILE_NAME = "config_dev.env"
# This will get default settings, as Django has not yet initialized
# logging when importing this file
logger = logging.getLogger(__name__)
def get_git_revision_hash() -> str:
"""
Retrieve the git hash for the underlying git repository or die trying.
We need a way to retrieve the git revision hash for Sentry reports.
I assume that if we have a git repository available, we will
have git-the-command as well.
"""
try:
# We are not interested in git's complaints
git_hash = subprocess.check_output(
["git", "rev-parse", "HEAD"], stderr=subprocess.DEVNULL, encoding="utf8"
)
# i.e. "git" was not found
# should we return a more generic meta hash here?
# like "undefined"?
except FileNotFoundError:
git_hash = "git_not_available"
except subprocess.CalledProcessError:
# Ditto
git_hash = "no_repository"
return git_hash.rstrip()
root = environ.Path(__file__) - 2 # two levels back in hierarchy
env = environ.Env(
DEBUG=(bool, False),
DJANGO_LOG_LEVEL=(str, "INFO"),
CONN_MAX_AGE=(int, 0),
SYSTEM_DATA_SOURCE_ID=(str, "hauki"),
LANGUAGES=(list, ["fi", "sv", "en"]),
DATABASE_URL=(str, "postgres:///hauki"),
TEST_DATABASE_URL=(str, ""),
TOKEN_AUTH_ACCEPTED_AUDIENCE=(str, ""),
TOKEN_AUTH_SHARED_SECRET=(str, ""),
SECRET_KEY=(str, ""),
ALLOWED_HOSTS=(list, []),
ADMINS=(list, []),
SECURE_PROXY_SSL_HEADER=(tuple, None),
MEDIA_ROOT=(environ.Path(), root("media")),
STATIC_ROOT=(environ.Path(), root("static")),
MEDIA_URL=(str, "/media/"),
STATIC_URL=(str, "/static/"),
TRUST_X_FORWARDED_HOST=(bool, False),
SENTRY_DSN=(str, ""),
SENTRY_ENVIRONMENT=(str, "development"),
COOKIE_PREFIX=(str, "hauki"),
INTERNAL_IPS=(list, []),
INSTANCE_NAME=(str, "Hauki"),
EXTRA_INSTALLED_APPS=(list, []),
ENABLE_DJANGO_EXTENSIONS=(bool, False),
MAIL_MAILGUN_KEY=(str, ""),
MAIL_MAILGUN_DOMAIN=(str, ""),
MAIL_MAILGUN_API=(str, ""),
RESOURCE_DEFAULT_TIMEZONE=(str, None),
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = root()
# Django environ has a nasty habit of complaining at level
# WARN about the env file not being present. Here we pre-empt it.
env_file_path = os.path.join(BASE_DIR, CONFIG_FILE_NAME)
if os.path.exists(env_file_path):
# Logging configuration is not available at this point
print(f"Reading config from {env_file_path}")
environ.Env.read_env(env_file_path)
DEBUG = env("DEBUG")
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
ADMINS = env("ADMINS")
INTERNAL_IPS = env("INTERNAL_IPS", default=(["127.0.0.1"] if DEBUG else []))
DATABASES = {"default": env.db()}
DATABASES["default"]["CONN_MAX_AGE"] = env("CONN_MAX_AGE")
if env("TEST_DATABASE_URL"):
DATABASES["default"]["TEST"] = env.db("TEST_DATABASE_URL")
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
AUTH_USER_MODEL = "users.User"
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/v1/"
LOGOUT_REDIRECT_URL = "/v1/"
RESOURCE_DEFAULT_TIMEZONE = env("RESOURCE_DEFAULT_TIMEZONE")
DJANGO_ORGHIERARCHY_DATASOURCE_MODEL = "hours.DataSource"
SYSTEM_DATA_SOURCE_ID = env("SYSTEM_DATA_SOURCE_ID")
SITE_ID = 1
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {
"class": "logging.NullHandler",
},
},
"loggers": {
"": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
"django": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
# Application definition
INSTALLED_APPS = [
"helusers.apps.HelusersConfig",
"modeltranslation",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.humanize",
"simple_history",
# disable Django’s development server static file handling
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
"django_filters",
"django_orghierarchy",
"timezone_field",
"mptt",
# Apps within this repository
"users",
"hours",
# OpenAPI
"drf_spectacular",
] + env("EXTRA_INSTALLED_APPS")
if env("SENTRY_DSN"):
se | MIDDLEWARE = [
# CorsMiddleware should be placed as high as possible and above WhiteNoiseMiddleware
# in particular
"corsheaders.middleware.CorsMiddleware",
# Ditto for securitymiddleware
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"simple_history.middleware.HistoryRequestMiddleware",
]
# django-extensions is a set of developer friendly tools
if env("ENABLE_DJANGO_EXTENSIONS"):
INSTALLED_APPS.append("django_extensions")
ROOT_URLCONF = "hauki.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "hauki.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
# Map language codes to the (code, name) tuples used by Django
# We want to keep the ordering in LANGUAGES configuration variable,
# thus some gyrations
language_map = {x: y for x, y in GLOBAL_LANGUAGES}
try:
LANGUAGES = tuple((lang, language_map[lang]) for lang in env("LANGUAGES"))
except KeyError as e:
raise ImproperlyConfigured(f'unknown language code "{e.args[0]}"')
LANGUAGE_CODE = env("LANGUAGES")[0]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [root("locale")]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATIC_ROOT = env("STATIC_ROOT")
MEDIA_ROOT = env("MEDIA_ROOT")
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# e.g. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
# https://docs.djangoproject.com/en/3.0/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Specifies a header that is trusted to indicate that the request was using
# https while traversing over the Internet at large. This is used when
# a proxy terminates the TLS connection and forwards the request over
# a secure network. Specified using a tuple.
# https://docs.djangoproject.com/en/3.0/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = env("SECURE_PROXY_SSL_HEADER")
CORS_ORIGIN_ALLOW_ALL = True
CSRF_COOKIE_NAME = "%s-csrftoken" % env("COOKIE_PREFIX")
SESSION_COOKIE_NAME = "%s-sessionid" % env("COOKIE_PREFIX")
# DRF Settings
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"hours.renderers.BrowsableAPIRendererWithoutForms",
],
"DEFAULT_FILTER_BACKENDS": [
"rest_framework.filters.OrderingFilter",
"django_filters.rest_framework.DjangoFilterBackend",
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"hours.authentication.HaukiSignedAuthentication",
"hours.authentication.HaukiTokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticatedOrReadOnly",
],
"DEFAULT_METADATA_CLASS": "hours.metadata.TranslatedChoiceNamesMetadata",
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}
# shown in the browsable API
INSTANCE_NAME = env("INSTANCE_NAME")
#
# Anymail
#
if env("MAIL_MAILGUN_KEY"):
ANYMAIL = {
"MAILGUN_API_KEY": env("MAIL_MAILGUN_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAIL_MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAIL_MAILGUN_API"),
}
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
elif not env("MAIL_MAILGUN_KEY") and DEBUG is True:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
#
# Django spectacular (OpenAPI) settings
#
SPECTACULAR_SETTINGS = {
"TITLE": "Hauki API",
"DESCRIPTION": """
API for the City of Helsinki opening hours database
# Introduction
To do.
# Authentication methods
<SecurityDefinitions />
""",
"VERSION": "0.0.1",
"EXTERNAL_DOCS": {
"description": "Hauki API in GitHub",
"url": "https://github.com/City-of-Helsinki/hauki",
},
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
local_settings_path = os.path.join(BASE_DIR, "local_settings.py")
if os.path.exists(local_settings_path):
with open(local_settings_path) as fp:
code = compile(fp.read(), local_settings_path, "exec")
# Here, we execute local code on the server. Luckily, local_settings.py and BASE_DIR
# are hard-coded above, so this cannot be used to execute any other files.
exec(code, globals(), locals()) # nosec
# Django SECRET_KEY setting, used for password reset links and such
SECRET_KEY = env("SECRET_KEY")
if not DEBUG and not SECRET_KEY:
raise Exception("In production, SECRET_KEY must be provided in the environment.")
# If a secret key was not supplied elsewhere, generate a random one and print
# a warning (logging is not configured yet?). This means that any functionality
# expecting SECRET_KEY to stay same will break upon restart. Should not be a
# problem for development.
if not SECRET_KEY:
logger.warning(
"SECRET_KEY was not defined in configuration."
" Generating a temporary key for dev."
)
import random
system_random = random.SystemRandom()
SECRET_KEY = "".join(
[
system_random.choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
for i in range(64)
]
)
| ntry_sdk.init(
dsn=env("SENTRY_DSN"),
environment=env("SENTRY_ENVIRONMENT"),
release=get_git_revision_hash(),
integrations=[DjangoIntegration()],
)
| conditional_block |
settings.py | """
Django settings for hauki project.
"""
import logging
import os
import subprocess
import environ
import sentry_sdk
from django.conf.global_settings import LANGUAGES as GLOBAL_LANGUAGES
from django.core.exceptions import ImproperlyConfigured
from sentry_sdk.integrations.django import DjangoIntegration
CONFIG_FILE_NAME = "config_dev.env"
# This will get default settings, as Django has not yet initialized
# logging when importing this file
logger = logging.getLogger(__name__)
def get_git_revision_hash() -> str:
"""
Retrieve the git hash for the underlying git repository or die trying.
We need a way to retrieve the git revision hash for Sentry reports.
I assume that if we have a git repository available, we will
have git-the-command as well.
"""
try:
# We are not interested in git's complaints
git_hash = subprocess.check_output(
["git", "rev-parse", "HEAD"], stderr=subprocess.DEVNULL, encoding="utf8"
)
# i.e. "git" was not found
# should we return a more generic meta hash here?
# like "undefined"?
except FileNotFoundError:
git_hash = "git_not_available"
except subprocess.CalledProcessError:
# Ditto
git_hash = "no_repository"
return git_hash.rstrip()
root = environ.Path(__file__) - 2 # two levels back in hierarchy
env = environ.Env(
DEBUG=(bool, False),
DJANGO_LOG_LEVEL=(str, "INFO"),
CONN_MAX_AGE=(int, 0),
SYSTEM_DATA_SOURCE_ID=(str, "hauki"),
LANGUAGES=(list, ["fi", "sv", "en"]),
DATABASE_URL=(str, "postgres:///hauki"),
TEST_DATABASE_URL=(str, ""),
TOKEN_AUTH_ACCEPTED_AUDIENCE=(str, ""),
TOKEN_AUTH_SHARED_SECRET=(str, ""),
SECRET_KEY=(str, ""),
ALLOWED_HOSTS=(list, []),
ADMINS=(list, []),
SECURE_PROXY_SSL_HEADER=(tuple, None),
MEDIA_ROOT=(environ.Path(), root("media")),
STATIC_ROOT=(environ.Path(), root("static")),
MEDIA_URL=(str, "/media/"),
STATIC_URL=(str, "/static/"),
TRUST_X_FORWARDED_HOST=(bool, False),
SENTRY_DSN=(str, ""),
SENTRY_ENVIRONMENT=(str, "development"),
COOKIE_PREFIX=(str, "hauki"),
INTERNAL_IPS=(list, []),
INSTANCE_NAME=(str, "Hauki"),
EXTRA_INSTALLED_APPS=(list, []),
ENABLE_DJANGO_EXTENSIONS=(bool, False),
MAIL_MAILGUN_KEY=(str, ""),
MAIL_MAILGUN_DOMAIN=(str, ""),
MAIL_MAILGUN_API=(str, ""),
RESOURCE_DEFAULT_TIMEZONE=(str, None),
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = root()
# Django environ has a nasty habit of complaining at level
# WARN about the env file not being present. Here we pre-empt it.
env_file_path = os.path.join(BASE_DIR, CONFIG_FILE_NAME)
if os.path.exists(env_file_path):
# Logging configuration is not available at this point
print(f"Reading config from {env_file_path}")
environ.Env.read_env(env_file_path)
DEBUG = env("DEBUG")
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
ADMINS = env("ADMINS")
INTERNAL_IPS = env("INTERNAL_IPS", default=(["127.0.0.1"] if DEBUG else []))
DATABASES = {"default": env.db()}
DATABASES["default"]["CONN_MAX_AGE"] = env("CONN_MAX_AGE")
if env("TEST_DATABASE_URL"):
DATABASES["default"]["TEST"] = env.db("TEST_DATABASE_URL")
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
AUTH_USER_MODEL = "users.User"
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/v1/"
LOGOUT_REDIRECT_URL = "/v1/"
RESOURCE_DEFAULT_TIMEZONE = env("RESOURCE_DEFAULT_TIMEZONE")
DJANGO_ORGHIERARCHY_DATASOURCE_MODEL = "hours.DataSource"
SYSTEM_DATA_SOURCE_ID = env("SYSTEM_DATA_SOURCE_ID")
SITE_ID = 1
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {
"class": "logging.NullHandler",
},
},
"loggers": {
"": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
"django": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
# Application definition
INSTALLED_APPS = [
"helusers.apps.HelusersConfig",
"modeltranslation",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.humanize",
"simple_history",
# disable Django’s development server static file handling
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
"django_filters",
"django_orghierarchy",
"timezone_field",
"mptt",
# Apps within this repository
"users",
"hours",
# OpenAPI
"drf_spectacular",
] + env("EXTRA_INSTALLED_APPS")
if env("SENTRY_DSN"):
sentry_sdk.init(
dsn=env("SENTRY_DSN"),
environment=env("SENTRY_ENVIRONMENT"),
release=get_git_revision_hash(),
integrations=[DjangoIntegration()],
)
MIDDLEWARE = [
# CorsMiddleware should be placed as high as possible and above WhiteNoiseMiddleware
# in particular
"corsheaders.middleware.CorsMiddleware",
# Ditto for securitymiddleware
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"simple_history.middleware.HistoryRequestMiddleware",
]
# django-extensions is a set of developer friendly tools
if env("ENABLE_DJANGO_EXTENSIONS"):
INSTALLED_APPS.append("django_extensions")
ROOT_URLCONF = "hauki.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "hauki.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
# Map language codes to the (code, name) tuples used by Django
# We want to keep the ordering in LANGUAGES configuration variable,
# thus some gyrations
language_map = {x: y for x, y in GLOBAL_LANGUAGES}
try:
LANGUAGES = tuple((lang, language_map[lang]) for lang in env("LANGUAGES"))
except KeyError as e:
raise ImproperlyConfigured(f'unknown language code "{e.args[0]}"')
LANGUAGE_CODE = env("LANGUAGES")[0]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [root("locale")]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/ | STATIC_ROOT = env("STATIC_ROOT")
MEDIA_ROOT = env("MEDIA_ROOT")
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# e.g. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
# https://docs.djangoproject.com/en/3.0/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Specifies a header that is trusted to indicate that the request was using
# https while traversing over the Internet at large. This is used when
# a proxy terminates the TLS connection and forwards the request over
# a secure network. Specified using a tuple.
# https://docs.djangoproject.com/en/3.0/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = env("SECURE_PROXY_SSL_HEADER")
CORS_ORIGIN_ALLOW_ALL = True
CSRF_COOKIE_NAME = "%s-csrftoken" % env("COOKIE_PREFIX")
SESSION_COOKIE_NAME = "%s-sessionid" % env("COOKIE_PREFIX")
# DRF Settings
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"hours.renderers.BrowsableAPIRendererWithoutForms",
],
"DEFAULT_FILTER_BACKENDS": [
"rest_framework.filters.OrderingFilter",
"django_filters.rest_framework.DjangoFilterBackend",
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"hours.authentication.HaukiSignedAuthentication",
"hours.authentication.HaukiTokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticatedOrReadOnly",
],
"DEFAULT_METADATA_CLASS": "hours.metadata.TranslatedChoiceNamesMetadata",
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}
# shown in the browsable API
INSTANCE_NAME = env("INSTANCE_NAME")
#
# Anymail
#
if env("MAIL_MAILGUN_KEY"):
ANYMAIL = {
"MAILGUN_API_KEY": env("MAIL_MAILGUN_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAIL_MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAIL_MAILGUN_API"),
}
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
elif not env("MAIL_MAILGUN_KEY") and DEBUG is True:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
#
# Django spectacular (OpenAPI) settings
#
SPECTACULAR_SETTINGS = {
"TITLE": "Hauki API",
"DESCRIPTION": """
API for the City of Helsinki opening hours database
# Introduction
To do.
# Authentication methods
<SecurityDefinitions />
""",
"VERSION": "0.0.1",
"EXTERNAL_DOCS": {
"description": "Hauki API in GitHub",
"url": "https://github.com/City-of-Helsinki/hauki",
},
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
local_settings_path = os.path.join(BASE_DIR, "local_settings.py")
if os.path.exists(local_settings_path):
with open(local_settings_path) as fp:
code = compile(fp.read(), local_settings_path, "exec")
# Here, we execute local code on the server. Luckily, local_settings.py and BASE_DIR
# are hard-coded above, so this cannot be used to execute any other files.
exec(code, globals(), locals()) # nosec
# Django SECRET_KEY setting, used for password reset links and such
SECRET_KEY = env("SECRET_KEY")
if not DEBUG and not SECRET_KEY:
raise Exception("In production, SECRET_KEY must be provided in the environment.")
# If a secret key was not supplied elsewhere, generate a random one and print
# a warning (logging is not configured yet?). This means that any functionality
# expecting SECRET_KEY to stay same will break upon restart. Should not be a
# problem for development.
if not SECRET_KEY:
logger.warning(
"SECRET_KEY was not defined in configuration."
" Generating a temporary key for dev."
)
import random
system_random = random.SystemRandom()
SECRET_KEY = "".join(
[
system_random.choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
for i in range(64)
]
) |
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL") | random_line_split |
settings.py | """
Django settings for hauki project.
"""
import logging
import os
import subprocess
import environ
import sentry_sdk
from django.conf.global_settings import LANGUAGES as GLOBAL_LANGUAGES
from django.core.exceptions import ImproperlyConfigured
from sentry_sdk.integrations.django import DjangoIntegration
CONFIG_FILE_NAME = "config_dev.env"
# This will get default settings, as Django has not yet initialized
# logging when importing this file
logger = logging.getLogger(__name__)
def | () -> str:
"""
Retrieve the git hash for the underlying git repository or die trying.
We need a way to retrieve the git revision hash for Sentry reports.
I assume that if we have a git repository available, we will
have git-the-command as well.
"""
try:
# We are not interested in git's complaints
git_hash = subprocess.check_output(
["git", "rev-parse", "HEAD"], stderr=subprocess.DEVNULL, encoding="utf8"
)
# i.e. "git" was not found
# should we return a more generic meta hash here?
# like "undefined"?
except FileNotFoundError:
git_hash = "git_not_available"
except subprocess.CalledProcessError:
# Ditto
git_hash = "no_repository"
return git_hash.rstrip()
root = environ.Path(__file__) - 2 # two levels back in hierarchy
env = environ.Env(
DEBUG=(bool, False),
DJANGO_LOG_LEVEL=(str, "INFO"),
CONN_MAX_AGE=(int, 0),
SYSTEM_DATA_SOURCE_ID=(str, "hauki"),
LANGUAGES=(list, ["fi", "sv", "en"]),
DATABASE_URL=(str, "postgres:///hauki"),
TEST_DATABASE_URL=(str, ""),
TOKEN_AUTH_ACCEPTED_AUDIENCE=(str, ""),
TOKEN_AUTH_SHARED_SECRET=(str, ""),
SECRET_KEY=(str, ""),
ALLOWED_HOSTS=(list, []),
ADMINS=(list, []),
SECURE_PROXY_SSL_HEADER=(tuple, None),
MEDIA_ROOT=(environ.Path(), root("media")),
STATIC_ROOT=(environ.Path(), root("static")),
MEDIA_URL=(str, "/media/"),
STATIC_URL=(str, "/static/"),
TRUST_X_FORWARDED_HOST=(bool, False),
SENTRY_DSN=(str, ""),
SENTRY_ENVIRONMENT=(str, "development"),
COOKIE_PREFIX=(str, "hauki"),
INTERNAL_IPS=(list, []),
INSTANCE_NAME=(str, "Hauki"),
EXTRA_INSTALLED_APPS=(list, []),
ENABLE_DJANGO_EXTENSIONS=(bool, False),
MAIL_MAILGUN_KEY=(str, ""),
MAIL_MAILGUN_DOMAIN=(str, ""),
MAIL_MAILGUN_API=(str, ""),
RESOURCE_DEFAULT_TIMEZONE=(str, None),
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = root()
# Django environ has a nasty habit of complaining at level
# WARN about the env file not being present. Here we pre-empt it.
env_file_path = os.path.join(BASE_DIR, CONFIG_FILE_NAME)
if os.path.exists(env_file_path):
# Logging configuration is not available at this point
print(f"Reading config from {env_file_path}")
environ.Env.read_env(env_file_path)
DEBUG = env("DEBUG")
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
ADMINS = env("ADMINS")
INTERNAL_IPS = env("INTERNAL_IPS", default=(["127.0.0.1"] if DEBUG else []))
DATABASES = {"default": env.db()}
DATABASES["default"]["CONN_MAX_AGE"] = env("CONN_MAX_AGE")
if env("TEST_DATABASE_URL"):
DATABASES["default"]["TEST"] = env.db("TEST_DATABASE_URL")
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
AUTH_USER_MODEL = "users.User"
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/v1/"
LOGOUT_REDIRECT_URL = "/v1/"
RESOURCE_DEFAULT_TIMEZONE = env("RESOURCE_DEFAULT_TIMEZONE")
DJANGO_ORGHIERARCHY_DATASOURCE_MODEL = "hours.DataSource"
SYSTEM_DATA_SOURCE_ID = env("SYSTEM_DATA_SOURCE_ID")
SITE_ID = 1
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {
"class": "logging.NullHandler",
},
},
"loggers": {
"": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
"django": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
# Application definition
INSTALLED_APPS = [
"helusers.apps.HelusersConfig",
"modeltranslation",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.humanize",
"simple_history",
# disable Django’s development server static file handling
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
"django_filters",
"django_orghierarchy",
"timezone_field",
"mptt",
# Apps within this repository
"users",
"hours",
# OpenAPI
"drf_spectacular",
] + env("EXTRA_INSTALLED_APPS")
if env("SENTRY_DSN"):
sentry_sdk.init(
dsn=env("SENTRY_DSN"),
environment=env("SENTRY_ENVIRONMENT"),
release=get_git_revision_hash(),
integrations=[DjangoIntegration()],
)
MIDDLEWARE = [
# CorsMiddleware should be placed as high as possible and above WhiteNoiseMiddleware
# in particular
"corsheaders.middleware.CorsMiddleware",
# Ditto for securitymiddleware
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"simple_history.middleware.HistoryRequestMiddleware",
]
# django-extensions is a set of developer friendly tools
if env("ENABLE_DJANGO_EXTENSIONS"):
INSTALLED_APPS.append("django_extensions")
ROOT_URLCONF = "hauki.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "hauki.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
# Map language codes to the (code, name) tuples used by Django
# We want to keep the ordering in LANGUAGES configuration variable,
# thus some gyrations
language_map = {x: y for x, y in GLOBAL_LANGUAGES}
try:
LANGUAGES = tuple((lang, language_map[lang]) for lang in env("LANGUAGES"))
except KeyError as e:
raise ImproperlyConfigured(f'unknown language code "{e.args[0]}"')
LANGUAGE_CODE = env("LANGUAGES")[0]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [root("locale")]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATIC_ROOT = env("STATIC_ROOT")
MEDIA_ROOT = env("MEDIA_ROOT")
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# e.g. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
# https://docs.djangoproject.com/en/3.0/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Specifies a header that is trusted to indicate that the request was using
# https while traversing over the Internet at large. This is used when
# a proxy terminates the TLS connection and forwards the request over
# a secure network. Specified using a tuple.
# https://docs.djangoproject.com/en/3.0/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = env("SECURE_PROXY_SSL_HEADER")
CORS_ORIGIN_ALLOW_ALL = True
CSRF_COOKIE_NAME = "%s-csrftoken" % env("COOKIE_PREFIX")
SESSION_COOKIE_NAME = "%s-sessionid" % env("COOKIE_PREFIX")
# DRF Settings
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"hours.renderers.BrowsableAPIRendererWithoutForms",
],
"DEFAULT_FILTER_BACKENDS": [
"rest_framework.filters.OrderingFilter",
"django_filters.rest_framework.DjangoFilterBackend",
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"hours.authentication.HaukiSignedAuthentication",
"hours.authentication.HaukiTokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticatedOrReadOnly",
],
"DEFAULT_METADATA_CLASS": "hours.metadata.TranslatedChoiceNamesMetadata",
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}
# shown in the browsable API
INSTANCE_NAME = env("INSTANCE_NAME")
#
# Anymail
#
if env("MAIL_MAILGUN_KEY"):
ANYMAIL = {
"MAILGUN_API_KEY": env("MAIL_MAILGUN_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAIL_MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAIL_MAILGUN_API"),
}
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
elif not env("MAIL_MAILGUN_KEY") and DEBUG is True:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
#
# Django spectacular (OpenAPI) settings
#
SPECTACULAR_SETTINGS = {
"TITLE": "Hauki API",
"DESCRIPTION": """
API for the City of Helsinki opening hours database
# Introduction
To do.
# Authentication methods
<SecurityDefinitions />
""",
"VERSION": "0.0.1",
"EXTERNAL_DOCS": {
"description": "Hauki API in GitHub",
"url": "https://github.com/City-of-Helsinki/hauki",
},
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
local_settings_path = os.path.join(BASE_DIR, "local_settings.py")
if os.path.exists(local_settings_path):
with open(local_settings_path) as fp:
code = compile(fp.read(), local_settings_path, "exec")
# Here, we execute local code on the server. Luckily, local_settings.py and BASE_DIR
# are hard-coded above, so this cannot be used to execute any other files.
exec(code, globals(), locals()) # nosec
# Django SECRET_KEY setting, used for password reset links and such
SECRET_KEY = env("SECRET_KEY")
if not DEBUG and not SECRET_KEY:
raise Exception("In production, SECRET_KEY must be provided in the environment.")
# If a secret key was not supplied elsewhere, generate a random one and print
# a warning (logging is not configured yet?). This means that any functionality
# expecting SECRET_KEY to stay same will break upon restart. Should not be a
# problem for development.
if not SECRET_KEY:
logger.warning(
"SECRET_KEY was not defined in configuration."
" Generating a temporary key for dev."
)
import random
system_random = random.SystemRandom()
SECRET_KEY = "".join(
[
system_random.choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
for i in range(64)
]
)
| get_git_revision_hash | identifier_name |
mod.rs | use crate::closure;
use wasm_bindgen::prelude::*;
/// Since the `copy` option can be either a function or a boolean, this enum
/// encapsulates the possible values for the copy option.
///
/// The closure signature is `(el, handle)`, the element to check and the
/// element that was directly clicked on.
pub enum CopyValue {
Bool(bool),
Func(Box<dyn FnMut(JsValue, JsValue) -> bool>),
}
impl From<CopyValue> for JsValue {
fn from(copy: CopyValue) -> JsValue {
match copy {
CopyValue::Bool(copy) => JsValue::from(copy),
CopyValue::Func(copy) => closure::to_js_2_ret(copy),
}
}
}
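// A minimal sketch of the closure variant (the Element conversion assumes
// the web_sys crate, as in the Options docs below; the class name is
// hypothetical):
//   let copy = CopyValue::Func(Box::new(|_el, handle| {
//       web_sys::Element::from(handle).class_list().contains("copy-handle")
//   }));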
/// The axis to be considered when determining the location an element will be
/// placed when dropped.
///
/// When an element is dropped onto a container, it will be placed near the
/// point where the mouse was released. If the `direction` is `Vertical`,
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is `Horizontal`, the X axis will be considered.
pub enum Direction {
Vertical,
Horizontal,
}
impl ToString for Direction {
fn to_string(&self) -> String {
const VERTICAL: &str = "vertical";
const HORIZONTAL: &str = "horizontal";
match self {
Direction::Vertical => String::from(VERTICAL),
Direction::Horizontal => String::from(HORIZONTAL),
}
}
}
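// Sanity sketch of the serialized values dragula.js expects:
//   assert_eq!(Direction::Vertical.to_string(), "vertical");
//   assert_eq!(Direction::Horizontal.to_string(), "horizontal");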
/// Used to pass options when activating Dragula
///
/// When passed to the [`dragula_options`](crate::dragula_options) function,
/// this struct can be used to specify options to control the behaviour of the
/// drag-and-drop functionality.
///
/// For example:
/// ```no_run
/// use dragula::*;
/// use dragula::options::CopyValue;
/// use web_sys::Element;
/// # use wasm_bindgen::JsValue;
///
/// # let element = JsValue::TRUE;
/// //--snip--
///
/// let options = Options {
/// invalid: Box::new(|el, _handle| {
/// Element::from(el).tag_name() == String::from("A")
/// }),
/// copy: CopyValue::Bool(true),
/// copy_sort_source: true,
/// remove_on_spill: true,
/// slide_factor_x: 10,
/// slide_factor_y: 10,
/// ..Options::default()
/// };
///
/// let drake = dragula_options(&[element], options);
///
/// //--snip--
/// ```
pub struct Options {
/// Besides the containers that you pass to [`dragula`](crate::dragula()),
/// or the containers you dynamically add, you can also use this closure to
/// specify any sort of logic that defines what is a container
/// for this particular [`Drake`](crate::Drake) instance.
///
/// This closure will be invoked with the element that is being checked for
/// whether it is a container.
pub is_container: Box<dyn FnMut(JsValue) -> bool>,
/// You can define a `moves` closure which will be invoked with `(el, source,
/// handle, sibling)` whenever an element is clicked. If this closure returns
/// `false`, a drag event won't begin, and the event won't be prevented
/// either. The `handle` element will be the original click target, which
/// comes in handy to test if that element is an expected _"drag handle"_.
pub moves: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can set `accepts` to a closure with the following signature: `(el,
/// target, source, sibling)`. It'll be called to make sure that an element
/// `el`, that came from container `source`, can be dropped on container
/// `target` before a `sibling` element. The `sibling` can be `null`, which
/// would mean that the element would be placed as the last element in the
/// container. Note that if [`copy`](Options::copy) is set to `true`, `el` will be
/// set to the copy, instead of the originally dragged element.
pub accepts: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can provide an `invalid` closure with a `(el, handle)` signature.
/// This closure should return `true` for elements that shouldn't trigger a
/// drag. The `handle` argument is the element that was clicked, while `el`
/// is the item that would be dragged.
pub invalid: Box<dyn FnMut(JsValue, JsValue) -> bool>,
/// If `copy` is set to `true` _(or a closure that returns `true`)_, items
/// will be copied rather than moved. This implies the following differences:
///
/// Event | Move | Copy
/// ----------|------------------------------------------|---------------------------------------------
/// `drag` | Element will be concealed from `source` | Nothing happens
/// `drop` | Element will be moved into `target` | Element will be cloned into `target`
/// `remove` | Element will be removed from DOM | Nothing happens
/// `cancel` | Element will stay in `source` | Nothing happens
///
/// If a closure is passed, it'll be called whenever an element starts being
/// dragged in order to decide whether it should follow `copy` behavior or
/// not. This closure will be passed the element to be dragged as well as
/// its source container, in other words, the signature is `(el, handle)`.
///
/// `false` by default.
pub copy: CopyValue,
/// If [`copy`](Options::copy) is set to `true` _(or a closure that
/// returns `true`)_ and `copy_sort_source` is `true` as well, users will
/// be able to sort elements in `copy`-source containers.
///
/// `false` by default.
pub copy_sort_source: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `revert_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are moved back to the source element where
/// the drag event began, rather than stay at the _drop position previewed
/// by the feedback shadow_.
///
/// `false` by default.
pub revert_on_spill: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `remove_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are removed from the DOM. Note that `remove`
/// events won't fire if [`copy`](Options::copy) is set to `true`.
///
/// `false` by default.
pub remove_on_spill: bool,
/// When an element is dropped onto a container, it'll be placed near the
/// point where the mouse was released. If the `direction` is
/// [`Vertical`](Direction::Vertical),
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is [`Horizontal`](Direction::Horizontal),
/// the X axis will be considered.
///
/// [`Vertical`](Direction::Vertical), by default.
pub direction: Direction,
/// The DOM element where the mirror element displayed while dragging will
/// be appended to.
///
/// `document.body` by default.
pub mirror_container: JsValue,
/// When this option is enabled, if the user clicks on an input element the
/// drag won't start until their mouse pointer exits the input. This
/// translates into the user being able to select text in inputs contained
/// inside draggable elements, and still drag the element by moving their
/// mouse outside of the input -- so you get the best of both worlds.
///
/// `true` by default.
pub ignore_input_text_selection: bool,
/// The amount of horizontal movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_x: i32,
/// The amount of vertical movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_y: i32,
}
impl Default for Options {
fn default() -> Self {
Self {
is_container: Box::new(|_| false),
moves: Box::new(|_, _, _, _| true),
accepts: Box::new(|_, _, _, _| true),
invalid: Box::new(|_, _| false),
copy: CopyValue::Bool(false),
copy_sort_source: false,
revert_on_spill: false,
remove_on_spill: false,
direction: Direction::Vertical,
// Will default to document.body (avoiding web_sys dependency)
mirror_container: JsValue::UNDEFINED,
ignore_input_text_selection: true,
slide_factor_x: 0,
slide_factor_y: 0,
}
}
}
#[doc(hidden)]
#[wasm_bindgen]
pub struct OptionsImpl {
is_container_func: JsValue,
moves_func: JsValue,
accepts_func: JsValue,
invalid_func: JsValue,
copy_func_or_bool: JsValue,
#[wasm_bindgen(js_name = copySortSource)]
pub copy_sort_source: bool,
#[wasm_bindgen(js_name = revertOnSpill)]
pub revert_on_spill: bool,
#[wasm_bindgen(js_name = removeOnSpill)]
pub remove_on_spill: bool,
direction: String,
mirror_container_elem: JsValue,
#[wasm_bindgen(js_name = ignoreInputTextSelection)]
pub ignore_input_text_selection: bool,
#[wasm_bindgen(js_name = slideFactorX)]
pub slide_factor_x: i32,
#[wasm_bindgen(js_name = slideFactorY)]
pub slide_factor_y: i32,
}
impl From<Options> for OptionsImpl {
fn from(options: Options) -> Self {
OptionsImpl {
is_container_func: closure::to_js_1_ret(options.is_container),
moves_func: closure::to_js_4_ret(options.moves),
accepts_func: closure::to_js_4_ret(options.accepts),
invalid_func: closure::to_js_2_ret(options.invalid),
copy_func_or_bool: JsValue::from(options.copy),
mirror_container_elem: options.mirror_container,
copy_sort_source: options.copy_sort_source,
revert_on_spill: options.revert_on_spill,
remove_on_spill: options.remove_on_spill,
direction: options.direction.to_string(),
ignore_input_text_selection: options.ignore_input_text_selection,
slide_factor_x: options.slide_factor_x,
slide_factor_y: options.slide_factor_y,
}
}
}
impl Default for OptionsImpl {
fn default() -> Self {
OptionsImpl::from(Options::default())
}
}
#[wasm_bindgen]
#[doc(hidden)]
impl OptionsImpl {
#[wasm_bindgen(getter = isContainer)]
pub fn is_container_func(&self) -> JsValue {
self.is_container_func.clone()
}
#[wasm_bindgen(setter = isContainer)]
pub fn set_is_container_func(&mut self, val: JsValue) {
self.is_container_func = val;
}
#[wasm_bindgen(getter = moves)]
pub fn moves_func(&self) -> JsValue {
self.moves_func.clone()
}
#[wasm_bindgen(setter = moves)]
pub fn set_moves_func(&mut self, val: JsValue) {
self.moves_func = val;
}
#[wasm_bindgen(getter = accepts)]
pub fn accepts_func(&self) -> JsValue {
self.accepts_func.clone()
}
#[wasm_bindgen(setter = accepts)]
pub fn set_accepts_func(&mut self, val: JsValue) {
self.accepts_func = val;
}
#[wasm_bindgen(getter = invalid)]
pub fn invalid_func(&self) -> JsValue |
#[wasm_bindgen(setter = invalid)]
pub fn set_invalid_func(&mut self, val: JsValue) {
self.invalid_func = val;
}
#[wasm_bindgen(getter = copy)]
pub fn copy_func_or_bool(&self) -> JsValue {
self.copy_func_or_bool.clone()
}
#[wasm_bindgen(setter = copy)]
pub fn set_copy_func_or_bool(&mut self, val: JsValue) {
self.copy_func_or_bool = val;
}
#[wasm_bindgen(getter = mirrorContainer)]
pub fn mirror_container_elem(&self) -> JsValue {
self.mirror_container_elem.clone()
}
#[wasm_bindgen(setter = mirrorContainer)]
pub fn set_mirror_container_elem(&mut self, val: JsValue) {
self.mirror_container_elem = val;
}
#[wasm_bindgen(getter)]
pub fn direction(&self) -> String {
self.direction.clone()
}
#[wasm_bindgen(setter)]
pub fn set_direction(&mut self, val: String) {
self.direction = val;
}
}
#[cfg(test)]
mod test;
mod.rs | use crate::closure;
use wasm_bindgen::prelude::*;
/// Since the `copy` option can be either a function or a boolean, this enum
/// encapsulates the possible values for the copy option.
///
/// The closure signature is `(el, source)`: the element that would be
/// copied and the container it is being dragged out of.
pub enum CopyValue {
Bool(bool),
Func(Box<dyn FnMut(JsValue, JsValue) -> bool>),
}
impl From<CopyValue> for JsValue {
fn from(copy: CopyValue) -> JsValue {
match copy {
CopyValue::Bool(copy) => JsValue::from(copy),
CopyValue::Func(copy) => closure::to_js_2_ret(copy),
}
}
}
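// Illustrative sketch (not part of the original module): both `CopyValue`
// variants lower to the `JsValue` dragula expects through the `From` impl
// above; the closure variant is wrapped by `closure::to_js_2_ret` so the JS
// side can invoke it.
#[allow(dead_code)]
fn copy_value_examples() -> (JsValue, JsValue) {
    let never = JsValue::from(CopyValue::Bool(false));
    let always = JsValue::from(CopyValue::Func(Box::new(|_el, _source| true)));
    (never, always)
}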
/// The axis to be considered when determining the location an element will be
/// placed when dropped.
///
/// When an element is dropped onto a container, it will be placed near the
/// point where the mouse was released. If the `direction` is `Vertical`,
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is `Horizontal`, the X axis will be considered.
pub enum Direction {
Vertical,
Horizontal,
}
impl ToString for Direction {
fn to_string(&self) -> String {
const VERTICAL: &str = "vertical";
const HORIZONTAL: &str = "horizontal";
match self {
Direction::Vertical => String::from(VERTICAL),
Direction::Horizontal => String::from(HORIZONTAL),
}
}
}
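// Illustrative sketch (not part of the original module): the lowercase strings
// produced above are exactly what the underlying dragula library expects for
// its `direction` option.
#[allow(dead_code)]
fn direction_strings_match_dragula() {
    assert_eq!(Direction::Vertical.to_string(), "vertical");
    assert_eq!(Direction::Horizontal.to_string(), "horizontal");
}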
/// Used to pass options when activating Dragula
///
/// When passed to the [`dragula_options`](crate::dragula_options) function,
/// this struct can be used to specify options to control the behaviour of the
/// drag-and-drop functionality.
///
/// For example:
/// ```no_run
/// use dragula::*;
/// use dragula::options::CopyValue;
/// use web_sys::Element;
/// # use wasm_bindgen::JsValue;
///
/// # let element = JsValue::TRUE;
/// //--snip--
///
/// let options = Options {
/// invalid: Box::new(|el, _handle| {
/// Element::from(el).tag_name() == String::from("A")
/// }),
/// copy: CopyValue::Bool(true),
/// copy_sort_source: true,
/// remove_on_spill: true,
/// slide_factor_x: 10,
/// slide_factor_y: 10,
/// ..Options::default()
/// };
///
/// let drake = dragula_options(&[element], options);
///
/// //--snip--
/// ```
pub struct Options {
    /// Besides the containers that you pass to [`dragula`](crate::dragula()),
    /// or the containers you dynamically add, you can also use this closure to
    /// specify any sort of logic that defines what is a container
    /// for this particular [`Drake`](crate::Drake) instance.
///
/// This closure will be invoked with the element that is being checked for
/// whether it is a container.
pub is_container: Box<dyn FnMut(JsValue) -> bool>,
/// You can define a `moves` closure which will be invoked with `(el, source,
/// handle, sibling)` whenever an element is clicked. If this closure returns
/// `false`, a drag event won't begin, and the event won't be prevented
/// either. The `handle` element will be the original click target, which
/// comes in handy to test if that element is an expected _"drag handle"_.
pub moves: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can set `accepts` to a closure with the following signature: `(el,
/// target, source, sibling)`. It'll be called to make sure that an element
/// `el`, that came from container `source`, can be dropped on container
/// `target` before a `sibling` element. The `sibling` can be `null`, which
/// would mean that the element would be placed as the last element in the
/// container. Note that if [`copy`](Options::copy) is set to `true`, `el` will be
/// set to the copy, instead of the originally dragged element.
pub accepts: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can provide an `invalid` closure with a `(el, handle)` signature.
/// This closure should return `true` for elements that shouldn't trigger a
/// drag. The `handle` argument is the element that was clicked, while `el`
/// is the item that would be dragged.
pub invalid: Box<dyn FnMut(JsValue, JsValue) -> bool>,
/// If `copy` is set to `true` _(or a closure that returns `true`)_, items
/// will be copied rather than moved. This implies the following differences:
///
/// Event | Move | Copy
/// ----------|------------------------------------------|---------------------------------------------
/// `drag` | Element will be concealed from `source` | Nothing happens
/// `drop` | Element will be moved into `target` | Element will be cloned into `target`
/// `remove` | Element will be removed from DOM | Nothing happens
/// `cancel` | Element will stay in `source` | Nothing happens
///
/// If a closure is passed, it'll be called whenever an element starts being
/// dragged in order to decide whether it should follow `copy` behavior or
/// not. This closure will be passed the element to be dragged as well as
/// its source container; in other words, the signature is `(el, source)`.
///
/// `false` by default.
pub copy: CopyValue,
/// If [`copy`](Options::copy) is set to `true` _(or a closure that
/// returns `true`)_ and `copy_sort_source` is `true` as well, users will
/// be able to sort elements in `copy`-source containers.
///
/// `false` by default.
pub copy_sort_source: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `revert_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are moved back to the source element where
/// the drag event began, rather than stay at the _drop position previewed
/// by the feedback shadow_.
///
/// `false` by default.
pub revert_on_spill: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `remove_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are removed from the DOM. Note that `remove`
/// events won't fire if [`copy`](Options::copy) is set to `true`.
///
/// `false` by default.
pub remove_on_spill: bool,
/// When an element is dropped onto a container, it'll be placed near the
/// point where the mouse was released. If the `direction` is
/// [`Vertical`](Direction::Vertical),
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is [`Horizontal`](Direction::Horizontal),
/// the X axis will be considered.
///
/// [`Vertical`](Direction::Vertical), by default.
pub direction: Direction,
/// The DOM element where the mirror element displayed while dragging will
/// be appended to.
///
/// `document.body` by default.
pub mirror_container: JsValue,
/// When this option is enabled, if the user clicks on an input element the
/// drag won't start until their mouse pointer exits the input. This
/// translates into the user being able to select text in inputs contained
/// inside draggable elements, and still drag the element by moving their
/// mouse outside of the input -- so you get the best of both worlds.
///
/// `true` by default.
pub ignore_input_text_selection: bool,
/// The amount of horizontal movement (in pixels) for a click to be
/// considered a drag.
///
/// `0` by default.
pub slide_factor_x: i32,
/// The amount of vertical movement (in pixels) for a click to be
/// considered a drag.
///
/// `0` by default.
pub slide_factor_y: i32,
}
impl Default for Options {
fn default() -> Self {
Self {
is_container: Box::new(|_| false),
moves: Box::new(|_, _, _, _| true),
accepts: Box::new(|_, _, _, _| true),
invalid: Box::new(|_, _| false),
copy: CopyValue::Bool(false),
copy_sort_source: false,
revert_on_spill: false,
remove_on_spill: false,
direction: Direction::Vertical,
// Will default to document.body (avoiding web_sys dependency)
mirror_container: JsValue::UNDEFINED,
ignore_input_text_selection: true,
slide_factor_x: 0,
slide_factor_y: 0,
}
}
}
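// Illustrative sketch (not part of the original module): a "palette" style
// configuration, where items are copied out of the source container rather
// than moved, and the source itself remains sortable as documented for
// `copy_sort_source` above.
#[allow(dead_code)]
fn copy_from_palette_options() -> Options {
    Options {
        copy: CopyValue::Bool(true),
        copy_sort_source: true,
        ..Options::default()
    }
}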
#[doc(hidden)]
#[wasm_bindgen]
pub struct OptionsImpl {
is_container_func: JsValue,
moves_func: JsValue,
accepts_func: JsValue,
invalid_func: JsValue,
copy_func_or_bool: JsValue,
#[wasm_bindgen(js_name = copySortSource)]
pub copy_sort_source: bool,
#[wasm_bindgen(js_name = revertOnSpill)]
pub revert_on_spill: bool,
#[wasm_bindgen(js_name = removeOnSpill)]
pub remove_on_spill: bool,
direction: String,
mirror_container_elem: JsValue,
#[wasm_bindgen(js_name = ignoreInputTextSelection)]
pub ignore_input_text_selection: bool,
#[wasm_bindgen(js_name = slideFactorX)]
pub slide_factor_x: i32,
#[wasm_bindgen(js_name = slideFactorY)]
pub slide_factor_y: i32,
}
impl From<Options> for OptionsImpl {
fn from(options: Options) -> Self {
OptionsImpl {
is_container_func: closure::to_js_1_ret(options.is_container),
moves_func: closure::to_js_4_ret(options.moves),
accepts_func: closure::to_js_4_ret(options.accepts),
invalid_func: closure::to_js_2_ret(options.invalid),
copy_func_or_bool: JsValue::from(options.copy),
mirror_container_elem: options.mirror_container,
copy_sort_source: options.copy_sort_source,
revert_on_spill: options.revert_on_spill,
remove_on_spill: options.remove_on_spill,
direction: options.direction.to_string(),
ignore_input_text_selection: options.ignore_input_text_selection,
slide_factor_x: options.slide_factor_x,
slide_factor_y: options.slide_factor_y,
}
}
}
impl Default for OptionsImpl {
fn default() -> Self {
OptionsImpl::from(Options::default())
}
}
#[wasm_bindgen]
#[doc(hidden)]
impl OptionsImpl {
#[wasm_bindgen(getter = isContainer)]
pub fn is_container_func(&self) -> JsValue {
self.is_container_func.clone()
}
#[wasm_bindgen(setter = isContainer)]
pub fn set_is_container_func(&mut self, val: JsValue) {
self.is_container_func = val;
}
#[wasm_bindgen(getter = moves)]
pub fn moves_func(&self) -> JsValue {
self.moves_func.clone()
}
#[wasm_bindgen(setter = moves)]
pub fn set_moves_func(&mut self, val: JsValue) {
self.moves_func = val;
}
#[wasm_bindgen(getter = accepts)]
pub fn accepts_func(&self) -> JsValue {
self.accepts_func.clone()
}
#[wasm_bindgen(setter = accepts)]
pub fn set_accepts_func(&mut self, val: JsValue) {
self.accepts_func = val;
}
#[wasm_bindgen(getter = invalid)]
pub fn invalid_func(&self) -> JsValue {
self.invalid_func.clone()
}
#[wasm_bindgen(setter = invalid)]
pub fn set_invalid_func(&mut self, val: JsValue) {
self.invalid_func = val;
}
#[wasm_bindgen(getter = copy)]
pub fn copy_func_or_bool(&self) -> JsValue {
self.copy_func_or_bool.clone()
}
#[wasm_bindgen(setter = copy)]
pub fn set_copy_func_or_bool(&mut self, val: JsValue) {
self.copy_func_or_bool = val;
}
#[wasm_bindgen(getter = mirrorContainer)]
pub fn mirror_container_elem(&self) -> JsValue {
self.mirror_container_elem.clone()
}
#[wasm_bindgen(setter = mirrorContainer)]
pub fn set_mirror_container_elem(&mut self, val: JsValue) {
self.mirror_container_elem = val;
}
#[wasm_bindgen(getter)]
pub fn direction(&self) -> String {
self.direction.clone()
}
#[wasm_bindgen(setter)]
pub fn set_direction(&mut self, val: String) {
self.direction = val;
}
}
#[cfg(test)]
mod test;
mod.rs | use crate::closure;
use wasm_bindgen::prelude::*;
/// Since the `copy` option can be either a function or a boolean, this enum
/// encapsulates the possible values for the copy option.
///
/// The closure signature is `(el, source)`: the element that would be
/// copied and the container it is being dragged out of.
pub enum CopyValue {
Bool(bool),
Func(Box<dyn FnMut(JsValue, JsValue) -> bool>),
}
impl From<CopyValue> for JsValue {
fn from(copy: CopyValue) -> JsValue {
match copy {
CopyValue::Bool(copy) => JsValue::from(copy),
CopyValue::Func(copy) => closure::to_js_2_ret(copy),
}
}
}
/// The axis to be considered when determining the location an element will be
/// placed when dropped.
///
/// When an element is dropped onto a container, it will be placed near the
/// point where the mouse was released. If the `direction` is `Vertical`,
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is `Horizontal`, the X axis will be considered.
pub enum Direction {
Vertical,
Horizontal,
}
impl ToString for Direction {
fn to_string(&self) -> String {
const VERTICAL: &str = "vertical";
const HORIZONTAL: &str = "horizontal";
match self {
Direction::Vertical => String::from(VERTICAL),
Direction::Horizontal => String::from(HORIZONTAL),
}
}
}
/// Used to pass options when activating Dragula
///
/// When passed to the [`dragula_options`](crate::dragula_options) function,
/// this struct can be used to specify options to control the behaviour of the
/// drag-and-drop functionality.
///
/// For example:
/// ```no_run
/// use dragula::*;
/// use dragula::options::CopyValue;
/// use web_sys::Element;
/// # use wasm_bindgen::JsValue;
///
/// # let element = JsValue::TRUE;
/// //--snip--
///
/// let options = Options {
/// invalid: Box::new(|el, _handle| {
/// Element::from(el).tag_name() == String::from("A")
/// }),
/// copy: CopyValue::Bool(true),
/// copy_sort_source: true,
/// remove_on_spill: true,
/// slide_factor_x: 10,
/// slide_factor_y: 10,
/// ..Options::default()
/// };
///
/// let drake = dragula_options(&[element], options);
///
/// //--snip--
/// ```
pub struct Options {
/// Besides the containers that you pass to [`dragula`](crate::dragula()),
/// or the containers you dynamically add, you can also use this closure to
/// specify any sort of logic that defines what is a container
/// for this particular [`Drake`](crate::Drake) instance.
///
/// This closure will be invoked with the element that is being checked for
/// whether it is a container.
pub is_container: Box<dyn FnMut(JsValue) -> bool>,
/// You can define a `moves` closure which will be invoked with `(el, source,
/// handle, sibling)` whenever an element is clicked. If this closure returns
/// `false`, a drag event won't begin, and the event won't be prevented
/// either. The `handle` element will be the original click target, which
/// comes in handy to test if that element is an expected _"drag handle"_.
pub moves: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can set `accepts` to a closure with the following signature: `(el,
/// target, source, sibling)`. It'll be called to make sure that an element
/// `el`, that came from container `source`, can be dropped on container
/// `target` before a `sibling` element. The `sibling` can be `null`, which
/// would mean that the element would be placed as the last element in the
/// container. Note that if [`copy`](Options::copy) is set to `true`, `el` will be
/// set to the copy, instead of the originally dragged element.
pub accepts: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can provide an `invalid` closure with a `(el, handle)` signature.
/// This closure should return `true` for elements that shouldn't trigger a
/// drag. The `handle` argument is the element that was clicked, while `el`
/// is the item that would be dragged.
pub invalid: Box<dyn FnMut(JsValue, JsValue) -> bool>,
/// If `copy` is set to `true` _(or a closure that returns `true`)_, items
/// will be copied rather than moved. This implies the following differences:
///
/// Event | Move | Copy
/// ----------|------------------------------------------|---------------------------------------------
/// `drag` | Element will be concealed from `source` | Nothing happens
/// `drop` | Element will be moved into `target` | Element will be cloned into `target`
/// `remove` | Element will be removed from DOM | Nothing happens
/// `cancel` | Element will stay in `source` | Nothing happens
///
/// If a closure is passed, it'll be called whenever an element starts being
/// dragged in order to decide whether it should follow `copy` behavior or
/// not. This closure will be passed the element to be dragged as well as
/// its source container; in other words, the signature is `(el, source)`.
///
/// `false` by default.
pub copy: CopyValue,
/// If [`copy`](Options::copy) is set to `true` _(or a closure that
/// returns `true`)_ and `copy_sort_source` is `true` as well, users will
/// be able to sort elements in `copy`-source containers.
///
/// `false` by default.
pub copy_sort_source: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `revert_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are moved back to the source element where
/// the drag event began, rather than stay at the _drop position previewed
/// by the feedback shadow_.
///
/// `false` by default.
pub revert_on_spill: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `remove_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are removed from the DOM. Note that `remove`
/// events won't fire if [`copy`](Options::copy) is set to `true`.
///
/// `false` by default.
pub remove_on_spill: bool,
/// When an element is dropped onto a container, it'll be placed near the
/// point where the mouse was released. If the `direction` is
/// [`Vertical`](Direction::Vertical),
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is [`Horizontal`](Direction::Horizontal),
/// the X axis will be considered.
///
/// [`Vertical`](Direction::Vertical), by default.
pub direction: Direction,
/// The DOM element where the mirror element displayed while dragging will
/// be appended to.
///
/// `document.body` by default.
pub mirror_container: JsValue,
/// When this option is enabled, if the user clicks on an input element the
/// drag won't start until their mouse pointer exits the input. This
/// translates into the user being able to select text in inputs contained
/// inside draggable elements, and still drag the element by moving their
/// mouse outside of the input -- so you get the best of both worlds.
///
/// `true` by default.
pub ignore_input_text_selection: bool,
/// The amount of horizontal movement (in pixels) for a click to be
/// considered a drag.
///
/// `0` by default.
pub slide_factor_x: i32,
/// The amount of vertical movement (in pixels) for a click to be
/// considered a drag.
///
/// `0` by default.
pub slide_factor_y: i32,
}
impl Default for Options {
fn default() -> Self {
Self {
is_container: Box::new(|_| false),
moves: Box::new(|_, _, _, _| true),
accepts: Box::new(|_, _, _, _| true),
invalid: Box::new(|_, _| false),
copy: CopyValue::Bool(false),
copy_sort_source: false,
revert_on_spill: false,
remove_on_spill: false,
direction: Direction::Vertical,
// Will default to document.body (avoiding web_sys dependency)
mirror_container: JsValue::UNDEFINED,
ignore_input_text_selection: true,
slide_factor_x: 0,
slide_factor_y: 0,
}
}
}
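// Illustrative sketch (not part of the original module): `is_container` can
// replace a static container list with arbitrary logic. Here any defined,
// non-null element is accepted, using only `JsValue` checks so that no extra
// dependency (such as web_sys) is needed.
#[allow(dead_code)]
fn dynamic_container_options() -> Options {
    Options {
        is_container: Box::new(|el| !el.is_null() && !el.is_undefined()),
        ..Options::default()
    }
}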
#[doc(hidden)]
#[wasm_bindgen]
pub struct OptionsImpl {
is_container_func: JsValue,
moves_func: JsValue,
accepts_func: JsValue,
invalid_func: JsValue,
copy_func_or_bool: JsValue,
#[wasm_bindgen(js_name = copySortSource)]
pub copy_sort_source: bool,
#[wasm_bindgen(js_name = revertOnSpill)]
pub revert_on_spill: bool,
#[wasm_bindgen(js_name = removeOnSpill)]
pub remove_on_spill: bool,
direction: String,
mirror_container_elem: JsValue,
#[wasm_bindgen(js_name = ignoreInputTextSelection)]
pub ignore_input_text_selection: bool,
#[wasm_bindgen(js_name = slideFactorX)]
pub slide_factor_x: i32,
#[wasm_bindgen(js_name = slideFactorY)]
pub slide_factor_y: i32,
}
impl From<Options> for OptionsImpl {
fn from(options: Options) -> Self {
OptionsImpl {
is_container_func: closure::to_js_1_ret(options.is_container),
moves_func: closure::to_js_4_ret(options.moves),
accepts_func: closure::to_js_4_ret(options.accepts),
invalid_func: closure::to_js_2_ret(options.invalid),
copy_func_or_bool: JsValue::from(options.copy),
mirror_container_elem: options.mirror_container,
copy_sort_source: options.copy_sort_source,
revert_on_spill: options.revert_on_spill,
remove_on_spill: options.remove_on_spill,
direction: options.direction.to_string(),
ignore_input_text_selection: options.ignore_input_text_selection,
slide_factor_x: options.slide_factor_x,
slide_factor_y: options.slide_factor_y,
}
}
}
impl Default for OptionsImpl {
    fn default() -> Self {
OptionsImpl::from(Options::default())
}
}
#[wasm_bindgen]
#[doc(hidden)]
impl OptionsImpl {
#[wasm_bindgen(getter = isContainer)]
pub fn is_container_func(&self) -> JsValue {
self.is_container_func.clone()
}
#[wasm_bindgen(setter = isContainer)]
pub fn set_is_container_func(&mut self, val: JsValue) {
self.is_container_func = val;
}
#[wasm_bindgen(getter = moves)]
pub fn moves_func(&self) -> JsValue {
self.moves_func.clone()
}
#[wasm_bindgen(setter = moves)]
pub fn set_moves_func(&mut self, val: JsValue) {
self.moves_func = val;
}
#[wasm_bindgen(getter = accepts)]
pub fn accepts_func(&self) -> JsValue {
self.accepts_func.clone()
}
#[wasm_bindgen(setter = accepts)]
pub fn set_accepts_func(&mut self, val: JsValue) {
self.accepts_func = val;
}
#[wasm_bindgen(getter = invalid)]
pub fn invalid_func(&self) -> JsValue {
self.invalid_func.clone()
}
#[wasm_bindgen(setter = invalid)]
pub fn set_invalid_func(&mut self, val: JsValue) {
self.invalid_func = val;
}
#[wasm_bindgen(getter = copy)]
pub fn copy_func_or_bool(&self) -> JsValue {
self.copy_func_or_bool.clone()
}
#[wasm_bindgen(setter = copy)]
pub fn set_copy_func_or_bool(&mut self, val: JsValue) {
self.copy_func_or_bool = val;
}
#[wasm_bindgen(getter = mirrorContainer)]
pub fn mirror_container_elem(&self) -> JsValue {
self.mirror_container_elem.clone()
}
#[wasm_bindgen(setter = mirrorContainer)]
pub fn set_mirror_container_elem(&mut self, val: JsValue) {
self.mirror_container_elem = val;
}
#[wasm_bindgen(getter)]
pub fn direction(&self) -> String {
self.direction.clone()
}
#[wasm_bindgen(setter)]
pub fn set_direction(&mut self, val: String) {
self.direction = val;
}
}
#[cfg(test)]
mod test;
train.py | # from __future__ import division
# from __future__ import print_function
import argparse
import logging
import os
import pickle
import random
import time
from itertools import chain
import numpy as np
import torch
import torch.nn.functional as functional
import torch.optim as optim
from torch.autograd import Variable
import model_meter
from feature_meta import NODE_FEATURES, DIRECTED_NEIGHBOR_FEATURES, UNDIRECTED_NEIGHBOR_FEATURES
from gcn import *
from gcn.data_loader import GraphLoader
from gcn.layers import AsymmetricGCN
from gcn.models import GCNCombined, GCN
from loggers import PrintLogger, multi_logger, EmptyLogger, CSVLogger, FileLogger
def get_features(feat_type, is_directed):
neighbor_features = DIRECTED_NEIGHBOR_FEATURES if is_directed else UNDIRECTED_NEIGHBOR_FEATURES
all_features = {"neighbors": [neighbor_features],
"features": [NODE_FEATURES],
"combined": [neighbor_features, NODE_FEATURES],
}
return dict(y for x in all_features[feat_type] for y in x.items())
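# Illustrative sketch (not part of the original script): "combined" merges the
# neighbor-feature dict with the plain node-feature dict into a single mapping.
def _example_feature_selection():
    combined = get_features("combined", is_directed=False)
    assert set(combined) == set(UNDIRECTED_NEIGHBOR_FEATURES) | set(NODE_FEATURES)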
class ModelRunner:
def __init__(self, products_path, dataset_path, conf, logger, data_logger=None):
self.conf = conf
self._logger = logger
self._data_logger = EmptyLogger() if data_logger is None else data_logger
self.products_path = products_path
self.loader = GraphLoader(dataset_path, is_max_connected=False, norm_adj=conf["norm_adj"],
cuda_num=conf["cuda"], logger=self._logger)
self._criterion = torch.nn.NLLLoss()
def _get_models(self):
bow_feat = self.loader.bow_mx
topo_feat = self.loader.topo_mx
model1 = GCN(nfeat=bow_feat.shape[1],
hlayers=[self.conf["kipf"]["hidden"]],
nclass=self.loader.num_labels,
dropout=self.conf["kipf"]["dropout"])
opt1 = optim.Adam(model1.parameters(), lr=self.conf["kipf"]["lr"],
weight_decay=self.conf["kipf"]["weight_decay"])
model2 = GCNCombined(nbow=bow_feat.shape[1],
nfeat=topo_feat.shape[1],
hlayers=self.conf["hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"])
opt2 = optim.Adam(model2.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model3 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=None)
opt3 = optim.Adam(model3.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model4 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=AsymmetricGCN)
opt4 = optim.Adam(model4.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
return {
"kipf": {
"model": model1, "optimizer": opt1,
"arguments": [self.loader.bow_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"our_combined": {
"model": model2, "optimizer": opt2,
"arguments": [self.loader.bow_mx, self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
"topo_sym": {
"model": model3, "optimizer": opt3,
"arguments": [self.loader.topo_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"topo_asym": {
"model": model4, "optimizer": opt4,
"arguments": [self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
}
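    # The four configurations above compare: a GCN on bag-of-words features
    # ("kipf", after Kipf & Welling's model), the combined BOW+topology model
    # ("our_combined"), and plain GCNs on topology features with a symmetric
    # vs. an asymmetric adjacency operator ("topo_sym" / "topo_asym").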
def run(self, train_p, feat_type):
features_meta = get_features(feat_type, is_directed=self.loader.is_graph_directed)
self.loader.split_train(train_p, features_meta)
models = self._get_models()
if self.conf["cuda"] is not None:
[model["model"].cuda(self.conf["cuda"]) for model in models.values()]
for model in models.values():
model["arguments"] = list(map(Variable, model["arguments"]))
model["labels"] = Variable(model["labels"])
# Train model
meters = {name: model_meter.ModelMeter(self.loader.distinct_labels) for name in models}
train_idx, val_idx = self.loader.train_idx, self.loader.val_idx
for epoch in range(self.conf["epochs"]):
for name, model_args in models.items():
self._train(epoch, name, model_args, train_idx, val_idx, meters[name])
# Testing
test_idx = self.loader.test_idx
for name, model_args in models.items():
meter = meters[name]
self._test(name, model_args, test_idx, meter)
self._data_logger.log_info(
model_name=name,
loss=meter.last_val("loss_test"),
acc=meter.last_val("acc_test"),
train_p=(train_p / (2 - train_p)) * 100,
norm_adj=self.conf["norm_adj"],
feat_type=self.conf["feat_type"]
)
# Currently supporting only binary class plotting
# meters[name].plot_auc(should_show=False)
# import matplotlib.pyplot as plt
# plt.savefig(os.path.join(self.products_path, time.strftime("%H_%M_%S_" + name)))
return meters
def _train(self, epoch, model_name, model_args, idx_train, idx_val, meter):
model, optimizer = model_args["model"], model_args["optimizer"]
arguments, labels = model_args["arguments"], model_args["labels"]
model.train()
optimizer.zero_grad()
output = model(*arguments)
loss_train = self._criterion(output[idx_train], labels[idx_train])
acc_train = model_meter.accuracy(output[idx_train], labels[idx_train])
meter.update_vals(loss_train=loss_train.item(), acc_train=acc_train)
loss_train.backward()
optimizer.step()
if not self.conf["fastmode"]:
            # Evaluate validation-set performance separately;
            # model.eval() deactivates dropout for the validation pass.
model.eval()
output = model(*arguments)
loss_val = self._criterion(output[idx_val], labels[idx_val])
acc_val = model_meter.accuracy(output[idx_val], labels[idx_val])
meter.update_vals(loss_val=loss_val.item(), acc_val=acc_val)
self._logger.debug("%s: Epoch: %03d, %s", model_name, epoch + 1, meter.log_str())
def _test(self, model_name, model_args, test_idx, meter):
model, arguments, labels = model_args["model"], model_args["arguments"], model_args["labels"]
model.eval()
output = model(*arguments)
loss_test = functional.nll_loss(output[test_idx], labels[test_idx])
acc_test = model_meter.accuracy(output[test_idx], labels[test_idx])
meter.update_diff(output[test_idx], labels[test_idx])
meter.update_vals(loss_test=loss_test.item(), acc_test=acc_test)
self._logger.info("%s: Test, %s", model_name, meter.log_str(log_vals=["loss_test", "acc_test"]))
# self._logger.info("%s Test: loss= %.4f accuracy= %.4f" % (model_name, loss_test.item(), acc_test.item()))
# return {"loss": loss_test.item(), "acc": acc_test.item()}
def init_seed(seed, cuda=None):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda is not None:
torch.cuda.manual_seed(seed)
def aggregate_results(res_list, logger):
aggregated = {}
for cur_res in res_list:
for name, vals in cur_res.items():
if name not in aggregated:
aggregated[name] = {}
for key, val in vals.items():
if key not in aggregated[name]:
aggregated[name][key] = []
aggregated[name][key].append(val)
for name, vals in aggregated.items():
val_list = sorted(vals.items(), key=lambda x: x[0], reverse=True)
logger.info("*" * 15 + "%s mean: %s", name,
", ".join("%s=%3.4f" % (key, np.mean(val)) for key, val in val_list))
logger.info("*" * 15 + "%s std: %s", name, ", ".join("%s=%3.4f" % (key, np.std(val)) for key, val in val_list))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', type=int, default=1,
help='Specify cuda device number')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--dataset', type=str, default="cora",
help='The dataset to use.')
# parser.add_argument('--prefix', type=str, default="",
# help='The prefix of the products dir name.')
args = parser.parse_args()
# args.cuda = not args.no_cuda and torch.cuda.is_available()
if not torch.cuda.is_available():
args.cuda = None
return args
def main():
args = parse_args()
dataset = "cora" # args.dataset
seed = random.randint(1, 1000000000)
conf = {
"kipf": {"hidden": 16, "dropout": 0.5, "lr": 0.01, "weight_decay": 5e-4},
"hidden_layers": [16], "multi_hidden_layers": [100, 20], "dropout": 0.6, "lr": 0.01, "weight_decay": 0.001,
"dataset": dataset, "epochs": args.epochs, "cuda": args.cuda, "fastmode": args.fastmode, "seed": seed}
init_seed(conf['seed'], conf['cuda'])
dataset_path = os.path.join(PROJ_DIR, "data", dataset)
products_path = os.path.join(CUR_DIR, "logs", dataset, time.strftime("%Y_%m_%d_%H_%M_%S"))
if not os.path.exists(products_path):
os.makedirs(products_path)
logger = multi_logger([
PrintLogger("IdansLogger", level=logging.INFO),
FileLogger("results_%s" % conf["dataset"], path=products_path, level=logging.INFO),
FileLogger("results_%s_all" % conf["dataset"], path=products_path, level=logging.DEBUG),
], name=None)
data_logger = CSVLogger("results_%s" % conf["dataset"], path=products_path)
data_logger.set_titles("model_name", "loss", "acc", "train_p", "norm_adj", "feat_type")
num_iter = 5
for norm_adj in [True, False]:
conf["norm_adj"] = norm_adj
runner = ModelRunner(products_path, dataset_path, conf, logger=logger, data_logger=data_logger)
for train_p in chain([1], range(5, 90, 10)):
conf["train_p"] = train_p
train_p /= 100
val_p = test_p = (1 - train_p) / 2.
train_p /= (val_p + train_p)
runner.loader.split_test(test_p)
for ft, feat_type in enumerate(["combined", "neighbors", "features"]):
conf["feat_type"] = feat_type
results = [runner.run(train_p, feat_type) for _ in range(num_iter)]
conf_path = os.path.join(runner.products_path,
"t%d_n%d_ft%d.pkl" % (conf["train_p"], norm_adj, ft,))
                with open(conf_path, "wb") as pkl_file:
                    pickle.dump({"res": results, "conf": conf}, pkl_file)
logger.info("Finished")
if __name__ == "__main__":
main()
train.py | # from __future__ import division
# from __future__ import print_function
import argparse
import logging
import os
import pickle
import random
import time
from itertools import chain
import numpy as np
import torch
import torch.nn.functional as functional
import torch.optim as optim
from torch.autograd import Variable
import model_meter
from feature_meta import NODE_FEATURES, DIRECTED_NEIGHBOR_FEATURES, UNDIRECTED_NEIGHBOR_FEATURES
from gcn import *
from gcn.data_loader import GraphLoader
from gcn.layers import AsymmetricGCN
from gcn.models import GCNCombined, GCN
from loggers import PrintLogger, multi_logger, EmptyLogger, CSVLogger, FileLogger
def get_features(feat_type, is_directed):
neighbor_features = DIRECTED_NEIGHBOR_FEATURES if is_directed else UNDIRECTED_NEIGHBOR_FEATURES
all_features = {"neighbors": [neighbor_features],
"features": [NODE_FEATURES],
"combined": [neighbor_features, NODE_FEATURES],
}
return dict(y for x in all_features[feat_type] for y in x.items())
class ModelRunner:
def __init__(self, products_path, dataset_path, conf, logger, data_logger=None):
self.conf = conf
self._logger = logger
self._data_logger = EmptyLogger() if data_logger is None else data_logger
self.products_path = products_path
self.loader = GraphLoader(dataset_path, is_max_connected=False, norm_adj=conf["norm_adj"],
cuda_num=conf["cuda"], logger=self._logger)
self._criterion = torch.nn.NLLLoss()
def _get_models(self):
bow_feat = self.loader.bow_mx
topo_feat = self.loader.topo_mx
model1 = GCN(nfeat=bow_feat.shape[1],
hlayers=[self.conf["kipf"]["hidden"]],
nclass=self.loader.num_labels,
dropout=self.conf["kipf"]["dropout"])
opt1 = optim.Adam(model1.parameters(), lr=self.conf["kipf"]["lr"],
weight_decay=self.conf["kipf"]["weight_decay"])
model2 = GCNCombined(nbow=bow_feat.shape[1],
nfeat=topo_feat.shape[1],
hlayers=self.conf["hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"])
opt2 = optim.Adam(model2.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model3 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=None)
opt3 = optim.Adam(model3.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model4 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=AsymmetricGCN)
opt4 = optim.Adam(model4.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
return {
"kipf": {
"model": model1, "optimizer": opt1,
"arguments": [self.loader.bow_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"our_combined": {
"model": model2, "optimizer": opt2,
"arguments": [self.loader.bow_mx, self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
"topo_sym": {
"model": model3, "optimizer": opt3,
"arguments": [self.loader.topo_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"topo_asym": {
"model": model4, "optimizer": opt4,
"arguments": [self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
}
def run(self, train_p, feat_type):
features_meta = get_features(feat_type, is_directed=self.loader.is_graph_directed)
self.loader.split_train(train_p, features_meta)
models = self._get_models()
if self.conf["cuda"] is not None:
[model["model"].cuda(self.conf["cuda"]) for model in models.values()]
for model in models.values():
model["arguments"] = list(map(Variable, model["arguments"]))
model["labels"] = Variable(model["labels"])
# Train model
meters = {name: model_meter.ModelMeter(self.loader.distinct_labels) for name in models}
train_idx, val_idx = self.loader.train_idx, self.loader.val_idx
for epoch in range(self.conf["epochs"]):
for name, model_args in models.items():
self._train(epoch, name, model_args, train_idx, val_idx, meters[name])
# Testing
test_idx = self.loader.test_idx
for name, model_args in models.items():
meter = meters[name]
self._test(name, model_args, test_idx, meter)
self._data_logger.log_info(
model_name=name,
loss=meter.last_val("loss_test"),
acc=meter.last_val("acc_test"),
train_p=(train_p / (2 - train_p)) * 100,
norm_adj=self.conf["norm_adj"],
feat_type=self.conf["feat_type"]
)
# Currently supporting only binary class plotting
# meters[name].plot_auc(should_show=False)
# import matplotlib.pyplot as plt
# plt.savefig(os.path.join(self.products_path, time.strftime("%H_%M_%S_" + name)))
return meters
def _train(self, epoch, model_name, model_args, idx_train, idx_val, meter):
model, optimizer = model_args["model"], model_args["optimizer"]
arguments, labels = model_args["arguments"], model_args["labels"]
model.train()
optimizer.zero_grad()
output = model(*arguments)
loss_train = self._criterion(output[idx_train], labels[idx_train])
acc_train = model_meter.accuracy(output[idx_train], labels[idx_train])
meter.update_vals(loss_train=loss_train.item(), acc_train=acc_train)
loss_train.backward()
optimizer.step()
if not self.conf["fastmode"]:
            # Evaluate validation-set performance separately;
            # model.eval() deactivates dropout for the validation pass.
model.eval()
output = model(*arguments)
loss_val = self._criterion(output[idx_val], labels[idx_val])
acc_val = model_meter.accuracy(output[idx_val], labels[idx_val])
meter.update_vals(loss_val=loss_val.item(), acc_val=acc_val)
self._logger.debug("%s: Epoch: %03d, %s", model_name, epoch + 1, meter.log_str())
def _test(self, model_name, model_args, test_idx, meter):
model, arguments, labels = model_args["model"], model_args["arguments"], model_args["labels"]
model.eval()
output = model(*arguments)
loss_test = functional.nll_loss(output[test_idx], labels[test_idx])
acc_test = model_meter.accuracy(output[test_idx], labels[test_idx])
meter.update_diff(output[test_idx], labels[test_idx])
meter.update_vals(loss_test=loss_test.item(), acc_test=acc_test)
self._logger.info("%s: Test, %s", model_name, meter.log_str(log_vals=["loss_test", "acc_test"]))
# self._logger.info("%s Test: loss= %.4f accuracy= %.4f" % (model_name, loss_test.item(), acc_test.item()))
# return {"loss": loss_test.item(), "acc": acc_test.item()}
def init_seed(seed, cuda=None):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda is not None:
torch.cuda.manual_seed(seed)
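# Illustrative usage (not part of the original script): seeding NumPy and
# torch together keeps the data splits and weight initialisation reproducible,
# e.g. init_seed(12345, cuda=0)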
def aggregate_results(res_list, logger):
aggregated = {}
for cur_res in res_list:
for name, vals in cur_res.items():
if name not in aggregated:
aggregated[name] = {}
for key, val in vals.items():
if key not in aggregated[name]:
aggregated[name][key] = []
aggregated[name][key].append(val)
    for name, vals in aggregated.items():
        val_list = sorted(vals.items(), key=lambda x: x[0], reverse=True)
logger.info("*" * 15 + "%s mean: %s", name,
", ".join("%s=%3.4f" % (key, np.mean(val)) for key, val in val_list))
logger.info("*" * 15 + "%s std: %s", name, ", ".join("%s=%3.4f" % (key, np.std(val)) for key, val in val_list))
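# Illustrative sketch (not part of the original script): aggregate_results
# expects one dict per run, keyed by model name and then by metric.
def _example_aggregate(logger):
    runs = [{"kipf": {"acc": 0.80, "loss": 0.61}},
            {"kipf": {"acc": 0.84, "loss": 0.55}}]
    aggregate_results(runs, logger)  # logs per-metric mean and std for "kipf"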
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', type=int, default=1,
help='Specify cuda device number')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--dataset', type=str, default="cora",
help='The dataset to use.')
# parser.add_argument('--prefix', type=str, default="",
# help='The prefix of the products dir name.')
args = parser.parse_args()
# args.cuda = not args.no_cuda and torch.cuda.is_available()
if not torch.cuda.is_available():
args.cuda = None
return args
def main():
args = parse_args()
dataset = "cora" # args.dataset
seed = random.randint(1, 1000000000)
conf = {
"kipf": {"hidden": 16, "dropout": 0.5, "lr": 0.01, "weight_decay": 5e-4},
"hidden_layers": [16], "multi_hidden_layers": [100, 20], "dropout": 0.6, "lr": 0.01, "weight_decay": 0.001,
"dataset": dataset, "epochs": args.epochs, "cuda": args.cuda, "fastmode": args.fastmode, "seed": seed}
init_seed(conf['seed'], conf['cuda'])
dataset_path = os.path.join(PROJ_DIR, "data", dataset)
products_path = os.path.join(CUR_DIR, "logs", dataset, time.strftime("%Y_%m_%d_%H_%M_%S"))
if not os.path.exists(products_path):
os.makedirs(products_path)
logger = multi_logger([
PrintLogger("IdansLogger", level=logging.INFO),
FileLogger("results_%s" % conf["dataset"], path=products_path, level=logging.INFO),
FileLogger("results_%s_all" % conf["dataset"], path=products_path, level=logging.DEBUG),
], name=None)
data_logger = CSVLogger("results_%s" % conf["dataset"], path=products_path)
data_logger.set_titles("model_name", "loss", "acc", "train_p", "norm_adj", "feat_type")
num_iter = 5
for norm_adj in [True, False]:
conf["norm_adj"] = norm_adj
runner = ModelRunner(products_path, dataset_path, conf, logger=logger, data_logger=data_logger)
for train_p in chain([1], range(5, 90, 10)):
conf["train_p"] = train_p
train_p /= 100
val_p = test_p = (1 - train_p) / 2.
train_p /= (val_p + train_p)
runner.loader.split_test(test_p)
for ft, feat_type in enumerate(["combined", "neighbors", "features"]):
conf["feat_type"] = feat_type
results = [runner.run(train_p, feat_type) for _ in range(num_iter)]
conf_path = os.path.join(runner.products_path,
"t%d_n%d_ft%d.pkl" % (conf["train_p"], norm_adj, ft,))
                with open(conf_path, "wb") as pkl_file:
                    pickle.dump({"res": results, "conf": conf}, pkl_file)
logger.info("Finished")
if __name__ == "__main__":
    main()
train.py | # from __future__ import division
# from __future__ import print_function
import argparse
import logging
import pickle
import random
import time
from itertools import chain
import numpy as np
import torch
import torch.nn.functional as functional
import torch.optim as optim
from torch.autograd import Variable
import model_meter
from feature_meta import NODE_FEATURES, DIRECTED_NEIGHBOR_FEATURES, UNDIRECTED_NEIGHBOR_FEATURES
from gcn import *
from gcn.data_loader import GraphLoader
from gcn.layers import AsymmetricGCN
from gcn.models import GCNCombined, GCN
from loggers import PrintLogger, multi_logger, EmptyLogger, CSVLogger, FileLogger
def get_features(feat_type, is_directed):
neighbor_features = DIRECTED_NEIGHBOR_FEATURES if is_directed else UNDIRECTED_NEIGHBOR_FEATURES
all_features = {"neighbors": [neighbor_features],
"features": [NODE_FEATURES],
"combined": [neighbor_features, NODE_FEATURES],
}
return dict(y for x in all_features[feat_type] for y in x.items())
class ModelRunner:
def __init__(self, products_path, dataset_path, conf, logger, data_logger=None):
self.conf = conf
self._logger = logger
self._data_logger = EmptyLogger() if data_logger is None else data_logger
self.products_path = products_path
self.loader = GraphLoader(dataset_path, is_max_connected=False, norm_adj=conf["norm_adj"],
cuda_num=conf["cuda"], logger=self._logger)
self._criterion = torch.nn.NLLLoss()
def _get_models(self):
bow_feat = self.loader.bow_mx
topo_feat = self.loader.topo_mx
model1 = GCN(nfeat=bow_feat.shape[1],
hlayers=[self.conf["kipf"]["hidden"]],
nclass=self.loader.num_labels,
dropout=self.conf["kipf"]["dropout"])
opt1 = optim.Adam(model1.parameters(), lr=self.conf["kipf"]["lr"],
weight_decay=self.conf["kipf"]["weight_decay"])
model2 = GCNCombined(nbow=bow_feat.shape[1],
nfeat=topo_feat.shape[1],
hlayers=self.conf["hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"])
opt2 = optim.Adam(model2.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model3 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=None)
opt3 = optim.Adam(model3.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model4 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=AsymmetricGCN)
opt4 = optim.Adam(model4.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
return {
"kipf": {
"model": model1, "optimizer": opt1,
"arguments": [self.loader.bow_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"our_combined": {
"model": model2, "optimizer": opt2,
"arguments": [self.loader.bow_mx, self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
"topo_sym": {
"model": model3, "optimizer": opt3,
"arguments": [self.loader.topo_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"topo_asym": {
"model": model4, "optimizer": opt4,
"arguments": [self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
}
def run(self, train_p, feat_type):
features_meta = get_features(feat_type, is_directed=self.loader.is_graph_directed)
self.loader.split_train(train_p, features_meta)
models = self._get_models()
if self.conf["cuda"] is not None:
[model["model"].cuda(self.conf["cuda"]) for model in models.values()]
for model in models.values():
model["arguments"] = list(map(Variable, model["arguments"]))
model["labels"] = Variable(model["labels"])
# Train model
meters = {name: model_meter.ModelMeter(self.loader.distinct_labels) for name in models}
train_idx, val_idx = self.loader.train_idx, self.loader.val_idx
for epoch in range(self.conf["epochs"]):
for name, model_args in models.items():
                self._train(epoch, name, model_args, train_idx, val_idx, meters[name])
# Testing
test_idx = self.loader.test_idx
for name, model_args in models.items():
meter = meters[name]
self._test(name, model_args, test_idx, meter)
self._data_logger.log_info(
model_name=name,
loss=meter.last_val("loss_test"),
acc=meter.last_val("acc_test"),
train_p=(train_p / (2 - train_p)) * 100,
norm_adj=self.conf["norm_adj"],
feat_type=self.conf["feat_type"]
)
# Currently supporting only binary class plotting
# meters[name].plot_auc(should_show=False)
# import matplotlib.pyplot as plt
# plt.savefig(os.path.join(self.products_path, time.strftime("%H_%M_%S_" + name)))
return meters
def _train(self, epoch, model_name, model_args, idx_train, idx_val, meter):
model, optimizer = model_args["model"], model_args["optimizer"]
arguments, labels = model_args["arguments"], model_args["labels"]
model.train()
optimizer.zero_grad()
output = model(*arguments)
loss_train = self._criterion(output[idx_train], labels[idx_train])
acc_train = model_meter.accuracy(output[idx_train], labels[idx_train])
meter.update_vals(loss_train=loss_train.item(), acc_train=acc_train)
loss_train.backward()
optimizer.step()
if not self.conf["fastmode"]:
            # Evaluate validation-set performance separately;
            # model.eval() deactivates dropout for the validation pass.
model.eval()
output = model(*arguments)
loss_val = self._criterion(output[idx_val], labels[idx_val])
acc_val = model_meter.accuracy(output[idx_val], labels[idx_val])
meter.update_vals(loss_val=loss_val.item(), acc_val=acc_val)
self._logger.debug("%s: Epoch: %03d, %s", model_name, epoch + 1, meter.log_str())
def _test(self, model_name, model_args, test_idx, meter):
model, arguments, labels = model_args["model"], model_args["arguments"], model_args["labels"]
model.eval()
output = model(*arguments)
loss_test = functional.nll_loss(output[test_idx], labels[test_idx])
acc_test = model_meter.accuracy(output[test_idx], labels[test_idx])
meter.update_diff(output[test_idx], labels[test_idx])
meter.update_vals(loss_test=loss_test.item(), acc_test=acc_test)
self._logger.info("%s: Test, %s", model_name, meter.log_str(log_vals=["loss_test", "acc_test"]))
# self._logger.info("%s Test: loss= %.4f accuracy= %.4f" % (model_name, loss_test.item(), acc_test.item()))
# return {"loss": loss_test.item(), "acc": acc_test.item()}
def init_seed(seed, cuda=None):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda is not None:
torch.cuda.manual_seed(seed)
def aggregate_results(res_list, logger):
aggregated = {}
for cur_res in res_list:
for name, vals in cur_res.items():
if name not in aggregated:
aggregated[name] = {}
for key, val in vals.items():
if key not in aggregated[name]:
aggregated[name][key] = []
aggregated[name][key].append(val)
for name, vals in aggregated.items():
val_list = sorted(vals.items(), key=lambda x: x[0], reverse=True)
logger.info("*" * 15 + "%s mean: %s", name,
", ".join("%s=%3.4f" % (key, np.mean(val)) for key, val in val_list))
logger.info("*" * 15 + "%s std: %s", name, ", ".join("%s=%3.4f" % (key, np.std(val)) for key, val in val_list))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', type=int, default=1,
help='Specify cuda device number')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--dataset', type=str, default="cora",
help='The dataset to use.')
# parser.add_argument('--prefix', type=str, default="",
# help='The prefix of the products dir name.')
args = parser.parse_args()
# args.cuda = not args.no_cuda and torch.cuda.is_available()
if not torch.cuda.is_available():
args.cuda = None
return args
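# Illustrative usage (not part of the original script):
#   python train.py --cuda 0 --epochs 300
# Note that main() below hard-codes dataset = "cora" and draws a fresh random
# seed, so the --dataset and --seed arguments are currently ignored.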
def main():
args = parse_args()
dataset = "cora" # args.dataset
seed = random.randint(1, 1000000000)
conf = {
"kipf": {"hidden": 16, "dropout": 0.5, "lr": 0.01, "weight_decay": 5e-4},
"hidden_layers": [16], "multi_hidden_layers": [100, 20], "dropout": 0.6, "lr": 0.01, "weight_decay": 0.001,
"dataset": dataset, "epochs": args.epochs, "cuda": args.cuda, "fastmode": args.fastmode, "seed": seed}
init_seed(conf['seed'], conf['cuda'])
dataset_path = os.path.join(PROJ_DIR, "data", dataset)
products_path = os.path.join(CUR_DIR, "logs", dataset, time.strftime("%Y_%m_%d_%H_%M_%S"))
if not os.path.exists(products_path):
os.makedirs(products_path)
logger = multi_logger([
PrintLogger("IdansLogger", level=logging.INFO),
FileLogger("results_%s" % conf["dataset"], path=products_path, level=logging.INFO),
FileLogger("results_%s_all" % conf["dataset"], path=products_path, level=logging.DEBUG),
], name=None)
data_logger = CSVLogger("results_%s" % conf["dataset"], path=products_path)
data_logger.set_titles("model_name", "loss", "acc", "train_p", "norm_adj", "feat_type")
num_iter = 5
for norm_adj in [True, False]:
conf["norm_adj"] = norm_adj
runner = ModelRunner(products_path, dataset_path, conf, logger=logger, data_logger=data_logger)
for train_p in chain([1], range(5, 90, 10)):
conf["train_p"] = train_p
train_p /= 100
val_p = test_p = (1 - train_p) / 2.
train_p /= (val_p + train_p)
runner.loader.split_test(test_p)
for ft, feat_type in enumerate(["combined", "neighbors", "features"]):
conf["feat_type"] = feat_type
results = [runner.run(train_p, feat_type) for _ in range(num_iter)]
conf_path = os.path.join(runner.products_path,
"t%d_n%d_ft%d.pkl" % (conf["train_p"], norm_adj, ft,))
                with open(conf_path, "wb") as pkl_file:
                    pickle.dump({"res": results, "conf": conf}, pkl_file)
logger.info("Finished")
if __name__ == "__main__":
main()
train.py | # from __future__ import division
# from __future__ import print_function
import argparse
import logging
import os
import pickle
import random
import time
from itertools import chain
import numpy as np
import torch
import torch.nn.functional as functional
import torch.optim as optim
from torch.autograd import Variable
import model_meter
from feature_meta import NODE_FEATURES, DIRECTED_NEIGHBOR_FEATURES, UNDIRECTED_NEIGHBOR_FEATURES
from gcn import *
from gcn.data_loader import GraphLoader
from gcn.layers import AsymmetricGCN
from gcn.models import GCNCombined, GCN
from loggers import PrintLogger, multi_logger, EmptyLogger, CSVLogger, FileLogger
def get_features(feat_type, is_directed):
neighbor_features = DIRECTED_NEIGHBOR_FEATURES if is_directed else UNDIRECTED_NEIGHBOR_FEATURES
all_features = {"neighbors": [neighbor_features],
"features": [NODE_FEATURES],
"combined": [neighbor_features, NODE_FEATURES],
}
return dict(y for x in all_features[feat_type] for y in x.items())
class ModelRunner:
def __init__(self, products_path, dataset_path, conf, logger, data_logger=None):
self.conf = conf
self._logger = logger
self._data_logger = EmptyLogger() if data_logger is None else data_logger
self.products_path = products_path
self.loader = GraphLoader(dataset_path, is_max_connected=False, norm_adj=conf["norm_adj"],
cuda_num=conf["cuda"], logger=self._logger)
self._criterion = torch.nn.NLLLoss()
def _get_models(self):
        bow_feat = self.loader.bow_mx
        topo_feat = self.loader.topo_mx
        model1 = GCN(nfeat=bow_feat.shape[1],
                     hlayers=[self.conf["kipf"]["hidden"]],
                     nclass=self.loader.num_labels,
                     dropout=self.conf["kipf"]["dropout"])
        opt1 = optim.Adam(model1.parameters(), lr=self.conf["kipf"]["lr"],
                          weight_decay=self.conf["kipf"]["weight_decay"])
        model2 = GCNCombined(nbow=bow_feat.shape[1],
                             nfeat=topo_feat.shape[1],
                             hlayers=self.conf["hidden_layers"],
                             nclass=self.loader.num_labels,
                             dropout=self.conf["dropout"])
        opt2 = optim.Adam(model2.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
        model3 = GCN(nfeat=topo_feat.shape[1],
                     hlayers=self.conf["multi_hidden_layers"],
                     nclass=self.loader.num_labels,
                     dropout=self.conf["dropout"],
                     layer_type=None)
        opt3 = optim.Adam(model3.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
        model4 = GCN(nfeat=topo_feat.shape[1],
                     hlayers=self.conf["multi_hidden_layers"],
                     nclass=self.loader.num_labels,
                     dropout=self.conf["dropout"],
                     layer_type=AsymmetricGCN)
        opt4 = optim.Adam(model4.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
        return {
            "kipf": {
                "model": model1, "optimizer": opt1,
                "arguments": [self.loader.bow_mx, self.loader.adj_mx],
                "labels": self.loader.labels,
            },
            "our_combined": {
                "model": model2, "optimizer": opt2,
                "arguments": [self.loader.bow_mx, self.loader.topo_mx, self.loader.adj_rt_mx],
                "labels": self.loader.labels,
            },
            "topo_sym": {
                "model": model3, "optimizer": opt3,
                "arguments": [self.loader.topo_mx, self.loader.adj_mx],
                "labels": self.loader.labels,
            },
            "topo_asym": {
                "model": model4, "optimizer": opt4,
                "arguments": [self.loader.topo_mx, self.loader.adj_rt_mx],
                "labels": self.loader.labels,
            },
        }
def run(self, train_p, feat_type):
features_meta = get_features(feat_type, is_directed=self.loader.is_graph_directed)
self.loader.split_train(train_p, features_meta)
models = self._get_models()
if self.conf["cuda"] is not None:
[model["model"].cuda(self.conf["cuda"]) for model in models.values()]
for model in models.values():
model["arguments"] = list(map(Variable, model["arguments"]))
model["labels"] = Variable(model["labels"])
# Train model
meters = {name: model_meter.ModelMeter(self.loader.distinct_labels) for name in models}
train_idx, val_idx = self.loader.train_idx, self.loader.val_idx
for epoch in range(self.conf["epochs"]):
for name, model_args in models.items():
self._train(epoch, name, model_args, train_idx, val_idx, meters[name])
# Testing
test_idx = self.loader.test_idx
for name, model_args in models.items():
meter = meters[name]
self._test(name, model_args, test_idx, meter)
self._data_logger.log_info(
model_name=name,
loss=meter.last_val("loss_test"),
acc=meter.last_val("acc_test"),
train_p=(train_p / (2 - train_p)) * 100,
norm_adj=self.conf["norm_adj"],
feat_type=self.conf["feat_type"]
)
# Currently supporting only binary class plotting
# meters[name].plot_auc(should_show=False)
# import matplotlib.pyplot as plt
# plt.savefig(os.path.join(self.products_path, time.strftime("%H_%M_%S_" + name)))
return meters
def _train(self, epoch, model_name, model_args, idx_train, idx_val, meter):
model, optimizer = model_args["model"], model_args["optimizer"]
arguments, labels = model_args["arguments"], model_args["labels"]
model.train()
optimizer.zero_grad()
output = model(*arguments)
loss_train = self._criterion(output[idx_train], labels[idx_train])
acc_train = model_meter.accuracy(output[idx_train], labels[idx_train])
meter.update_vals(loss_train=loss_train.item(), acc_train=acc_train)
loss_train.backward()
optimizer.step()
if not self.conf["fastmode"]:
            # Evaluate validation-set performance separately;
            # model.eval() deactivates dropout for the validation pass.
model.eval()
output = model(*arguments)
loss_val = self._criterion(output[idx_val], labels[idx_val])
acc_val = model_meter.accuracy(output[idx_val], labels[idx_val])
meter.update_vals(loss_val=loss_val.item(), acc_val=acc_val)
self._logger.debug("%s: Epoch: %03d, %s", model_name, epoch + 1, meter.log_str())
def _test(self, model_name, model_args, test_idx, meter):
model, arguments, labels = model_args["model"], model_args["arguments"], model_args["labels"]
model.eval()
output = model(*arguments)
loss_test = functional.nll_loss(output[test_idx], labels[test_idx])
acc_test = model_meter.accuracy(output[test_idx], labels[test_idx])
meter.update_diff(output[test_idx], labels[test_idx])
meter.update_vals(loss_test=loss_test.item(), acc_test=acc_test)
self._logger.info("%s: Test, %s", model_name, meter.log_str(log_vals=["loss_test", "acc_test"]))
# self._logger.info("%s Test: loss= %.4f accuracy= %.4f" % (model_name, loss_test.item(), acc_test.item()))
# return {"loss": loss_test.item(), "acc": acc_test.item()}
def init_seed(seed, cuda=None):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda is not None:
torch.cuda.manual_seed(seed)
def aggregate_results(res_list, logger):
aggregated = {}
for cur_res in res_list:
for name, vals in cur_res.items():
if name not in aggregated:
aggregated[name] = {}
for key, val in vals.items():
if key not in aggregated[name]:
aggregated[name][key] = []
aggregated[name][key].append(val)
for name, vals in aggregated.items():
val_list = sorted(vals.items(), key=lambda x: x[0], reverse=True)
logger.info("*" * 15 + "%s mean: %s", name,
", ".join("%s=%3.4f" % (key, np.mean(val)) for key, val in val_list))
logger.info("*" * 15 + "%s std: %s", name, ", ".join("%s=%3.4f" % (key, np.std(val)) for key, val in val_list))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', type=int, default=1,
help='Specify cuda device number')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--dataset', type=str, default="cora",
help='The dataset to use.')
# parser.add_argument('--prefix', type=str, default="",
# help='The prefix of the products dir name.')
args = parser.parse_args()
# args.cuda = not args.no_cuda and torch.cuda.is_available()
if not torch.cuda.is_available():
args.cuda = None
return args
def main():
args = parse_args()
dataset = "cora" # args.dataset
seed = random.randint(1, 1000000000)
conf = {
"kipf": {"hidden": 16, "dropout": 0.5, "lr": 0.01, "weight_decay": 5e-4},
"hidden_layers": [16], "multi_hidden_layers": [100, 20], "dropout": 0.6, "lr": 0.01, "weight_decay": 0.001,
"dataset": dataset, "epochs": args.epochs, "cuda": args.cuda, "fastmode": args.fastmode, "seed": seed}
init_seed(conf['seed'], conf['cuda'])
dataset_path = os.path.join(PROJ_DIR, "data", dataset)
products_path = os.path.join(CUR_DIR, "logs", dataset, time.strftime("%Y_%m_%d_%H_%M_%S"))
if not os.path.exists(products_path):
os.makedirs(products_path)
logger = multi_logger([
PrintLogger("IdansLogger", level=logging.INFO),
FileLogger("results_%s" % conf["dataset"], path=products_path, level=logging.INFO),
FileLogger("results_%s_all" % conf["dataset"], path=products_path, level=logging.DEBUG),
], name=None)
data_logger = CSVLogger("results_%s" % conf["dataset"], path=products_path)
data_logger.set_titles("model_name", "loss", "acc", "train_p", "norm_adj", "feat_type")
num_iter = 5
for norm_adj in [True, False]:
conf["norm_adj"] = norm_adj
runner = ModelRunner(products_path, dataset_path, conf, logger=logger, data_logger=data_logger)
for train_p in chain([1], range(5, 90, 10)):
conf["train_p"] = train_p
train_p /= 100.
val_p = test_p = (1 - train_p) / 2.
train_p /= (val_p + train_p)
runner.loader.split_test(test_p)
for ft, feat_type in enumerate(["combined", "neighbors", "features"]):
conf["feat_type"] = feat_type
results = [runner.run(train_p, feat_type) for _ in range(num_iter)]
conf_path = os.path.join(runner.products_path,
"t%d_n%d_ft%d.pkl" % (conf["train_p"], norm_adj, ft,))
pickle.dump({"res": results, "conf": conf}, open(conf_path, "wb"))
logger.info("Finished")
if __name__ == "__main__":
main()
| bow_feat = self.loader.bow_mx
topo_feat = self.loader.topo_mx
model1 = GCN(nfeat=bow_feat.shape[1],
hlayers=[self.conf["kipf"]["hidden"]],
nclass=self.loader.num_labels,
dropout=self.conf["kipf"]["dropout"])
opt1 = optim.Adam(model1.parameters(), lr=self.conf["kipf"]["lr"],
weight_decay=self.conf["kipf"]["weight_decay"])
model2 = GCNCombined(nbow=bow_feat.shape[1],
nfeat=topo_feat.shape[1],
hlayers=self.conf["hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"])
opt2 = optim.Adam(model2.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model3 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=None)
opt3 = optim.Adam(model3.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
model4 = GCN(nfeat=topo_feat.shape[1],
hlayers=self.conf["multi_hidden_layers"],
nclass=self.loader.num_labels,
dropout=self.conf["dropout"],
layer_type=AsymmetricGCN)
opt4 = optim.Adam(model4.parameters(), lr=self.conf["lr"], weight_decay=self.conf["weight_decay"])
return {
"kipf": {
"model": model1, "optimizer": opt1,
"arguments": [self.loader.bow_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"our_combined": {
"model": model2, "optimizer": opt2,
"arguments": [self.loader.bow_mx, self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
"topo_sym": {
"model": model3, "optimizer": opt3,
"arguments": [self.loader.topo_mx, self.loader.adj_mx],
"labels": self.loader.labels,
},
"topo_asym": {
"model": model4, "optimizer": opt4,
"arguments": [self.loader.topo_mx, self.loader.adj_rt_mx],
"labels": self.loader.labels,
},
} | identifier_body |
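# (The dict returned above is the registry consumed by _train/_test: each entry
# bundles a model, its optimizer, the positional tensors unpacked as
# model(*arguments), and the shared label vector, keyed by the name used in logs.)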
CAAPR_Pipeline.py | # Import smorgasbord
import os
import sys
sys.path.append( str( os.path.join( os.path.split( os.path.dirname(os.path.abspath(__file__)) )[0], 'CAAPR', 'CAAPR_AstroMagic', 'PTS') ) )
import gc
import pdb
import time
import re
import copy
import warnings
import numbers
import random
import shutil
import numpy as np
import scipy.ndimage
import multiprocessing as mp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy.io.fits
import ChrisFuncs
import ChrisFuncs.Photom
import ChrisFuncs.FromGitHub
import CAAPR
# The main pipeline; the cutout-production, aperture-fitting, and actual photometry parts of the CAAPR process are called in here, as sub-pipelines
def PipelineMain(source_dict, bands_dict, kwargs_dict):
# Start timer, and check that the user has actually asked CAAPR to do something; if they haven't asked CAAPR to do anything at all, tell them that they're being a bit odd!
source_start = time.time()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Processing target '+source_dict['name']+'.'
if not kwargs_dict['fit_apertures'] and not kwargs_dict['do_photom']:
if not kwargs_dict['save_images']:
raise ValueError("None of fit_apertures, do_photom, and save_images is set to True. ")
print('Skipping photometry and aperture fitting, but will save processed images.')
# Check if any data actually exists for this source
if SourcePrelim(source_dict, bands_dict, kwargs_dict)==False:
return
# Loop over bands for initial processing
for band in bands_dict.keys():
# Do basic initial handling of band parameters
bands_dict[band] = BandInitiate(bands_dict[band])
# Function that checks if the user has requested a cutout and, if so, produces it
bands_dict[band] = CAAPR.CAAPR_IO.Cutout(source_dict, bands_dict[band], kwargs_dict)
# Function that checks if it is possible to trim padding of no-coverage from the edge of the map (if the user hasn't specified that a particular cutout be made)
bands_dict[band] = CAAPR.CAAPR_IO.UnpaddingCutout(source_dict, bands_dict[band], kwargs_dict)
# Check if star-subtraction is requested for any band; if so, commence catalogue pre-fetching
CAAPR.CAAPR_AstroMagic.PreCatalogue(source_dict, bands_dict, kwargs_dict)
# If aperture file not provided, commence aperture-fitting sub-pipeline
if kwargs_dict['fit_apertures']==True:
# Process sources inside while loop, to catch 'missed' bands
aperture_attempts = 0
while aperture_attempts!='Success':
# In standard operation, process multiple sources in parallel
aperture_start = time.time()
aperture_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
aperture_output_list.append( pool.apply_async( CAAPR.CAAPR_Aperture.SubpipelineAperture, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
del(pool)
aperture_list = [output.get() for output in aperture_output_list if output.successful()==True]
aperture_list = [aperture for aperture in aperture_list if aperture!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
aperture_output_list.append( CAAPR.CAAPR_Aperture.SubpipelineAperture(source_dict, bands_dict[band], kwargs_dict) )
aperture_list = [output for output in aperture_output_list if output!=None]
# Check that all aperture fitting completed
aperture_attempts = CAAPR.CAAPR_Aperture.ApertureCheck(aperture_attempts, aperture_output_list, source_dict, bands_dict, kwargs_dict)
# Combine all fitted apertures to produce amalgam aperture
aperture_combined = CAAPR.CAAPR_Aperture.CombineAperture(aperture_list, source_dict, kwargs_dict)
# Record aperture properties to file
CAAPR.CAAPR_IO.RecordAperture(aperture_combined, source_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from aperture fitting
CAAPR.CAAPR_Aperture.ExcludedThumb(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.ApertureThumbGrid(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Report time taken to fit apertures, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing aperture fitting: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-aperture_start,4))+' seconds.'
# Commence actual photometry sub-pipeline
if kwargs_dict['do_photom'] or kwargs_dict['save_images']:
# Handle problem where the user hasn't provided an aperture file, but also hasn't told CAAPR to fit its own apertures.
if kwargs_dict['aperture_table_path']==False and kwargs_dict['fit_apertures']==False:
raise Exception('User has requested no aperture-fitting, and no photometry!')
# Process sources inside while loop, to catch 'missed' bands
photom_attempts = 0
while photom_attempts!='Complete':
# In standard operation, process multiple sources in parallel
photom_start = time.time()
photom_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
photom_output_list.append( pool.apply_async( CAAPR.CAAPR_Photom.SubpipelinePhotom, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Gathering parallel threads.'
photom_output_list = [output.get() for output in photom_output_list if output.successful()==True]
photom_list = [photom for photom in photom_output_list if photom!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
photom_output_list.append( CAAPR.CAAPR_Photom.SubpipelinePhotom(source_dict, bands_dict[band], kwargs_dict) )
photom_list = [photom for photom in photom_output_list if photom!=None]
# Shortcut if no photometry is done
if not kwargs_dict['do_photom']:
photom_attempts = 'Complete'
gc.collect()
return
# Check that all photometry completed
photom_attempts, photom_output_list = CAAPR.CAAPR_Photom.PhotomCheck(photom_attempts, photom_output_list, source_dict, bands_dict, kwargs_dict)
# Record photometry results to file
CAAPR.CAAPR_IO.RecordPhotom(photom_list, source_dict, bands_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from photometry
CAAPR.CAAPR_Photom.ExcludedThumb(source_dict, bands_dict, kwargs_dict)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.PhotomThumbGrid(source_dict, bands_dict, kwargs_dict)
# Report time taken to do photometry, and tidy up
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing actual photometry: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-photom_start,4))+' seconds.'
# Tidy up temporary files and paths
bands_dict = PathTidy(source_dict, bands_dict, kwargs_dict)
# Report time taken for source, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Total time taken for source: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-source_start,4))+' seconds.'
if kwargs_dict['thumbnails']==True and kwargs_dict['messy']==False:
[os.remove(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',processed_map)) for processed_map in os.listdir(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps')) if '.fits' in processed_map]
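# A minimal, self-contained sketch of the dispatch/gather pattern used twice in
# PipelineMain above (hypothetical toy task standing in for the CAAPR
# sub-pipelines; kept commented out so importing this module stays side-effect free):
#
#   import multiprocessing as mp
#
#   def SubpipelineToy(x):
#       return x * x
#
#   pool = mp.Pool(processes=4)
#   async_results = [ pool.apply_async( SubpipelineToy, args=(x,) ) for x in range(8) ]
#   pool.close()
#   pool.join()
#   results = [ res.get() for res in async_results if res.successful() ]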
# Define function to check if data actually exists for any band for this source
def SourcePrelim(source_dict, bands_dict, kwargs_dict):
# Check that any of the bands actually have data for this source
kwargs_dict_copy = copy.deepcopy(kwargs_dict)
kwargs_dict_copy['verbose'] = False
bands_check = []
for band in bands_dict.keys():
source_id = source_dict['name']+'_'+bands_dict[band]['band_name']
in_fitspath, file_found = CAAPR.CAAPR_Pipeline.FilePrelim(source_dict, bands_dict[band], kwargs_dict_copy)
bands_check.append(file_found)
# Report to user if no data found
if True not in bands_check:
print '['+source_dict['name']+'] No data found in target directory for current source.'
# Make null entries in tables, as necessary
if kwargs_dict['fit_apertures']==True:
null_aperture_combined = [np.NaN, np.NaN, np.NaN, np.NaN]
CAAPR.CAAPR_IO.RecordAperture(null_aperture_combined, source_dict, kwargs_dict)
if kwargs_dict['do_photom']==True:
CAAPR.CAAPR_IO.RecordPhotom([], source_dict, bands_dict, kwargs_dict)
# Return result
if True not in bands_check:
return False
elif True in bands_check:
return True
# Define function that does basic initial handling of band parameters
def BandInitiate(band_dict):
# Make sure band has content
if band_dict==None:
return band_dict
# Parse band cutout request, converting string to boolean if necessary
if band_dict['make_cutout']=='True':
band_dict['make_cutout']=True
elif band_dict['make_cutout']=='False':
band_dict['make_cutout']=False
else:
try:
band_dict['make_cutout'] = float(band_dict['make_cutout'])
except:
raise Exception('Cutout request not understood; should either be False, or width of cutout in arcseconds.')
# Reset band directory to inviolate value, to purge any holdovers from previous source
band_dict['band_dir'] = band_dict['band_dir_inviolate']
# Return band dict
return band_dict
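# Worked examples of the parsing above (hypothetical values): 'True' -> True,
# 'False' -> False, '300' -> 300.0 (a requested cutout width in arcseconds);
# any other non-numeric string raises the exception.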
# Define function that performs preliminary checks of file type and location
def FilePrelim(source_dict, band_dict, kwargs_dict):
# Determine whether the user is specifying a directory full of FITS files in this band (in which case use standardised filename format), or just a single FITS file
try:
if os.path.isdir(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'], source_dict['name']+'_'+band_dict['band_name'] )
elif os.path.isfile(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'] )
except:
pdb.set_trace()
# Work out whether the file extension for FITS file in question is .fits or .fits.gz
file_found = False
try:
if os.path.exists(in_fitspath+'.fits'):
in_fitspath = in_fitspath+'.fits'
file_found = True
elif os.path.exists(in_fitspath+'.fits.gz'):
in_fitspath = in_fitspath+'.fits.gz'
file_found = True
except:
raise Exception('Path provided for band '+str(band_dict['band_name'])+' refers to neither a file nor a folder.')
# Return file values
return in_fitspath, file_found
# Initiate the pod (Photometry Organisation Dictionary)
def PodInitiate(in_fitspath, source_dict, band_dict, kwargs_dict):
source_id = source_dict['name']+'_'+band_dict['band_name']
if kwargs_dict['verbose']: print '['+source_id+'] Reading in FITS data.'
# Read in FITS file in question
in_fitsdata = astropy.io.fits.open(in_fitspath)
in_image = in_fitsdata[0].data
in_header = in_fitsdata[0].header
in_fitsdata.close()
in_wcs = astropy.wcs.WCS(in_header)
in_fitspath_size = float(os.stat(in_fitspath).st_size)
# Create the pod (Photometry Organisation Dictionary), which will bundle all the photometry data for this source & band into one dictionary to be passed between functions
pod = {'in_fitspath':in_fitspath,
'in_image':in_image,
'in_header':in_header,
'in_wcs':in_wcs,
'cutout':in_image.copy(),
'output_dir_path':kwargs_dict['output_dir_path'],
'temp_dir_path':kwargs_dict['temp_dir_path'],
'in_fitspath_size':in_fitspath_size,
'id':source_id,
'verbose':kwargs_dict['verbose']}
# Return pod
return pod
# Define function that determines preliminary map values
def MapPrelim(pod, source_dict, band_dict, verbose=False):
|
# Define function that fits and subtracts polynomial background filter from map
def PolySub(pod, mask_semimaj_pix, mask_axial_ratio, mask_angle, poly_order=5, cutoff_sigma=2.0, instant_quit=False):
if pod['verbose']: print '['+pod['id']+'] Determining if (and how) background is significantly variable.'
# If polynomial background subtraction not wanted, immediately return everything unchanged
if instant_quit:
pod['sky_poly'] = False
return pod
# If image has pixels smaller than some limit, downsample image to improve processing time
pix_size = pod['pix_arcsec']
pix_size_limit = 2.0
if pix_size<pix_size_limit:
downsample_factor = int(np.ceil(pix_size_limit/pix_size))
else:
downsample_factor = 1
image_ds = ChrisFuncs.Downsample(pod['cutout'], downsample_factor)
# Downsample related values accordingly
mask_semimaj_pix = mask_semimaj_pix / downsample_factor
centre_i = int(round(float((pod['centre_i']/downsample_factor)-1.0)))
centre_j = int(round(float((pod['centre_j']/downsample_factor)-1.0)))
# Find cutoff for excluding bright pixels by sigma-clipping map
clip_value = ChrisFuncs.SigmaClip(image_ds, tolerance=0.01, sigma_thresh=3.0, median=True)
noise_value = clip_value[0]
field_value = clip_value[1]
cutoff = field_value + ( cutoff_sigma * noise_value )
# Mask all image pixels in masking region around source
image_masked = image_ds.copy()
ellipse_mask = ChrisFuncs.Photom.EllipseMask(image_ds, mask_semimaj_pix, mask_axial_ratio, mask_angle, centre_i, centre_j)
image_masked[ np.where( ellipse_mask==1 ) ] = np.nan
# Mask all image pixels identified as being high SNR
image_masked[ np.where( image_masked>cutoff ) ] = np.nan
# Use astropy to set up 2-dimensional polynomial to the image
image_masked[ np.where( np.isnan(image_masked)==True ) ] = field_value
poly_model = astropy.modeling.models.Polynomial2D(degree=poly_order)
i_coords, j_coords = np.mgrid[:image_masked.shape[0], :image_masked.shape[1]]
fitter = astropy.modeling.fitting.LevMarLSQFitter()
i_coords = i_coords.flatten()
j_coords = j_coords.flatten()
image_flattened = image_masked.flatten()
good = np.where(np.isnan(image_flattened)==False)
i_coords = i_coords[good]
j_coords = j_coords[good]
# Attempt polynomial fit; if insufficient data then skip onwards
image_flattened = image_flattened[good]
try:
fit = fitter(poly_model, i_coords, j_coords, image_flattened)
except:
if pod['verbose']: print '['+pod['id']+'] Background is not significantly variable; leaving image unaltered.'
pod['sky_poly'] = False
return pod
# Create final polynomial filter (undoing downsampling using lorenzoriano GitHub script)
i_coords, j_coords = np.mgrid[:image_ds.shape[0], :image_ds.shape[1]]
poly_fit = fit(i_coords, j_coords)
poly_full = scipy.ndimage.interpolation.zoom(poly_fit, [ float(pod['cutout'].shape[0])/float(poly_fit.shape[0]), float(pod['cutout'].shape[1])/float(poly_fit.shape[1]) ], mode='nearest') #poly_full = congrid.congrid(poly_fit, (pod['cutout'].shape[0], pod['cutout'].shape[1]), minusone=True)
# Establish background variation before application of filter
sigma_thresh = 3.0
clip_in = ChrisFuncs.SigmaClip(pod['cutout'], tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_in = pod['cutout'][ np.where( pod['cutout']<clip_in[1] ) ]
spread_in = np.mean( np.abs( bg_in - clip_in[1] ) )
# How much reduction in background variation there was due to application of the filter
image_sub = pod['cutout'] - poly_full
clip_sub = ChrisFuncs.SigmaClip(image_sub, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_sub = image_sub[ np.where( image_sub<clip_sub[1] ) ]
spread_sub = np.mean( np.abs( bg_sub - clip_sub[1] ) )
spread_diff = spread_in / spread_sub
# If the filter made significant difference, apply to image and return it; otherwise, just return the unaltered map
if spread_diff>1.1:
if pod['verbose']: print '['+pod['id']+'] Background is significantly variable; removing polynomial background fit.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = image_sub
pod['sky_poly'] = poly_model
else:
if pod['verbose']: print '['+pod['id']+'] Background is not significantly variable; subtracting median background.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = pod['cutout'] - np.median(poly_full)
# pod['sky_poly'] = False
return pod
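# A minimal, self-contained sketch of the astropy.modeling fit-and-subtract
# pattern used in PolySub above (hypothetical standalone example, kept
# commented out; not part of the pipeline):
#
#   import numpy as np
#   import astropy.modeling.models
#   import astropy.modeling.fitting
#
#   image = np.random.normal(loc=1.0, scale=0.1, size=(64, 64))
#   poly_model = astropy.modeling.models.Polynomial2D(degree=5)
#   fitter = astropy.modeling.fitting.LevMarLSQFitter()
#   i_coords, j_coords = np.mgrid[:image.shape[0], :image.shape[1]]
#   fit = fitter(poly_model, i_coords.flatten(), j_coords.flatten(), image.flatten())
#   sky = fit(i_coords, j_coords)
#   image_subtracted = image - sky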
# Define function that tidies up folders and paths after completed processing a source
def PathTidy(source_dict, bands_dict, kwargs_dict):
# If we're not in messy mode, delete temporary directories
if not kwargs_dict['messy']:
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name'])):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name']))
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic')):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic'))
# Set band directories to standard, not whatever temporary cutout directories may have been used for this source
for band in bands_dict.keys():
if bands_dict[band]==None:
continue
bands_dict[band]['band_dir'] = bands_dict[band]['band_dir_inviolate']
# Define function that predicts time until completion, and produces plot thereof
def TimeEst(time_list, total, output_dir_path, source_dict, kwargs_dict):
# Add current timing to list of timings, and pass to time estimation function to get predicted completion time
time_list.append(time.time())
time_est = ChrisFuncs.TimeEst(time_list, total, plot=True)
time_remaining = time_est[0]
# Write estimated completion time to text file
time_file = open( os.path.join(output_dir_path,'Estimated_Completion_Time.txt'), 'w')
time_file.write(time_remaining)
time_file.close()
# Make plot showing timings so far, and predicted time remaining
time_fig = time_est[1]
time_fig.savefig( os.path.join(output_dir_path,'Estimated_Completion_Time.png'), dpi=150 )
time_fig.clf()
plt.close('all')
# If verbose, report estimated time until completion to user
if kwargs_dict['verbose']: print '['+source_dict['name']+'] CAAPR estimated completion at: '+time_remaining+'.'
| if pod['verbose']: print '['+pod['id']+'] Determining properties of map.'
# Check if x & y pixel sizes are meaningfully different. If so, panic; else, treat as same
pix_size = 3600.0 * pod['in_wcs'].wcs.cdelt
if float(abs(pix_size.max()))/float(abs(pix_size.min()))>(1+1E-3):
raise Exception('The x pixel size is noticeably different from the y pixel size.')
else:
pod['pix_arcsec'] = float(np.mean(np.abs(pix_size)))
# Determine source position in cutout in ij coordinates, and size of cutout
centre_xy = pod['in_wcs'].wcs_world2pix( np.array([[ source_dict['ra'], source_dict['dec'] ]]), 0 )
pod['centre_i'], pod['centre_j'] = float(centre_xy[0][1]), float(centre_xy[0][0])
pod['box_rad'] = int( round( float(pod['cutout'].shape[0]) * 0.5 ) )
# Determine beam size in pixels; if beam size not given, then assume map is Nyquist sampled (i.e., 2.355 pixels per beam)
if isinstance(band_dict['beam_arcsec'], numbers.Number):
pod['beam_pix'] = float(band_dict['beam_arcsec']) / pod['pix_arcsec']
else:
pod['beam_pix'] = 2.355  # Nyquist-sampled: the beam FWHM spans 2.355 pixels
# Check if current source lies within bounds of map; if not, flag and return
if pod['centre_i']<0 or pod['centre_i']>(pod['cutout'].shape)[0] or pod['centre_j']<0 or pod['centre_j']>(pod['cutout'].shape)[1]:
pod['within_bounds'] = False
if 'band_dir_inviolate' in band_dict.keys():
band_dict['band_dir'] = band_dict['band_dir_inviolate']
if pod['verbose']: print '['+pod['id']+'] Target not within bounds of map.'
else:
pod['within_bounds'] = True
# Return pod
return pod | identifier_body |
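# Worked example for the beam logic in MapPrelim above (hypothetical numbers):
# with beam_arcsec = 18.0 and pix_arcsec = 6.0, beam_pix = 18.0 / 6.0 = 3.0
# pixels; with no beam size given, the Nyquist assumption fixes beam_pix at
# 2.355 pixels regardless of pixel scale.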
CAAPR_Pipeline.py | # Import smorgasbord
import os
import sys
sys.path.append( str( os.path.join( os.path.split( os.path.dirname(os.path.abspath(__file__)) )[0], 'CAAPR', 'CAAPR_AstroMagic', 'PTS') ) )
import gc
import pdb
import time
import re
import copy
import warnings
import numbers
import random
import shutil
import numpy as np
import scipy.ndimage
import multiprocessing as mp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy.io.fits
import ChrisFuncs
import ChrisFuncs.Photom
import ChrisFuncs.FromGitHub
import CAAPR
# The main pipeline; the cutout-production, aperture-fitting, and actual photometry parts of the CAAPR process are called in here, as sub-pipelines
def PipelineMain(source_dict, bands_dict, kwargs_dict):
# Start timer, and check that the user has actually asked CAAPR to do something; if they haven't asked CAAPR to do anything at all, tell them that they're being a bit odd!
source_start = time.time()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Processing target '+source_dict['name']+'.'
if not kwargs_dict['fit_apertures'] and not kwargs_dict['do_photom']:
if not kwargs_dict['save_images']:
raise ValueError("None of fit_apertures, do_photom, and save_images is set to True. ")
print('Skipping photometry and aperture fitting, but will save processed images.')
# Check if any data actually exists for this source
if SourcePrelim(source_dict, bands_dict, kwargs_dict)==False:
return
# Loop over bands for initial processing
for band in bands_dict.keys():
# Do basic initial handling of band parameters
bands_dict[band] = BandInitiate(bands_dict[band])
# Functiont hat checks if user has requested a cutout; and, if so, produces it
bands_dict[band] = CAAPR.CAAPR_IO.Cutout(source_dict, bands_dict[band], kwargs_dict)
# Function that check sif it is possible to trim padding of no-coverage from edge of map (if user hasn't specificed a particular cutout be made)
bands_dict[band] = CAAPR.CAAPR_IO.UnpaddingCutout(source_dict, bands_dict[band], kwargs_dict)
# Check if star-subtraction is requested for any band; if so, commence catalogue pre-fetching
CAAPR.CAAPR_AstroMagic.PreCatalogue(source_dict, bands_dict, kwargs_dict)
# If aperture file not provided, commence aperture-fitting sub-pipeline
if kwargs_dict['fit_apertures']==True:
# Process sources inside while loop, to catch 'missed' bands
aperture_attempts = 0
while aperture_attempts!='Success':
# In standard operation, process multiple sources in parallel
aperture_start = time.time()
aperture_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
aperture_output_list.append( pool.apply_async( CAAPR.CAAPR_Aperture.SubpipelineAperture, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
del(pool)
aperture_list = [output.get() for output in aperture_output_list if output.successful()==True]
aperture_list = [aperture for aperture in aperture_list if aperture!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
aperture_output_list.append( CAAPR.CAAPR_Aperture.SubpipelineAperture(source_dict, bands_dict[band], kwargs_dict) )
aperture_list = [output for output in aperture_output_list if output!=None]
# Check that all photometry completed
aperture_attempts = CAAPR.CAAPR_Aperture.ApertureCheck(aperture_attempts, aperture_output_list, source_dict, bands_dict, kwargs_dict)
# Combine all fitted apertures to produce amalgam aperture
aperture_combined = CAAPR.CAAPR_Aperture.CombineAperture(aperture_list, source_dict, kwargs_dict)
# Record aperture properties to file
CAAPR.CAAPR_IO.RecordAperture(aperture_combined, source_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from aperture fitting
CAAPR.CAAPR_Aperture.ExcludedThumb(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.ApertureThumbGrid(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Report time taken to fit apertures, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing aperture fitting: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-aperture_start,4))+' seconds.'
# Commence actual photometry sub-pipeline
if kwargs_dict['do_photom'] or kwargs_dict['save_images']:
# Handle problem where the user hasn't provided an aperture file, but also hasn't told CAAPR to fit its own apertures.
if kwargs_dict['aperture_table_path']==False and kwargs_dict['fit_apertures']==False:
raise Exception('User has requested no aperture-fitting, and no photometry!')
# Process sources inside while loop, to catch 'missed' bands
photom_attempts = 0
while photom_attempts!='Complete':
# In standard operation, process multiple sources in parallel
photom_start = time.time()
photom_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
photom_output_list.append( pool.apply_async( CAAPR.CAAPR_Photom.SubpipelinePhotom, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Gathering parallel threads.'
photom_output_list = [output.get() for output in photom_output_list if output.successful()==True]
photom_list = [photom for photom in photom_output_list if photom!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
photom_output_list.append( CAAPR.CAAPR_Photom.SubpipelinePhotom(source_dict, bands_dict[band], kwargs_dict) )
photom_list = [photom for photom in photom_output_list if photom!=None]
# Shortcut if no photometry is done
if not kwargs_dict['do_photom']:
photom_attempts = 'Complete'
gc.collect()
return
# Check that all photometry completed
photom_attempts, photom_output_list = CAAPR.CAAPR_Photom.PhotomCheck(photom_attempts, photom_output_list, source_dict, bands_dict, kwargs_dict)
# Record photometry results to file
CAAPR.CAAPR_IO.RecordPhotom(photom_list, source_dict, bands_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from photometry
CAAPR.CAAPR_Photom.ExcludedThumb(source_dict, bands_dict, kwargs_dict)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.PhotomThumbGrid(source_dict, bands_dict, kwargs_dict)
# Report time taken to do photometry, and tidy up
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing actual photometry: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-photom_start,4))+' seconds.'
# Tidy up temporary files and paths
bands_dict = PathTidy(source_dict, bands_dict, kwargs_dict)
# Report time taken for source, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Total time taken for souce: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-source_start,4))+' seconds.'
if kwargs_dict['thumbnails']==True and kwargs_dict['messy']==False:
[os.remove(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',processed_map)) for processed_map in os.listdir(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps')) if '.fits' in processed_map]
# Define function to check if data actually exists for any band for this source
def SourcePrelim(source_dict, bands_dict, kwargs_dict):
# Check that any of the bands actually have data for this source
kwargs_dict_copy = copy.deepcopy(kwargs_dict)
kwargs_dict_copy['verbose'] = False
bands_check = []
for band in bands_dict.keys():
source_id = source_dict['name']+'_'+bands_dict[band]['band_name']
in_fitspath, file_found = CAAPR.CAAPR_Pipeline.FilePrelim(source_dict, bands_dict[band], kwargs_dict_copy)
bands_check.append(file_found)
# Report to user if no data found
if True not in bands_check:
print '['+source_id+'] No data found in target directory for current source.'
# Make null entries in tables, as necessary
if kwargs_dict['fit_apertures']==True:
null_aperture_combined = [np.NaN, np.NaN, np.NaN, np.NaN]
CAAPR.CAAPR_IO.RecordAperture(null_aperture_combined, source_dict, kwargs_dict)
if kwargs_dict['do_photom']==True:
CAAPR.CAAPR_IO.RecordPhotom([], source_dict, bands_dict, kwargs_dict)
# Return result
if True not in bands_check:
return False
elif True in bands_check:
return True
# Define function that does basic initial handling of band parameters
def BandInitiate(band_dict):
# Make sure band has content
if band_dict==None:
return band_dict
# Parse band cutout request, converting string to boolean if necessary
if band_dict['make_cutout']=='True':
band_dict['make_cutout']=True
elif band_dict['make_cutout']=='False':
band_dict['make_cutout']=False
else:
try:
band_dict['make_cutout'] = float(band_dict['make_cutout'])
except:
raise Exception('Cutout request not understood; should either be False, or width of cutout in arcseconds.')
# Reset band directory to inviolate value, to purge any holdovers from previous source
band_dict['band_dir'] = band_dict['band_dir_inviolate']
# Return band dict
return band_dict
# Define function that performs preimilary checks of file type and location
def | (source_dict, band_dict, kwargs_dict):
# Determine whether the user is specificing a directroy full of FITS files in this band (in which case use standardised filename format), or just a single FITS file
try:
if os.path.isdir(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'], source_dict['name']+'_'+band_dict['band_name'] )
elif os.path.isfile(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'] )
except:
pdb.set_trace()
# Work out whether the file extension for FITS file in question is .fits or .fits.gz
file_found = False
try:
if os.path.exists(in_fitspath+'.fits'):
in_fitspath = in_fitspath+'.fits'
file_found = True
elif os.path.exists(in_fitspath+'.fits.gz'):
in_fitspath = in_fitspath+'.fits.gz'
file_found = True
except:
raise Exception('Path provided for band '+str(band_dict['band_name'])+' refers to neither a file nor a folder.')
# Return file values
return in_fitspath, file_found
# Initiate the pod (Photometry Organisation Dictionary)
def PodInitiate(in_fitspath, source_dict, band_dict, kwargs_dict):
source_id = source_dict['name']+'_'+band_dict['band_name']
if kwargs_dict['verbose']: print '['+source_id+'] Reading in FITS data.'
# Read in FITS file in question
in_fitsdata = astropy.io.fits.open(in_fitspath)
in_image = in_fitsdata[0].data
in_header = in_fitsdata[0].header
in_fitsdata.close()
in_wcs = astropy.wcs.WCS(in_header)
in_fitspath_size = float(os.stat(in_fitspath).st_size)
# Create the pod (Photometry Organisation Dictionary), which will bundle all the photometry data for this source & band into one dictionary to be passed between functions
pod = {'in_fitspath':in_fitspath,
'in_image':in_image,
'in_header':in_header,
'in_wcs':in_wcs,
'cutout':in_image.copy(),
'output_dir_path':kwargs_dict['output_dir_path'],
'temp_dir_path':kwargs_dict['temp_dir_path'],
'in_fitspath_size':in_fitspath_size,
'id':source_id,
'verbose':kwargs_dict['verbose']}
# Return pod
return pod
# Define function that determines preliminary map values
def MapPrelim(pod, source_dict, band_dict, verbose=False):
if pod['verbose']: print '['+pod['id']+'] Determining properties of map.'
# Check if x & y pixel sizes are meaningfully different. If so, panic; else, treat as same
pix_size = 3600.0 * pod['in_wcs'].wcs.cdelt
if float(abs(pix_size.max()))/float(abs(pix_size.min()))>(1+1E-3):
raise Exception('The x pixel size if noticably different from the y pixel size.')
else:
pod['pix_arcsec'] = float(np.mean(np.abs(pix_size)))
# Determine source position in cutout in ij coordinates, and size of cutout
centre_xy = pod['in_wcs'].wcs_world2pix( np.array([[ source_dict['ra'], source_dict['dec'] ]]), 0 )
pod['centre_i'], pod['centre_j'] = float(centre_xy[0][1]), float(centre_xy[0][0])
pod['box_rad'] = int( round( float(pod['cutout'].shape[0]) * 0.5 ) )
# Determine beam size in pixels; if beam size not given, then assume map is Nyquist sampled (ie, 2.355 pixels ber beam)
if isinstance(band_dict['beam_arcsec'], numbers.Number):
pod['beam_pix'] = float(band_dict['beam_arcsec']) / pod['pix_arcsec']
else:
pod['beam_pix'] = pod['pix_arcsec'] * 2.355
# Check if current source lies within bounds of map; if not, fai and return)
if pod['centre_i']<0 or pod['centre_i']>(pod['cutout'].shape)[0] or pod['centre_j']<0 or pod['centre_j']>(pod['cutout'].shape)[1]:
pod['within_bounds'] = False
if 'band_dir_inviolate' in band_dict.keys():
band_dict['band_dir'] = band_dict['band_dir_inviolate']
if pod['verbose']: print '['+pod['id']+'] Target not within bounds of map.'
else:
pod['within_bounds'] = True
# Return pod
return pod
# Define function that fits and subtracts polynomial background filter from map
def PolySub(pod, mask_semimaj_pix, mask_axial_ratio, mask_angle, poly_order=5, cutoff_sigma=2.0, instant_quit=False):
if pod['verbose']: print '['+pod['id']+'] Determining if (and how) background is significnatly variable.'
# If polynomial background subraction not wanted, immediately return everything unchanged
if instant_quit:
pod['sky_poly'] = False
return pod
# If image has pixels smaller than some limit, downsample image to improve processing time
pix_size = pod['pix_arcsec']
pix_size_limit = 2.0
if pix_size<pix_size_limit:
downsample_factor = int(np.ceil(pix_size_limit/pix_size))
else:
downsample_factor = 1
image_ds = ChrisFuncs.Downsample(pod['cutout'], downsample_factor)
# Downsample related values accordingly
mask_semimaj_pix = mask_semimaj_pix / downsample_factor
centre_i = int(round(float((0.5*pod['centre_i'])-1.0)))
centre_j = int(round(float((0.5*pod['centre_j'])-1.0)))
# Find cutoff for excluding bright pixels by sigma-clipping map
clip_value = ChrisFuncs.SigmaClip(image_ds, tolerance=0.01, sigma_thresh=3.0, median=True)
noise_value = clip_value[0]
field_value = clip_value[1]
cutoff = field_value + ( cutoff_sigma * noise_value )
# Mask all image pixels in masking region around source
image_masked = image_ds.copy()
ellipse_mask = ChrisFuncs.Photom.EllipseMask(image_ds, mask_semimaj_pix, mask_axial_ratio, mask_angle, centre_i, centre_j)
image_masked[ np.where( ellipse_mask==1 ) ] = np.nan
# Mask all image pixels identified as being high SNR
image_masked[ np.where( image_masked>cutoff ) ] = np.nan
# Use astropy to set up 2-dimensional polynomial to the image
image_masked[ np.where( np.isnan(image_masked)==True ) ] = field_value
poly_model = astropy.modeling.models.Polynomial2D(degree=poly_order)
i_coords, j_coords = np.mgrid[:image_masked.shape[0], :image_masked.shape[1]]
fitter = astropy.modeling.fitting.LevMarLSQFitter()
i_coords = i_coords.flatten()
j_coords = j_coords.flatten()
image_flattened = image_masked.flatten()
good = np.where(np.isnan(image_flattened)==False)
i_coords = i_coords[good]
j_coords = j_coords[good]
# Attempt polynomial fit; if insufficient data then skip onwards
image_flattened = image_flattened[good]
try:
fit = fitter(poly_model, i_coords, j_coords, image_flattened)
except:
if pod['verbose']: print '['+pod['id']+'] Background is not significnatly variable; leaving image unaltered.'
pod['sky_poly'] = False
return pod
# Create final polynomial filter (undoing downsampling using lorenzoriano GitHub script)
i_coords, j_coords = np.mgrid[:image_ds.shape[0], :image_ds.shape[1]]
poly_fit = fit(i_coords, j_coords)
poly_full = scipy.ndimage.interpolation.zoom(poly_fit, [ float(pod['cutout'].shape[0])/float(poly_fit.shape[0]), float(pod['cutout'].shape[1])/float(poly_fit.shape[1]) ], mode='nearest') #poly_full = congrid.congrid(poly_fit, (pod['cutout'].shape[0], pod['cutout'].shape[1]), minusone=True)
# Establish background variation before application of filter
sigma_thresh = 3.0
clip_in = ChrisFuncs.SigmaClip(pod['cutout'], tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_in = pod['cutout'][ np.where( pod['cutout']<clip_in[1] ) ]
spread_in = np.mean( np.abs( bg_in - clip_in[1] ) )
# How much reduction in background variation there was due to application of the filter
image_sub = pod['cutout'] - poly_full
clip_sub = ChrisFuncs.SigmaClip(image_sub, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_sub = image_sub[ np.where( image_sub<clip_sub[1] ) ]
spread_sub = np.mean( np.abs( bg_sub - clip_sub[1] ) )
spread_diff = spread_in / spread_sub
# If the filter made significant difference, apply to image and return it; otherwise, just return the unaltered map
if spread_diff>1.1:
if pod['verbose']: print '['+pod['id']+'] Background is significnatly variable; removing polynomial background fit.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = image_sub
pod['sky_poly'] = poly_model
else:
if pod['verbose']: print '['+pod['id']+'] Background is not significnatly variable; subtracting median background.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = pod['cutout'] - np.median(poly_full)
# pod['sky_poly'] = False
return pod
# Define function that tidies up folders and paths after completed processing a source
def PathTidy(source_dict, bands_dict, kwargs_dict):
# If we're not in messy mode, delete temporary directories
if not kwargs_dict['messy']:
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name'])):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name']))
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic')):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic'))
# Set band directories to standard, not whatever temporary cutout directories may have been used for this source
for band in bands_dict.keys():
if bands_dict[band]==None:
continue
bands_dict[band]['band_dir'] = bands_dict[band]['band_dir_inviolate']
# Define function that predicts time until completion, and produces plot thereof
def TimeEst(time_list, total, output_dir_path, source_dict, kwargs_dict):
# Add current timing to list of timings, and pass to time estimation function to get predicted completion time
time_list.append(time.time())
time_est = ChrisFuncs.TimeEst(time_list, total, plot=True)
time_remaining = time_est[0]
# Write estimated completion time to text file
time_file = open( os.path.join(output_dir_path,'Estimated_Completion_Time.txt'), 'w')
time_file.write(time_remaining)
time_file.close()
# Make plot showing timings so far, and predicted time remaining
time_fig = time_est[1]
time_fig.savefig( os.path.join(output_dir_path,'Estimated_Completion_Time.png'), dpi=150 )
time_fig.clf()
plt.close('all')
# If vorbose, report estimated time until completion to user
if kwargs_dict['verbose']: print '['+source_dict['name']+'] CAAPR estimated completion at: '+time_remaining+'.'
| FilePrelim | identifier_name |
CAAPR_Pipeline.py | # Import smorgasbord
import os
import sys
sys.path.append( str( os.path.join( os.path.split( os.path.dirname(os.path.abspath(__file__)) )[0], 'CAAPR', 'CAAPR_AstroMagic', 'PTS') ) )
import gc
import pdb
import time
import re
import copy
import warnings
import numbers
import random
import shutil
import numpy as np
import scipy.ndimage
import multiprocessing as mp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy.io.fits
import ChrisFuncs
import ChrisFuncs.Photom
import ChrisFuncs.FromGitHub
import CAAPR
# The main pipeline; the cutout-production, aperture-fitting, and actual photometry parts of the CAAPR process are called in here, as sub-pipelines
def PipelineMain(source_dict, bands_dict, kwargs_dict):
# Start timer, and check that the user has actually asked CAAPR to do something; if they haven't asked CAAPR to do anything at all, tell them that they're being a bit odd!
source_start = time.time()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Processing target '+source_dict['name']+'.'
if not kwargs_dict['fit_apertures'] and not kwargs_dict['do_photom']:
if not kwargs_dict['save_images']:
raise ValueError("None of fit_apertures, do_photom, and save_images is set to True. ")
print('Skipping photometry and aperture fitting, but will save processed images.')
# Check if any data actually exists for this source
if SourcePrelim(source_dict, bands_dict, kwargs_dict)==False:
return
# Loop over bands for initial processing
for band in bands_dict.keys():
# Do basic initial handling of band parameters
bands_dict[band] = BandInitiate(bands_dict[band])
# Functiont hat checks if user has requested a cutout; and, if so, produces it
bands_dict[band] = CAAPR.CAAPR_IO.Cutout(source_dict, bands_dict[band], kwargs_dict)
# Function that check sif it is possible to trim padding of no-coverage from edge of map (if user hasn't specificed a particular cutout be made)
bands_dict[band] = CAAPR.CAAPR_IO.UnpaddingCutout(source_dict, bands_dict[band], kwargs_dict)
# Check if star-subtraction is requested for any band; if so, commence catalogue pre-fetching
CAAPR.CAAPR_AstroMagic.PreCatalogue(source_dict, bands_dict, kwargs_dict)
# If aperture file not provided, commence aperture-fitting sub-pipeline
if kwargs_dict['fit_apertures']==True:
# Process sources inside while loop, to catch 'missed' bands
aperture_attempts = 0
while aperture_attempts!='Success':
# In standard operation, process multiple sources in parallel
aperture_start = time.time()
aperture_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
aperture_output_list.append( pool.apply_async( CAAPR.CAAPR_Aperture.SubpipelineAperture, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
del(pool)
aperture_list = [output.get() for output in aperture_output_list if output.successful()==True]
aperture_list = [aperture for aperture in aperture_list if aperture!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
aperture_output_list.append( CAAPR.CAAPR_Aperture.SubpipelineAperture(source_dict, bands_dict[band], kwargs_dict) )
aperture_list = [output for output in aperture_output_list if output!=None]
# Check that all photometry completed
aperture_attempts = CAAPR.CAAPR_Aperture.ApertureCheck(aperture_attempts, aperture_output_list, source_dict, bands_dict, kwargs_dict)
# Combine all fitted apertures to produce amalgam aperture
aperture_combined = CAAPR.CAAPR_Aperture.CombineAperture(aperture_list, source_dict, kwargs_dict)
# Record aperture properties to file
CAAPR.CAAPR_IO.RecordAperture(aperture_combined, source_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from aperture fitting
CAAPR.CAAPR_Aperture.ExcludedThumb(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.ApertureThumbGrid(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Report time taken to fit apertures, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing aperture fitting: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-aperture_start,4))+' seconds.'
# Commence actual photometry sub-pipeline
if kwargs_dict['do_photom'] or kwargs_dict['save_images']:
# Handle problem where the user hasn't provided an aperture file, but also hasn't told CAAPR to fit its own apertures.
if kwargs_dict['aperture_table_path']==False and kwargs_dict['fit_apertures']==False:
raise Exception('User has requested no aperture-fitting, and no photometry!')
# Process sources inside while loop, to catch 'missed' bands
photom_attempts = 0
while photom_attempts!='Complete':
# In standard operation, process multiple sources in parallel
photom_start = time.time()
photom_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
photom_output_list.append( pool.apply_async( CAAPR.CAAPR_Photom.SubpipelinePhotom, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Gathering parallel threads.'
photom_output_list = [output.get() for output in photom_output_list if output.successful()==True]
photom_list = [photom for photom in photom_output_list if photom!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
photom_output_list.append( CAAPR.CAAPR_Photom.SubpipelinePhotom(source_dict, bands_dict[band], kwargs_dict) )
photom_list = [photom for photom in photom_output_list if photom!=None]
# Shortcut if no photometry is done
if not kwargs_dict['do_photom']:
photom_attempts = 'Complete'
gc.collect()
return
# Check that all photometry completed
photom_attempts, photom_output_list = CAAPR.CAAPR_Photom.PhotomCheck(photom_attempts, photom_output_list, source_dict, bands_dict, kwargs_dict)
# Record photometry results to file
CAAPR.CAAPR_IO.RecordPhotom(photom_list, source_dict, bands_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from photometry
CAAPR.CAAPR_Photom.ExcludedThumb(source_dict, bands_dict, kwargs_dict)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.PhotomThumbGrid(source_dict, bands_dict, kwargs_dict)
# Report time taken to do photometry, and tidy up
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing actual photometry: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-photom_start,4))+' seconds.'
# Tidy up temporary files and paths
bands_dict = PathTidy(source_dict, bands_dict, kwargs_dict)
# Report time taken for source, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Total time taken for souce: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-source_start,4))+' seconds.'
if kwargs_dict['thumbnails']==True and kwargs_dict['messy']==False:
[os.remove(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',processed_map)) for processed_map in os.listdir(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps')) if '.fits' in processed_map]
# Define function to check if data actually exists for any band for this source
def SourcePrelim(source_dict, bands_dict, kwargs_dict):
# Check that any of the bands actually have data for this source
kwargs_dict_copy = copy.deepcopy(kwargs_dict)
kwargs_dict_copy['verbose'] = False
bands_check = []
for band in bands_dict.keys():
source_id = source_dict['name']+'_'+bands_dict[band]['band_name']
in_fitspath, file_found = CAAPR.CAAPR_Pipeline.FilePrelim(source_dict, bands_dict[band], kwargs_dict_copy)
bands_check.append(file_found)
# Report to user if no data found
if True not in bands_check:
print '['+source_id+'] No data found in target directory for current source.'
# Make null entries in tables, as necessary
if kwargs_dict['fit_apertures']==True:
null_aperture_combined = [np.NaN, np.NaN, np.NaN, np.NaN]
CAAPR.CAAPR_IO.RecordAperture(null_aperture_combined, source_dict, kwargs_dict)
if kwargs_dict['do_photom']==True:
CAAPR.CAAPR_IO.RecordPhotom([], source_dict, bands_dict, kwargs_dict)
# Return result
if True not in bands_check:
return False
elif True in bands_check:
return True
# Define function that does basic initial handling of band parameters
def BandInitiate(band_dict):
# Make sure band has content
if band_dict==None:
return band_dict
# Parse band cutout request, converting string to boolean if necessary
if band_dict['make_cutout']=='True':
band_dict['make_cutout']=True
elif band_dict['make_cutout']=='False':
band_dict['make_cutout']=False
else:
try:
band_dict['make_cutout'] = float(band_dict['make_cutout'])
except:
raise Exception('Cutout request not understood; should either be False, or width of cutout in arcseconds.')
# Reset band directory to inviolate value, to purge any holdovers from previous source
band_dict['band_dir'] = band_dict['band_dir_inviolate']
# Return band dict
return band_dict
# Define function that performs preimilary checks of file type and location
def FilePrelim(source_dict, band_dict, kwargs_dict):
# Determine whether the user is specificing a directroy full of FITS files in this band (in which case use standardised filename format), or just a single FITS file
try:
if os.path.isdir(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'], source_dict['name']+'_'+band_dict['band_name'] )
elif os.path.isfile(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'] )
except:
pdb.set_trace()
# Work out whether the file extension for FITS file in question is .fits or .fits.gz
file_found = False
try:
if os.path.exists(in_fitspath+'.fits'):
in_fitspath = in_fitspath+'.fits'
file_found = True
elif os.path.exists(in_fitspath+'.fits.gz'):
in_fitspath = in_fitspath+'.fits.gz'
file_found = True
except:
raise Exception('Path provided for band '+str(band_dict['band_name'])+' refers to neither a file nor a folder.')
# Return file values
return in_fitspath, file_found
# Initiate the pod (Photometry Organisation Dictionary)
def PodInitiate(in_fitspath, source_dict, band_dict, kwargs_dict):
source_id = source_dict['name']+'_'+band_dict['band_name']
if kwargs_dict['verbose']: print '['+source_id+'] Reading in FITS data.'
# Read in FITS file in question
in_fitsdata = astropy.io.fits.open(in_fitspath)
in_image = in_fitsdata[0].data
in_header = in_fitsdata[0].header
in_fitsdata.close()
in_wcs = astropy.wcs.WCS(in_header)
in_fitspath_size = float(os.stat(in_fitspath).st_size)
# Create the pod (Photometry Organisation Dictionary), which will bundle all the photometry data for this source & band into one dictionary to be passed between functions
pod = {'in_fitspath':in_fitspath,
'in_image':in_image,
'in_header':in_header,
'in_wcs':in_wcs,
'cutout':in_image.copy(),
'output_dir_path':kwargs_dict['output_dir_path'],
'temp_dir_path':kwargs_dict['temp_dir_path'],
'in_fitspath_size':in_fitspath_size,
'id':source_id,
'verbose':kwargs_dict['verbose']}
# Return pod
return pod
# Define function that determines preliminary map values
def MapPrelim(pod, source_dict, band_dict, verbose=False):
if pod['verbose']: print '['+pod['id']+'] Determining properties of map.'
# Check if x & y pixel sizes are meaningfully different. If so, panic; else, treat as same
pix_size = 3600.0 * pod['in_wcs'].wcs.cdelt
if float(abs(pix_size.max()))/float(abs(pix_size.min()))>(1+1E-3):
raise Exception('The x pixel size if noticably different from the y pixel size.')
else:
pod['pix_arcsec'] = float(np.mean(np.abs(pix_size)))
# Determine source position in cutout in ij coordinates, and size of cutout
centre_xy = pod['in_wcs'].wcs_world2pix( np.array([[ source_dict['ra'], source_dict['dec'] ]]), 0 )
pod['centre_i'], pod['centre_j'] = float(centre_xy[0][1]), float(centre_xy[0][0])
pod['box_rad'] = int( round( float(pod['cutout'].shape[0]) * 0.5 ) )
# Determine beam size in pixels; if beam size not given, then assume map is Nyquist sampled (ie, 2.355 pixels ber beam)
if isinstance(band_dict['beam_arcsec'], numbers.Number):
pod['beam_pix'] = float(band_dict['beam_arcsec']) / pod['pix_arcsec']
else:
pod['beam_pix'] = pod['pix_arcsec'] * 2.355
# Check if current source lies within bounds of map; if not, fai and return)
if pod['centre_i']<0 or pod['centre_i']>(pod['cutout'].shape)[0] or pod['centre_j']<0 or pod['centre_j']>(pod['cutout'].shape)[1]:
pod['within_bounds'] = False
if 'band_dir_inviolate' in band_dict.keys():
band_dict['band_dir'] = band_dict['band_dir_inviolate']
if pod['verbose']: print '['+pod['id']+'] Target not within bounds of map.'
else:
pod['within_bounds'] = True
# Return pod
return pod
# Define function that fits and subtracts polynomial background filter from map
def PolySub(pod, mask_semimaj_pix, mask_axial_ratio, mask_angle, poly_order=5, cutoff_sigma=2.0, instant_quit=False):
if pod['verbose']: print '['+pod['id']+'] Determining if (and how) background is significnatly variable.'
# If polynomial background subraction not wanted, immediately return everything unchanged
if instant_quit:
pod['sky_poly'] = False
return pod
# If image has pixels smaller than some limit, downsample image to improve processing time
pix_size = pod['pix_arcsec']
pix_size_limit = 2.0
if pix_size<pix_size_limit:
downsample_factor = int(np.ceil(pix_size_limit/pix_size))
else:
downsample_factor = 1
image_ds = ChrisFuncs.Downsample(pod['cutout'], downsample_factor)
# Downsample related values accordingly
mask_semimaj_pix = mask_semimaj_pix / downsample_factor
centre_i = int(round(float((0.5*pod['centre_i'])-1.0)))
centre_j = int(round(float((0.5*pod['centre_j'])-1.0)))
# Find cutoff for excluding bright pixels by sigma-clipping map
clip_value = ChrisFuncs.SigmaClip(image_ds, tolerance=0.01, sigma_thresh=3.0, median=True)
noise_value = clip_value[0]
field_value = clip_value[1]
cutoff = field_value + ( cutoff_sigma * noise_value )
# Mask all image pixels in masking region around source
image_masked = image_ds.copy()
ellipse_mask = ChrisFuncs.Photom.EllipseMask(image_ds, mask_semimaj_pix, mask_axial_ratio, mask_angle, centre_i, centre_j)
image_masked[ np.where( ellipse_mask==1 ) ] = np.nan
# Mask all image pixels identified as being high SNR
image_masked[ np.where( image_masked>cutoff ) ] = np.nan
# Use astropy to set up 2-dimensional polynomial to the image
image_masked[ np.where( np.isnan(image_masked)==True ) ] = field_value
poly_model = astropy.modeling.models.Polynomial2D(degree=poly_order)
i_coords, j_coords = np.mgrid[:image_masked.shape[0], :image_masked.shape[1]]
fitter = astropy.modeling.fitting.LevMarLSQFitter()
i_coords = i_coords.flatten()
j_coords = j_coords.flatten()
image_flattened = image_masked.flatten()
good = np.where(np.isnan(image_flattened)==False)
i_coords = i_coords[good]
j_coords = j_coords[good]
# Attempt polynomial fit; if insufficient data then skip onwards
image_flattened = image_flattened[good]
try:
fit = fitter(poly_model, i_coords, j_coords, image_flattened)
except:
if pod['verbose']: print '['+pod['id']+'] Background is not significnatly variable; leaving image unaltered.'
pod['sky_poly'] = False
return pod
# Create final polynomial filter (undoing downsampling using lorenzoriano GitHub script)
i_coords, j_coords = np.mgrid[:image_ds.shape[0], :image_ds.shape[1]]
poly_fit = fit(i_coords, j_coords)
poly_full = scipy.ndimage.interpolation.zoom(poly_fit, [ float(pod['cutout'].shape[0])/float(poly_fit.shape[0]), float(pod['cutout'].shape[1])/float(poly_fit.shape[1]) ], mode='nearest') #poly_full = congrid.congrid(poly_fit, (pod['cutout'].shape[0], pod['cutout'].shape[1]), minusone=True)
# Establish background variation before application of filter
sigma_thresh = 3.0
clip_in = ChrisFuncs.SigmaClip(pod['cutout'], tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_in = pod['cutout'][ np.where( pod['cutout']<clip_in[1] ) ]
| clip_sub = ChrisFuncs.SigmaClip(image_sub, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_sub = image_sub[ np.where( image_sub<clip_sub[1] ) ]
spread_sub = np.mean( np.abs( bg_sub - clip_sub[1] ) )
spread_diff = spread_in / spread_sub
# If the filter made significant difference, apply to image and return it; otherwise, just return the unaltered map
if spread_diff>1.1:
if pod['verbose']: print '['+pod['id']+'] Background is significantly variable; removing polynomial background fit.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = image_sub
pod['sky_poly'] = poly_model
else:
if pod['verbose']: print '['+pod['id']+'] Background is not significantly variable; subtracting median background.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = pod['cutout'] - np.median(poly_full)
# pod['sky_poly'] = False
return pod
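# --- Illustrative sketch (not part of the original pipeline): the spread-ratio
# test PolySub uses, on synthetic data. The clip level here is a plain median
# rather than ChrisFuncs.SigmaClip, which is an assumption made for brevity.
def _mean_abs_spread(img):
    level = np.median(img)
    bg = img[img < level]
    return np.mean(np.abs(bg - level))

_i, _j = np.mgrid[:64, :64]
_image = np.random.RandomState(0).normal(0.0, 1.0, (64, 64)) + 0.05 * _i
_subbed = _image - 0.05 * _i # stand-in for subtracting the fitted polynomial
# A ratio above 1.1 (>10% less scatter) is what makes PolySub keep the fit
print(_mean_abs_spread(_image) / _mean_abs_spread(_subbed) > 1.1)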
# Define function that tidies up folders and paths after completed processing a source
def PathTidy(source_dict, bands_dict, kwargs_dict):
# If we're not in messy mode, delete temporary directories
if not kwargs_dict['messy']:
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name'])):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name']))
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic')):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic'))
# Set band directories to standard, not whatever temporary cutout directories may have been used for this source
for band in bands_dict.keys():
if bands_dict[band]==None:
continue
bands_dict[band]['band_dir'] = bands_dict[band]['band_dir_inviolate']
# Define function that predicts time until completion, and produces plot thereof
def TimeEst(time_list, total, output_dir_path, source_dict, kwargs_dict):
# Add current timing to list of timings, and pass to time estimation function to get predicted completion time
time_list.append(time.time())
time_est = ChrisFuncs.TimeEst(time_list, total, plot=True)
time_remaining = time_est[0]
# Write estimated completion time to text file
time_file = open( os.path.join(output_dir_path,'Estimated_Completion_Time.txt'), 'w')
time_file.write(time_remaining)
time_file.close()
# Make plot showing timings so far, and predicted time remaining
time_fig = time_est[1]
time_fig.savefig( os.path.join(output_dir_path,'Estimated_Completion_Time.png'), dpi=150 )
time_fig.clf()
plt.close('all')
# If verbose, report estimated time until completion to user
if kwargs_dict['verbose']: print '['+source_dict['name']+'] CAAPR estimated completion at: '+time_remaining+'.' | spread_in = np.mean( np.abs( bg_in - clip_in[1] ) )
# Quantify how much the filter reduced the background variation
image_sub = pod['cutout'] - poly_full
| random_line_split |
CAAPR_Pipeline.py | # Import smorgasbord
import os
import sys
sys.path.append( str( os.path.join( os.path.split( os.path.dirname(os.path.abspath(__file__)) )[0], 'CAAPR', 'CAAPR_AstroMagic', 'PTS') ) )
import gc
import pdb
import time
import re
import copy
import warnings
import numbers
import random
import shutil
import numpy as np
import scipy.ndimage
import multiprocessing as mp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy.io.fits
import astropy.wcs # used below (PodInitiate, MapPrelim) but missing from the original imports
import astropy.modeling.models # used below (PolySub)
import astropy.modeling.fitting
import ChrisFuncs
import ChrisFuncs.Photom
import ChrisFuncs.FromGitHub
import CAAPR
# The main pipeline; the cutout-production, aperture-fitting, and actual photometry parts of the CAAPR process are called in here, as sub-pipelines
def PipelineMain(source_dict, bands_dict, kwargs_dict):
# Start timer, and check that the user has actually asked CAAPR to do something; if they haven't asked CAAPR to do anything at all, tell them that they're being a bit odd!
source_start = time.time()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Processing target '+source_dict['name']+'.'
if not kwargs_dict['fit_apertures'] and not kwargs_dict['do_photom']:
if not kwargs_dict['save_images']:
raise ValueError("None of fit_apertures, do_photom, and save_images is set to True. ")
print 'Skipping photometry and aperture fitting, but will save processed images.'
# Check if any data actually exists for this source
if SourcePrelim(source_dict, bands_dict, kwargs_dict)==False:
return
# Loop over bands for initial processing
for band in bands_dict.keys():
# Do basic initial handling of band parameters
bands_dict[band] = BandInitiate(bands_dict[band])
# Function that checks if user has requested a cutout; and, if so, produces it
bands_dict[band] = CAAPR.CAAPR_IO.Cutout(source_dict, bands_dict[band], kwargs_dict)
# Function that checks if it is possible to trim padding of no-coverage from edge of map (if user hasn't specified that a particular cutout be made)
bands_dict[band] = CAAPR.CAAPR_IO.UnpaddingCutout(source_dict, bands_dict[band], kwargs_dict)
# Check if star-subtraction is requested for any band; if so, commence catalogue pre-fetching
CAAPR.CAAPR_AstroMagic.PreCatalogue(source_dict, bands_dict, kwargs_dict)
# If aperture file not provided, commence aperture-fitting sub-pipeline
if kwargs_dict['fit_apertures']==True:
# Process sources inside while loop, to catch 'missed' bands
aperture_attempts = 0
while aperture_attempts!='Success':
# In standard operation, process multiple sources in parallel
aperture_start = time.time()
aperture_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
aperture_output_list.append( pool.apply_async( CAAPR.CAAPR_Aperture.SubpipelineAperture, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
del(pool)
aperture_list = [output.get() for output in aperture_output_list if output.successful()==True]
aperture_list = [aperture for aperture in aperture_list if aperture!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
aperture_output_list.append( CAAPR.CAAPR_Aperture.SubpipelineAperture(source_dict, bands_dict[band], kwargs_dict) )
aperture_list = [output for output in aperture_output_list if output!=None]
# Check that all photometry completed
aperture_attempts = CAAPR.CAAPR_Aperture.ApertureCheck(aperture_attempts, aperture_output_list, source_dict, bands_dict, kwargs_dict)
# Combine all fitted apertures to produce amalgam aperture
aperture_combined = CAAPR.CAAPR_Aperture.CombineAperture(aperture_list, source_dict, kwargs_dict)
# Record aperture properties to file
CAAPR.CAAPR_IO.RecordAperture(aperture_combined, source_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from aperture fitting
CAAPR.CAAPR_Aperture.ExcludedThumb(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.ApertureThumbGrid(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined)
# Report time taken to fit apertures, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing aperture fitting: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-aperture_start,4))+' seconds.'
# Commence actual photometry sub-pipeline
if kwargs_dict['do_photom'] or kwargs_dict['save_images']:
# Handle problem where the user hasn't provided an aperture file, but also hasn't told CAAPR to fit its own apertures.
if kwargs_dict['aperture_table_path']==False and kwargs_dict['fit_apertures']==False:
raise Exception('User has not provided an aperture file, and has not requested aperture-fitting!')
# Process sources inside while loop, to catch 'missed' bands
photom_attempts = 0
while photom_attempts!='Complete':
# In standard operation, process multiple sources in parallel
photom_start = time.time()
photom_output_list = []
if kwargs_dict['parallel']==True:
bands_dict_keys = bands_dict.keys()
random.shuffle(bands_dict_keys)
pool = mp.Pool(processes=kwargs_dict['n_proc'])
for band in bands_dict_keys:
photom_output_list.append( pool.apply_async( CAAPR.CAAPR_Photom.SubpipelinePhotom, args=(source_dict, bands_dict[band], kwargs_dict) ) )
pool.close()
pool.join()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Gathering parallel threads.'
photom_output_list = [output.get() for output in photom_output_list if output.successful()==True]
photom_list = [photom for photom in photom_output_list if photom!=None]
# If parallelisation is disabled, process sources one-at-a-time
elif kwargs_dict['parallel']==False:
for band in bands_dict.keys():
photom_output_list.append( CAAPR.CAAPR_Photom.SubpipelinePhotom(source_dict, bands_dict[band], kwargs_dict) )
photom_list = [photom for photom in photom_output_list if photom!=None]
# Shortcut if no photometry is done
if not kwargs_dict['do_photom']:
photom_attempts = 'Complete'
gc.collect()
return
# Check that all photometry completed
photom_attempts, photom_output_list = CAAPR.CAAPR_Photom.PhotomCheck(photom_attempts, photom_output_list, source_dict, bands_dict, kwargs_dict)
# Record photometry results to file
CAAPR.CAAPR_IO.RecordPhotom(photom_list, source_dict, bands_dict, kwargs_dict)
# Prepare thumbnail images for bands excluded from photometry
CAAPR.CAAPR_Photom.ExcludedThumb(source_dict, bands_dict, kwargs_dict)
# Create grid of thumbnail images
CAAPR.CAAPR_IO.PhotomThumbGrid(source_dict, bands_dict, kwargs_dict)
# Report time taken to do photometry, and tidy up
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Time taken performing actual photometry: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-photom_start,4))+' seconds.'
# Tidy up temporary files and paths
bands_dict = PathTidy(source_dict, bands_dict, kwargs_dict)
# Report time taken for source, and tidy up garbage
gc.collect()
if kwargs_dict['verbose']: print '['+source_dict['name']+'] Total time taken for source: '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(time.time()-source_start,4))+' seconds.'
if kwargs_dict['thumbnails']==True and kwargs_dict['messy']==False:
[os.remove(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',processed_map)) for processed_map in os.listdir(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps')) if '.fits' in processed_map]
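# --- Illustrative sketch (not part of the original pipeline): the
# pool.apply_async pattern used above, reduced to a toy. Results are harvested
# with .get() and failed workers are skipped via .successful(), exactly as in
# the aperture and photometry loops.
def _toy_worker(n):
    return n * n

def _toy_parallel_demo(n_proc=4):
    pool = mp.Pool(processes=n_proc)
    async_out = [pool.apply_async(_toy_worker, args=(n,)) for n in range(8)]
    pool.close()
    pool.join()
    return [r.get() for r in async_out if r.successful()]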
# Define function to check if data actually exists for any band for this source
def SourcePrelim(source_dict, bands_dict, kwargs_dict):
# Check that any of the bands actually have data for this source
kwargs_dict_copy = copy.deepcopy(kwargs_dict)
kwargs_dict_copy['verbose'] = False
bands_check = []
for band in bands_dict.keys():
source_id = source_dict['name']+'_'+bands_dict[band]['band_name']
in_fitspath, file_found = CAAPR.CAAPR_Pipeline.FilePrelim(source_dict, bands_dict[band], kwargs_dict_copy)
bands_check.append(file_found)
# Report to user if no data found
if True not in bands_check:
|
# Return result
if True not in bands_check:
return False
elif True in bands_check:
return True
# Define function that does basic initial handling of band parameters
def BandInitiate(band_dict):
# Make sure band has content
if band_dict==None:
return band_dict
# Parse band cutout request, converting string to boolean if necessary
if band_dict['make_cutout']=='True':
band_dict['make_cutout']=True
elif band_dict['make_cutout']=='False':
band_dict['make_cutout']=False
else:
try:
band_dict['make_cutout'] = float(band_dict['make_cutout'])
except:
raise Exception('Cutout request not understood; should either be False, or width of cutout in arcseconds.')
# Reset band directory to inviolate value, to purge any holdovers from previous source
band_dict['band_dir'] = band_dict['band_dir_inviolate']
# Return band dict
return band_dict
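# --- Illustrative sketch (not part of the original pipeline): how the
# string-typed make_cutout field is interpreted by BandInitiate above.
def _parse_make_cutout(raw):
    if raw == 'True':
        return True
    if raw == 'False':
        return False
    return float(raw) # width of the cutout in arcsec; raises on junk input
# _parse_make_cutout('True') -> True; _parse_make_cutout('120') -> 120.0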
# Define function that performs preliminary checks of file type and location
def FilePrelim(source_dict, band_dict, kwargs_dict):
# Determine whether the user is specifying a directory full of FITS files in this band (in which case use standardised filename format), or just a single FITS file
try:
if os.path.isdir(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'], source_dict['name']+'_'+band_dict['band_name'] )
elif os.path.isfile(band_dict['band_dir']):
in_fitspath = os.path.join( band_dict['band_dir'] )
except:
pdb.set_trace()
# Work out whether the file extension for FITS file in question is .fits or .fits.gz
file_found = False
try:
if os.path.exists(in_fitspath+'.fits'):
in_fitspath = in_fitspath+'.fits'
file_found = True
elif os.path.exists(in_fitspath+'.fits.gz'):
in_fitspath = in_fitspath+'.fits.gz'
file_found = True
except:
raise Exception('Path provided for band '+str(band_dict['band_name'])+' refers to neither a file nor a folder.')
# Return file values
return in_fitspath, file_found
# Initiate the pod (Photometry Organisation Dictionary)
def PodInitiate(in_fitspath, source_dict, band_dict, kwargs_dict):
source_id = source_dict['name']+'_'+band_dict['band_name']
if kwargs_dict['verbose']: print '['+source_id+'] Reading in FITS data.'
# Read in FITS file in question
in_fitsdata = astropy.io.fits.open(in_fitspath)
in_image = in_fitsdata[0].data
in_header = in_fitsdata[0].header
in_fitsdata.close()
in_wcs = astropy.wcs.WCS(in_header)
in_fitspath_size = float(os.stat(in_fitspath).st_size)
# Create the pod (Photometry Organisation Dictionary), which will bundle all the photometry data for this source & band into one dictionary to be passed between functions
pod = {'in_fitspath':in_fitspath,
'in_image':in_image,
'in_header':in_header,
'in_wcs':in_wcs,
'cutout':in_image.copy(),
'output_dir_path':kwargs_dict['output_dir_path'],
'temp_dir_path':kwargs_dict['temp_dir_path'],
'in_fitspath_size':in_fitspath_size,
'id':source_id,
'verbose':kwargs_dict['verbose']}
# Return pod
return pod
# Define function that determines preliminary map values
def MapPrelim(pod, source_dict, band_dict, verbose=False):
if pod['verbose']: print '['+pod['id']+'] Determining properties of map.'
# Check if x & y pixel sizes are meaningfully different. If so, panic; else, treat as same
pix_size = 3600.0 * pod['in_wcs'].wcs.cdelt
if float(abs(pix_size.max()))/float(abs(pix_size.min()))>(1+1E-3):
raise Exception('The x pixel size is noticeably different from the y pixel size.')
else:
pod['pix_arcsec'] = float(np.mean(np.abs(pix_size)))
# Determine source position in cutout in ij coordinates, and size of cutout
centre_xy = pod['in_wcs'].wcs_world2pix( np.array([[ source_dict['ra'], source_dict['dec'] ]]), 0 )
pod['centre_i'], pod['centre_j'] = float(centre_xy[0][1]), float(centre_xy[0][0])
pod['box_rad'] = int( round( float(pod['cutout'].shape[0]) * 0.5 ) )
# Determine beam size in pixels; if beam size not given, then assume map is Nyquist sampled (ie, 2.355 pixels per beam)
if isinstance(band_dict['beam_arcsec'], numbers.Number):
pod['beam_pix'] = float(band_dict['beam_arcsec']) / pod['pix_arcsec']
else:
pod['beam_pix'] = 2.355
# Check if current source lies within bounds of map; if not, fail and return
if pod['centre_i']<0 or pod['centre_i']>(pod['cutout'].shape)[0] or pod['centre_j']<0 or pod['centre_j']>(pod['cutout'].shape)[1]:
pod['within_bounds'] = False
if 'band_dir_inviolate' in band_dict.keys():
band_dict['band_dir'] = band_dict['band_dir_inviolate']
if pod['verbose']: print '['+pod['id']+'] Target not within bounds of map.'
else:
pod['within_bounds'] = True
# Return pod
return pod
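# --- Illustrative sketch (not part of the original pipeline): why MapPrelim
# swaps the wcs_world2pix output above. WCS returns (x, y) pixel coordinates,
# while numpy arrays index as (row i, column j) = (y, x). All values here are
# made up for the demo.
def _demo_world2pix_swap():
    w = astropy.wcs.WCS(naxis=2)
    w.wcs.crpix = [50.0, 40.0] # reference pixel in (x, y), 1-indexed
    w.wcs.cdelt = np.array([-0.001, 0.001]) # degrees per pixel
    w.wcs.crval = [150.0, 2.0] # (RA, Dec) at the reference pixel
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    xy = w.wcs_world2pix(np.array([[150.0, 2.0]]), 0)
    centre_i, centre_j = float(xy[0][1]), float(xy[0][0]) # i <- y, j <- x
    return centre_i, centre_j # (39.0, 49.0), 0-indexed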
# Define function that fits and subtracts polynomial background filter from map
def PolySub(pod, mask_semimaj_pix, mask_axial_ratio, mask_angle, poly_order=5, cutoff_sigma=2.0, instant_quit=False):
if pod['verbose']: print '['+pod['id']+'] Determining if (and how) background is significantly variable.'
# If polynomial background subtraction not wanted, immediately return everything unchanged
if instant_quit:
pod['sky_poly'] = False
return pod
# If image has pixels smaller than some limit, downsample image to improve processing time
pix_size = pod['pix_arcsec']
pix_size_limit = 2.0
if pix_size<pix_size_limit:
downsample_factor = int(np.ceil(pix_size_limit/pix_size))
else:
downsample_factor = 1
image_ds = ChrisFuncs.Downsample(pod['cutout'], downsample_factor)
# Downsample related values accordingly
mask_semimaj_pix = mask_semimaj_pix / downsample_factor
centre_i = int(round((float(pod['centre_i'])/float(downsample_factor))-1.0))
centre_j = int(round((float(pod['centre_j'])/float(downsample_factor))-1.0))
# Find cutoff for excluding bright pixels by sigma-clipping map
clip_value = ChrisFuncs.SigmaClip(image_ds, tolerance=0.01, sigma_thresh=3.0, median=True)
noise_value = clip_value[0]
field_value = clip_value[1]
cutoff = field_value + ( cutoff_sigma * noise_value )
# Mask all image pixels in masking region around source
image_masked = image_ds.copy()
ellipse_mask = ChrisFuncs.Photom.EllipseMask(image_ds, mask_semimaj_pix, mask_axial_ratio, mask_angle, centre_i, centre_j)
image_masked[ np.where( ellipse_mask==1 ) ] = np.nan
# Mask all image pixels identified as being high SNR
image_masked[ np.where( image_masked>cutoff ) ] = np.nan
# Use astropy to set up a 2-dimensional polynomial model for the image
image_masked[ np.where( np.isnan(image_masked)==True ) ] = field_value
poly_model = astropy.modeling.models.Polynomial2D(degree=poly_order)
i_coords, j_coords = np.mgrid[:image_masked.shape[0], :image_masked.shape[1]]
fitter = astropy.modeling.fitting.LevMarLSQFitter()
i_coords = i_coords.flatten()
j_coords = j_coords.flatten()
image_flattened = image_masked.flatten()
good = np.where(np.isnan(image_flattened)==False)
i_coords = i_coords[good]
j_coords = j_coords[good]
# Attempt polynomial fit; if insufficient data then skip onwards
image_flattened = image_flattened[good]
try:
fit = fitter(poly_model, i_coords, j_coords, image_flattened)
except:
if pod['verbose']: print '['+pod['id']+'] Background is not significantly variable; leaving image unaltered.'
pod['sky_poly'] = False
return pod
# Create final polynomial filter (undoing downsampling using lorenzoriano GitHub script)
i_coords, j_coords = np.mgrid[:image_ds.shape[0], :image_ds.shape[1]]
poly_fit = fit(i_coords, j_coords)
poly_full = scipy.ndimage.interpolation.zoom(poly_fit, [ float(pod['cutout'].shape[0])/float(poly_fit.shape[0]), float(pod['cutout'].shape[1])/float(poly_fit.shape[1]) ], mode='nearest') #poly_full = congrid.congrid(poly_fit, (pod['cutout'].shape[0], pod['cutout'].shape[1]), minusone=True)
# Establish background variation before application of filter
sigma_thresh = 3.0
clip_in = ChrisFuncs.SigmaClip(pod['cutout'], tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_in = pod['cutout'][ np.where( pod['cutout']<clip_in[1] ) ]
spread_in = np.mean( np.abs( bg_in - clip_in[1] ) )
# Quantify how much the filter reduced the background variation
image_sub = pod['cutout'] - poly_full
clip_sub = ChrisFuncs.SigmaClip(image_sub, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_sub = image_sub[ np.where( image_sub<clip_sub[1] ) ]
spread_sub = np.mean( np.abs( bg_sub - clip_sub[1] ) )
spread_diff = spread_in / spread_sub
# If the filter made significant difference, apply to image and return it; otherwise, just return the unaltered map
if spread_diff>1.1:
if pod['verbose']: print '['+pod['id']+'] Background is significantly variable; removing polynomial background fit.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = image_sub
pod['sky_poly'] = poly_model
else:
if pod['verbose']: print '['+pod['id']+'] Background is not significantly variable; subtracting median background.'
pod['cutout_nopoly'] = pod['cutout'].copy()
pod['cutout'] = pod['cutout'] - np.median(poly_full)
# pod['sky_poly'] = False
return pod
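# --- Illustrative sketch (not part of the original pipeline): the astropy 2D
# polynomial fit PolySub performs, on a synthetic tilted plane.
def _demo_poly2d_fit():
    i_grid, j_grid = np.mgrid[:32, :32]
    surface = 3.0 + 0.2 * i_grid - 0.1 * j_grid
    model = astropy.modeling.models.Polynomial2D(degree=1)
    fitter = astropy.modeling.fitting.LevMarLSQFitter()
    fit = fitter(model, i_grid.flatten(), j_grid.flatten(), surface.flatten())
    return fit(i_grid, j_grid) # recovers the plane: c0_0~3.0, c1_0~0.2, c0_1~-0.1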
# Define function that tidies up folders and paths after completed processing a source
def PathTidy(source_dict, bands_dict, kwargs_dict):
# If we're not in messy mode, delete temporary directories
if not kwargs_dict['messy']:
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name'])):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'Cutouts',source_dict['name']))
if os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic')):
shutil.rmtree(os.path.join(kwargs_dict['temp_dir_path'],'AstroMagic'))
# Set band directories to standard, not whatever temporary cutout directories may have been used for this source
for band in bands_dict.keys():
if bands_dict[band]==None:
continue
bands_dict[band]['band_dir'] = bands_dict[band]['band_dir_inviolate']
# Define function that predicts time until completion, and produces plot thereof
def TimeEst(time_list, total, output_dir_path, source_dict, kwargs_dict):
# Add current timing to list of timings, and pass to time estimation function to get predicted completion time
time_list.append(time.time())
time_est = ChrisFuncs.TimeEst(time_list, total, plot=True)
time_remaining = time_est[0]
# Write estimated completion time to text file
time_file = open( os.path.join(output_dir_path,'Estimated_Completion_Time.txt'), 'w')
time_file.write(time_remaining)
time_file.close()
# Make plot showing timings so far, and predicted time remaining
time_fig = time_est[1]
time_fig.savefig( os.path.join(output_dir_path,'Estimated_Completion_Time.png'), dpi=150 )
time_fig.clf()
plt.close('all')
# If verbose, report estimated time until completion to user
if kwargs_dict['verbose']: print '['+source_dict['name']+'] CAAPR estimated completion at: '+time_remaining+'.'
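# --- Illustrative sketch (not part of the original pipeline): a minimal
# stand-in for ChrisFuncs.TimeEst. Given timestamps of completed sources and
# the total count, extrapolate the finish time linearly.
def _simple_time_est(time_list, total):
    done = len(time_list) - 1 # first entry marks the start
    if done < 1:
        return None
    per_source = (time_list[-1] - time_list[0]) / float(done)
    return time.ctime(time_list[-1] + per_source * (total - done))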
| print '['+source_id+'] No data found in target directory for current source.'
# Make null entries in tables, as necessary
if kwargs_dict['fit_apertures']==True:
null_aperture_combined = [np.NaN, np.NaN, np.NaN, np.NaN]
CAAPR.CAAPR_IO.RecordAperture(null_aperture_combined, source_dict, kwargs_dict)
if kwargs_dict['do_photom']==True:
CAAPR.CAAPR_IO.RecordPhotom([], source_dict, bands_dict, kwargs_dict) | conditional_block |
index.js | var pageIndex = document.getElementById('index');
var pageRule = document.getElementById('rule');
var pageQuestion1 = document.getElementById('question1');
var pageQuestion2 = document.getElementById('question2');
var pageFail = document.getElementById('fail');
var pageReward = document.getElementById('reward');
var pageAgain = document.getElementById('again');
var pageMessages = document.getElementById('messages');
var pageShare = document.getElementById('share');
var bgMusic = document.getElementById('bg-music');
var btnAudio = document.getElementById('btn-music');
var yun1 = pageIndex.querySelector('.yun1');
var yun2 = pageIndex.querySelector('.yun2'); | var btnRule = pageRule.querySelector('.bottom-btn img');
var btnQuestion1 = pageQuestion1.querySelector('.bottom-btn img');
var btnGifPlay = pageQuestion1.querySelector('.btn-play');
var btnGifImg = pageQuestion1.querySelector('.gif-img');
var btnQuestion2 = pageQuestion2.querySelector('.bottom-btn img');
var btnRestart = pageFail.querySelector('.bottom-btn img');
var btnZhizhen = pageReward.querySelector('.zhizhen');
var btnAgain = pageAgain.querySelector('.bottom-btn img');
var btnMessages = pageMessages.querySelector('.bottom-btn img');
var jiangpinImg = pageMessages.querySelector('.jiangpin img');
var jiangpinName = pageMessages.querySelector('.jiangpin p');
var answer1 = '';
var answer2 = {};
var currentMaster = 1;
var reward = '';
// Drifting clouds animation
function yunduo() {
yun1Left++;
yun1.style.left = -yun1Left+'px';
yun2.style.left = (yun2Left-yun1Left)+'px';
if(yun1Left > yun2Left){
yun1Left = 0;
}
requestAnimationFrame(yunduo);
}
// Hide the page URL shown in the system alert (iframe trick)
window.alert = function(name){
var iframe = document.createElement("IFRAME");
iframe.style.display="none";
iframe.setAttribute("src", 'data:text/plain,');
document.documentElement.appendChild(iframe);
window.frames[0].window.alert(name);
iframe.parentNode.removeChild(iframe);
};
// Preload assets
var imgArr = [
'img/answer1-gif.gif',
'img/answer1-gif-1.jpg',
'img/img-index.png',
'img/bg-messages.png',
'img/bg-question1.png',
'img/bg-question2.png',
'img/bg-rule.png',
'img/bg-zhuanpan.png',
'img/1.jpg',
'img/2.png',
'img/3.jpg',
'img/4.jpg',
'img/wxicon.jpg'
];
var loading = document.querySelector('.loading');
var loadingPro = loading.querySelector('.top');
var imgArrLength = imgArr.length;
var imageKey = 0;
imgArr.forEach(function (val,key) {
var oImg = new Image();
oImg.onload = function(){
oImg.onload = null;
loadingPro.style.width = Math.ceil(100*(++imageKey)/imgArrLength)+'%';
if (imageKey == imgArrLength) {
$('.preload-bg').each(function (i,v) {
v.style.backgroundImage = 'url('+v.dataset.preload_src+')';
});
$('.preload-img').each(function (i,v) {
v.src = v.dataset.preload_src;
});
loading.classList.add('none');
pageIndex.classList.remove('none');
// pageReward.classList.remove('none');
// pageMessages.classList.remove('none');
requestAnimationFrame(yunduo);
document.addEventListener("WeixinJSBridgeReady", function () {//微信
bgMusic.play();
}, false);
}
};
oImg.src = val;
});
btnStart.onclick = function () {
btnAudio.play();
pageIndex.classList.add('none');
pageRule.classList.remove('none');
};
btnRule.onclick = function () {
btnAudio.play();
pageRule.classList.add('none');
pageQuestion1.classList.remove('none');
};
// Question 1: select an answer and record it
$('#question1 .answer1').each(function (i,val) {
val.onclick =function () {
btnAudio.play();
$('#question1 .answer1').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
answer1 = val.dataset.value;
}
});
// Question 1: click the button to play the GIF
btnGifPlay.onclick = function () {
btnAudio.play();
var _this = this;
var src = btnGifImg.src;
var dataSrc = btnGifImg.dataset.src;
_this.classList.add('none');
setTimeout(function () {
btnGifImg.src = dataSrc;
},500);
setTimeout(function () {
_this.classList.remove('none');
btnGifImg.src = src;
},9500);
};
btnQuestion1.onclick = function () {
btnAudio.play();
if (answer1) {
pageQuestion1.classList.add('none');
pageQuestion2.classList.remove('none');
}
};
// Question 2: select a master (monk)
$('.master-small img').each(function (key,val) {
val.onclick =function () {
btnAudio.play();
$('.bingqi img').each(function (i,v) {
v.classList.remove('active');
});
if (answer2['as'+(key+1)]) {
pageQuestion2.querySelectorAll('.bingqi img')[answer2['as'+(key+1)]-1].classList.add('active');
}
$('.master-small img').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
$('.master-big').each(function (i,v) {
v.classList.add('none');
});
pageQuestion2.querySelectorAll('.master-big')[key].classList.remove('none');
currentMaster = key+1;
}
});
// Question 2: select a weapon
$('.bingqi img').each(function (key,val) {
val.onclick = function () {
btnAudio.play();
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
val.classList.add('active');
answer2['as'+currentMaster] = key+1;
}
});
btnQuestion2.onclick = function () {
btnAudio.play();
if (answer2.as3 && answer2.as1 && answer2.as2) {
if ((answer2.as1 === 1) && (answer2.as2 === 2) && (answer2.as3 === 4) && (answer1=='c')) {
pageQuestion2.classList.add('none');
pageReward.classList.remove('none');
} else {
pageFail.classList.remove('none');
}
}
else {
alert ('注意!三位大师都要匹配相应兵器哦~');
}
};
// Restart
btnRestart.onclick = function () {
btnAudio.play();
pageFail.classList.add('none');
pageIndex.classList.remove('none');
// Clear previous answers
pageQuestion1.querySelectorAll('.answer1').forEach(function (v) {
v.classList.remove('active');
});
answer1 = '';
pageQuestion2.querySelectorAll('.master-small img').forEach(function (v) {
v.classList.remove('active');
});
pageQuestion2.querySelectorAll('.master-small img')[0].classList.add('active');
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
answer2 = {};
currentMaster = 1;
};
// Prize draw
var is_click = 1;
btnZhizhen.onclick = function () {
btnAudio.play();
if (!is_click) {
return false;
}
var _this = this;
var zhongjiang = 1;
is_click = 0;
// Request the draw result to decide where the pointer stops
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/getHomeLuckdraw',
data: {
openid: open_id
},
dataType: 'json',
success: function(data){
reward = data.date.yes || '5';
if (reward == '1') {
_this.classList.add('yideng');
jiangpinImg.src = 'img/1.jpg';
jiangpinName.innerHTML = '昂坪360 精美行李带<span>(价值HK$120)</span>';
} else if(reward == '2') {
var erdeng = (Math.random()>0.5)? 'erdeng1':'erdeng2';
_this.classList.add(erdeng);
jiangpinImg.src = 'img/2.png';
jiangpinName.innerHTML = '昂坪360 化妆品收纳袋<span>(价值HK$120)</span>';
} else if(reward == '3') {
var sandeng = (Math.random()>0.5)? 'sandeng1':'sandeng2';
_this.classList.add(sandeng);
jiangpinImg.src = 'img/3.jpg';
jiangpinName.innerHTML = '昂坪360 缆车小钱包<span>(价值HK$130)</span>';
} else if(reward == '4') {
var youxiu = (Math.random()>0.5)? 'youxiu1':'youxiu2';
_this.classList.add(youxiu);
jiangpinImg.src = 'img/4.jpg';
jiangpinName.innerHTML = '驴妈妈 小驴公仔';
} else {
var xiexie = (Math.random()>0.5)? 'xiexie1':'xiexie2';
_this.classList.add(xiexie);
zhongjiang = 0
}
setTimeout(function () {
if (zhongjiang) {
pageReward.classList.add('none');
pageMessages.classList.remove('none');
} else {
pageAgain.classList.remove('none');
}
},5200)
}
});
};
// Try again
btnAgain.onclick = function () {
btnAudio.play();
btnZhizhen.className = 'zhizhen';
is_click = 1;
pageAgain.classList.add('none');
pageReward.classList.remove('none');
};
btnMessages.onclick = function () {
btnAudio.play();
var user_name = pageMessages.querySelector('input[name=user_name]').value;
var user_address = pageMessages.querySelector('input[name=user_address]').value;
var user_phone = pageMessages.querySelector('input[name=user_phone]').value;
if (user_name && user_address && user_phone) {
var user_messages = {
name: user_name,
address: user_address,
mobile: user_phone,
openid :open_id
};
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/saveLuckUser',
data: user_messages,
dataType: 'json',
success: function(data){
if (data.date == 'yes') {
pageShare.classList.remove('none');
}
}
});
}
};
// WeChat sharing
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/console',
data: {
url: location.href.split('#')[0]
},
dataType: 'json',
success: function(res){
wx.config({
debug: 0,
appId: res.appId,
timestamp: res.timestamp,
nonceStr: res.nonceStr,
signature: res.signature,
jsApiList: ['onMenuShareTimeline', 'onMenuShareAppMessage']
});
wx.ready(function () {
window.wxData = {// Share with a friend
title: '昂坪360放大招,你接还是不接?',
desc: '不接不合适吧!',
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
window.wxDataTiemline = {// Share to Moments
title: '昂坪360放大招,你接还是不接?',
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
wx.onMenuShareAppMessage(wxData);
wx.onMenuShareTimeline(wxDataTiemline);
});
}
}); | var yun1Left = 0;
var yun2Left = parseInt(window.getComputedStyle(yun2).left);
var btnStart = pageIndex.querySelector('.bottom-btn img'); | random_line_split |
index.js |
var pageIndex = document.getElementById('index');
var pageRule = document.getElementById('rule');
var pageQuestion1 = document.getElementById('question1');
var pageQuestion2 = document.getElementById('question2');
var pageFail = document.getElementById('fail');
var pageReward = document.getElementById('reward');
var pageAgain = document.getElementById('again');
var pageMessages = document.getElementById('messages');
var pageShare = document.getElementById('share');
var bgMusic = document.getElementById('bg-music');
var btnAudio = document.getElementById('btn-music');
var yun1 = pageIndex.querySelector('.yun1');
var yun2 = pageIndex.querySelector('.yun2');
var yun1Left = 0;
var yun2Left = parseInt(window.getComputedStyle(yun2).left);
var btnStart = pageIndex.querySelector('.bottom-btn img');
var btnRule = pageRule.querySelector('.bottom-btn img');
var btnQuestion1 = pageQuestion1.querySelector('.bottom-btn img');
var btnGifPlay = pageQuestion1.querySelector('.btn-play');
var btnGifImg = pageQuestion1.querySelector('.gif-img');
var btnQuestion2 = pageQuestion2.querySelector('.bottom-btn img');
var btnRestart = pageFail.querySelector('.bottom-btn img');
var btnZhizhen = pageReward.querySelector('.zhizhen');
var btnAgain = pageAgain.querySelector('.bottom-btn img');
var btnMessages = pageMessages.querySelector('.bottom-btn img');
var jiangpinImg = pageMessages.querySelector('.jiangpin img');
var jiangpinName = pageMessages.querySelector('.jiangpin p');
var answer1 = '';
var answer2 = {};
var currentMaster = 1;
var reward = '';
// Drifting clouds animation
function yunduo() | yun1Left++;
yun1.style.left = -yun1Left+'px';
yun2.style.left = (yun2Left-yun1Left)+'px';
if(yun1Left > yun2Left){
yun1Left = 0;
}
requestAnimationFrame(yunduo);
}
// Hide the page URL shown in the system alert (iframe trick)
window.alert = function(name){
var iframe = document.createElement("IFRAME");
iframe.style.display="none";
iframe.setAttribute("src", 'data:text/plain,');
document.documentElement.appendChild(iframe);
window.frames[0].window.alert(name);
iframe.parentNode.removeChild(iframe);
};
// Preload assets
var imgArr = [
'img/answer1-gif.gif',
'img/answer1-gif-1.jpg',
'img/img-index.png',
'img/bg-messages.png',
'img/bg-question1.png',
'img/bg-question2.png',
'img/bg-rule.png',
'img/bg-zhuanpan.png',
'img/1.jpg',
'img/2.png',
'img/3.jpg',
'img/4.jpg',
'img/wxicon.jpg'
];
var loading = document.querySelector('.loading');
var loadingPro = loading.querySelector('.top');
var imgArrLength = imgArr.length;
var imageKey = 0;
imgArr.forEach(function (val,key) {
var oImg = new Image();
oImg.onload = function(){
oImg.onload = null;
loadingPro.style.width = Math.ceil(100*(++imageKey)/imgArrLength)+'%';
if (imageKey == imgArrLength) {
$('.preload-bg').each(function (i,v) {
v.style.backgroundImage = 'url('+v.dataset.preload_src+')';
});
$('.preload-img').each(function (i,v) {
v.src = v.dataset.preload_src;
});
loading.classList.add('none');
pageIndex.classList.remove('none');
// pageReward.classList.remove('none');
// pageMessages.classList.remove('none');
requestAnimationFrame(yunduo);
document.addEventListener("WeixinJSBridgeReady", function () {//微信
bgMusic.play();
}, false);
}
};
oImg.src = val;
});
btnStart.onclick = function () {
btnAudio.play();
pageIndex.classList.add('none');
pageRule.classList.remove('none');
};
btnRule.onclick = function () {
btnAudio.play();
pageRule.classList.add('none');
pageQuestion1.classList.remove('none');
};
// Question 1: select an answer and record it
$('#question1 .answer1').each(function (i,val) {
val.onclick =function () {
btnAudio.play();
$('#question1 .answer1').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
answer1 = val.dataset.value;
}
});
// Question 1: click the button to play the GIF
btnGifPlay.onclick = function () {
btnAudio.play();
var _this = this;
var src = btnGifImg.src;
var dataSrc = btnGifImg.dataset.src;
_this.classList.add('none');
setTimeout(function () {
btnGifImg.src = dataSrc;
},500);
setTimeout(function () {
_this.classList.remove('none');
btnGifImg.src = src;
},9500);
};
btnQuestion1.onclick = function () {
btnAudio.play();
if (answer1) {
pageQuestion1.classList.add('none');
pageQuestion2.classList.remove('none');
}
};
// Question 2: select a master (monk)
$('.master-small img').each(function (key,val) {
val.onclick =function () {
btnAudio.play();
$('.bingqi img').each(function (i,v) {
v.classList.remove('active');
});
if (answer2['as'+(key+1)]) {
pageQuestion2.querySelectorAll('.bingqi img')[answer2['as'+(key+1)]-1].classList.add('active');
}
$('.master-small img').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
$('.master-big').each(function (i,v) {
v.classList.add('none');
});
pageQuestion2.querySelectorAll('.master-big')[key].classList.remove('none');
currentMaster = key+1;
}
});
// Question 2: select a weapon
$('.bingqi img').each(function (key,val) {
val.onclick = function () {
btnAudio.play();
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
val.classList.add('active');
answer2['as'+currentMaster] = key+1;
}
});
btnQuestion2.onclick = function () {
btnAudio.play();
if (answer2.as3 && answer2.as1 && answer2.as2) {
if ((answer2.as1 === 1) && (answer2.as2 === 2) && (answer2.as3 === 4) && (answer1=='c')) {
pageQuestion2.classList.add('none');
pageReward.classList.remove('none');
} else {
pageFail.classList.remove('none');
}
}
else {
alert ('注意!三位大师都要匹配相应兵器哦~');
}
};
// Restart
btnRestart.onclick = function () {
btnAudio.play();
pageFail.classList.add('none');
pageIndex.classList.remove('none');
// Clear previous answers
pageQuestion1.querySelectorAll('.answer1').forEach(function (v) {
v.classList.remove('active');
});
answer1 = '';
pageQuestion2.querySelectorAll('.master-small img').forEach(function (v) {
v.classList.remove('active');
});
pageQuestion2.querySelectorAll('.master-small img')[0].classList.add('active');
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
answer2 = {};
currentMaster = 1;
};
// Prize draw
var is_click = 1;
btnZhizhen.onclick = function () {
btnAudio.play();
if (!is_click) {
return false;
}
var _this = this;
var zhongjiang = 1;
is_click = 0;
// Request the draw result to decide where the pointer stops
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/getHomeLuckdraw',
data: {
openid: open_id
},
dataType: 'json',
success: function(data){
reward = data.date.yes || '5';
if (reward == '1') {
_this.classList.add('yideng');
jiangpinImg.src = 'img/1.jpg';
jiangpinName.innerHTML = '昂坪360 精美行李带<span>(价值HK$120)</span>';
} else if(reward == '2') {
var erdeng = (Math.random()>0.5)? 'erdeng1':'erdeng2';
_this.classList.add(erdeng);
jiangpinImg.src = 'img/2.png';
jiangpinName.innerHTML = '昂坪360 化妆品收纳袋<span>(价值HK$120)</span>';
} else if(reward == '3') {
var sandeng = (Math.random()>0.5)? 'sandeng1':'sandeng2';
_this.classList.add(sandeng);
jiangpinImg.src = 'img/3.jpg';
jiangpinName.innerHTML = '昂坪360 缆车小钱包<span>(价值HK$130)</span>';
} else if(reward == '4') {
var youxiu = (Math.random()>0.5)? 'youxiu1':'youxiu2';
_this.classList.add(youxiu);
jiangpinImg.src = 'img/4.jpg';
jiangpinName.innerHTML = '驴妈妈 小驴公仔';
} else {
var xiexie = (Math.random()>0.5)? 'xiexie1':'xiexie2';
_this.classList.add(xiexie);
zhongjiang = 0
}
setTimeout(function () {
if (zhongjiang) {
pageReward.classList.add('none');
pageMessages.classList.remove('none');
} else {
pageAgain.classList.remove('none');
}
},5200)
}
});
};
// Try again
btnAgain.onclick = function () {
btnAudio.play();
btnZhizhen.className = 'zhizhen';
is_click = 1;
pageAgain.classList.add('none');
pageReward.classList.remove('none');
};
btnMessages.onclick = function () {
btnAudio.play();
var user_name = pageMessages.querySelector('input[name=user_name]').value;
var user_address = pageMessages.querySelector('input[name=user_address]').value;
var user_phone = pageMessages.querySelector('input[name=user_phone]').value;
if (user_name && user_address && user_phone) {
var user_messages = {
name: user_name,
address: user_address,
mobile: user_phone,
openid :open_id
};
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/saveLuckUser',
data: user_messages,
dataType: 'json',
success: function(data){
if (data.date == 'yes') {
pageShare.classList.remove('none');
}
}
});
}
};
// WeChat sharing
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/console',
data: {
url: location.href.split('#')[0]
},
dataType: 'json',
success: function(res){
wx.config({
debug: 0,
appId: res.appId,
timestamp: res.timestamp,
nonceStr: res.nonceStr,
signature: res.signature,
jsApiList: ['onMenuShareTimeline', 'onMenuShareAppMessage']
});
wx.ready(function () {
window.wxData = {// Share with a friend
title: '昂坪360放大招,你接还是不接?',
desc: '不接不合适吧!',
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
window.wxDataTiemline = {// Share to Moments
title: '昂坪360放大招,你接还是不接?',
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
wx.onMenuShareAppMessage(wxData);
wx.onMenuShareTimeline(wxDataTiemline);
});
}
});
| {
| identifier_name |
index.js |
var pageIndex = document.getElementById('index');
var pageRule = document.getElementById('rule');
var pageQuestion1 = document.getElementById('question1');
var pageQuestion2 = document.getElementById('question2');
var pageFail = document.getElementById('fail');
var pageReward = document.getElementById('reward');
var pageAgain = document.getElementById('again');
var pageMessages = document.getElementById('messages');
var pageShare = document.getElementById('share');
var bgMusic = document.getElementById('bg-music');
var btnAudio = document.getElementById('btn-music');
var yun1 = pageIndex.querySelector('.yun1');
var yun2 = pageIndex.querySelector('.yun2');
var yun1Left = 0;
var yun2Left = parseInt(window.getComputedStyle(yun2).left);
var btnStart = pageIndex.querySelector('.bottom-btn img');
var btnRule = pageRule.querySelector('.bottom-btn img');
var btnQuestion1 = pageQuestion1.querySelector('.bottom-btn img');
var btnGifPlay = pageQuestion1.querySelector('.btn-play');
var btnGifImg = pageQuestion1.querySelector('.gif-img');
var btnQuestion2 = pageQuestion2.querySelector('.bottom-btn img');
var btnRestart = pageFail.querySelector('.bottom-btn img');
var btnZhizhen = pageReward.querySelector('.zhizhen');
var btnAgain = pageAgain.querySelector('.bottom-btn img');
var btnMessages = pageMessages.querySelector('.bottom-btn img');
var jiangpinImg = pageMessages.querySelector('.jiangpin img');
var jiangpinName = pageMessages.querySelector('.jiangpin p');
var answer1 = '';
var answer2 = {};
var currentMaster = 1;
var reward = '';
// Drifting clouds animation
function yunduo() {
yu | 系统alerturl
window.alert = function(name){
var iframe = document.createElement("IFRAME");
iframe.style.display="none";
iframe.setAttribute("src", 'data:text/plain,');
document.documentElement.appendChild(iframe);
window.frames[0].window.alert(name);
iframe.parentNode.removeChild(iframe);
};
// Preload assets
var imgArr = [
'img/answer1-gif.gif',
'img/answer1-gif-1.jpg',
'img/img-index.png',
'img/bg-messages.png',
'img/bg-question1.png',
'img/bg-question2.png',
'img/bg-rule.png',
'img/bg-zhuanpan.png',
'img/1.jpg',
'img/2.png',
'img/3.jpg',
'img/4.jpg',
'img/wxicon.jpg'
];
var loading = document.querySelector('.loading');
var loadingPro = loading.querySelector('.top');
var imgArrLength = imgArr.length;
var imageKey = 0;
imgArr.forEach(function (val,key) {
var oImg = new Image();
oImg.onload = function(){
oImg.onload = null;
loadingPro.style.width = Math.ceil(100*(++imageKey)/imgArrLength)+'%';
if (imageKey == imgArrLength) {
$('.preload-bg').each(function (i,v) {
v.style.backgroundImage = 'url('+v.dataset.preload_src+')';
});
$('.preload-img').each(function (i,v) {
v.src = v.dataset.preload_src;
});
loading.classList.add('none');
pageIndex.classList.remove('none');
// pageReward.classList.remove('none');
// pageMessages.classList.remove('none');
requestAnimationFrame(yunduo);
document.addEventListener("WeixinJSBridgeReady", function () {//微信
bgMusic.play();
}, false);
}
};
oImg.src = val;
});
btnStart.onclick = function () {
btnAudio.play();
pageIndex.classList.add('none');
pageRule.classList.remove('none');
};
btnRule.onclick = function () {
btnAudio.play();
pageRule.classList.add('none');
pageQuestion1.classList.remove('none');
};
// Question 1: select an answer and record it
$('#question1 .answer1').each(function (i,val) {
val.onclick =function () {
btnAudio.play();
$('#question1 .answer1').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
answer1 = val.dataset.value;
}
});
// Question 1: click the button to play the GIF
btnGifPlay.onclick = function () {
btnAudio.play();
var _this = this;
var src = btnGifImg.src;
var dataSrc = btnGifImg.dataset.src;
_this.classList.add('none');
setTimeout(function () {
btnGifImg.src = dataSrc;
},500);
setTimeout(function () {
_this.classList.remove('none');
btnGifImg.src = src;
},9500);
};
btnQuestion1.onclick = function () {
btnAudio.play();
if (answer1) {
pageQuestion1.classList.add('none');
pageQuestion2.classList.remove('none');
}
};
// Question 2: select a master (monk)
$('.master-small img').each(function (key,val) {
val.onclick =function () {
btnAudio.play();
$('.bingqi img').each(function (i,v) {
v.classList.remove('active');
});
if (answer2['as'+(key+1)]) {
pageQuestion2.querySelectorAll('.bingqi img')[answer2['as'+(key+1)]-1].classList.add('active');
}
$('.master-small img').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
$('.master-big').each(function (i,v) {
v.classList.add('none');
});
pageQuestion2.querySelectorAll('.master-big')[key].classList.remove('none');
currentMaster = key+1;
}
});
// Question 2: select a weapon
$('.bingqi img').each(function (key,val) {
val.onclick = function () {
btnAudio.play();
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
val.classList.add('active');
answer2['as'+currentMaster] = key+1;
}
});
btnQuestion2.onclick = function () {
btnAudio.play();
if (answer2.as3 && answer2.as1 && answer2.as2) {
if ((answer2.as1 === 1) && (answer2.as2 === 2) && (answer2.as3 === 4) && (answer1=='c')) {
pageQuestion2.classList.add('none');
pageReward.classList.remove('none');
} else {
pageFail.classList.remove('none');
}
}
else {
alert ('注意!三位大师都要匹配相应兵器哦~');
}
};
// Restart
btnRestart.onclick = function () {
btnAudio.play();
pageFail.classList.add('none');
pageIndex.classList.remove('none');
// Clear previous answers
pageQuestion1.querySelectorAll('.answer1').forEach(function (v) {
v.classList.remove('active');
});
answer1 = '';
pageQuestion2.querySelectorAll('.master-small img').forEach(function (v) {
v.classList.remove('active');
});
pageQuestion2.querySelectorAll('.master-small img')[0].classList.add('active');
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
answer2 = {};
currentMaster = 1;
};
// Prize draw
var is_click = 1;
btnZhizhen.onclick = function () {
btnAudio.play();
if (!is_click) {
return false;
}
var _this = this;
var zhongjiang = 1;
is_click = 0;
// Request the draw result to decide where the pointer stops
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/getHomeLuckdraw',
data: {
openid: open_id
},
dataType: 'json',
success: function(data){
reward = data.date.yes || '5';
if (reward == '1') {
_this.classList.add('yideng');
jiangpinImg.src = 'img/1.jpg';
jiangpinName.innerHTML = '昂坪360 精美行李带<span>(价值HK$120)</span>';
} else if(reward == '2') {
var erdeng = (Math.random()>0.5)? 'erdeng1':'erdeng2';
_this.classList.add(erdeng);
jiangpinImg.src = 'img/2.png';
jiangpinName.innerHTML = '昂坪360 化妆品收纳袋<span>(价值HK$120)</span>';
} else if(reward == '3') {
var sandeng = (Math.random()>0.5)? 'sandeng1':'sandeng2';
_this.classList.add(sandeng);
jiangpinImg.src = 'img/3.jpg';
jiangpinName.innerHTML = '昂坪360 缆车小钱包<span>(价值HK$130)</span>';
} else if(reward == '4') {
var youxiu = (Math.random()>0.5)? 'youxiu1':'youxiu2';
_this.classList.add(youxiu);
jiangpinImg.src = 'img/4.jpg';
jiangpinName.innerHTML = '驴妈妈 小驴公仔';
} else {
var xiexie = (Math.random()>0.5)? 'xiexie1':'xiexie2';
_this.classList.add(xiexie);
zhongjiang = 0
}
setTimeout(function () {
if (zhongjiang) {
pageReward.classList.add('none');
pageMessages.classList.remove('none');
} else {
pageAgain.classList.remove('none');
}
},5200)
}
});
};
// Try again
btnAgain.onclick = function () {
btnAudio.play();
btnZhizhen.className = 'zhizhen';
is_click = 1;
pageAgain.classList.add('none');
pageReward.classList.remove('none');
};
btnMessages.onclick = function () {
btnAudio.play();
var user_name = pageMessages.querySelector('input[name=user_name]').value;
var user_address = pageMessages.querySelector('input[name=user_address]').value;
var user_phone = pageMessages.querySelector('input[name=user_phone]').value;
if (user_name && user_address && user_phone) {
var user_messages = {
name: user_name,
address: user_address,
mobile: user_phone,
openid :open_id
};
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/saveLuckUser',
data: user_messages,
dataType: 'json',
success: function(data){
if (data.date == 'yes') {
pageShare.classList.remove('none');
}
}
});
}
};
// WeChat sharing
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/console',
data: {
url: location.href.split('#')[0]
},
dataType: 'json',
success: function(res){
wx.config({
debug: 0,
appId: res.appId,
timestamp: res.timestamp,
nonceStr: res.nonceStr,
signature: res.signature,
jsApiList: ['onMenuShareTimeline', 'onMenuShareAppMessage']
});
wx.ready(function () {
window.wxData = {// Share with a friend
title: '昂坪360放大招,你接还是不接?',
desc: '不接不合适吧!',
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
window.wxDataTiemline = {// Share to Moments
title: '昂坪360放大招,你接还是不接?',
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
wx.onMenuShareAppMessage(wxData);
wx.onMenuShareTimeline(wxDataTiemline);
});
}
});
| n1Left++;
yun1.style.left = -yun1Left+'px';
yun2.style.left = (yun2Left-yun1Left)+'px';
if(yun1Left > yun2Left){
yun1Left = 0;
}
requestAnimationFrame(yunduo);
}
//隐藏 | identifier_body |
index.js |
var pageIndex = document.getElementById('index');
var pageRule = document.getElementById('rule');
var pageQuestion1 = document.getElementById('question1');
var pageQuestion2 = document.getElementById('question2');
var pageFail = document.getElementById('fail');
var pageReward = document.getElementById('reward');
var pageAgain = document.getElementById('again');
var pageMessages = document.getElementById('messages');
var pageShare = document.getElementById('share');
var bgMusic = document.getElementById('bg-music');
var btnAudio = document.getElementById('btn-music');
var yun1 = pageIndex.querySelector('.yun1');
var yun2 = pageIndex.querySelector('.yun2');
var yun1Left = 0;
var yun2Left = parseInt(window.getComputedStyle(yun2).left);
var btnStart = pageIndex.querySelector('.bottom-btn img');
var btnRule = pageRule.querySelector('.bottom-btn img');
var btnQuestion1 = pageQuestion1.querySelector('.bottom-btn img');
var btnGifPlay = pageQuestion1.querySelector('.btn-play');
var btnGifImg = pageQuestion1.querySelector('.gif-img');
var btnQuestion2 = pageQuestion2.querySelector('.bottom-btn img');
var btnRestart = pageFail.querySelector('.bottom-btn img');
var btnZhizhen = pageReward.querySelector('.zhizhen');
var btnAgain = pageAgain.querySelector('.bottom-btn img');
var btnMessages = pageMessages.querySelector('.bottom-btn img');
var jiangpinImg = pageMessages.querySelector('.jiangpin img');
var jiangpinName = pageMessages.querySelector('.jiangpin p');
var answer1 = '';
var answer2 = {};
var currentMaster = 1;
var reward = '';
// Drifting clouds animation
function yunduo() {
yun1Left++;
yun1.style.left = -yun1Left+'px';
yun2.style.left = (yun2Left-yun1Left)+'px';
if(yun1Left > yun2Left){
| uestAnimationFrame(yunduo);
}
// Hide the page URL shown in the system alert (iframe trick)
window.alert = function(name){
var iframe = document.createElement("IFRAME");
iframe.style.display="none";
iframe.setAttribute("src", 'data:text/plain,');
document.documentElement.appendChild(iframe);
window.frames[0].window.alert(name);
iframe.parentNode.removeChild(iframe);
};
// Preload assets
var imgArr = [
'img/answer1-gif.gif',
'img/answer1-gif-1.jpg',
'img/img-index.png',
'img/bg-messages.png',
'img/bg-question1.png',
'img/bg-question2.png',
'img/bg-rule.png',
'img/bg-zhuanpan.png',
'img/1.jpg',
'img/2.png',
'img/3.jpg',
'img/4.jpg',
'img/wxicon.jpg'
];
var loading = document.querySelector('.loading');
var loadingPro = loading.querySelector('.top');
var imgArrLength = imgArr.length;
var imageKey = 0;
imgArr.forEach(function (val,key) {
var oImg = new Image();
oImg.onload = function(){
oImg.onload = null;
loadingPro.style.width = Math.ceil(100*(++imageKey)/imgArrLength)+'%';
if (imageKey == imgArrLength) {
$('.preload-bg').each(function (i,v) {
v.style.backgroundImage = 'url('+v.dataset.preload_src+')';
});
$('.preload-img').each(function (i,v) {
v.src = v.dataset.preload_src;
});
loading.classList.add('none');
pageIndex.classList.remove('none');
// pageReward.classList.remove('none');
// pageMessages.classList.remove('none');
requestAnimationFrame(yunduo);
document.addEventListener("WeixinJSBridgeReady", function () {//微信
bgMusic.play();
}, false);
}
};
oImg.src = val;
});
btnStart.onclick = function () {
btnAudio.play();
pageIndex.classList.add('none');
pageRule.classList.remove('none');
};
btnRule.onclick = function () {
btnAudio.play();
pageRule.classList.add('none');
pageQuestion1.classList.remove('none');
};
// Question 1: select an answer and record it
$('#question1 .answer1').each(function (i,val) {
val.onclick =function () {
btnAudio.play();
$('#question1 .answer1').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
answer1 = val.dataset.value;
}
});
// Question 1: click the button to play the GIF
btnGifPlay.onclick = function () {
btnAudio.play();
var _this = this;
var src = btnGifImg.src;
var dataSrc = btnGifImg.dataset.src;
_this.classList.add('none');
setTimeout(function () {
btnGifImg.src = dataSrc;
},500);
setTimeout(function () {
_this.classList.remove('none');
btnGifImg.src = src;
},9500);
};
btnQuestion1.onclick = function () {
btnAudio.play();
if (answer1) {
pageQuestion1.classList.add('none');
pageQuestion2.classList.remove('none');
}
};
// Question 2: select a master (monk)
$('.master-small img').each(function (key,val) {
val.onclick =function () {
btnAudio.play();
$('.bingqi img').each(function (i,v) {
v.classList.remove('active');
});
if (answer2['as'+(key+1)]) {
pageQuestion2.querySelectorAll('.bingqi img')[answer2['as'+(key+1)]-1].classList.add('active');
}
$('.master-small img').each(function (i,v) {
v.classList.remove('active');
});
val.classList.add('active');
$('.master-big').each(function (i,v) {
v.classList.add('none');
});
pageQuestion2.querySelectorAll('.master-big')[key].classList.remove('none');
currentMaster = key+1;
}
});
// Question 2: select a weapon
$('.bingqi img').each(function (key,val) {
val.onclick = function () {
btnAudio.play();
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
val.classList.add('active');
answer2['as'+currentMaster] = key+1;
}
});
btnQuestion2.onclick = function () {
btnAudio.play();
if (answer2.as3 && answer2.as1 && answer2.as2) {
if ((answer2.as1 === 1) && (answer2.as2 === 2) && (answer2.as3 === 4) && (answer1=='c')) {
pageQuestion2.classList.add('none');
pageReward.classList.remove('none');
} else {
pageFail.classList.remove('none');
}
}
else {
alert ('注意!三位大师都要匹配相应兵器哦~');
}
};
// Restart
btnRestart.onclick = function () {
btnAudio.play();
pageFail.classList.add('none');
pageIndex.classList.remove('none');
// Clear previous answers
pageQuestion1.querySelectorAll('.answer1').forEach(function (v) {
v.classList.remove('active');
});
answer1 = '';
pageQuestion2.querySelectorAll('.master-small img').forEach(function (v) {
v.classList.remove('active');
});
pageQuestion2.querySelectorAll('.master-small img')[0].classList.add('active');
pageQuestion2.querySelectorAll('.bingqi img').forEach(function (v) {
v.classList.remove('active');
});
answer2 = {};
currentMaster = 1;
};
// Prize draw
var is_click = 1;
btnZhizhen.onclick = function () {
btnAudio.play();
if (!is_click) {
return false;
}
var _this = this;
var zhongjiang = 1;
is_click = 0;
// Request the draw result to decide where the pointer stops
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/getHomeLuckdraw',
data: {
openid: open_id
},
dataType: 'json',
success: function(data){
reward = data.date.yes || '5';
if (reward == '1') {
_this.classList.add('yideng');
jiangpinImg.src = 'img/1.jpg';
jiangpinName.innerHTML = '昂坪360 精美行李带<span>(价值HK$120)</span>';
} else if(reward == '2') {
var erdeng = (Math.random()>0.5)? 'erdeng1':'erdeng2';
_this.classList.add(erdeng);
jiangpinImg.src = 'img/2.png';
jiangpinName.innerHTML = '昂坪360 化妆品收纳袋<span>(价值HK$120)</span>';
} else if(reward == '3') {
var sandeng = (Math.random()>0.5)? 'sandeng1':'sandeng2';
_this.classList.add(sandeng);
jiangpinImg.src = 'img/3.jpg';
jiangpinName.innerHTML = '昂坪360 缆车小钱包<span>(价值HK$130)</span>';
} else if(reward == '4') {
var youxiu = (Math.random()>0.5)? 'youxiu1':'youxiu2';
_this.classList.add(youxiu);
jiangpinImg.src = 'img/4.jpg';
jiangpinName.innerHTML = '驴妈妈 小驴公仔';
} else {
var xiexie = (Math.random()>0.5)? 'xiexie1':'xiexie2';
_this.classList.add(xiexie);
zhongjiang = 0
}
setTimeout(function () {
if (zhongjiang) {
pageReward.classList.add('none');
pageMessages.classList.remove('none');
} else {
pageAgain.classList.remove('none');
}
},5200)
}
});
};
// Try again
btnAgain.onclick = function () {
btnAudio.play();
btnZhizhen.className = 'zhizhen';
is_click = 1;
pageAgain.classList.add('none');
pageReward.classList.remove('none');
};
btnMessages.onclick = function () {
btnAudio.play();
var user_name = pageMessages.querySelector('input[name=user_name]').value;
var user_address = pageMessages.querySelector('input[name=user_address]').value;
var user_phone = pageMessages.querySelector('input[name=user_phone]').value;
if (user_name && user_address && user_phone) {
var user_messages = {
name: user_name,
address: user_address,
mobile: user_phone,
openid :open_id
};
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/saveLuckUser',
data: user_messages,
dataType: 'json',
success: function(data){
if (data.date === 'yes') {
pageShare.classList.remove('none');
}
}
});
}
};
// WeChat share config
$.ajax({
type: 'post',
url: 'https://wx.ouu.me/home/console',
data: {
url: location.href.split('#')[0]
},
dataType: 'json',
success: function(res){
wx.config({
debug: 0,
appId: res.appId,
timestamp: res.timestamp,
nonceStr: res.nonceStr,
signature: res.signature,
jsApiList: ['onMenuShareTimeline', 'onMenuShareAppMessage']
});
wx.ready(function () {
window.wxData = {// share with a friend
title: '昂坪360放大招,你接还是不接?', // "Ngong Ping 360 makes its big move - will you take it on?"
desc: '不接不合适吧!', // "Refusing wouldn't be right!"
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
window.wxDataTimeline = {// share to Moments
title: '昂坪360放大招,你接还是不接?', // "Ngong Ping 360 makes its big move - will you take it on?"
link: 'http://lmm.itbuluo.top/angping',
imgUrl: 'http://lmm.itbuluo.top/angping/img/wxicon.jpg',
type: 'link',
success: function () {},
cancel: function () {}
};
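// onMenuShareAppMessage/onMenuShareTimeline are the legacy WeChat JS-SDK share
// APIs; newer SDK versions replace them with updateAppMessageShareData /
// updateTimelineShareData.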
wx.onMenuShareAppMessage(wxData);
wx.onMenuShareTimeline(wxDataTimeline);
});
}
});
| yun1Left = 0;
}
req | conditional_block |
images.py | from tgen.images import Images, ttypes as o
from lib.blobby import Blobby, o as bo
from lib.discovery import connect
from redis import Redis
import time
from lib.imgcompare.avg import average_hash
from cStringIO import StringIO
from PIL import Image
# NOTE: ReventClient is used below but was never imported; the exact module is
# not shown in this file, so this import path is an assumption.
from lib.revent import ReventClient
# events we will fire:
# image_added : source_page_url, source_url, shahash
# image_deleted : source_page_url, source_url, shahash
class ImagesHandler(object):
def __init__(self, redis_host='127.0.0.1'):
self.redis_host = redis_host
self.rc = Redis(redis_host)
self.revent = ReventClient(redis_host=self.redis_host)
# redis keys
# incr this for the next image id
# images:next_id = next_id
# all the images for the given sha
# images:datainstances:<shahash> = (ids)
# timestamp of when image was added
# images:ids:timestamps = sorted (ids,timestamp)
# all the image ids for the page
# images:page_ids:<page_url> (ids)
# last time an image was added from page
# images:pages:timestamps = sorted (url,timestamp)
# images meta data
# images:id = {}
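# e.g. for a hypothetical image id 42 whose bytes hash to 'ab12...':
#   images:next_id -> 43
#   images:datainstances:ab12... -> set {42, ...ids sharing the same bytes}
#   images:ids:timestamps -> zset member 42, score = downloaded_at
#   images:page_ids:<page_url> -> zset member 42, score = downloaded_at
#   images:42 -> hash of the Image's fields (everything except 'data')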
def _image_to_dict(self, image):
data = {}
ignored_attrs = ['data']
for attrs in image.thrift_spec[1:]:
attr = attrs[2]
if attr in ignored_attrs:
continue
v = getattr(image,attr)
if v is not None:
data[attr] = v
return data
def _dict_to_image(self, data):
image = o.Image()
for attrs in image.thrift_spec[1:]:
attr = attrs[2]
v = data.get(attr)
if v is not None:
# we might need to update the value
# type, since all values come back
# from redis as strings
attr_type = attrs[1]
# float
if attr_type == 4:
setattr(image,attr,float(v))
# int
elif attr_type == 8:
setattr(image,attr,int(v))
else:
setattr(image,attr,v)
return image
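# Note: the attr_type codes checked above are Thrift TType values
# (4 = DOUBLE, 8 = I32); everything else round-trips as a string, since
# redis returns all hash values as strings.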
def _delete_from_redis(self, image):
# make these a transaction
pipe = self.rc.pipeline()
# remove it from the id set
pipe.zrem('images:ids:timestamps',image.id)
# remove its hash
pipe.delete('images:%s' % image.id)
# decrement the count for its image data
pipe.srem('images:datainstances:%s' % image.shahash,
image.id)
# remove image from the page's id set
if image.source_page_url:
pipe.zrem('images:page_ids:%s' % image.source_page_url,
image.id)
# make it happen
pipe.execute()
return True
def _save_to_redis(self, image):
# make these a transaction
pipe = self.rc.pipeline()
# if our image doesn't have an id, set it up w/ one
if not image.id:
print 'got new image: %s' % image.shahash
image.id = self.rc.incr('images:next_id')
pipe.sadd('images:datainstances:%s' % image.shahash,
image.id)
# check and see if we used to have a different shahash
old_shahash = self.rc.hget('images:%s' % image.id,'shahash')
if old_shahash != image.shahash:
# remove our id from the old shahash tracker
pipe.srem('images:datainstances:%s' % old_shahash,
image.id)
# add it to the new tracker
pipe.sadd('images:datainstances:%s' % image.shahash,
image.id)
# update / set our timestamp
da = 0.0
if image.downloaded_at:
da = image.downloaded_at
else:
da = time.time()
pipe.zadd('images:ids:timestamps',image.id, da)
# add this image to the page's id set
if image.source_page_url:
pipe.zadd('images:page_ids:%s' % image.source_page_url,
image.id, da)
# update our last scrape time for the page
pipe.zadd('images:pages:timestamps',
image.source_page_url, da)
# take our image and make a dict
image_data = self._image_to_dict(image)
# set our data to redis
key = 'images:%s' % image.id
pipe.hmset(key,image_data)
# execute our pipe
pipe.execute()
return image
def _get_from_redis(self, image_id):
# if the image id is in the id set then pull its details
if self.rc.zrank('images:ids:timestamps',image_id) is not None:
# get the image data from redis
key = 'images:%s' % image_id
image_data = self.rc.hgetall(key)
if not image_data:
print 'redis had no image data'
return None
image = self._dict_to_image(image_data)
return image
return None
def _populate_image_data(self, image):
if not image.shahash:
return None
with connect(Blobby) as c:
image.data = c.get_data(image.shahash)
return image
def _set_image_data(self, image):
if image.data is not None:
|
return image
def get_image(self, image_id):
""" returns Image for given id or blank Image """
# see if we have an image
image = self._get_from_redis(image_id)
if not image:
raise o.ImageNotFound('Could not get image', image_id)
# pull the actual image data
self._populate_image_data(image)
return image
def add_image(self, image):
""" like set but if we already have this image from this
page we're not going to add it again. will also
fill out image stats (size, dimension) """
# we only accept new images, no ids allowed
# if you want to set an id by hand use set_image
if image.id:
raise o.Exception('Can not add image with id')
if not image.data:
raise o.Exception('Image must have data')
if not image.source_page_url:
raise o.Exception('Image must have source page url')
# update it's stats
image = self.populate_image_stats(image)
# only add the image if we haven't seen it before
# if we've seen it before there will be an id which
# the set of images w/ this data and from this page share
ids = self.rc.sinter('images:datainstances:%s' % image.shahash,
'images:page_ids:%s' % image.source_page_url)
# we don't need to continue
# we'll return back their original msg, w/o the id set
if ids:
print 'image already exists [%s], not setting' % ids
return image
# so the image appears to be new, good for it
return self.set_image(image)
def set_image(self, image):
""" sets image data, returns image """
# would be better if we only saved if it didn't exist
if image.data:
# save the images data
self._set_image_data(image)
# could be an update, could be new
image = self._save_to_redis(image)
# let the world know we have added a new image
self.revent.fire('image_added',{
'source_page_url': image.source_page_url,
'source_url': image.source_url,
'shahash': image.shahash,
'vhash': image.vhash,
'xdim': image.xdim,
'ydim': image.ydim,
})
return image
def delete_image(self, image_id):
""" removes an image """
# get it's image obj
try:
image = self.get_image(image_id)
except o.ImageNotFound, ex:
return False
# delete the redis data
self._delete_from_redis(image)
# see if we need to remove the image data
if self.rc.scard('images:datainstances:%s' % image.shahash) == 0:
# no more images w/ the same data, remove image data
with connect(Blobby) as c:
c.delete_data(image.shahash)
# it's gone, let'm know
self.revent.fire('image_deleted',{
'source_page_url': image.source_page_url,
'source_url': image.source_url,
'shahash': image.shahash,
'vhash': image.vhash,
'xdim': image.xdim,
'ydim': image.ydim,
})
# and we're done!
return True
def get_images_since(self, image_id=None, timestamp=None,
offset=10, limit=0):
""" returns list of tublr images or blank list which were
added after given image id or timestamp """
print '%s %s %s %s' % (image_id,timestamp,limit,offset)
if image_id is not None:
print 'got image id'
# figure out what the current id is and then grab
# our sorted set by index assuming that all ids
# contain an image
next_id = int(self.rc.get('images:next_id') or 0)
# how far from the end is the id given
d = next_id - image_id
start = next_id - d
end = next_id - d + limit - 1
print 'getting between %s %s' % (start,end)
# starting back where we think this image is to + limit
ids = self.rc.zrange('images:ids:timestamps',start,end)
print 'got ids: %s' % ids
elif timestamp:
print 'from timestamp: %s' % timestamp
# get ids from our sorted set by its weight (aka timestamp)
# TODO: not use inf
ids = self.rc.zrangebyscore('images:ids:timestamps',
timestamp,'+inf')
else:
print 'could not find images'
ids = []
# page ids
if offset < len(ids):
ids = ids[offset:max(len(ids),limit)]
else:
ids = []
print 'found ids: %s' % ids
# return images for each ID
images = map(self._get_from_redis,ids)
# populate image data
map(self._populate_image_data,images)
return images
def search(self, source_blog_url=None, since_timestamp=None,
before_timestamp=None, ids=[], source_url=None):
""" returns list of images, searches passed on passed params """
pass
def populate_image_stats(self, image):
""" returns a Image w/ image data + stats filled
out """
ti = image
image_data = ti.data
if not ti.data:
return ti
ti.size = len(image_data)
try:
with connect(Blobby) as c:
ti.shahash = c.get_data_bhash(image_data)
except o.Exception, ex:
raise o.Exception('oException getting shahash: %s' % ex.msg)
except Exception, ex:
raise o.Exception('Exception getting shahash: %s' % ex)
try:
b = StringIO(image_data)
img = Image.open(b)
except Exception, ex:
raise o.Exception('Exception getting PIL img: %s' % ex)
try:
ti.xdim, ti.ydim = img.size
except Exception, ex:
raise o.Exception('Exception getting dimensions: %s' % ex)
try:
ti.vhash = str(average_hash(img))
except Exception, ex:
raise o.Exception('Exception getting vhash: %s' % ex)
return ti
def run():
from run_services import serve_service
serve_service(Images, ImagesHandler())
| with connect(Blobby) as c:
image.shahash = c.set_data(image.data) | conditional_block |
images.py | from tgen.images import Images, ttypes as o
from lib.blobby import Blobby, o as bo
from lib.discovery import connect
from redis import Redis
import time
from lib.imgcompare.avg import average_hash
from cStringIO import StringIO
from PIL import Image
# NOTE: ReventClient is used below but was never imported; the exact module is
# not shown in this file, so this import path is an assumption.
from lib.revent import ReventClient
# events we will fire:
# image_added : source_page_url, source_url, shahash
# image_deleted : source_page_url, source_url, shahash
class ImagesHandler(object):
def __init__(self, redis_host='127.0.0.1'):
self.redis_host = redis_host
self.rc = Redis(redis_host)
self.revent = ReventClient(redis_host=self.redis_host)
# redis keys
# incr this for the next image id
# images:next_id = next_id
# all the images for the given sha
# images:datainstances:<shahash> = (ids)
# timestamp of when image was added
# images:ids:timestamps = sorted (ids,timestamp)
# all the image ids for the page
# images:page_ids:<page_url> (ids)
# last time an image was added from page
# images:pages:timestamps = sorted (url,timestamp)
# images meta data
# images:id = {}
def _image_to_dict(self, image):
data = {}
ignored_attrs = ['data']
for attrs in image.thrift_spec[1:]:
attr = attrs[2]
if attr in ignored_attrs:
continue
v = getattr(image,attr)
if v is not None:
data[attr] = v
return data
def _dict_to_image(self, data):
image = o.Image()
for attrs in image.thrift_spec[1:]:
attr = attrs[2]
v = data.get(attr)
if v is not None:
# we might need to update the value
# type, since all values come back
# from redis as strings
attr_type = attrs[1]
# float
if attr_type == 4:
setattr(image,attr,float(v))
# int
elif attr_type == 8:
setattr(image,attr,int(v))
else:
setattr(image,attr,v)
return image
def _delete_from_redis(self, image):
# make these a transaction
pipe = self.rc.pipeline()
# remove it from the id set
pipe.zrem('images:ids:timestamps',image.id)
# remove its hash
pipe.delete('images:%s' % image.id)
# decrement the count for its image data
pipe.srem('images:datainstances:%s' % image.shahash,
image.id)
# remove image from the page's id set
if image.source_page_url:
pipe.zrem('images:page_ids:%s' % image.source_page_url,
image.id)
# make it happen
pipe.execute()
return True
def _save_to_redis(self, image):
# make these a transaction
pipe = self.rc.pipeline()
# if our image doesn't have an id, set it up w/ one
if not image.id:
print 'got new image: %s' % image.shahash
image.id = self.rc.incr('images:next_id')
pipe.sadd('images:datainstances:%s' % image.shahash,
image.id)
# check and see if we used to have a different shahash
old_shahash = self.rc.hget('images:%s' % image.id,'shahash')
if old_shahash != image.shahash:
# remove our id from the old shahash tracker
pipe.srem('images:datainstances:%s' % old_shahash,
image.id)
# add it to the new tracker
pipe.sadd('images:datainstances:%s' % image.shahash,
image.id)
# update / set our timestamp
da = 0.0
if image.downloaded_at:
da = image.downloaded_at
else:
da = time.time()
pipe.zadd('images:ids:timestamps',image.id, da)
# add this image to the page's id set
if image.source_page_url:
pipe.zadd('images:page_ids:%s' % image.source_page_url,
image.id, da)
# update our last scrape time for the page
pipe.zadd('images:pages:timestamps',
image.source_page_url, da)
# take our image and make a dict
image_data = self._image_to_dict(image)
# set our data to redis
key = 'images:%s' % image.id
pipe.hmset(key,image_data)
# execute our pipe
pipe.execute()
return image
def _get_from_redis(self, image_id):
# if the image id is in the id set then pull its details
if self.rc.zrank('images:ids:timestamps',image_id) is not None:
# get the image data from redis
key = 'images:%s' % image_id
image_data = self.rc.hgetall(key)
if not image_data:
print 'redis had no image data'
return None
image = self._dict_to_image(image_data)
return image
return None
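# zrank doubles as an existence check: hgetall on a missing key returns an
# empty dict, which is treated as "no image data" above.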
def _populate_image_data(self, image):
if not image.shahash:
return None
with connect(Blobby) as c:
image.data = c.get_data(image.shahash)
return image
def _set_image_data(self, image):
if image.data is not None:
with connect(Blobby) as c:
image.shahash = c.set_data(image.data)
return image
def get_image(self, image_id):
""" returns Image for given id or blank Image """
# see if we have an image
image = self._get_from_redis(image_id)
if not image:
raise o.ImageNotFound('Could not get image', image_id)
# pull the actual image data
self._populate_image_data(image)
return image
def add_image(self, image):
""" like set but if we already have this image from this
page we're not going to add it again. will also
fill out image stats (size, dimension) """
# we only accept new images, no ids allowed
# if you want to set an id by hand use set_image
if image.id:
raise o.Exception('Can not add image with id')
if not image.data:
raise o.Exception('Image must have data')
if not image.source_page_url:
raise o.Exception('Image must have source page url')
# update it's stats
image = self.populate_image_stats(image)
# only add the image if we haven't seen it before
# if we've seen it before there will be an id which
# the set of images w/ this data and from this page share
ids = self.rc.sinter('images:datainstances:%s' % image.shahash,
'images:page_ids:%s' % image.source_page_url)
# we don't need to continue
# we'll return back their original msg, w/o the id set
if ids:
print 'image already exists [%s], not setting' % ids
return image
# so the image appears to be new, good for it
return self.set_image(image)
def set_image(self, image):
""" sets image data, returns image """
# would be better if we only saved if it didn't exist
if image.data:
# save the images data
self._set_image_data(image)
# could be an update, could be new
image = self._save_to_redis(image)
# let the world know we have added a new image
self.revent.fire('image_added',{
'source_page_url': image.source_page_url,
'source_url': image.source_url,
'shahash': image.shahash,
'vhash': image.vhash,
'xdim': image.xdim,
'ydim': image.ydim,
})
return image
def delete_image(self, image_id):
""" removes an image """
# get it's image obj
try:
image = self.get_image(image_id)
except o.ImageNotFound, ex:
return False
# delete the redis data
self._delete_from_redis(image)
# see if we need to remove the image data
if self.rc.scard('images:datainstances:%s' % image.shahash) == 0:
# no more images w/ the same data, remove image data
with connect(Blobby) as c:
c.delete_data(image.shahash)
# it's gone, let'm know
self.revent.fire('image_deleted',{
'source_page_url': image.source_page_url,
'source_url': image.source_url,
'shahash': image.shahash,
'vhash': image.vhash,
'xdim': image.xdim,
'ydim': image.ydim,
})
# and we're done!
return True
def get_images_since(self, image_id=None, timestamp=None,
offset=10, limit=0):
""" returns list of tublr images or blank list which were
added after given image id or timestamp """
print '%s %s %s %s' % (image_id,timestamp,limit,offset)
if image_id is not None:
print 'got image id'
# figure out what the current id is and then grab
# our sorted set by index assuming that all ids
# contain an image
next_id = int(self.rc.get('images:next_id') or 0)
# how far from the end is the id given
d = next_id - image_id
start = next_id - d
end = next_id - d + limit - 1
print 'getting between %s %s' % (start,end)
# starting back where we think this image is to + limit
ids = self.rc.zrange('images:ids:timestamps',start,end)
print 'got ids: %s' % ids
elif timestamp:
print 'from timestamp: %s' % timestamp
# get ids from our sorted set by its weight (aka timestamp)
# TODO: not use inf
ids = self.rc.zrangebyscore('images:ids:timestamps',
timestamp,'+inf')
else:
print 'could not find images'
ids = []
# page ids
if offset < len(ids):
ids = ids[offset:max(len(ids),limit)]
else:
ids = []
print 'found ids: %s' % ids
# return images for each ID
images = map(self._get_from_redis,ids)
# populate image data
map(self._populate_image_data,images)
return images
def search(self, source_blog_url=None, since_timestamp=None,
before_timestamp=None, ids=[], source_url=None):
""" returns list of images, searches passed on passed params """
pass
def populate_image_stats(self, image):
""" returns a Image w/ image data + stats filled
out """
ti = image
image_data = ti.data
if not ti.data:
return ti
ti.size = len(image_data)
try:
with connect(Blobby) as c:
ti.shahash = c.get_data_bhash(image_data)
except o.Exception, ex: |
try:
b = StringIO(image_data)
img = Image.open(b)
except Exception, ex:
raise o.Exception('Exception getting PIL img: %s' % ex)
try:
ti.xdim, ti.ydim = img.size
except Exception, ex:
raise o.Exception('Exception getting dimensions: %s' % ex)
try:
ti.vhash = str(average_hash(img))
except Exception, ex:
raise o.Exception('Exception getting vhash: %s' % ex)
return ti
def run():
from run_services import serve_service
serve_service(Images, ImagesHandler()) | raise o.Exception('oException getting shahash: %s' % ex.msg)
except Exception, ex:
raise o.Exception('Exception getting shahash: %s' % ex) | random_line_split |
images.py | from tgen.images import Images, ttypes as o
from lib.blobby import Blobby, o as bo
from lib.discovery import connect
from redis import Redis
import time
from lib.imgcompare.avg import average_hash
from cStringIO import StringIO
from PIL import Image
# events we will fire:
# image_added : source_page_url, source_url, shahash
# image_deleted : source_page_url, source_url, shahash
class ImagesHandler(object):
def __init__(self, redis_host='127.0.0.1'):
self.redis_host = redis_host
self.rc = Redis(redis_host)
self.revent = ReventClient(redis_host=self.redis_host)
# redis keys
# incr this for the next image id
# images:next_id = next_id
# all the images for the given sha
# images:datainstances:<shahash> = (ids)
# timestamp of when image was added
# images:ids:timestamps = sorted (ids,timestamp)
# all the image ids for the page
# images:page_ids:<page_url> (ids)
# last time an image was added from page
# images:pages:timestamps = sorted (url,timestamp)
# images meta data
# images:id = {}
def _image_to_dict(self, image):
data = {}
ignored_attrs = ['data']
for attrs in image.thrift_spec[1:]:
attr = attrs[2]
if attr in ignored_attrs:
continue
v = getattr(image,attr)
if v is not None:
data[attr] = v
return data
def _dict_to_image(self, data):
image = o.Image()
for attrs in image.thrift_spec[1:]:
attr = attrs[2]
v = data.get(attr)
if v is not None:
# we might need to update the value
# type, since all values come back
# from redis as strings
attr_type = attrs[1]
# float
if attr_type == 4:
setattr(image,attr,float(v))
# int
elif attr_type == 8:
setattr(image,attr,int(v))
else:
setattr(image,attr,v)
return image
def _delete_from_redis(self, image):
# make these a transaction
pipe = self.rc.pipeline()
# remove it from the id set
pipe.zrem('images:ids:timestamps',image.id)
# remove its hash
pipe.delete('images:%s' % image.id)
# decrement the count for its image data
pipe.srem('images:datainstances:%s' % image.shahash,
image.id)
# remove image from the page's id set
if image.source_page_url:
pipe.zrem('images:page_ids:%s' % image.source_page_url,
image.id)
# make it happen
pipe.execute()
return True
def _save_to_redis(self, image):
# make these a transaction
|
def _get_from_redis(self, image_id):
# if the image id is in the id set then pull its details
if self.rc.zrank('images:ids:timestamps',image_id) is not None:
# get the image data from redis
key = 'images:%s' % image_id
image_data = self.rc.hgetall(key)
if not image_data:
print 'redis had no image data'
return None
image = self._dict_to_image(image_data)
return image
return None
def _populate_image_data(self, image):
if not image.shahash:
return None
with connect(Blobby) as c:
image.data = c.get_data(image.shahash)
return image
def _set_image_data(self, image):
if image.data is not None:
with connect(Blobby) as c:
image.shahash = c.set_data(image.data)
return image
def get_image(self, image_id):
""" returns Image for given id or blank Image """
# see if we have an image
image = self._get_from_redis(image_id)
if not image:
raise o.ImageNotFound('Could not get image', image_id)
# pull the actual image data
self._populate_image_data(image)
return image
def add_image(self, image):
""" like set but if we already have this image from this
page we're not going to add it again. will also
fill out image stats (size, dimension) """
# we only accept new images, no ids allowed
# if you want to set an id by hand use set_image
if image.id:
raise o.Exception('Can not add image with id')
if not image.data:
raise o.Exception('Image must have data')
if not image.source_page_url:
raise o.Exception('Image must have source page url')
# update it's stats
image = self.populate_image_stats(image)
# only add the image if we haven't seen it before
# if we've seen it before there will be an id which
# the set of images w/ this data and from this page share
ids = self.rc.sinter('images:datainstances:%s' % image.shahash,
'images:page_ids:%s' % image.source_page_url)
# we don't need to continue
# we'll return back their original msg, w/o the id set
if ids:
print 'image already exists [%s], not setting' % ids
return image
# so the image appears to be new, good for it
return self.set_image(image)
def set_image(self, image):
""" sets image data, returns image """
# would be better if we only saved if it didn't exist
if image.data:
# save the images data
self._set_image_data(image)
# could be an update, could be new
image = self._save_to_redis(image)
# let the world know we have added a new image
self.revent.fire('image_added',{
'source_page_url': image.source_page_url,
'source_url': image.source_url,
'shahash': image.shahash,
'vhash': image.vhash,
'xdim': image.xdim,
'ydim': image.ydim,
})
return image
def delete_image(self, image_id):
""" removes an image """
# get it's image obj
try:
image = self.get_image(image_id)
except o.ImageNotFound, ex:
return False
# delete the redis data
self._delete_from_redis(image)
# see if we need to remove the image data
if self.rc.scard('images:datainstances:%s' % image.shahash) == 0:
# no more images w/ the same data, remove image data
with connect(Blobby) as c:
c.delete_data(image.shahash)
# it's gone, let'm know
self.revent.fire('image_deleted',{
'source_page_url': image.source_page_url,
'source_url': image.source_url,
'shahash': image.shahash,
'vhash': image.vhash,
'xdim': image.xdim,
'ydim': image.ydim,
})
# and we're done!
return True
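# images:datainstances:<shahash> acts as a refcount: the underlying blob is
# only deleted once the last image sharing those bytes is removed.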
def get_images_since(self, image_id=None, timestamp=None,
offset=10, limit=0):
""" returns list of tublr images or blank list which were
added after given image id or timestamp """
print '%s %s %s %s' % (image_id,timestamp,limit,offset)
if image_id is not None:
print 'got image id'
# figure out what the current id is and then grab
# our sorted set by index assuming that all ids
# contain an image
next_id = int(self.rc.get('images:next_id') or 0)
# how far from the end is the id given
d = next_id - image_id
start = next_id - d
end = next_id - d + limit - 1
print 'getting between %s %s' % (start,end)
# starting back where we think this image is to + limit
ids = self.rc.zrange('images:ids:timestamps',start,end)
print 'got ids: %s' % ids
elif timestamp:
print 'from timestamp: %s' % timestamp
# get ids from our sorted set by its weight (aka timestamp)
# TODO: not use inf
ids = self.rc.zrangebyscore('images:ids:timestamps',
timestamp,'+inf')
else:
print 'could not find images'
ids = []
# page ids
if offset < len(ids):
ids = ids[offset:max(len(ids),limit)]
else:
ids = []
print 'found ids: %s' % ids
# return images for each ID
images = map(self._get_from_redis,ids)
# populate image data
map(self._populate_image_data,images)
return images
def search(self, source_blog_url=None, since_timestamp=None,
before_timestamp=None, ids=[], source_url=None):
""" returns list of images, searches passed on passed params """
pass
def populate_image_stats(self, image):
""" returns a Image w/ image data + stats filled
out """
ti = image
image_data = ti.data
if not ti.data:
return ti
ti.size = len(image_data)
try:
with connect(Blobby) as c:
ti.shahash = c.get_data_bhash(image_data)
except o.Exception, ex:
raise o.Exception('oException getting shahash: %s' % ex.msg)
except Exception, ex:
raise o.Exception('Exception getting shahash: %s' % ex)
try:
b = StringIO(image_data)
img = Image.open(b)
except Exception, ex:
raise o.Exception('Exception getting PIL img: %s' % ex)
try:
ti.xdim, ti.ydim = img.size
except Exception, ex:
raise o.Exception('Exception getting dimensions: %s' % ex)
try:
ti.vhash = str(average_hash(img))
except Exception, ex:
raise o.Exception('Exception getting vhash: %s' % ex)
return ti
def run():
from run_services import serve_service
serve_service(Images, ImagesHandler())
| pipe = self.rc.pipeline()
# if our image doesn't have an id, set it up w/ one
if not image.id:
print 'got new image: %s' % image.shahash
image.id = self.rc.incr('images:next_id')
pipe.sadd('images:datainstances:%s' % image.shahash,
image.id)
# check and see if we used to have a different shahash
old_shahash = self.rc.hget('images:%s' % image.id,'shahash')
if old_shahash != image.shahash:
# remove our id from the old shahash tracker
pipe.srem('images:datainstances:%s' % old_shahash,
image.id)
# add it to the new tracker
pipe.sadd('images:datainstances:%s' % image.shahash,
image.id)
# update / set our timestamp
da = 0.0
if image.downloaded_at:
da = image.downloaded_at
else:
da = time.time()
pipe.zadd('images:ids:timestamps',image.id, da)
# add this image to the page's id set
if image.source_page_url:
pipe.zadd('images:page_ids:%s' % image.source_page_url,
image.id, da)
# update our last scrape time for the page
pipe.zadd('images:pages:timestamps',
image.source_page_url, da)
# take our image and make a dict
image_data = self._image_to_dict(image)
# set our data to redis
key = 'images:%s' % image.id
pipe.hmset(key,image_data)
# execute our pipe
pipe.execute()
return image | identifier_body |