file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
day18.rs | ;
for i in 0..4 {
if i == 0 {
xd = -1; yd = 0;
} else if i == 1 {
xd = 1; yd = 0;
} else if i == 2 {
xd = 0; yd = 1;
} else if i == 3 {
xd = 0; yd = -1;
}
let x1 = (*node).x as i64 + xd;
let y1 = (*node).y as i64 + yd;
if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 {
continue;
}
else {
if (*maze).grid[y1 as usize][x1 as usize].obstacle {
continue;
}
let index = exploredindex(maze, x1 as usize, y1 as usize);
let new_dist = (*node).dist + 1;
if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist {
continue;
}
if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist {
continue;
}
frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y});
}
}
}
if explored.contains_key(&dest_key) {
let end_node = explored.get(&dest_key).unwrap();
if ret_doors_keys {
let mut curr_x = end_node.parent_x;
let mut curr_y = end_node.parent_y;
while !(curr_x == start_x && curr_y == start_y) {
if (*maze).grid[curr_y][curr_x].key_index >= 0 {
(*keys).push((*maze).grid[curr_y][curr_x].key_index as usize);
}
if (*maze).grid[curr_y][curr_x].door_index >= 0 {
(*doors).push((*maze).grid[curr_y][curr_x].door_index as usize);
}
let index = exploredindex(maze, curr_x, curr_y);
let trace = explored.get(&index).unwrap();
curr_x = trace.parent_x;
curr_y = trace.parent_y;
}
}
return end_node.dist as i64;
}
}
return -1;
}
fn read_maze(input: Vec<String>, maze:&mut Maze)->usize {
(*maze).width = input[0].len();
(*maze).height = input.len();
// read origin, obstacles, doors and keys
for y in 0..(*maze).height {
(*maze).grid.push(Vec::new());
for x in 0..(*maze).width {
let byte = input[y].as_bytes()[x];
match byte {
35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}),
46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}),
65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:(byte as char),key_index:0}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1}); },
97..=122=> {(*maze).keys.push(Key{x:x,y:y,symbol:((byte-32) as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64}); },
_=>{(*maze).keys.push(Key{x:x, y:y, symbol:(byte as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64});},
}
}
}
// quick lookup for door/key correspondance
for i in 0..(*maze).doors.len() {
for j in 0..(*maze).keys.len() {
if (*maze).keys[j].symbol == (*maze).doors[i].symbol {
(*maze).doors[i].key_index = j;
break;
}
}
}
// cache distances between each key
for i in 0..(*maze).keys.len() {
(*maze).cached.insert(i, HashMap::new());
}
for i in 0..(*maze).keys.len() {
for j in 0..(*maze).keys.len() {
if j == i {
continue;
}
let mut doors = Vec::new();
let mut keys = Vec::new();
let dist = dijkstra_a(maze, (*maze).keys[i].x, (*maze).keys[i].y, (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true);
let mut doorkeys:Vec<usize> = Vec::new();
for k in 0..doors.len() {
doorkeys.push((*maze).doors[doors[k]].key_index);
}
(*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath{dist: dist, keys: doorkeys});
}
}
let mut first_keys:Vec<usize> = Vec::new();
for i in 0..(*maze).keys.len() {
if (*maze).keys[i].symbol == '@' {
first_keys.push(i);
}
}
return dijkstra_b(maze, &first_keys);
}
pub fn run(file_path:&str) {
let mut maze = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0};
let mut maze2 = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0};
let vec = super::utility::util_fread(file_path);
let mut vec2:Vec<String> = Vec::new();
let mut ox = 0;
let mut oy = 0;
if vec.len() == 0 {
println!("Input not read properly");
return;
}
// test if maze is set up for part B
for line in 0..vec.len() {
let bytes = vec[line].as_bytes();
for pos in 0..bytes.len() {
if bytes[pos] == '@' as u8 {
ox = pos;
oy = line;
}
}
}
let mut has_part_b = true;
if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 {
has_part_b = false;
}
else {
for y in oy-1..=oy+1 {
let bytes = vec[y].as_bytes();
if y == oy-1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) {
has_part_b = false;
break;
}
else if y == oy && (bytes[ox-1] != '.' as u8 || bytes[ox] != '@' as u8 || bytes[ox+1] != '.' as u8) {
has_part_b = false;
break;
}
else if y == oy+1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) {
has_part_b = false;
break;
}
}
}
if has_part_b | {
for y in 0..vec.len() {
let mut line = String::from("");
let bytes = vec[y].as_bytes();
for x in 0..vec[y].len() {
if (x == ox - 1 && y == oy - 1) ||
(x == ox + 1 && y == oy - 1) ||
(x == ox - 1 && y == oy + 1) ||
(x == ox + 1 && y == oy + 1) {
line.push('@');
} else if (x == ox && y == oy - 1) ||
(x == ox && y == oy + 1) ||
(x == ox - 1 && y == oy) ||
(x == ox + 1 && y == oy) ||
(x == ox && y == oy) {
line.push('#');
}
else {
line.push(bytes[x] as char); | conditional_block | |
day18.rs | rontier.clear();
for key in frontier_next.keys() {
let node = frontier_next.get(key).unwrap();
let node2 = (*node).clone();
frontier.insert(key.to_string(), node2);
}
frontier_next.clear();
for key in frontier.keys() {
//println!("Key {}", key);
let node = frontier.get(key).unwrap();
if (*node).keys.len() == (*maze).keys.len() {
if let Some(candidate) = candidates.get_mut(key) {
if (*candidate) > (*node).dist {
*candidate = (*node).dist;
}
}
else {
candidates.insert(key.to_string(), (*node).dist);
}
}
// add to explored or update
if let Some(explored_node) = explored.get_mut(key) {
if (*explored_node).dist > (*node).dist {
(*explored_node).keys.clear();
(*explored_node).at.clear();
for i in 0..(*node).keys.len() {
(*explored_node).keys.push((*node).keys[i]);
}
for i in 0..(*node).at.len() {
(*explored_node).at.push((*node).at[i]);
}
(*explored_node).dist = (*node).dist;
}
}
else {
let new_node = (*node).clone();
explored.insert(key.to_string(), new_node);
}
// add all next steps from all positions
for p in 0..(*node).at.len() {
for k in 0..(*maze).keys.len() {
let mut present = false;
for j in 0..(*node).keys.len() {
if (*node).keys[j] == k {
present = true;
break;
}
}
if present {
continue;
}
let curr_key = (*node).at[p];
// if not accessible from current position
if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 {
continue;
}
// if not accessible with current keys
let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone();
if intersect_count(&((*node).keys), &required_keys) < required_keys.len() {
continue;
}
let mut new_keys = (*node).keys.clone();
new_keys.push(k);
let mut new_at = (*node).at.clone();
new_at[p] = k;
let new_keys_index = keynodeindex(maze, &new_keys, &new_at);
let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize);
// if previously explored and not shorter
if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist {
continue;
}
// if previously added to the frontier
if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist {
continue;
}
// add to frontier
frontier_next.insert(new_keys_index, DNodeB{at:new_at, keys:new_keys, dist:new_dist});
}
}
}
}
let mut min_dist = 0;
for candidate_key in candidates.keys() {
let candidate = candidates.get(candidate_key).unwrap();
if min_dist == 0 || min_dist > *candidate {
min_dist = *candidate;
}
}
return min_dist;
}
fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize {
return ((*maze).width * y) + x;
}
fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64 | for key in frontier.keys() {
let node = frontier.get(key).unwrap();
let exploredindex1 = exploredindex(maze, (*node).x, (*node).y);
if explored.contains_key(&exploredindex1) {
let last_dist = explored.get(&exploredindex1).unwrap().dist;
if (*node).dist < last_dist {
let node2 = explored.get_mut(&exploredindex1).unwrap();
(*node2).dist = (*node).dist;
(*node2).parent_x = (*node).parent_x;
(*node2).parent_y = (*node).parent_y;
}
}
else {
let new_node = (*node).clone();
explored.insert(exploredindex1, new_node);
}
let mut xd:i64 = 0;
let mut yd:i64 = 0;
for i in 0..4 {
if i == 0 {
xd = -1; yd = 0;
} else if i == 1 {
xd = 1; yd = 0;
} else if i == 2 {
xd = 0; yd = 1;
} else if i == 3 {
xd = 0; yd = -1;
}
let x1 = (*node).x as i64 + xd;
let y1 = (*node).y as i64 + yd;
if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 {
continue;
}
else {
if (*maze).grid[y1 as usize][x1 as usize].obstacle {
continue;
}
let index = exploredindex(maze, x1 as usize, y1 as usize);
let new_dist = (*node).dist + 1;
if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist {
continue;
}
if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist {
continue;
}
frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y});
}
}
}
if explored.contains_key(&dest_key) {
let end_node = explored.get(&dest_key).unwrap();
if ret_doors_keys {
let mut curr_x = end_node.parent_x;
let mut curr_y = end_node.parent_y;
while !(curr_x == start_x && curr_y == start_y) {
if (*maze).grid[curr_y][curr_x].key_index >= 0 {
(*keys).push((*maze).grid[curr_y][curr_x].key_index as usize);
}
if (*maze).grid[curr_y][curr_x].door_index >= 0 {
(*doors).push((*maze).grid[curr_y][curr_x].door_index as usize);
}
let index = exploredindex(maze, curr_x, curr_y);
let trace = explored.get(&index).unwrap();
curr_x = trace.parent_x;
curr_y = trace.parent_y;
}
}
return end_node.dist as i64;
}
}
return -1;
}
fn read_maze(input: Vec<String>, maze:&mut Maze)->usize {
(*maze).width = input[0].len();
(*maze).height = input.len();
// read origin, obstacles, doors and keys
for y in 0..(*maze).height {
(*maze).grid.push(Vec::new());
for x in 0..(*maze).width {
let byte = input[y].as_bytes()[x];
match byte {
35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}),
46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}),
65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol | {
let mut explored:HashMap<usize, DNode> = HashMap::new();
let mut frontier:HashMap<usize,DNode> = HashMap::new();
let mut frontier_next:HashMap<usize,DNode> = HashMap::new();
frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y});
let dest_key = exploredindex(maze, end_x, end_y);
while frontier_next.len() > 0 {
frontier.clear();
for key in frontier_next.keys() {
let node = frontier_next.get(key).unwrap();
let new_node = (*node).clone();
frontier.insert(*key, new_node);
}
frontier_next.clear();
| identifier_body |
day18.rs | rontier.clear();
for key in frontier_next.keys() {
let node = frontier_next.get(key).unwrap();
let node2 = (*node).clone();
frontier.insert(key.to_string(), node2);
}
frontier_next.clear();
for key in frontier.keys() {
//println!("Key {}", key);
let node = frontier.get(key).unwrap();
if (*node).keys.len() == (*maze).keys.len() {
if let Some(candidate) = candidates.get_mut(key) {
if (*candidate) > (*node).dist {
*candidate = (*node).dist;
}
}
else {
candidates.insert(key.to_string(), (*node).dist);
}
}
// add to explored or update
if let Some(explored_node) = explored.get_mut(key) {
if (*explored_node).dist > (*node).dist {
(*explored_node).keys.clear();
(*explored_node).at.clear();
for i in 0..(*node).keys.len() {
(*explored_node).keys.push((*node).keys[i]);
}
for i in 0..(*node).at.len() {
(*explored_node).at.push((*node).at[i]);
}
(*explored_node).dist = (*node).dist;
}
}
else {
let new_node = (*node).clone();
explored.insert(key.to_string(), new_node);
}
// add all next steps from all positions
for p in 0..(*node).at.len() {
for k in 0..(*maze).keys.len() {
let mut present = false;
for j in 0..(*node).keys.len() {
if (*node).keys[j] == k {
present = true;
break;
}
}
if present {
continue;
}
let curr_key = (*node).at[p];
// if not accessible from current position
if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 {
continue;
}
// if not accessible with current keys
let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone();
if intersect_count(&((*node).keys), &required_keys) < required_keys.len() {
continue;
}
let mut new_keys = (*node).keys.clone();
new_keys.push(k);
let mut new_at = (*node).at.clone();
new_at[p] = k;
let new_keys_index = keynodeindex(maze, &new_keys, &new_at);
let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize);
// if previously explored and not shorter
if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist {
continue;
}
// if previously added to the frontier
if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist {
continue;
}
// add to frontier
frontier_next.insert(new_keys_index, DNodeB{at:new_at, keys:new_keys, dist:new_dist});
}
}
}
}
let mut min_dist = 0;
for candidate_key in candidates.keys() {
let candidate = candidates.get(candidate_key).unwrap();
if min_dist == 0 || min_dist > *candidate {
min_dist = *candidate;
}
}
return min_dist;
}
fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize {
return ((*maze).width * y) + x;
}
fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64 {
let mut explored:HashMap<usize, DNode> = HashMap::new();
let mut frontier:HashMap<usize,DNode> = HashMap::new();
let mut frontier_next:HashMap<usize,DNode> = HashMap::new();
frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y});
let dest_key = exploredindex(maze, end_x, end_y);
while frontier_next.len() > 0 {
frontier.clear();
for key in frontier_next.keys() {
let node = frontier_next.get(key).unwrap();
let new_node = (*node).clone();
frontier.insert(*key, new_node);
}
frontier_next.clear();
for key in frontier.keys() {
let node = frontier.get(key).unwrap();
let exploredindex1 = exploredindex(maze, (*node).x, (*node).y);
if explored.contains_key(&exploredindex1) {
let last_dist = explored.get(&exploredindex1).unwrap().dist;
if (*node).dist < last_dist {
let node2 = explored.get_mut(&exploredindex1).unwrap();
(*node2).dist = (*node).dist;
(*node2).parent_x = (*node).parent_x;
(*node2).parent_y = (*node).parent_y;
}
}
else {
let new_node = (*node).clone();
explored.insert(exploredindex1, new_node);
}
let mut xd:i64 = 0;
let mut yd:i64 = 0;
for i in 0..4 {
if i == 0 {
xd = -1; yd = 0; | } else if i == 3 {
xd = 0; yd = -1;
}
let x1 = (*node).x as i64 + xd;
let y1 = (*node).y as i64 + yd;
if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 {
continue;
}
else {
if (*maze).grid[y1 as usize][x1 as usize].obstacle {
continue;
}
let index = exploredindex(maze, x1 as usize, y1 as usize);
let new_dist = (*node).dist + 1;
if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist {
continue;
}
if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist {
continue;
}
frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y});
}
}
}
if explored.contains_key(&dest_key) {
let end_node = explored.get(&dest_key).unwrap();
if ret_doors_keys {
let mut curr_x = end_node.parent_x;
let mut curr_y = end_node.parent_y;
while !(curr_x == start_x && curr_y == start_y) {
if (*maze).grid[curr_y][curr_x].key_index >= 0 {
(*keys).push((*maze).grid[curr_y][curr_x].key_index as usize);
}
if (*maze).grid[curr_y][curr_x].door_index >= 0 {
(*doors).push((*maze).grid[curr_y][curr_x].door_index as usize);
}
let index = exploredindex(maze, curr_x, curr_y);
let trace = explored.get(&index).unwrap();
curr_x = trace.parent_x;
curr_y = trace.parent_y;
}
}
return end_node.dist as i64;
}
}
return -1;
}
fn read_maze(input: Vec<String>, maze:&mut Maze)->usize {
(*maze).width = input[0].len();
(*maze).height = input.len();
// read origin, obstacles, doors and keys
for y in 0..(*maze).height {
(*maze).grid.push(Vec::new());
for x in 0..(*maze).width {
let byte = input[y].as_bytes()[x];
match byte {
35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}),
46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}),
65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:( | } else if i == 1 {
xd = 1; yd = 0;
} else if i == 2 {
xd = 0; yd = 1; | random_line_split |
day18.rs | {
x: usize,
y: usize,
dist: usize,
parent_x:usize,
parent_y:usize
}
#[derive(Clone)]
struct DNodeB {
at:Vec<usize>,
keys:Vec<usize>,
dist:usize
}
fn intersect_count (vec_a:&Vec<usize>, vec_b:&Vec<usize>)->usize {
let mut count = 0;
for i in 0..vec_a.len() {
for j in 0..vec_b.len() {
if vec_b[j] == vec_a[i] {
count+=1;
break;
}
}
}
return count;
}
fn keynodeindex(maze:&mut Maze, keys: &Vec<usize>, at: &Vec<usize>)->String {
let mut ret = String::from("");
let mut keys2 = Vec::new();
for i in 0..keys.len() {
keys2.push((*maze).keys[(*keys)[i]].symbol);
}
keys2.sort();
for i in 0..(*at).len() {
ret.push((*maze).keys[(*at)[i]].symbol);
}
ret.push('|');
for i in 0..keys2.len() {
ret.push(keys2[i]);
}
return ret;
}
fn dijkstra_b(maze:&mut Maze, origins:&Vec<usize>)->usize {
let mut frontier:HashMap<String, DNodeB> = HashMap::new();
let mut frontier_next:HashMap<String, DNodeB> = HashMap::new();
let mut explored:HashMap<String, DNodeB> = HashMap::new();
let mut candidates:HashMap<String, usize> = HashMap::new();
let mut start = DNodeB{at:Vec::new(), keys:Vec::new(), dist:0};
for i in 0..(*origins).len() {
start.at.push((*origins)[i]);
start.keys.push((*origins)[i]);
}
frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start);
while frontier_next.len() > 0 {
frontier.clear();
for key in frontier_next.keys() {
let node = frontier_next.get(key).unwrap();
let node2 = (*node).clone();
frontier.insert(key.to_string(), node2);
}
frontier_next.clear();
for key in frontier.keys() {
//println!("Key {}", key);
let node = frontier.get(key).unwrap();
if (*node).keys.len() == (*maze).keys.len() {
if let Some(candidate) = candidates.get_mut(key) {
if (*candidate) > (*node).dist {
*candidate = (*node).dist;
}
}
else {
candidates.insert(key.to_string(), (*node).dist);
}
}
// add to explored or update
if let Some(explored_node) = explored.get_mut(key) {
if (*explored_node).dist > (*node).dist {
(*explored_node).keys.clear();
(*explored_node).at.clear();
for i in 0..(*node).keys.len() {
(*explored_node).keys.push((*node).keys[i]);
}
for i in 0..(*node).at.len() {
(*explored_node).at.push((*node).at[i]);
}
(*explored_node).dist = (*node).dist;
}
}
else {
let new_node = (*node).clone();
explored.insert(key.to_string(), new_node);
}
// add all next steps from all positions
for p in 0..(*node).at.len() {
for k in 0..(*maze).keys.len() {
let mut present = false;
for j in 0..(*node).keys.len() {
if (*node).keys[j] == k {
present = true;
break;
}
}
if present {
continue;
}
let curr_key = (*node).at[p];
// if not accessible from current position
if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 {
continue;
}
// if not accessible with current keys
let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone();
if intersect_count(&((*node).keys), &required_keys) < required_keys.len() {
continue;
}
let mut new_keys = (*node).keys.clone();
new_keys.push(k);
let mut new_at = (*node).at.clone();
new_at[p] = k;
let new_keys_index = keynodeindex(maze, &new_keys, &new_at);
let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize);
// if previously explored and not shorter
if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist {
continue;
}
// if previously added to the frontier
if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist {
continue;
}
// add to frontier
frontier_next.insert(new_keys_index, DNodeB{at:new_at, keys:new_keys, dist:new_dist});
}
}
}
}
let mut min_dist = 0;
for candidate_key in candidates.keys() {
let candidate = candidates.get(candidate_key).unwrap();
if min_dist == 0 || min_dist > *candidate {
min_dist = *candidate;
}
}
return min_dist;
}
fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize {
return ((*maze).width * y) + x;
}
fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64 {
let mut explored:HashMap<usize, DNode> = HashMap::new();
let mut frontier:HashMap<usize,DNode> = HashMap::new();
let mut frontier_next:HashMap<usize,DNode> = HashMap::new();
frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y});
let dest_key = exploredindex(maze, end_x, end_y);
while frontier_next.len() > 0 {
frontier.clear();
for key in frontier_next.keys() {
let node = frontier_next.get(key).unwrap();
let new_node = (*node).clone();
frontier.insert(*key, new_node);
}
frontier_next.clear();
for key in frontier.keys() {
let node = frontier.get(key).unwrap();
let exploredindex1 = exploredindex(maze, (*node).x, (*node).y);
if explored.contains_key(&exploredindex1) {
let last_dist = explored.get(&exploredindex1).unwrap().dist;
if (*node).dist < last_dist {
let node2 = explored.get_mut(&exploredindex1).unwrap();
(*node2).dist = (*node).dist;
(*node2).parent_x = (*node).parent_x;
(*node2).parent_y = (*node).parent_y;
}
}
else {
let new_node = (*node).clone();
explored.insert(exploredindex1, new_node);
}
let mut xd:i64 = 0;
let mut yd:i64 = 0;
for i in 0..4 {
if i == 0 {
xd = -1; yd = 0;
} else if i == 1 {
xd = 1; yd = 0;
} else if i == 2 {
xd = 0; yd = 1;
} else if i == 3 {
xd = 0; yd = -1;
}
let x1 = (*node).x as i64 + xd;
let y1 = (*node).y as i64 + yd;
if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 {
continue;
}
else {
if (*maze).grid[y1 as usize][x1 as usize].obstacle {
continue;
}
let index = exploredindex(maze, x1 as usize, y1 as usize);
let new_dist = (*node).dist + 1;
if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist {
continue;
}
if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist {
continue;
}
frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent | DNode | identifier_name | |
mtaresolver.go | resolvePath(path string, parts ...string) string {
absolutePath := path
if !filepath.IsAbs(path) {
absolutePath = filepath.Join(append(parts, absolutePath)...)
}
return absolutePath
}
// ResolveProperties is the main function to trigger the resolution
func (m *MTAResolver) ResolveProperties(module *mta.Module, envFilePath string) {
if m.Parameters == nil {
m.Parameters = map[string]interface{}{}
}
//add env variables
for _, val := range envGetter() {
pos := strings.Index(val, "=")
if pos > 0 {
key := strings.Trim(val[:pos], " ")
value := strings.Trim(val[pos+1:], " ")
m.addValueToContext(key, value)
}
}
//add .env file in module's path to the module context
if len(module.Path) > 0 {
envFile := resolvePath(envFilePath, m.WorkingDir, module.Path)
envMap, err := godotenv.Read(envFile)
if err == nil {
for key, value := range envMap {
m.addValueToContext(key, value)
}
}
}
m.addServiceNames(module)
//top level properties
for key, value := range module.Properties {
//no expected variables
propValue := m.resolve(module, nil, value)
module.Properties[key] = m.resolvePlaceholders(module, nil, nil, propValue)
}
//required properties:
for _, req := range module.Requires {
requiredSource := m.findProvider(req.Name)
for propName, PropValue := range req.Properties {
resolvedValue := m.resolve(module, &req, PropValue)
//replace value with resolved value
req.Properties[propName] = m.resolvePlaceholders(module, requiredSource, &req, resolvedValue)
}
}
}
func (m *MTAResolver) addValueToContext(key, value string) {
//if the key has format of "module/key", or "resource/key" writes the value to the module's context
slashPos := strings.Index(key, "/")
if slashPos > 0 {
modName := key[:slashPos]
key = key[slashPos+1:]
modulesContext, ok := m.context.modules[modName]
if !ok {
modulesContext, ok = m.context.resources[modName]
}
if ok {
modulesContext[key] = value
}
} else {
m.context.global[key] = value
}
}
func (m *MTAResolver) resolve(sourceModule *mta.Module, requires *mta.Requires, valueObj interface{}) interface{} {
switch valueObj := valueObj.(type) {
case map[interface{}]interface{}:
v := convertToJSONSafe(valueObj)
return m.resolve(sourceModule, requires, v)
case map[string]interface{}:
for k, v := range valueObj {
valueObj[k] = m.resolve(sourceModule, requires, v)
}
return valueObj
case []interface{}:
for i, v := range valueObj {
valueObj[i] = m.resolve(sourceModule, requires, v)
}
return valueObj
case string:
return m.resolveString(sourceModule, requires, valueObj)
default:
//if the value is not a string but a leaf, just return it
return valueObj
}
}
func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} {
pos := 0
pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix)
if pos < 0 {
//no variables
return value
}
varValue := m.getVariableValue(sourceModule, requires, variableName)
if wholeValue {
return varValue
}
for pos >= 0 {
varValueStr, _ := convertToString(varValue)
value = value[:pos] + varValueStr + value[pos+len(variableName)+3:]
pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix)
if pos >= 0 {
varValue = m.getVariableValue(sourceModule, requires, variableName)
}
}
return value
}
func convertToString(valueObj interface{}) (string, bool) {
switch v := valueObj.(type) {
case string:
return v, false
}
valueBytes, err := json.Marshal(convertToJSONSafe(valueObj))
if err != nil {
logs.Logger.Error(err)
return "", false
}
return string(valueBytes), true
}
// return start position, name of variable and if it is a whole value
func parseNextVariable(pos int, value string, prefix string) (int, string, bool) {
endSign := "}"
posStart := strings.Index(value[pos:], prefix+"{")
if posStart < 0 {
return -1, "", false
}
posStart += pos
if string(value[posStart+2]) == "{" {
endSign = "}}"
}
posEnd := strings.Index(value[posStart+2:], endSign)
if posEnd < 0 {
//bad value
return -1, "", false
}
posEnd += posStart + 1 + len(endSign)
wholeValue := posStart == 0 && posEnd == len(value)-1
return posStart, value[posStart+2 : posEnd], wholeValue
}
func (m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} {
var providerName string
if requires == nil {
slashPos := strings.Index(variableName, "/")
if slashPos > 0 {
providerName = variableName[:slashPos]
variableName = variableName[slashPos+1:]
} else {
m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName))
return "~{" + variableName + "}"
}
} else {
providerName = requires.Name
}
source := m.findProvider(providerName)
if source != nil {
for propName, propValue := range source.Properties {
if propName == variableName {
//Do not pass module and requires, because it is a wrong scope
//it is either global->module->requires
//or global->resource
propValue = m.resolvePlaceholders(nil, source, nil, propValue)
return convertToJSONSafe(propValue)
}
}
}
if source != nil && source.Type == resourceType && source.Resource.Type == "configuration" {
provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id")
if ok {
m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName))
}
}
return "~{" + variableName + "}"
}
func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} {
switch valueObj := valueObj.(type) {
case map[interface{}]interface{}:
v := convertToJSONSafe(valueObj)
return m.resolvePlaceholders(sourceModule, source, requires, v)
case map[string]interface{}:
for k, v := range valueObj {
valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v)
}
return valueObj
case []interface{}:
for k, v := range valueObj {
valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v)
}
return valueObj
case string:
return m.resolvePlaceholdersString(sourceModule, source, requires, valueObj)
default:
//if the value is not a string but a leaf, just return it
return valueObj
}
}
func (m *MTAResolver) resolvePlaceholdersString(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, value string) interface{} {
pos := 0
pos, placeholderName, wholeValue := parseNextVariable(pos, value, placeholderPrefix)
if pos < 0 {
return value
}
placeholderValue := m.getParameter(sourceModule, source, requires, placeholderName)
if wholeValue {
return placeholderValue
}
for pos >= 0 {
phValueStr, _ := convertToString(placeholderValue)
value = value[:pos] + phValueStr + value[pos+len(placeholderName)+3:]
pos, placeholderName, _ = parseNextVariable(pos+len(phValueStr), value, placeholderPrefix)
if pos >= 0 {
placeholderValue = m.getParameter(sourceModule, source, requires, placeholderName)
}
}
return value
}
func (m *MTAResolver) getParameterFromSource(source *mtaSource, paramName string) string | {
if source != nil {
// See if the value was configured externally first (in VCAP_SERVICES, env var etc)
// The source can be a module or a resource
module, found := m.context.modules[source.Name]
if found {
paramValStr, ok := module[paramName]
if ok {
return paramValStr
}
}
resource, found := m.context.resources[source.Name]
if found {
paramValStr, ok := resource[paramName]
if ok {
return paramValStr
}
}
| identifier_body | |
mtaresolver.go | ([]map[string]interface{})
envVar[requires.Group] = append(groupArray, propMap)
} else {
envVar[requires.Group] = []map[string]interface{}{propMap}
}
}
}
//serialize
return serializePropertiesAsEnvVars(envVar)
}
func serializePropertiesAsEnvVars(envVar map[string]interface{}) (map[string]string, error) {
retEnvVar := map[string]string{}
for key, val := range envVar {
switch v := val.(type) {
case string:
retEnvVar[key] = v
default:
bytesVal, err := json.Marshal(val)
if err != nil {
return nil, errors.Errorf(marshalFailsMag, key)
}
retEnvVar[key] = string(bytesVal)
}
}
return retEnvVar, nil
}
// MTAResolver is used to resolve MTA properties' variables
type MTAResolver struct {
mta.MTA
WorkingDir string
context *ResolveContext
messages []string
}
const resourceType = 1
const moduleType = 2
const variablePrefix = "~"
const placeholderPrefix = "$"
type mtaSource struct {
Name string
Parameters map[string]interface{} `yaml:"parameters,omitempty"`
Properties map[string]interface{} `yaml:"properties,omitempty"`
Type int
Module *mta.Module
Resource *mta.Resource
}
// NewMTAResolver is a factory function for MTAResolver
func NewMTAResolver(m *mta.MTA, workspaceDir string) *MTAResolver {
resolver := &MTAResolver{*m, workspaceDir, &ResolveContext{
global: map[string]string{},
modules: map[string]map[string]string{},
resources: map[string]map[string]string{},
}, []string{}}
for _, module := range m.Modules {
resolver.context.modules[module.Name] = map[string]string{}
}
for _, resource := range m.Resources {
resolver.context.resources[resource.Name] = map[string]string{}
}
return resolver
}
func resolvePath(path string, parts ...string) string {
absolutePath := path
if !filepath.IsAbs(path) {
absolutePath = filepath.Join(append(parts, absolutePath)...)
}
return absolutePath
}
// ResolveProperties is the main function to trigger the resolution
func (m *MTAResolver) ResolveProperties(module *mta.Module, envFilePath string) {
if m.Parameters == nil {
m.Parameters = map[string]interface{}{}
}
//add env variables
for _, val := range envGetter() {
pos := strings.Index(val, "=")
if pos > 0 {
key := strings.Trim(val[:pos], " ")
value := strings.Trim(val[pos+1:], " ")
m.addValueToContext(key, value)
}
}
//add .env file in module's path to the module context
if len(module.Path) > 0 {
envFile := resolvePath(envFilePath, m.WorkingDir, module.Path)
envMap, err := godotenv.Read(envFile)
if err == nil {
for key, value := range envMap {
m.addValueToContext(key, value)
}
}
}
m.addServiceNames(module)
//top level properties | propValue := m.resolve(module, nil, value)
module.Properties[key] = m.resolvePlaceholders(module, nil, nil, propValue)
}
//required properties:
for _, req := range module.Requires {
requiredSource := m.findProvider(req.Name)
for propName, PropValue := range req.Properties {
resolvedValue := m.resolve(module, &req, PropValue)
//replace value with resolved value
req.Properties[propName] = m.resolvePlaceholders(module, requiredSource, &req, resolvedValue)
}
}
}
func (m *MTAResolver) addValueToContext(key, value string) {
//if the key has format of "module/key", or "resource/key" writes the value to the module's context
slashPos := strings.Index(key, "/")
if slashPos > 0 {
modName := key[:slashPos]
key = key[slashPos+1:]
modulesContext, ok := m.context.modules[modName]
if !ok {
modulesContext, ok = m.context.resources[modName]
}
if ok {
modulesContext[key] = value
}
} else {
m.context.global[key] = value
}
}
func (m *MTAResolver) resolve(sourceModule *mta.Module, requires *mta.Requires, valueObj interface{}) interface{} {
switch valueObj := valueObj.(type) {
case map[interface{}]interface{}:
v := convertToJSONSafe(valueObj)
return m.resolve(sourceModule, requires, v)
case map[string]interface{}:
for k, v := range valueObj {
valueObj[k] = m.resolve(sourceModule, requires, v)
}
return valueObj
case []interface{}:
for i, v := range valueObj {
valueObj[i] = m.resolve(sourceModule, requires, v)
}
return valueObj
case string:
return m.resolveString(sourceModule, requires, valueObj)
default:
//if the value is not a string but a leaf, just return it
return valueObj
}
}
func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} {
pos := 0
pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix)
if pos < 0 {
//no variables
return value
}
varValue := m.getVariableValue(sourceModule, requires, variableName)
if wholeValue {
return varValue
}
for pos >= 0 {
varValueStr, _ := convertToString(varValue)
value = value[:pos] + varValueStr + value[pos+len(variableName)+3:]
pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix)
if pos >= 0 {
varValue = m.getVariableValue(sourceModule, requires, variableName)
}
}
return value
}
func convertToString(valueObj interface{}) (string, bool) {
switch v := valueObj.(type) {
case string:
return v, false
}
valueBytes, err := json.Marshal(convertToJSONSafe(valueObj))
if err != nil {
logs.Logger.Error(err)
return "", false
}
return string(valueBytes), true
}
// return start position, name of variable and if it is a whole value
func parseNextVariable(pos int, value string, prefix string) (int, string, bool) {
endSign := "}"
posStart := strings.Index(value[pos:], prefix+"{")
if posStart < 0 {
return -1, "", false
}
posStart += pos
if string(value[posStart+2]) == "{" {
endSign = "}}"
}
posEnd := strings.Index(value[posStart+2:], endSign)
if posEnd < 0 {
//bad value
return -1, "", false
}
posEnd += posStart + 1 + len(endSign)
wholeValue := posStart == 0 && posEnd == len(value)-1
return posStart, value[posStart+2 : posEnd], wholeValue
}
func (m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} {
var providerName string
if requires == nil {
slashPos := strings.Index(variableName, "/")
if slashPos > 0 {
providerName = variableName[:slashPos]
variableName = variableName[slashPos+1:]
} else {
m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName))
return "~{" + variableName + "}"
}
} else {
providerName = requires.Name
}
source := m.findProvider(providerName)
if source != nil {
for propName, propValue := range source.Properties {
if propName == variableName {
//Do not pass module and requires, because it is a wrong scope
//it is either global->module->requires
//or global->resource
propValue = m.resolvePlaceholders(nil, source, nil, propValue)
return convertToJSONSafe(propValue)
}
}
}
if source != nil && source.Type == resourceType && source.Resource.Type == "configuration" {
provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id")
if ok {
m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName))
}
}
return "~{" + variableName + "}"
}
func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} {
switch valueObj := valueObj.(type) {
case map[interface{}]interface{}:
v := convertToJSONSafe(valueObj)
return m.resolvePlaceholders(sourceModule, source, requires, v)
case map[string]interface{}:
for k, v := range valueObj {
valueObj[k] = m | for key, value := range module.Properties {
//no expected variables | random_line_split |
mtaresolver.go | map[string]interface{})
envVar[requires.Group] = append(groupArray, propMap)
} else {
envVar[requires.Group] = []map[string]interface{}{propMap}
}
}
}
//serialize
return serializePropertiesAsEnvVars(envVar)
}
func serializePropertiesAsEnvVars(envVar map[string]interface{}) (map[string]string, error) {
retEnvVar := map[string]string{}
for key, val := range envVar {
switch v := val.(type) {
case string:
retEnvVar[key] = v
default:
bytesVal, err := json.Marshal(val)
if err != nil {
return nil, errors.Errorf(marshalFailsMag, key)
}
retEnvVar[key] = string(bytesVal)
}
}
return retEnvVar, nil
}
// MTAResolver is used to resolve MTA properties' variables
type MTAResolver struct {
mta.MTA
WorkingDir string
context *ResolveContext
messages []string
}
const resourceType = 1
const moduleType = 2
const variablePrefix = "~"
const placeholderPrefix = "$"
type mtaSource struct {
Name string
Parameters map[string]interface{} `yaml:"parameters,omitempty"`
Properties map[string]interface{} `yaml:"properties,omitempty"`
Type int
Module *mta.Module
Resource *mta.Resource
}
// NewMTAResolver is a factory function for MTAResolver
func NewMTAResolver(m *mta.MTA, workspaceDir string) *MTAResolver {
resolver := &MTAResolver{*m, workspaceDir, &ResolveContext{
global: map[string]string{},
modules: map[string]map[string]string{},
resources: map[string]map[string]string{},
}, []string{}}
for _, module := range m.Modules {
resolver.context.modules[module.Name] = map[string]string{}
}
for _, resource := range m.Resources {
resolver.context.resources[resource.Name] = map[string]string{}
}
return resolver
}
func | (path string, parts ...string) string {
absolutePath := path
if !filepath.IsAbs(path) {
absolutePath = filepath.Join(append(parts, absolutePath)...)
}
return absolutePath
}
// ResolveProperties is the main function to trigger the resolution
func (m *MTAResolver) ResolveProperties(module *mta.Module, envFilePath string) {
if m.Parameters == nil {
m.Parameters = map[string]interface{}{}
}
//add env variables
for _, val := range envGetter() {
pos := strings.Index(val, "=")
if pos > 0 {
key := strings.Trim(val[:pos], " ")
value := strings.Trim(val[pos+1:], " ")
m.addValueToContext(key, value)
}
}
//add .env file in module's path to the module context
if len(module.Path) > 0 {
envFile := resolvePath(envFilePath, m.WorkingDir, module.Path)
envMap, err := godotenv.Read(envFile)
if err == nil {
for key, value := range envMap {
m.addValueToContext(key, value)
}
}
}
m.addServiceNames(module)
//top level properties
for key, value := range module.Properties {
//no expected variables
propValue := m.resolve(module, nil, value)
module.Properties[key] = m.resolvePlaceholders(module, nil, nil, propValue)
}
//required properties:
for _, req := range module.Requires {
requiredSource := m.findProvider(req.Name)
for propName, PropValue := range req.Properties {
resolvedValue := m.resolve(module, &req, PropValue)
//replace value with resolved value
req.Properties[propName] = m.resolvePlaceholders(module, requiredSource, &req, resolvedValue)
}
}
}
func (m *MTAResolver) addValueToContext(key, value string) {
//if the key has format of "module/key", or "resource/key" writes the value to the module's context
slashPos := strings.Index(key, "/")
if slashPos > 0 {
modName := key[:slashPos]
key = key[slashPos+1:]
modulesContext, ok := m.context.modules[modName]
if !ok {
modulesContext, ok = m.context.resources[modName]
}
if ok {
modulesContext[key] = value
}
} else {
m.context.global[key] = value
}
}
func (m *MTAResolver) resolve(sourceModule *mta.Module, requires *mta.Requires, valueObj interface{}) interface{} {
switch valueObj := valueObj.(type) {
case map[interface{}]interface{}:
v := convertToJSONSafe(valueObj)
return m.resolve(sourceModule, requires, v)
case map[string]interface{}:
for k, v := range valueObj {
valueObj[k] = m.resolve(sourceModule, requires, v)
}
return valueObj
case []interface{}:
for i, v := range valueObj {
valueObj[i] = m.resolve(sourceModule, requires, v)
}
return valueObj
case string:
return m.resolveString(sourceModule, requires, valueObj)
default:
//if the value is not a string but a leaf, just return it
return valueObj
}
}
func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} {
pos := 0
pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix)
if pos < 0 {
//no variables
return value
}
varValue := m.getVariableValue(sourceModule, requires, variableName)
if wholeValue {
return varValue
}
for pos >= 0 {
varValueStr, _ := convertToString(varValue)
value = value[:pos] + varValueStr + value[pos+len(variableName)+3:]
pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix)
if pos >= 0 {
varValue = m.getVariableValue(sourceModule, requires, variableName)
}
}
return value
}
func convertToString(valueObj interface{}) (string, bool) {
switch v := valueObj.(type) {
case string:
return v, false
}
valueBytes, err := json.Marshal(convertToJSONSafe(valueObj))
if err != nil {
logs.Logger.Error(err)
return "", false
}
return string(valueBytes), true
}
// return start position, name of variable and if it is a whole value
func parseNextVariable(pos int, value string, prefix string) (int, string, bool) {
endSign := "}"
posStart := strings.Index(value[pos:], prefix+"{")
if posStart < 0 {
return -1, "", false
}
posStart += pos
if string(value[posStart+2]) == "{" {
endSign = "}}"
}
posEnd := strings.Index(value[posStart+2:], endSign)
if posEnd < 0 {
//bad value
return -1, "", false
}
posEnd += posStart + 1 + len(endSign)
wholeValue := posStart == 0 && posEnd == len(value)-1
return posStart, value[posStart+2 : posEnd], wholeValue
}
func (m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} {
var providerName string
if requires == nil {
slashPos := strings.Index(variableName, "/")
if slashPos > 0 {
providerName = variableName[:slashPos]
variableName = variableName[slashPos+1:]
} else {
m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName))
return "~{" + variableName + "}"
}
} else {
providerName = requires.Name
}
source := m.findProvider(providerName)
if source != nil {
for propName, propValue := range source.Properties {
if propName == variableName {
//Do not pass module and requires, because it is a wrong scope
//it is either global->module->requires
//or global->resource
propValue = m.resolvePlaceholders(nil, source, nil, propValue)
return convertToJSONSafe(propValue)
}
}
}
if source != nil && source.Type == resourceType && source.Resource.Type == "configuration" {
provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id")
if ok {
m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName))
}
}
return "~{" + variableName + "}"
}
func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} {
switch valueObj := valueObj.(type) {
case map[interface{}]interface{}:
v := convertToJSONSafe(valueObj)
return m.resolvePlaceholders(sourceModule, source, requires, v)
case map[string]interface{}:
for k, v := range valueObj {
valueObj[k] | resolvePath | identifier_name |
mtaresolver.go | )
}
return valueObj
case []interface{}:
for i, v := range valueObj {
valueObj[i] = m.resolve(sourceModule, requires, v)
}
return valueObj
case string:
return m.resolveString(sourceModule, requires, valueObj)
default:
//if the value is not a string but a leaf, just return it
return valueObj
}
}
func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} {
pos := 0
pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix)
if pos < 0 {
//no variables
return value
}
varValue := m.getVariableValue(sourceModule, requires, variableName)
if wholeValue {
return varValue
}
for pos >= 0 {
varValueStr, _ := convertToString(varValue)
value = value[:pos] + varValueStr + value[pos+len(variableName)+3:]
pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix)
if pos >= 0 {
varValue = m.getVariableValue(sourceModule, requires, variableName)
}
}
return value
}
func convertToString(valueObj interface{}) (string, bool) {
switch v := valueObj.(type) {
case string:
return v, false
}
valueBytes, err := json.Marshal(convertToJSONSafe(valueObj))
if err != nil {
logs.Logger.Error(err)
return "", false
}
return string(valueBytes), true
}
// return start position, name of variable and if it is a whole value
func parseNextVariable(pos int, value string, prefix string) (int, string, bool) {
endSign := "}"
posStart := strings.Index(value[pos:], prefix+"{")
if posStart < 0 {
return -1, "", false
}
posStart += pos
if string(value[posStart+2]) == "{" {
endSign = "}}"
}
posEnd := strings.Index(value[posStart+2:], endSign)
if posEnd < 0 {
//bad value
return -1, "", false
}
posEnd += posStart + 1 + len(endSign)
wholeValue := posStart == 0 && posEnd == len(value)-1
return posStart, value[posStart+2 : posEnd], wholeValue
}
func (m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} {
var providerName string
if requires == nil {
slashPos := strings.Index(variableName, "/")
if slashPos > 0 {
providerName = variableName[:slashPos]
variableName = variableName[slashPos+1:]
} else {
m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName))
return "~{" + variableName + "}"
}
} else {
providerName = requires.Name
}
source := m.findProvider(providerName)
if source != nil {
for propName, propValue := range source.Properties {
if propName == variableName {
//Do not pass module and requires, because it is a wrong scope
//it is either global->module->requires
//or global->resource
propValue = m.resolvePlaceholders(nil, source, nil, propValue)
return convertToJSONSafe(propValue)
}
}
}
if source != nil && source.Type == resourceType && source.Resource.Type == "configuration" {
provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id")
if ok {
m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName))
}
}
return "~{" + variableName + "}"
}
func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} {
switch valueObj := valueObj.(type) {
case map[interface{}]interface{}:
v := convertToJSONSafe(valueObj)
return m.resolvePlaceholders(sourceModule, source, requires, v)
case map[string]interface{}:
for k, v := range valueObj {
valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v)
}
return valueObj
case []interface{}:
for k, v := range valueObj {
valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v)
}
return valueObj
case string:
return m.resolvePlaceholdersString(sourceModule, source, requires, valueObj)
default:
//if the value is not a string but a leaf, just return it
return valueObj
}
}
func (m *MTAResolver) resolvePlaceholdersString(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, value string) interface{} {
pos := 0
pos, placeholderName, wholeValue := parseNextVariable(pos, value, placeholderPrefix)
if pos < 0 {
return value
}
placeholderValue := m.getParameter(sourceModule, source, requires, placeholderName)
if wholeValue {
return placeholderValue
}
for pos >= 0 {
phValueStr, _ := convertToString(placeholderValue)
value = value[:pos] + phValueStr + value[pos+len(placeholderName)+3:]
pos, placeholderName, _ = parseNextVariable(pos+len(phValueStr), value, placeholderPrefix)
if pos >= 0 {
placeholderValue = m.getParameter(sourceModule, source, requires, placeholderName)
}
}
return value
}
func (m *MTAResolver) getParameterFromSource(source *mtaSource, paramName string) string {
if source != nil {
// See if the value was configured externally first (in VCAP_SERVICES, env var etc)
// The source can be a module or a resource
module, found := m.context.modules[source.Name]
if found {
paramValStr, ok := module[paramName]
if ok {
return paramValStr
}
}
resource, found := m.context.resources[source.Name]
if found {
paramValStr, ok := resource[paramName]
if ok {
return paramValStr
}
}
// If it was not defined externally, try to get it from the source parameters
paramVal, found := getStringFromMap(source.Parameters, paramName)
if found {
return paramVal
}
}
return ""
}
func (m *MTAResolver) getParameter(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, paramName string) string {
//first on source parameters scope
paramValStr := m.getParameterFromSource(source, paramName)
//first on source parameters scope
if paramValStr != "" {
return paramValStr
}
//then try on requires level
if requires != nil {
paramVal, ok := getStringFromMap(requires.Parameters, paramName)
if ok {
return paramVal
}
}
if sourceModule != nil {
paramVal, ok := getStringFromMap(sourceModule.Parameters, paramName)
if ok {
return paramVal
}
//defaults to context's module params:
paramValStr, ok = m.context.modules[sourceModule.Name][paramName]
if ok {
return paramValStr
}
}
//then on MTA root scope
paramVal, ok := getStringFromMap(m.Parameters, paramName)
if ok {
return paramVal
}
//then global scope
paramValStr, ok = m.context.global[paramName]
if ok {
return paramValStr
}
if source == nil {
m.addMessage(fmt.Sprint("Missing ", paramName))
} else {
m.addMessage(fmt.Sprint("Missing ", source.Name+"/"+paramName))
}
return "${" + paramName + "}"
}
func (m *MTAResolver) findProvider(name string) *mtaSource {
for _, module := range m.Modules {
for _, provides := range module.Provides {
if provides.Name == name {
source := mtaSource{Name: module.Name, Properties: provides.Properties, Parameters: nil, Type: moduleType, Module: module}
return &source
}
}
}
//in case of resource, its name is the matching to the requires name
for _, resource := range m.Resources {
if resource.Name == name {
source := mtaSource{Name: resource.Name, Properties: resource.Properties, Parameters: resource.Parameters, Type: resourceType, Resource: resource}
return &source
}
}
return nil
}
func (m *MTAResolver) addMessage(message string) {
// This check is necessary so the same message won't be written twice.
// This happens when a placeholder references a parameter that is not defined,
// because we try to resolve the parameter while resolving the placeholder and then
// we try to resolve the parameter again as a parameter.
if !containsString(m.messages, message) {
m.messages = append(m.messages, message)
}
}
func containsString(slice []string, value string) bool {
for _, curr := range slice | {
if curr == value {
return true
}
} | conditional_block | |
stlib.py | 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = bs(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def cleaning_urls_text(url):
try:
html = text_from_html(urllib.request.urlopen(url).read())
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(html)
return [w for w in word_tokens if not w in stop_words]
except:
return []
def filter_warning_words(sentence):
warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail',
'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide',
'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot',
'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']
return list(filter(lambda word: word in warning_word, sentence))
def warnings_count(url):
clean_sentence = cleaning_urls_text(url)
length = len(filter_warning_words(clean_sentence)) | list_len_tup_clean.sort(key = lambda item: item[1], reverse=True)
top_urls = [url for url, length in list_len_tup_clean[:2]]
if len(top_urls) > 1:
print(f"""
We found something sketchy. You might want to check these links:
- {top_urls[0]}
- {top_urls[1]}
""")
elif len(top_urls) == 1:
print(f"""
We found something sketchy. You might want to check this link:
{top_urls[0]}
""")
else:
print(f"We couldn't find anything worrying about {look_for} on Google. Nice!")
# Input correction
def retrieve_name(my_name, companies):
companies_list = []
for i in companies.dropna(subset=['name']).name:
companies_list.append(i)
if my_name in companies_list:
return my_name
elif len(get_close_matches(my_name, companies_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_name, companies_list)[0])
if (action == "y"):
return get_close_matches(my_name, companies_list)[0]
elif (action == "n"):
return my_name
else:
return("we don't understand you. Apologies.")
def retrieve_sector(my_sector, investments):
investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list'])
sector_list0 = []
sector_list = []
for item in investments['company_category_list']:
if ',' in item:
sector_list0.append(item.split(sep=', '))
else:
sector_list0.append(item)
for i in sector_list0:
if type(i) == list:
for sec in i:
sector_list.append(sec)
else:
sector_list.append(i)
if my_sector in sector_list:
return my_sector
elif len(get_close_matches(my_sector, sector_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0])
if (action == "y"):
return get_close_matches(my_sector, sector_list)[0]
else:
return my_sector
# Sentiment analysis tweeter
def tw_sent_sector(public_tweets, sector):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {sector} industry in Twitter is {sent}")
# Sentiment analysis news
def news_sentiment_sector(public_news, sector):
news_list = []
for piece in range(len(public_news['articles'])):
news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0])
news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0])
if sum(news_list)>0:
news_sent = 'Positive'
elif sum(news_list)<0:
news_sent = 'Negative'
else:
news_sent = 'Neutral'
print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}")
# Look for data about sector
def category(sector, investments):
# Gather tweets
public_tweets = api.search(sector)
# Gather news
public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en')
# Prepare the data for the sector
investments = investments.dropna(subset=['company_category_list'])
sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1)
sector_investments.reset_index(drop=True)
sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])
sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year )
sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month )
sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day )
# Sentiment analysis Twitter
tw_sent_sector(public_tweets, sector)
# Sentiment analysis News
news_sentiment_sector(public_news, sector)
# create plot
sector_year = sector_investments.groupby(['Year']).sum()[-10:]
movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100)
if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0:
in_dec = 'increased'
grow = 'growing'
else:
in_dec = 'decreased'
grow = 'falling'
movement = movement[1:]
sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')
investments_per_year = sector_investments.groupby(['Year']).count()
peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list()
peak_amount = max(sector_year.raised_amount_usd)
#peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list()
low_amount = min(sector_year.raised_amount_usd)
most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)
low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list()
format_doll = ',.2f'
print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years.
It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.
""")
plt.ylabel('Raised amount in USD')
plt.show()
sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')
plt.ylabel('Number of investments')
#print("""Plot explanaition average investment
""")
plt.show()
#print(f"""
# The Top 3 companies with biggest investments are:
#- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised,
#- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and
#- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised
#""")
# Sentiment analysis founder
def tw_analysis | return (url, length) if length != 0 else None
def most_warnings(urls, look_for):
list_len_tup = list(map(warnings_count, urls))
list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup)) | random_line_split |
stlib.py | _tup))
list_len_tup_clean.sort(key = lambda item: item[1], reverse=True)
top_urls = [url for url, length in list_len_tup_clean[:2]]
if len(top_urls) > 1:
print(f"""
We found something sketchy. You might want to check these links:
- {top_urls[0]}
- {top_urls[1]}
""")
elif len(top_urls) == 1:
print(f"""
We found something sketchy. You might want to check this link:
{top_urls[0]}
""")
else:
print(f"We couldn't find anything worrying about {look_for} on Google. Nice!")
# Input correction
def retrieve_name(my_name, companies):
companies_list = []
for i in companies.dropna(subset=['name']).name:
companies_list.append(i)
if my_name in companies_list:
return my_name
elif len(get_close_matches(my_name, companies_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_name, companies_list)[0])
if (action == "y"):
return get_close_matches(my_name, companies_list)[0]
elif (action == "n"):
return my_name
else:
return("we don't understand you. Apologies.")
def retrieve_sector(my_sector, investments):
investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list'])
sector_list0 = []
sector_list = []
for item in investments['company_category_list']:
if ',' in item:
sector_list0.append(item.split(sep=', '))
else:
sector_list0.append(item)
for i in sector_list0:
if type(i) == list:
for sec in i:
sector_list.append(sec)
else:
sector_list.append(i)
if my_sector in sector_list:
return my_sector
elif len(get_close_matches(my_sector, sector_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0])
if (action == "y"):
return get_close_matches(my_sector, sector_list)[0]
else:
return my_sector
# Sentiment analysis tweeter
def tw_sent_sector(public_tweets, sector):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {sector} industry in Twitter is {sent}")
# Sentiment analysis news
def news_sentiment_sector(public_news, sector):
news_list = []
for piece in range(len(public_news['articles'])):
news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0])
news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0])
if sum(news_list)>0:
news_sent = 'Positive'
elif sum(news_list)<0:
news_sent = 'Negative'
else:
news_sent = 'Neutral'
print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}")
# Look for data about sector
def category(sector, investments):
# Gather tweets
public_tweets = api.search(sector)
# Gather news
public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en')
# Prepare the data for the sector
investments = investments.dropna(subset=['company_category_list'])
sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1)
sector_investments.reset_index(drop=True)
sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])
sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year )
sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month )
sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day )
# Sentiment analysis Twitter
tw_sent_sector(public_tweets, sector)
# Sentiment analysis News
news_sentiment_sector(public_news, sector)
# create plot
sector_year = sector_investments.groupby(['Year']).sum()[-10:]
movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100)
if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0:
in_dec = 'increased'
grow = 'growing'
else:
in_dec = 'decreased'
grow = 'falling'
movement = movement[1:]
sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')
investments_per_year = sector_investments.groupby(['Year']).count()
peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list()
peak_amount = max(sector_year.raised_amount_usd)
#peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list()
low_amount = min(sector_year.raised_amount_usd)
most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)
low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list()
format_doll = ',.2f'
print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years.
It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.
""")
plt.ylabel('Raised amount in USD')
plt.show()
sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')
plt.ylabel('Number of investments')
#print("""Plot explanaition average investment
""")
plt.show()
#print(f"""
# The Top 3 companies with biggest investments are:
#- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised,
#- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and
#- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised
#""")
# Sentiment analysis founder
def tw_analysis_founder(public_tweets, founder):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {founder} in Twitter is {sent}")
# Look for data about the founder
def founders(founder, people):
full_name = founder.split()
public_tweets = api.search(founder)
# What to search on Google
look_for = founder
for i in range(len(people)):
if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i]==full_name[1]:
display(Image(url=people.profile_image_url[i]))
print(f'We found this information about {founder}:')
print(f"Founder's name: {people.first_name[i]} {people.last_name[i]} ")
print(f"Title: {people.title[i]}")
print(f"Organization: {people.organization[i]}")
print(f"Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}")
if people.twitter_url[i] != None:
print(f"Twitter URL: {people.twitter_url[i]}")
if people.linkedin_url[i] != None:
print(f"Linkedin URL: {people.linkedin_url[i]}")
if people.facebook_url[i] != None:
print(f"Facebook URL: {people.facebook_url[i]}")
# Twitter analysis
tw_analysis_founder(public_tweets, founder)
# Google search
most_warnings(find_webs(founder), look_for)
# Look for data about company
def | find_companies_by_size | identifier_name | |
stlib.py | script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = bs(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def cleaning_urls_text(url):
try:
html = text_from_html(urllib.request.urlopen(url).read())
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(html)
return [w for w in word_tokens if not w in stop_words]
except:
return []
def filter_warning_words(sentence):
warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail',
'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide',
'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot',
'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']
return list(filter(lambda word: word in warning_word, sentence))
def warnings_count(url):
clean_sentence = cleaning_urls_text(url)
length = len(filter_warning_words(clean_sentence))
return (url, length) if length != 0 else None
def most_warnings(urls, look_for):
list_len_tup = list(map(warnings_count, urls))
list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup))
list_len_tup_clean.sort(key = lambda item: item[1], reverse=True)
top_urls = [url for url, length in list_len_tup_clean[:2]]
if len(top_urls) > 1:
print(f"""
We found something sketchy. You might want to check these links:
- {top_urls[0]}
- {top_urls[1]}
""")
elif len(top_urls) == 1:
print(f"""
We found something sketchy. You might want to check this link:
{top_urls[0]}
""")
else:
print(f"We couldn't find anything worrying about {look_for} on Google. Nice!")
# Input correction
def retrieve_name(my_name, companies):
companies_list = []
for i in companies.dropna(subset=['name']).name:
companies_list.append(i)
if my_name in companies_list:
return my_name
elif len(get_close_matches(my_name, companies_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_name, companies_list)[0])
if (action == "y"):
return get_close_matches(my_name, companies_list)[0]
elif (action == "n"):
return my_name
else:
return("we don't understand you. Apologies.")
def retrieve_sector(my_sector, investments):
investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list'])
sector_list0 = []
sector_list = []
for item in investments['company_category_list']:
if ',' in item:
sector_list0.append(item.split(sep=', '))
else:
sector_list0.append(item)
for i in sector_list0:
if type(i) == list:
for sec in i:
sector_list.append(sec)
else:
sector_list.append(i)
if my_sector in sector_list:
return my_sector
elif len(get_close_matches(my_sector, sector_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0])
if (action == "y"):
return get_close_matches(my_sector, sector_list)[0]
else:
return my_sector
# Sentiment analysis tweeter
def tw_sent_sector(public_tweets, sector):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {sector} industry in Twitter is {sent}")
# Sentiment analysis news
def news_sentiment_sector(public_news, sector):
|
# Look for data about sector
def category(sector, investments):
# Gather tweets
public_tweets = api.search(sector)
# Gather news
public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en')
# Prepare the data for the sector
investments = investments.dropna(subset=['company_category_list'])
sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1)
sector_investments.reset_index(drop=True)
sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])
sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year )
sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month )
sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day )
# Sentiment analysis Twitter
tw_sent_sector(public_tweets, sector)
# Sentiment analysis News
news_sentiment_sector(public_news, sector)
# create plot
sector_year = sector_investments.groupby(['Year']).sum()[-10:]
movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100)
if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0:
in_dec = 'increased'
grow = 'growing'
else:
in_dec = 'decreased'
grow = 'falling'
movement = movement[1:]
sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')
investments_per_year = sector_investments.groupby(['Year']).count()
peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list()
peak_amount = max(sector_year.raised_amount_usd)
#peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list()
low_amount = min(sector_year.raised_amount_usd)
most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)
low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list()
format_doll = ',.2f'
print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years.
It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.
""")
plt.ylabel('Raised amount in USD')
plt.show()
sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')
plt.ylabel('Number of investments')
#print("""Plot explanaition average investment
""")
plt.show()
#print(f"""
# The Top 3 companies with biggest investments are:
#- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised,
#- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and
#- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised
#""")
# Sentiment analysis founder
def tw_analysis | news_list = []
for piece in range(len(public_news['articles'])):
news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0])
news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0])
if sum(news_list)>0:
news_sent = 'Positive'
elif sum(news_list)<0:
news_sent = 'Negative'
else:
news_sent = 'Neutral'
print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}") | identifier_body |
stlib.py | script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = bs(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def cleaning_urls_text(url):
try:
html = text_from_html(urllib.request.urlopen(url).read())
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(html)
return [w for w in word_tokens if not w in stop_words]
except:
return []
def filter_warning_words(sentence):
warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail',
'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide',
'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot',
'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']
return list(filter(lambda word: word in warning_word, sentence))
def warnings_count(url):
clean_sentence = cleaning_urls_text(url)
length = len(filter_warning_words(clean_sentence))
return (url, length) if length != 0 else None
def most_warnings(urls, look_for):
list_len_tup = list(map(warnings_count, urls))
list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup))
list_len_tup_clean.sort(key = lambda item: item[1], reverse=True)
top_urls = [url for url, length in list_len_tup_clean[:2]]
if len(top_urls) > 1:
print(f"""
We found something sketchy. You might want to check these links:
- {top_urls[0]}
- {top_urls[1]}
""")
elif len(top_urls) == 1:
print(f"""
We found something sketchy. You might want to check this link:
{top_urls[0]}
""")
else:
print(f"We couldn't find anything worrying about {look_for} on Google. Nice!")
# Input correction
def retrieve_name(my_name, companies):
companies_list = []
for i in companies.dropna(subset=['name']).name:
companies_list.append(i)
if my_name in companies_list:
return my_name
elif len(get_close_matches(my_name, companies_list)) > 0:
|
def retrieve_sector(my_sector, investments):
investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list'])
sector_list0 = []
sector_list = []
for item in investments['company_category_list']:
if ',' in item:
sector_list0.append(item.split(sep=', '))
else:
sector_list0.append(item)
for i in sector_list0:
if type(i) == list:
for sec in i:
sector_list.append(sec)
else:
sector_list.append(i)
if my_sector in sector_list:
return my_sector
elif len(get_close_matches(my_sector, sector_list)) > 0:
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0])
if (action == "y"):
return get_close_matches(my_sector, sector_list)[0]
else:
return my_sector
# Sentiment analysis tweeter
def tw_sent_sector(public_tweets, sector):
sentiment_list = []
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
sentiment_list.append(analysis.sentiment[0])
if sum(sentiment_list)>0:
sent = 'Positive'
elif sum(sentiment_list)<0:
sent = 'Negative'
else:
sent = 'Neutral'
print(f"The sentiment about {sector} industry in Twitter is {sent}")
# Sentiment analysis news
def news_sentiment_sector(public_news, sector):
news_list = []
for piece in range(len(public_news['articles'])):
news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0])
news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0])
if sum(news_list)>0:
news_sent = 'Positive'
elif sum(news_list)<0:
news_sent = 'Negative'
else:
news_sent = 'Neutral'
print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}")
# Look for data about sector
def category(sector, investments):
# Gather tweets
public_tweets = api.search(sector)
# Gather news
public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en')
# Prepare the data for the sector
investments = investments.dropna(subset=['company_category_list'])
sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1)
sector_investments.reset_index(drop=True)
sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])
sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year )
sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month )
sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day )
# Sentiment analysis Twitter
tw_sent_sector(public_tweets, sector)
# Sentiment analysis News
news_sentiment_sector(public_news, sector)
# create plot
sector_year = sector_investments.groupby(['Year']).sum()[-10:]
movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100)
if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0:
in_dec = 'increased'
grow = 'growing'
else:
in_dec = 'decreased'
grow = 'falling'
movement = movement[1:]
sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')
investments_per_year = sector_investments.groupby(['Year']).count()
peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list()
peak_amount = max(sector_year.raised_amount_usd)
#peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list()
low_amount = min(sector_year.raised_amount_usd)
most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)
low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list()
format_doll = ',.2f'
print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years.
It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.
""")
plt.ylabel('Raised amount in USD')
plt.show()
sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')
plt.ylabel('Number of investments')
#print("""Plot explanaition average investment
""")
plt.show()
#print(f"""
# The Top 3 companies with biggest investments are:
#- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised,
#- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and
#- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised
#""")
# Sentiment analysis founder
def tw_analysis | action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_name, companies_list)[0])
if (action == "y"):
return get_close_matches(my_name, companies_list)[0]
elif (action == "n"):
return my_name
else:
return("we don't understand you. Apologies.") | conditional_block |
train.py | , \
restore_checkpoint, print_para, restore_best_checkpoint
import logging
from tensorboardX import SummaryWriter
import json
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
# This is needed to make the imports work
from allennlp.models import Model
import models
torch.backends.cudnn.enabled = False
torch.set_printoptions(threshold=500000000, linewidth=8000000)
#################################
#################################
######## Data loading stuff
#################################
#################################
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-params',
dest='params',
help='Params location',
type=str,
)
parser.add_argument(
'-rationale',
action="store_true",
help='use rationale',
)
parser.add_argument(
'-output',
type=str
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-batch_size',
dest='batch_size',
type=int,
default=96
)
parser.add_argument(
'-records',
type=str,
default='records.json'
)
parser.add_argument(
'-describe',
type=str,
default=''
)
args = parser.parse_args()
seed = 1111
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
batch_size = args.batch_size
only_use_relevant_dets = False
# args.rationale = True
# args.params = 'models/multiatt/default2.json'
folder = f'saves/{args.output}'
writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}')
params = Params.from_file(args.params)
train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets',
only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box
# NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4
NUM_GPUS = 1
NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if NUM_GPUS > 1:
return td
for k in td:
if k != 'metadata':
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[
k].cuda(
non_blocking=True)
return td
# num_workers = (8 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS)
num_workers = 8
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True}
# train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers)
# val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
# test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
test_loader = VCRLoader.from_dataset(test, **loader_params)
# train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4)
# val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4)
# test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4)
ARGS_RESET_EVERY = 600
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'),
flush=True)
model = Model.from_params(vocab=train.vocab, params=params['model'])
model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad],
params=params['trainer']['optimizer'])
lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None)
scheduler = LearningRateScheduler.from_params(optimizer=optimizer,
params=lr_scheduler_params) if lr_scheduler_params else None
if os.path.exists(folder):
print("Found folder! restoring", flush=True)
start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder,
learning_rate_scheduler=scheduler)
# start_epoch, val_metric_per_epoch = 0, []
print(start_epoch)
print(val_metric_per_epoch)
else:
print("Making directories")
os.makedirs(folder, exist_ok=True)
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.params, folder)
with open(os.path.join(folder, 'describe.txt'), 'a') as fp:
fp.write(args.describe)
fp.write('\n--------------------------\n')
logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8')
# store best performance of all models in a file
param_shapes = print_para(model)
num_batches = 0
for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10):
train_results = []
norms = []
model.train()
for b, (time_per_batch, batch) in enumerate(
time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
optimizer.zero_grad()
output_dict = model(**batch)
loss = output_dict['loss'].mean()
loss.backward()
num_batches += 1
if scheduler:
scheduler.step_batch(num_batches)
norms.append(
clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
)
optimizer.step()
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
**(model.module if NUM_GPUS > 1 else model).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0),
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join(
param_shapes[['shape', 'size']]).sort_values('norm', ascending=False)
print("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
), flush=True)
logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
))
writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'],
global_step=num_batches)
writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'],
global_step=num_batches)
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
val_probs = []
val_labels = []
val_loss_sum = 0.0
q_att1 = []
a_att1 = []
q_att2 = []
a_att2 = []
model.eval()
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):
with torch.no_grad():
| enate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
val_loss_avg = val_loss_sum / val_labels.shape[0]
val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1))))
if scheduler:
scheduler.step(val_metric_per_epoch[-1 | batch = _to_gpu(batch)
output_dict = model(**batch)
val_probs.append(output_dict['label_probs'].detach().cpu().numpy())
val_labels.append(batch['label'].detach().cpu().numpy())
val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0]
q_att1.append(output_dict['q_att1'])
a_att1.append(output_dict['a_att1'])
q_att2.append(output_dict['q_att2'])
a_att2.append(output_dict['a_att2'])
val_labels = np.concat | conditional_block |
train.py | , \
restore_checkpoint, print_para, restore_best_checkpoint
import logging
from tensorboardX import SummaryWriter
import json
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
# This is needed to make the imports work
from allennlp.models import Model
import models
torch.backends.cudnn.enabled = False
torch.set_printoptions(threshold=500000000, linewidth=8000000)
#################################
#################################
######## Data loading stuff
#################################
#################################
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-params',
dest='params',
help='Params location',
type=str,
)
parser.add_argument(
'-rationale',
action="store_true",
help='use rationale',
)
parser.add_argument(
'-output',
type=str
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-batch_size',
dest='batch_size',
type=int,
default=96
)
parser.add_argument(
'-records',
type=str,
default='records.json'
)
parser.add_argument(
'-describe',
type=str,
default=''
)
args = parser.parse_args()
seed = 1111
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
batch_size = args.batch_size
only_use_relevant_dets = False
# args.rationale = True
# args.params = 'models/multiatt/default2.json'
folder = f'saves/{args.output}'
writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}')
params = Params.from_file(args.params)
train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets',
only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box
# NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4
NUM_GPUS = 1
NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if NUM_GPUS | return td
for k in td:
if k != 'metadata':
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[
k].cuda(
non_blocking=True)
return td
# num_workers = (8 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS)
num_workers = 8
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True}
# train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers)
# val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
# test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
test_loader = VCRLoader.from_dataset(test, **loader_params)
# train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4)
# val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4)
# test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4)
ARGS_RESET_EVERY = 600
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'),
flush=True)
model = Model.from_params(vocab=train.vocab, params=params['model'])
model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad],
params=params['trainer']['optimizer'])
lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None)
scheduler = LearningRateScheduler.from_params(optimizer=optimizer,
params=lr_scheduler_params) if lr_scheduler_params else None
if os.path.exists(folder):
print("Found folder! restoring", flush=True)
start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder,
learning_rate_scheduler=scheduler)
# start_epoch, val_metric_per_epoch = 0, []
print(start_epoch)
print(val_metric_per_epoch)
else:
print("Making directories")
os.makedirs(folder, exist_ok=True)
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.params, folder)
with open(os.path.join(folder, 'describe.txt'), 'a') as fp:
fp.write(args.describe)
fp.write('\n--------------------------\n')
logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8')
# store best performance of all models in a file
param_shapes = print_para(model)
num_batches = 0
for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10):
train_results = []
norms = []
model.train()
for b, (time_per_batch, batch) in enumerate(
time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
optimizer.zero_grad()
output_dict = model(**batch)
loss = output_dict['loss'].mean()
loss.backward()
num_batches += 1
if scheduler:
scheduler.step_batch(num_batches)
norms.append(
clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
)
optimizer.step()
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
**(model.module if NUM_GPUS > 1 else model).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0),
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join(
param_shapes[['shape', 'size']]).sort_values('norm', ascending=False)
print("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
), flush=True)
logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
))
writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'],
global_step=num_batches)
writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'],
global_step=num_batches)
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
val_probs = []
val_labels = []
val_loss_sum = 0.0
q_att1 = []
a_att1 = []
q_att2 = []
a_att2 = []
model.eval()
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):
with torch.no_grad():
batch = _to_gpu(batch)
output_dict = model(**batch)
val_probs.append(output_dict['label_probs'].detach().cpu().numpy())
val_labels.append(batch['label'].detach().cpu().numpy())
val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0]
q_att1.append(output_dict['q_att1'])
a_att1.append(output_dict['a_att1'])
q_att2.append(output_dict['q_att2'])
a_att2.append(output_dict['a_att2'])
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
val_loss_avg = val_loss_sum / val_labels.shape[0]
val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1))))
if scheduler:
scheduler.step(val_metric_per_epoch[-1])
| > 1:
| identifier_name |
train.py | , \
restore_checkpoint, print_para, restore_best_checkpoint
import logging
from tensorboardX import SummaryWriter
import json
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
# This is needed to make the imports work
from allennlp.models import Model
import models
torch.backends.cudnn.enabled = False
torch.set_printoptions(threshold=500000000, linewidth=8000000)
#################################
#################################
######## Data loading stuff
#################################
#################################
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-params',
dest='params',
help='Params location',
type=str,
)
parser.add_argument(
'-rationale',
action="store_true",
help='use rationale',
)
parser.add_argument(
'-output',
type=str
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-batch_size',
dest='batch_size',
type=int,
default=96
)
parser.add_argument(
'-records',
type=str,
default='records.json'
)
parser.add_argument(
'-describe',
type=str,
default=''
)
args = parser.parse_args()
seed = 1111
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
batch_size = args.batch_size
only_use_relevant_dets = False
# args.rationale = True
# args.params = 'models/multiatt/default2.json'
folder = f'saves/{args.output}'
writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}')
params = Params.from_file(args.params)
train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets',
only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box
# NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4
NUM_GPUS = 1
NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if NUM_GPUS > 1:
ret | GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS)
num_workers = 8
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True}
# train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers)
# val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
# test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
test_loader = VCRLoader.from_dataset(test, **loader_params)
# train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4)
# val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4)
# test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4)
ARGS_RESET_EVERY = 600
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'),
flush=True)
model = Model.from_params(vocab=train.vocab, params=params['model'])
model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad],
params=params['trainer']['optimizer'])
lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None)
scheduler = LearningRateScheduler.from_params(optimizer=optimizer,
params=lr_scheduler_params) if lr_scheduler_params else None
if os.path.exists(folder):
print("Found folder! restoring", flush=True)
start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder,
learning_rate_scheduler=scheduler)
# start_epoch, val_metric_per_epoch = 0, []
print(start_epoch)
print(val_metric_per_epoch)
else:
print("Making directories")
os.makedirs(folder, exist_ok=True)
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.params, folder)
with open(os.path.join(folder, 'describe.txt'), 'a') as fp:
fp.write(args.describe)
fp.write('\n--------------------------\n')
logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8')
# store best performance of all models in a file
param_shapes = print_para(model)
num_batches = 0
for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10):
train_results = []
norms = []
model.train()
for b, (time_per_batch, batch) in enumerate(
time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
optimizer.zero_grad()
output_dict = model(**batch)
loss = output_dict['loss'].mean()
loss.backward()
num_batches += 1
if scheduler:
scheduler.step_batch(num_batches)
norms.append(
clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
)
optimizer.step()
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
**(model.module if NUM_GPUS > 1 else model).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0),
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join(
param_shapes[['shape', 'size']]).sort_values('norm', ascending=False)
print("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
), flush=True)
logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
))
writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'],
global_step=num_batches)
writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'],
global_step=num_batches)
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
val_probs = []
val_labels = []
val_loss_sum = 0.0
q_att1 = []
a_att1 = []
q_att2 = []
a_att2 = []
model.eval()
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):
with torch.no_grad():
batch = _to_gpu(batch)
output_dict = model(**batch)
val_probs.append(output_dict['label_probs'].detach().cpu().numpy())
val_labels.append(batch['label'].detach().cpu().numpy())
val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0]
q_att1.append(output_dict['q_att1'])
a_att1.append(output_dict['a_att1'])
q_att2.append(output_dict['q_att2'])
a_att2.append(output_dict['a_att2'])
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
val_loss_avg = val_loss_sum / val_labels.shape[0]
val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1))))
if scheduler:
scheduler.step(val_metric_per_epoch[-1 | urn td
for k in td:
if k != 'metadata':
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[
k].cuda(
non_blocking=True)
return td
# num_workers = (8 * NUM_ | identifier_body |
train.py | _norm, \
restore_checkpoint, print_para, restore_best_checkpoint
import logging
from tensorboardX import SummaryWriter
import json
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
# This is needed to make the imports work
from allennlp.models import Model
import models
torch.backends.cudnn.enabled = False
torch.set_printoptions(threshold=500000000, linewidth=8000000)
#################################
#################################
######## Data loading stuff
#################################
#################################
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-params',
dest='params',
help='Params location',
type=str,
)
parser.add_argument(
'-rationale',
action="store_true",
help='use rationale',
)
parser.add_argument(
'-output',
type=str
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-batch_size',
dest='batch_size',
type=int,
default=96
)
parser.add_argument(
'-records',
type=str,
default='records.json'
)
parser.add_argument(
'-describe',
type=str,
default=''
)
args = parser.parse_args()
seed = 1111
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
batch_size = args.batch_size
only_use_relevant_dets = False
# args.rationale = True
# args.params = 'models/multiatt/default2.json'
folder = f'saves/{args.output}'
writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}')
params = Params.from_file(args.params)
train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets', | NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if NUM_GPUS > 1:
return td
for k in td:
if k != 'metadata':
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[
k].cuda(
non_blocking=True)
return td
# num_workers = (8 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS)
num_workers = 8
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True}
# train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers)
# val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
# test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
test_loader = VCRLoader.from_dataset(test, **loader_params)
# train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4)
# val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4)
# test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4)
ARGS_RESET_EVERY = 600
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'),
flush=True)
model = Model.from_params(vocab=train.vocab, params=params['model'])
model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad],
params=params['trainer']['optimizer'])
lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None)
scheduler = LearningRateScheduler.from_params(optimizer=optimizer,
params=lr_scheduler_params) if lr_scheduler_params else None
if os.path.exists(folder):
print("Found folder! restoring", flush=True)
start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder,
learning_rate_scheduler=scheduler)
# start_epoch, val_metric_per_epoch = 0, []
print(start_epoch)
print(val_metric_per_epoch)
else:
print("Making directories")
os.makedirs(folder, exist_ok=True)
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.params, folder)
with open(os.path.join(folder, 'describe.txt'), 'a') as fp:
fp.write(args.describe)
fp.write('\n--------------------------\n')
logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8')
# store best performance of all models in a file
param_shapes = print_para(model)
num_batches = 0
for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10):
train_results = []
norms = []
model.train()
for b, (time_per_batch, batch) in enumerate(
time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
optimizer.zero_grad()
output_dict = model(**batch)
loss = output_dict['loss'].mean()
loss.backward()
num_batches += 1
if scheduler:
scheduler.step_batch(num_batches)
norms.append(
clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
)
optimizer.step()
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
**(model.module if NUM_GPUS > 1 else model).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0),
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join(
param_shapes[['shape', 'size']]).sort_values('norm', ascending=False)
print("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
), flush=True)
logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
))
writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'],
global_step=num_batches)
writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'],
global_step=num_batches)
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
val_probs = []
val_labels = []
val_loss_sum = 0.0
q_att1 = []
a_att1 = []
q_att2 = []
a_att2 = []
model.eval()
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):
with torch.no_grad():
batch = _to_gpu(batch)
output_dict = model(**batch)
val_probs.append(output_dict['label_probs'].detach().cpu().numpy())
val_labels.append(batch['label'].detach().cpu().numpy())
val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0]
q_att1.append(output_dict['q_att1'])
a_att1.append(output_dict['a_att1'])
q_att2.append(output_dict['q_att2'])
a_att2.append(output_dict['a_att2'])
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
val_loss_avg = val_loss_sum / val_labels.shape[0]
val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1))))
if scheduler:
scheduler.step(val_metric_per_epoch[-1])
| only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box
# NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4
NUM_GPUS = 1 | random_line_split |
lab.py | ]['lon'] = node['lon']
return nodes
class Heap:
def __init__(self, prop, start=None, start_item=None):
self.property = 'min' if (prop == 'min') else 'max' # Heap property
self.heap = [] # List representation of the heap
self.items = [] # A list of the items corresponding to each index in the heap
self.size = 0
if isinstance(start, list):
self.heap = start[:]
self.items = start_item if start_item is not None else [None] * len(start)
self.size = len(self.heap)
for i in range(len(start) // 2, -1, -1):
# Second half of the heap is comprised entirely of leaves, so we know it fulfills our heap property
# We loop backwards over the array and max heapify down so we always maintain our heap property
# at every index after i
self.heapify_down(i)
elif start is not None:
self.add(start, start_item)
self.size = 1
def parent(self, i):
# Returns the index of i's parent if it has one
return (i + 1) // 2 - 1 if i > 0 else i
def left(self, i):
# Returns the index of i's left child if it has one
return 2 * i + 1 if i < self.size else i
def right(self, i):
# Returns the index of i's right child if it has one
return 2 * (i + 1) if i < self.size else i
def add(self, val, item=None):
# Add value to heap
self.heap.append(val)
self.items.append(item)
self.size += 1
self.heapify_up(self.size - 1)
def next(self):
# Get the value at the top of the heap
if self.size > 0:
if self.size == 1:
self.size -= 1
return self.heap.pop(0), self.items.pop(0)
else:
# Swap element at the top of the heap to the end
self.swap(0, self.size - 1)
top = self.heap.pop(self.size - 1)
top_item = self.items.pop(self.size - 1)
self.size -= 1
self.heapify_down(0) # Heapify from the top
return top, top_item
def heapify_up(self, i):
# Assume everything below i fulfills the heap property, shift value at index i up until
# our heap property is fulfilled across the entire heap
p = self.parent(i)
if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or
(self.property == 'min' and self.heap[i] < self.heap[p])):
# If node i violates this heap's heap property, swap it with its parent, then check again:
self.swap(i, p)
self.heapify_up(p)
def heapify_down(self, p):
# Assume everything below p fulfills the heap property, shift value at index p down until
# our heap property is fulfilled across the entire heap
l, r = self.left(p), self.right(p)
if l >= self.size:
# If p has no children, we do nothing
return
if self.property == 'max':
c = l if r >= self.size or self.heap[l] > self.heap[r] else r
if self.heap[p] < self.heap[c]:
# If node p violates this heap's max heap property, swap it with its larger child, then check again:
self.swap(p, c)
self.heapify_down(c)
else:
# if property == 'min'
c = l if r >= self.size or self.heap[l] < self.heap[r] else r
if self.heap[p] > self.heap[c]:
# If node p violates this heap's min heap property, swap it with its smaller child, then check again:
self.swap(p, c)
self.heapify_down(c)
def swap(self, a, b):
# Swaps the elements of heap and items at indices a and b
self.heap[a], self.heap[b] = self.heap[b], self.heap[a]
self.items[a], self.items[b] = self.items[b], self.items[a]
def empty(self):
# Returns true if this heap has no elements
return self.size == 0
def __str__(self):
# Returns the heap in the form of a list
return str(self.heap)
def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0):
"""haha, uniform cost search go brrr"""
paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost)
seen = set() # Set of nodes we've already found shorter paths to
# T H E S E A R C H L O O P B E G I N S
while not paths.empty():
next_path = paths.next() # get the minimum cost path (heuristic cost, (path, path cost))
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
while terminal_node in seen:
# If we've already found a path to the same node with a lower cost, we pick a new next_path
if paths.empty():
# If we run out of paths to search, we return nothing
return None
next_path = paths.next()
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
if is_goal(terminal_node):
return min_cost_path
seen.add(terminal_node)
children = get_children(terminal_node)
for c in children:
if c not in seen:
# If this child does not have an existing path to it already, we build a
# data structure for it and at it to our min heap
path_to_c = min_cost_path + [c]
c_cost = min_cost + cost(data, terminal_node, c)
c_heuristic = c_cost + heuristic(c)
paths.add(c_heuristic, (path_to_c, c_cost))
# T H E S E A R C H L O O P E N D S
return None # We failed to find a path to the goal node. Very sad. Return nothing :(
def get_dist_cost(data, start_node_id, end_node_id):
"""
Calculates the cost of the direct path (which is assume to exist) between
specified start and end nodes based on the distance between them.
Parameters:
data: The auxiliary data structure (a dictionary) that stores information
about nodes and the ways that connect them
start_node_id: The integer id of the start node in data
end_node_id: The integer id of the end node in data
"""
p1 = get_coords(data, start_node_id)
p2 = get_coords(data, end_node_id)
return great_circle_distance(p1, p2)
def get_coords(data, id):
"""
Returns the GPS coordinates of a node in the form of a (lat, lon) tuple given its id number
"""
return data[id]['lat'], data[id]['lon']
def find_short_path_nodes(aux_structures, node1, node2):
"""
Return the shortest path between the two nodes
Parameters:
aux_structures: the result of calling build_auxiliary_structures
node1: node representing the start location
node2: node representing the end location
Returns:
a list of node IDs representing the shortest path (in terms of
distance) from node1 to node2
"""
p = find_min_cost_path(
aux_structures,
node1,
lambda x: x == node2,
lambda parent_id: aux_structures[parent_id]['adjacent'],
get_dist_cost,
lambda x: gcd_heuristic(aux_structures, x, node2))
return list(p) if p is not None else None
def gcd_heuristic(data, node1, node2):
return great_circle_distance(get_coords(data, node1), get_coords(data, node2))
def find_short_path(aux_structures, loc1, loc2):
"""
Retu | rn the shortest path between the two locations
Parameters:
aux_structures: the result of calling build_auxiliary_structures
loc1: tuple of 2 floats: (latitude, longitude), representing the start
location
loc2: tuple of 2 floats: (latitude, longitude), representing the end
location
Returns:
a list of (latitude, longitude) tuples representing the shortest path
(in terms of distance) from loc1 to loc2.
"""
node1 = get_closest_node(aux_structures, loc1)
node2 = get_closest_node(aux_structures, loc2)
p = find_min_cost_path(
aux_structures,
node1,
lambda x: x == node2,
lambda parent_id: aux_structures[parent_id]['adjacent'], | identifier_body | |
lab.py | so we always maintain our heap property
# at every index after i
self.heapify_down(i)
elif start is not None:
self.add(start, start_item)
self.size = 1
def parent(self, i):
# Returns the index of i's parent if it has one
return (i + 1) // 2 - 1 if i > 0 else i
def left(self, i):
# Returns the index of i's left child if it has one
return 2 * i + 1 if i < self.size else i
def right(self, i):
# Returns the index of i's right child if it has one
return 2 * (i + 1) if i < self.size else i
def add(self, val, item=None):
# Add value to heap
self.heap.append(val)
self.items.append(item)
self.size += 1
self.heapify_up(self.size - 1)
def next(self):
# Get the value at the top of the heap
if self.size > 0:
if self.size == 1:
self.size -= 1
return self.heap.pop(0), self.items.pop(0)
else:
# Swap element at the top of the heap to the end
self.swap(0, self.size - 1)
top = self.heap.pop(self.size - 1)
top_item = self.items.pop(self.size - 1)
self.size -= 1
self.heapify_down(0) # Heapify from the top
return top, top_item
def heapify_up(self, i):
# Assume everything below i fulfills the heap property, shift value at index i up until
# our heap property is fulfilled across the entire heap
p = self.parent(i)
if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or
(self.property == 'min' and self.heap[i] < self.heap[p])):
# If node i violates this heap's heap property, swap it with its parent, then check again:
self.swap(i, p)
self.heapify_up(p)
def heapify_down(self, p):
# Assume everything below p fulfills the heap property, shift value at index p down until
# our heap property is fulfilled across the entire heap
l, r = self.left(p), self.right(p)
if l >= self.size:
# If p has no children, we do nothing
return
if self.property == 'max':
c = l if r >= self.size or self.heap[l] > self.heap[r] else r
if self.heap[p] < self.heap[c]:
# If node p violates this heap's max heap property, swap it with its larger child, then check again:
self.swap(p, c)
self.heapify_down(c)
else:
# if property == 'min'
c = l if r >= self.size or self.heap[l] < self.heap[r] else r
if self.heap[p] > self.heap[c]:
# If node p violates this heap's min heap property, swap it with its smaller child, then check again:
self.swap(p, c)
self.heapify_down(c)
def swap(self, a, b):
# Swaps the elements of heap and items at indices a and b
self.heap[a], self.heap[b] = self.heap[b], self.heap[a]
self.items[a], self.items[b] = self.items[b], self.items[a]
def empty(self):
# Returns true if this heap has no elements
return self.size == 0
def __str__(self):
# Returns the heap in the form of a list
return str(self.heap)
def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0):
"""haha, uniform cost search go brrr"""
paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost)
seen = set() # Set of nodes we've already found shorter paths to
# T H E S E A R C H L O O P B E G I N S
while not paths.empty():
next_path = paths.next() # get the minimum cost path (heuristic cost, (path, path cost))
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
while terminal_node in seen:
# If we've already found a path to the same node with a lower cost, we pick a new next_path
if paths.empty():
# If we run out of paths to search, we return nothing
return None
next_path = paths.next()
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
if is_goal(terminal_node):
return min_cost_path
seen.add(terminal_node)
children = get_children(terminal_node)
for c in children:
if c not in seen:
# If this child does not have an existing path to it already, we build a
# data structure for it and at it to our min heap
path_to_c = min_cost_path + [c]
c_cost = min_cost + cost(data, terminal_node, c)
c_heuristic = c_cost + heuristic(c)
paths.add(c_heuristic, (path_to_c, c_cost))
# T H E S E A R C H L O O P E N D S
return None # We failed to find a path to the goal node. Very sad. Return nothing :(
def get_dist_cost(data, start_node_id, end_node_id):
"""
Calculates the cost of the direct path (which is assume to exist) between
specified start and end nodes based on the distance between them.
Parameters:
data: The auxiliary data structure (a dictionary) that stores information
about nodes and the ways that connect them
start_node_id: The integer id of the start node in data
end_node_id: The integer id of the end node in data
"""
p1 = get_coords(data, start_node_id)
p2 = get_coords(data, end_node_id)
return great_circle_distance(p1, p2)
def get_coords(data, id):
"""
Returns the GPS coordinates of a node in the form of a (lat, lon) tuple given its id number
"""
return data[id]['lat'], data[id]['lon']
def find_short_path_nodes(aux_structures, node1, node2):
"""
Return the shortest path between the two nodes
Parameters:
aux_structures: the result of calling build_auxiliary_structures
node1: node representing the start location
node2: node representing the end location
Returns:
a list of node IDs representing the shortest path (in terms of
distance) from node1 to node2
"""
p = find_min_cost_path(
aux_structures,
node1,
lambda x: x == node2,
lambda parent_id: aux_structures[parent_id]['adjacent'],
get_dist_cost,
lambda x: gcd_heuristic(aux_structures, x, node2))
return list(p) if p is not None else None
def gcd_heuristic(data, node1, node2):
return great_circle_distance(get_coords(data, node1), get_coords(data, node2))
def find_short_path(aux_structures, loc1, loc2):
"""
Return the shortest path between the two locations
Parameters:
aux_structures: the result of calling build_auxiliary_structures
loc1: tuple of 2 floats: (latitude, longitude), representing the start
location
loc2: tuple of 2 floats: (latitude, longitude), representing the end
location
Returns:
a list of (latitude, longitude) tuples representing the shortest path
(in terms of distance) from loc1 to loc2.
"""
node1 = get_closest_node(aux_structures, loc1)
node2 = get_closest_node(aux_structures, loc2)
p = find_min_cost_path(
aux_structures,
node1,
lambda x: x == node2,
lambda parent_id: aux_structures[parent_id]['adjacent'],
get_dist_cost,
lambda x: gcd_heuristic(aux_structures, x, node2))
return get_coord_list(aux_structures, p) if p is not None else None
def get_closest_node(data, loc):
"""
Calculates the closest node in the given dataset to a specified query location
Parameters:
data: The auxiliary data structure (a dictionary) that stores information
about nodes and the ways that connect them
loc: The query location, given in terms of a tuple of two floats (latitude, longitude)
"""
min_dist = None
closest = None
for i in data:
# Standard min-value search loop
dist = great_circle_distance(get_coords(data, i), loc)
if closest is None or dist < min_dist:
closest = i
min_dist = dist
return closest
def get_coord_li | st(data, ids): | identifier_name | |
lab.py |
def build_auxiliary_structures(nodes_filename, ways_filename):
"""
Create any auxiliary structures you are interested in, by reading the data
from the given filenames (using read_osm_data)
"""
nodes = {}
for way in read_osm_data(ways_filename):
highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)')
if highway_type in ALLOWED_HIGHWAY_TYPES:
nodes_along_way = way['nodes'] # List of nodes along this way
for i in range(len(nodes_along_way) - 1):
# A pair of adjacent nodes along this way
left = nodes_along_way[i]
right = nodes_along_way[i + 1]
default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type]
# If this way doesn't have a speed limit tag, we use the default value based on highway type
speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit)
def build_data(root, adjacent):
"""
root: ID of some node along way
adjacent: ID of some node adjacent to root node along way
"""
new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure
root_data = nodes.get(root, new_node_data_struct)
# There might be another way where root and adjacent are directly adjacent, so our
# speed limit is the max of the speed limits of those two ways:
root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit)
nodes[root] = root_data # Add the data on root to our dictionary of node data
build_data(left, right)
if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes':
# If this isn't a oneway way, we can build the data structure for the next node as well
build_data(right, left)
elif right == nodes_along_way[-1]:
# In non-oneway ways, the above build_data(right, left) call creates the data structure
# for the final node at the same time as the penultimate one. However, in the case of a
# oneway path, we have to do it manually:
nodes[right] = nodes.get(right, {'adjacent': {}})
for node in read_osm_data(nodes_filename):
id = node['id']
if id in nodes:
# If the id of this node in the generator was on a valid way, we add the data about that node
# to its dictionary in nodes.
# Add lat/lon data
nodes[id]['lat'] = node['lat']
nodes[id]['lon'] = node['lon']
return nodes
class Heap:
def __init__(self, prop, start=None, start_item=None):
self.property = 'min' if (prop == 'min') else 'max' # Heap property
self.heap = [] # List representation of the heap
self.items = [] # A list of the items corresponding to each index in the heap
self.size = 0
if isinstance(start, list):
self.heap = start[:]
self.items = start_item if start_item is not None else [None] * len(start)
self.size = len(self.heap)
for i in range(len(start) // 2, -1, -1):
# Second half of the heap is comprised entirely of leaves, so we know it fulfills our heap property
# We loop backwards over the array and max heapify down so we always maintain our heap property
# at every index after i
self.heapify_down(i)
elif start is not None:
self.add(start, start_item)
self.size = 1
def parent(self, i):
# Returns the index of i's parent if it has one
return (i + 1) // 2 - 1 if i > 0 else i
def left(self, i):
# Returns the index of i's left child if it has one
return 2 * i + 1 if i < self.size else i
def right(self, i):
# Returns the index of i's right child if it has one
return 2 * (i + 1) if i < self.size else i
def add(self, val, item=None):
# Add value to heap
self.heap.append(val)
self.items.append(item)
self.size += 1
self.heapify_up(self.size - 1)
def next(self):
# Get the value at the top of the heap
if self.size > 0:
if self.size == 1:
self.size -= 1
return self.heap.pop(0), self.items.pop(0)
else:
# Swap element at the top of the heap to the end
self.swap(0, self.size - 1)
top = self.heap.pop(self.size - 1)
top_item = self.items.pop(self.size - 1)
self.size -= 1
self.heapify_down(0) # Heapify from the top
return top, top_item
def heapify_up(self, i):
# Assume everything below i fulfills the heap property, shift value at index i up until
# our heap property is fulfilled across the entire heap
p = self.parent(i)
if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or
(self.property == 'min' and self.heap[i] < self.heap[p])):
# If node i violates this heap's heap property, swap it with its parent, then check again:
self.swap(i, p)
self.heapify_up(p)
def heapify_down(self, p):
# Assume everything below p fulfills the heap property, shift value at index p down until
# our heap property is fulfilled across the entire heap
l, r = self.left(p), self.right(p)
if l >= self.size:
# If p has no children, we do nothing
return
if self.property == 'max':
c = l if r >= self.size or self.heap[l] > self.heap[r] else r
if self.heap[p] < self.heap[c]:
# If node p violates this heap's max heap property, swap it with its larger child, then check again:
self.swap(p, c)
self.heapify_down(c)
else:
# if property == 'min'
c = l if r >= self.size or self.heap[l] < self.heap[r] else r
if self.heap[p] > self.heap[c]:
# If node p violates this heap's min heap property, swap it with its smaller child, then check again:
self.swap(p, c)
self.heapify_down(c)
def swap(self, a, b):
# Swaps the elements of heap and items at indices a and b
self.heap[a], self.heap[b] = self.heap[b], self.heap[a]
self.items[a], self.items[b] = self.items[b], self.items[a]
def empty(self):
# Returns true if this heap has no elements
return self.size == 0
def __str__(self):
# Returns the heap in the form of a list
return str(self.heap)
def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0):
"""haha, uniform cost search go brrr"""
paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost)
seen = set() # Set of nodes we've already found shorter paths to
# T H E S E A R C H L O O P B E G I N S
while not paths.empty():
next_path = paths.next() # get the minimum cost path (heuristic cost, (path, path cost))
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
while terminal_node in seen:
# If we've already found a path to the same node with a lower cost, we pick a new next_path
if paths.empty():
# If we run out of paths to search, we return nothing
return None
next_path = paths.next()
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
if is_goal(terminal_node):
return min_cost_path
seen.add(terminal_node)
children = get_children(terminal_node)
for c in children:
if c not in seen:
# If this child does not have an existing path to it already, we build a
# data structure for it and at it to our min heap
path_to_c = min_cost_path + [c]
c_cost = min_cost + cost(data, terminal_node, c)
c_heuristic = c_cost + heuristic(c)
paths.add(c_heuristic, (path_to_c, c_cost))
# T H E S E A R C H L O | random_line_split | ||
lab.py | # If this isn't a oneway way, we can build the data structure for the next node as well
build_data(right, left)
elif right == nodes_along_way[-1]:
# In non-oneway ways, the above build_data(right, left) call creates the data structure
# for the final node at the same time as the penultimate one. However, in the case of a
# oneway path, we have to do it manually:
nodes[right] = nodes.get(right, {'adjacent': {}})
for no
de in read_osm_data(nodes_filename):
id = node['id']
if id in nodes:
# If the id of this node in the generator was on a valid way, we add the data about that node
# to its dictionary in nodes.
# Add lat/lon data
nodes[id]['lat'] = node['lat']
nodes[id]['lon'] = node['lon']
return nodes
class Heap:
def __init__(self, prop, start=None, start_item=None):
self.property = 'min' if (prop == 'min') else 'max' # Heap property
self.heap = [] # List representation of the heap
self.items = [] # A list of the items corresponding to each index in the heap
self.size = 0
if isinstance(start, list):
self.heap = start[:]
self.items = start_item if start_item is not None else [None] * len(start)
self.size = len(self.heap)
for i in range(len(start) // 2, -1, -1):
# Second half of the heap is comprised entirely of leaves, so we know it fulfills our heap property
# We loop backwards over the array and max heapify down so we always maintain our heap property
# at every index after i
self.heapify_down(i)
elif start is not None:
self.add(start, start_item)
self.size = 1
def parent(self, i):
# Returns the index of i's parent if it has one
return (i + 1) // 2 - 1 if i > 0 else i
def left(self, i):
# Returns the index of i's left child if it has one
return 2 * i + 1 if i < self.size else i
def right(self, i):
# Returns the index of i's right child if it has one
return 2 * (i + 1) if i < self.size else i
def add(self, val, item=None):
# Add value to heap
self.heap.append(val)
self.items.append(item)
self.size += 1
self.heapify_up(self.size - 1)
def next(self):
# Get the value at the top of the heap
if self.size > 0:
if self.size == 1:
self.size -= 1
return self.heap.pop(0), self.items.pop(0)
else:
# Swap element at the top of the heap to the end
self.swap(0, self.size - 1)
top = self.heap.pop(self.size - 1)
top_item = self.items.pop(self.size - 1)
self.size -= 1
self.heapify_down(0) # Heapify from the top
return top, top_item
def heapify_up(self, i):
# Assume everything below i fulfills the heap property, shift value at index i up until
# our heap property is fulfilled across the entire heap
p = self.parent(i)
if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or
(self.property == 'min' and self.heap[i] < self.heap[p])):
# If node i violates this heap's heap property, swap it with its parent, then check again:
self.swap(i, p)
self.heapify_up(p)
def heapify_down(self, p):
# Assume everything below p fulfills the heap property, shift value at index p down until
# our heap property is fulfilled across the entire heap
l, r = self.left(p), self.right(p)
if l >= self.size:
# If p has no children, we do nothing
return
if self.property == 'max':
c = l if r >= self.size or self.heap[l] > self.heap[r] else r
if self.heap[p] < self.heap[c]:
# If node p violates this heap's max heap property, swap it with its larger child, then check again:
self.swap(p, c)
self.heapify_down(c)
else:
# if property == 'min'
c = l if r >= self.size or self.heap[l] < self.heap[r] else r
if self.heap[p] > self.heap[c]:
# If node p violates this heap's min heap property, swap it with its smaller child, then check again:
self.swap(p, c)
self.heapify_down(c)
def swap(self, a, b):
# Swaps the elements of heap and items at indices a and b
self.heap[a], self.heap[b] = self.heap[b], self.heap[a]
self.items[a], self.items[b] = self.items[b], self.items[a]
def empty(self):
# Returns true if this heap has no elements
return self.size == 0
def __str__(self):
# Returns the heap in the form of a list
return str(self.heap)
def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0):
"""haha, uniform cost search go brrr"""
paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost)
seen = set() # Set of nodes we've already found shorter paths to
# T H E S E A R C H L O O P B E G I N S
while not paths.empty():
next_path = paths.next() # get the minimum cost path (heuristic cost, (path, path cost))
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
while terminal_node in seen:
# If we've already found a path to the same node with a lower cost, we pick a new next_path
if paths.empty():
# If we run out of paths to search, we return nothing
return None
next_path = paths.next()
min_cost_path = next_path[1][0]
min_cost = next_path[1][1]
terminal_node = min_cost_path[-1]
if is_goal(terminal_node):
return min_cost_path
seen.add(terminal_node)
children = get_children(terminal_node)
for c in children:
if c not in seen:
# If this child does not have an existing path to it already, we build a
# data structure for it and at it to our min heap
path_to_c = min_cost_path + [c]
c_cost = min_cost + cost(data, terminal_node, c)
c_heuristic = c_cost + heuristic(c)
paths.add(c_heuristic, (path_to_c, c_cost))
# T H E S E A R C H L O O P E N D S
return None # We failed to find a path to the goal node. Very sad. Return nothing :(
def get_dist_cost(data, start_node_id, end_node_id):
"""
Calculates the cost of the direct path (which is assume to exist) between
specified start and end nodes based on the distance between them.
Parameters:
data: The auxiliary data structure (a dictionary) that stores information
about nodes and the ways that connect them
start_node_id: The integer id of the start node in data
end_node_id: The integer id of the end node in data
"""
p1 = get_coords(data | nodes_along_way[i]
right = nodes_along_way[i + 1]
default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type]
# If this way doesn't have a speed limit tag, we use the default value based on highway type
speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit)
def build_data(root, adjacent):
"""
root: ID of some node along way
adjacent: ID of some node adjacent to root node along way
"""
new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure
root_data = nodes.get(root, new_node_data_struct)
# There might be another way where root and adjacent are directly adjacent, so our
# speed limit is the max of the speed limits of those two ways:
root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit)
nodes[root] = root_data # Add the data on root to our dictionary of node data
build_data(left, right)
if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes': | conditional_block | |
cassandra.go | _ orm.Connector = (*cassandraConnector)(nil)
// getGocqlErrorTag gets a error tag for metrics based on gocql error
// We cannot just use err.Error() as a tag because it contains invalid
// characters like = : etc. which will be rejected by M3
func getGocqlErrorTag(err error) string {
if yarpcerrors.IsAlreadyExists(err) {
return "already_exists"
}
if yarpcerrors.IsNotFound(err) {
return "not_found"
}
switch err.(type) {
case *gocql.RequestErrReadFailure:
return "read_failure"
case *gocql.RequestErrWriteFailure:
return "write_failure"
case *gocql.RequestErrAlreadyExists:
return "already_exists"
case *gocql.RequestErrReadTimeout:
return "read_timeout"
case *gocql.RequestErrWriteTimeout:
return "write_timeout"
case *gocql.RequestErrUnavailable:
return "unavailable"
case *gocql.RequestErrFunctionFailure:
return "function_failure"
case *gocql.RequestErrUnprepared:
return "unprepared"
default:
return "unknown"
}
}
// buildResultRow is used to allocate memory for the row to be populated by
// Cassandra read operation based on what object fields are being read
func buildResultRow(e *base.Definition, columns []string) []interface{} {
results := make([]interface{}, len(columns))
timeType := reflect.ValueOf(time.Now())
gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now()))
for i, column := range columns {
// get the type of the field from the ColumnToType mapping for object
// That we we can allocate appropriate memory for this field
typ := e.ColumnToType[column]
switch typ.Kind() {
case reflect.String:
var value *string
results[i] = &value
case reflect.Int32, reflect.Uint32, reflect.Int:
// C* internally uses int and int64
var value *int
results[i] = &value
case reflect.Int64, reflect.Uint64:
// C* internally uses int and int64
var value *int64
results[i] = &value
case reflect.Bool:
var value *bool
results[i] = &value
case reflect.Slice:
var value *[]byte
results[i] = &value
case timeType.Kind():
var value *time.Time
results[i] = &value
case gocqlUUIDType.Kind():
var value *gocql.UUID
results[i] = &value
case reflect.Ptr:
// Special case for custom optional string type:
// string type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalString{}) {
var value *string
results[i] = &value
break
}
// Special case for custom optional int type:
// int64 type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalUInt64{}) {
var value *int64
results[i] = &value
break
}
// for unrecognized pointer types, fall back to default logging
fallthrough
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{"type": typ.Kind(), "column": column}).
Infof("type not found")
}
}
return results
}
// getRowFromResult translates a row read from Cassandra into a list of
// base.Column to be interpreted by base store client
func getRowFromResult(
e *base.Definition, columnNames []string, columnVals []interface{},
) []base.Column {
row := make([]base.Column, 0, len(columnNames))
for i, columnName := range columnNames {
// construct a list of column objects from the lists of column names
// and values that were returned by the cassandra query
column := base.Column{
Name: columnName,
}
switch rv := columnVals[i].(type) {
case **int:
column.Value = *rv
case **int64:
column.Value = *rv
case **string:
column.Value = *rv
case **gocql.UUID:
column.Value = *rv
case **time.Time:
column.Value = *rv
case **bool:
column.Value = *rv
case **[]byte:
column.Value = *rv
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{
"data": columnVals[i],
"column": columnName}).Infof("type not found")
}
row = append(row, column)
}
return row
}
// splitColumnNameValue is used to return list of column names and list of their
// corresponding value. Order is very important in this lists as they will be
// used separately when constructing the CQL query.
func splitColumnNameValue(row []base.Column) (
colNames []string, colValues []interface{}) {
// Split row into two lists of column names and column values.
// So for a location `i` in the list, the colNames[i] and colValues[i] will
// represent row[i]
for _, column := range row {
colNames = append(colNames, column.Name)
colValues = append(colValues, column.Value)
}
return colNames, colValues
}
// Create creates a new row in DB if it already doesn't exist. Uses CAS write.
func (c *cassandraConnector) CreateIfNotExists(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, useCasWrite)
}
// Create creates a new row in DB.
func (c *cassandraConnector) Create(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, !useCasWrite)
}
func (c *cassandraConnector) create(
ctx context.Context,
e *base.Definition,
row []base.Column,
casWrite bool,
) error {
// split row into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
colNames, colValues := splitColumnNameValue(row)
// Prepare insert statement
stmt, err := InsertStmt(
Table(e.Name),
Columns(colNames),
Values(colValues),
IfNotExist(casWrite),
)
if err != nil {
return err
}
operation := create
if casWrite {
operation = cas
}
q := c.Session.Query(stmt, colValues...).WithContext(ctx)
if casWrite {
applied, err := q.MapScanCAS(map[string]interface{}{})
if err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
if !applied {
return yarpcerrors.AlreadyExistsErrorf("item already exists")
}
} else {
if err := q.Exec(); err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
}
sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency()))
sendCounters(c.executeSuccessScope, e.Name, operation, nil)
return nil
}
// buildSelectQuery builds a select query using base object and key columns
func (c *cassandraConnector) buildSelectQuery(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead []string,
) (*gocql.Query, error) {
// split keyCols into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
keyColNames, keyColValues := splitColumnNameValue(keyCols)
// Prepare select statement
stmt, err := SelectStmt(
Table(e.Name),
Columns(colNamesToRead), | if err != nil {
return nil, err
}
return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil
}
// Get fetches a record from DB using primary keys
func (c *cassandraConnector) Get(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead ...string,
) ([]base.Column, error) {
if len(colNamesToRead) == 0 {
colNamesToRead = e.GetColumnsToRead()
}
q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead)
if err != nil {
return nil, err
}
// build a result row
result := buildResultRow(e, colNamesToRead)
if err := q.Scan(result...); err != nil {
if err == gocql.ErrNotFound {
err = yarpcerrors.NotFoundErrorf(err.Error())
}
sendCounters(c.executeFailScope, e.Name, | Conditions(keyColNames),
) | random_line_split |
cassandra.go | _ orm.Connector = (*cassandraConnector)(nil)
// getGocqlErrorTag gets a error tag for metrics based on gocql error
// We cannot just use err.Error() as a tag because it contains invalid
// characters like = : etc. which will be rejected by M3
func getGocqlErrorTag(err error) string {
if yarpcerrors.IsAlreadyExists(err) {
return "already_exists"
}
if yarpcerrors.IsNotFound(err) {
return "not_found"
}
switch err.(type) {
case *gocql.RequestErrReadFailure:
return "read_failure"
case *gocql.RequestErrWriteFailure:
return "write_failure"
case *gocql.RequestErrAlreadyExists:
return "already_exists"
case *gocql.RequestErrReadTimeout:
return "read_timeout"
case *gocql.RequestErrWriteTimeout:
return "write_timeout"
case *gocql.RequestErrUnavailable:
return "unavailable"
case *gocql.RequestErrFunctionFailure:
return "function_failure"
case *gocql.RequestErrUnprepared:
return "unprepared"
default:
return "unknown"
}
}
// buildResultRow is used to allocate memory for the row to be populated by
// Cassandra read operation based on what object fields are being read
func buildResultRow(e *base.Definition, columns []string) []interface{} {
results := make([]interface{}, len(columns))
timeType := reflect.ValueOf(time.Now())
gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now()))
for i, column := range columns {
// get the type of the field from the ColumnToType mapping for object
// That we we can allocate appropriate memory for this field
typ := e.ColumnToType[column]
switch typ.Kind() {
case reflect.String:
var value *string
results[i] = &value
case reflect.Int32, reflect.Uint32, reflect.Int:
// C* internally uses int and int64
var value *int
results[i] = &value
case reflect.Int64, reflect.Uint64:
// C* internally uses int and int64
var value *int64
results[i] = &value
case reflect.Bool:
var value *bool
results[i] = &value
case reflect.Slice:
var value *[]byte
results[i] = &value
case timeType.Kind():
var value *time.Time
results[i] = &value
case gocqlUUIDType.Kind():
var value *gocql.UUID
results[i] = &value
case reflect.Ptr:
// Special case for custom optional string type:
// string type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalString{}) {
var value *string
results[i] = &value
break
}
// Special case for custom optional int type:
// int64 type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalUInt64{}) {
var value *int64
results[i] = &value
break
}
// for unrecognized pointer types, fall back to default logging
fallthrough
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{"type": typ.Kind(), "column": column}).
Infof("type not found")
}
}
return results
}
// getRowFromResult translates a row read from Cassandra into a list of
// base.Column to be interpreted by base store client
func | (
e *base.Definition, columnNames []string, columnVals []interface{},
) []base.Column {
row := make([]base.Column, 0, len(columnNames))
for i, columnName := range columnNames {
// construct a list of column objects from the lists of column names
// and values that were returned by the cassandra query
column := base.Column{
Name: columnName,
}
switch rv := columnVals[i].(type) {
case **int:
column.Value = *rv
case **int64:
column.Value = *rv
case **string:
column.Value = *rv
case **gocql.UUID:
column.Value = *rv
case **time.Time:
column.Value = *rv
case **bool:
column.Value = *rv
case **[]byte:
column.Value = *rv
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{
"data": columnVals[i],
"column": columnName}).Infof("type not found")
}
row = append(row, column)
}
return row
}
// splitColumnNameValue is used to return list of column names and list of their
// corresponding value. Order is very important in this lists as they will be
// used separately when constructing the CQL query.
func splitColumnNameValue(row []base.Column) (
colNames []string, colValues []interface{}) {
// Split row into two lists of column names and column values.
// So for a location `i` in the list, the colNames[i] and colValues[i] will
// represent row[i]
for _, column := range row {
colNames = append(colNames, column.Name)
colValues = append(colValues, column.Value)
}
return colNames, colValues
}
// Create creates a new row in DB if it already doesn't exist. Uses CAS write.
func (c *cassandraConnector) CreateIfNotExists(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, useCasWrite)
}
// Create creates a new row in DB.
func (c *cassandraConnector) Create(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, !useCasWrite)
}
func (c *cassandraConnector) create(
ctx context.Context,
e *base.Definition,
row []base.Column,
casWrite bool,
) error {
// split row into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
colNames, colValues := splitColumnNameValue(row)
// Prepare insert statement
stmt, err := InsertStmt(
Table(e.Name),
Columns(colNames),
Values(colValues),
IfNotExist(casWrite),
)
if err != nil {
return err
}
operation := create
if casWrite {
operation = cas
}
q := c.Session.Query(stmt, colValues...).WithContext(ctx)
if casWrite {
applied, err := q.MapScanCAS(map[string]interface{}{})
if err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
if !applied {
return yarpcerrors.AlreadyExistsErrorf("item already exists")
}
} else {
if err := q.Exec(); err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
}
sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency()))
sendCounters(c.executeSuccessScope, e.Name, operation, nil)
return nil
}
// buildSelectQuery builds a select query using base object and key columns
func (c *cassandraConnector) buildSelectQuery(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead []string,
) (*gocql.Query, error) {
// split keyCols into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
keyColNames, keyColValues := splitColumnNameValue(keyCols)
// Prepare select statement
stmt, err := SelectStmt(
Table(e.Name),
Columns(colNamesToRead),
Conditions(keyColNames),
)
if err != nil {
return nil, err
}
return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil
}
// Get fetches a record from DB using primary keys
func (c *cassandraConnector) Get(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead ...string,
) ([]base.Column, error) {
if len(colNamesToRead) == 0 {
colNamesToRead = e.GetColumnsToRead()
}
q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead)
if err != nil {
return nil, err
}
// build a result row
result := buildResultRow(e, colNamesToRead)
if err := q.Scan(result...); err != nil {
if err == gocql.ErrNotFound {
err = yarpcerrors.NotFoundErrorf(err.Error())
}
sendCounters(c.executeFailScope, e.Name, | getRowFromResult | identifier_name |
cassandra.go | _ orm.Connector = (*cassandraConnector)(nil)
// getGocqlErrorTag gets a error tag for metrics based on gocql error
// We cannot just use err.Error() as a tag because it contains invalid
// characters like = : etc. which will be rejected by M3
func getGocqlErrorTag(err error) string {
if yarpcerrors.IsAlreadyExists(err) {
return "already_exists"
}
if yarpcerrors.IsNotFound(err) {
return "not_found"
}
switch err.(type) {
case *gocql.RequestErrReadFailure:
return "read_failure"
case *gocql.RequestErrWriteFailure:
return "write_failure"
case *gocql.RequestErrAlreadyExists:
return "already_exists"
case *gocql.RequestErrReadTimeout:
return "read_timeout"
case *gocql.RequestErrWriteTimeout:
return "write_timeout"
case *gocql.RequestErrUnavailable:
return "unavailable"
case *gocql.RequestErrFunctionFailure:
return "function_failure"
case *gocql.RequestErrUnprepared:
return "unprepared"
default:
return "unknown"
}
}
// buildResultRow is used to allocate memory for the row to be populated by
// Cassandra read operation based on what object fields are being read
func buildResultRow(e *base.Definition, columns []string) []interface{} {
results := make([]interface{}, len(columns))
timeType := reflect.ValueOf(time.Now())
gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now()))
for i, column := range columns {
// get the type of the field from the ColumnToType mapping for object
// That we we can allocate appropriate memory for this field
typ := e.ColumnToType[column]
switch typ.Kind() {
case reflect.String:
var value *string
results[i] = &value
case reflect.Int32, reflect.Uint32, reflect.Int:
// C* internally uses int and int64
var value *int
results[i] = &value
case reflect.Int64, reflect.Uint64:
// C* internally uses int and int64
var value *int64
results[i] = &value
case reflect.Bool:
var value *bool
results[i] = &value
case reflect.Slice:
var value *[]byte
results[i] = &value
case timeType.Kind():
var value *time.Time
results[i] = &value
case gocqlUUIDType.Kind():
var value *gocql.UUID
results[i] = &value
case reflect.Ptr:
// Special case for custom optional string type:
// string type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalString{}) {
var value *string
results[i] = &value
break
}
// Special case for custom optional int type:
// int64 type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalUInt64{}) {
var value *int64
results[i] = &value
break
}
// for unrecognized pointer types, fall back to default logging
fallthrough
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{"type": typ.Kind(), "column": column}).
Infof("type not found")
}
}
return results
}
// getRowFromResult translates a row read from Cassandra into a list of
// base.Column to be interpreted by base store client
func getRowFromResult(
e *base.Definition, columnNames []string, columnVals []interface{},
) []base.Column {
row := make([]base.Column, 0, len(columnNames))
for i, columnName := range columnNames | case **[]byte:
column.Value = *rv
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{
"data": columnVals[i],
"column": columnName}).Infof("type not found")
}
row = append(row, column)
}
return row
}
// splitColumnNameValue is used to return list of column names and list of their
// corresponding value. Order is very important in this lists as they will be
// used separately when constructing the CQL query.
func splitColumnNameValue(row []base.Column) (
colNames []string, colValues []interface{}) {
// Split row into two lists of column names and column values.
// So for a location `i` in the list, the colNames[i] and colValues[i] will
// represent row[i]
for _, column := range row {
colNames = append(colNames, column.Name)
colValues = append(colValues, column.Value)
}
return colNames, colValues
}
// Create creates a new row in DB if it already doesn't exist. Uses CAS write.
func (c *cassandraConnector) CreateIfNotExists(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, useCasWrite)
}
// Create creates a new row in DB.
func (c *cassandraConnector) Create(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, !useCasWrite)
}
func (c *cassandraConnector) create(
ctx context.Context,
e *base.Definition,
row []base.Column,
casWrite bool,
) error {
// split row into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
colNames, colValues := splitColumnNameValue(row)
// Prepare insert statement
stmt, err := InsertStmt(
Table(e.Name),
Columns(colNames),
Values(colValues),
IfNotExist(casWrite),
)
if err != nil {
return err
}
operation := create
if casWrite {
operation = cas
}
q := c.Session.Query(stmt, colValues...).WithContext(ctx)
if casWrite {
applied, err := q.MapScanCAS(map[string]interface{}{})
if err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
if !applied {
return yarpcerrors.AlreadyExistsErrorf("item already exists")
}
} else {
if err := q.Exec(); err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
}
sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency()))
sendCounters(c.executeSuccessScope, e.Name, operation, nil)
return nil
}
// buildSelectQuery builds a select query using base object and key columns
func (c *cassandraConnector) buildSelectQuery(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead []string,
) (*gocql.Query, error) {
// split keyCols into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
keyColNames, keyColValues := splitColumnNameValue(keyCols)
// Prepare select statement
stmt, err := SelectStmt(
Table(e.Name),
Columns(colNamesToRead),
Conditions(keyColNames),
)
if err != nil {
return nil, err
}
return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil
}
// Get fetches a record from DB using primary keys
func (c *cassandraConnector) Get(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead ...string,
) ([]base.Column, error) {
if len(colNamesToRead) == 0 {
colNamesToRead = e.GetColumnsToRead()
}
q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead)
if err != nil {
return nil, err
}
// build a result row
result := buildResultRow(e, colNamesToRead)
if err := q.Scan(result...); err != nil {
if err == gocql.ErrNotFound {
err = yarpcerrors.NotFoundErrorf(err.Error())
}
sendCounters(c.executeFailScope, e.Name, | {
// construct a list of column objects from the lists of column names
// and values that were returned by the cassandra query
column := base.Column{
Name: columnName,
}
switch rv := columnVals[i].(type) {
case **int:
column.Value = *rv
case **int64:
column.Value = *rv
case **string:
column.Value = *rv
case **gocql.UUID:
column.Value = *rv
case **time.Time:
column.Value = *rv
case **bool:
column.Value = *rv | conditional_block |
cassandra.go | _ orm.Connector = (*cassandraConnector)(nil)
// getGocqlErrorTag gets a error tag for metrics based on gocql error
// We cannot just use err.Error() as a tag because it contains invalid
// characters like = : etc. which will be rejected by M3
func getGocqlErrorTag(err error) string {
if yarpcerrors.IsAlreadyExists(err) {
return "already_exists"
}
if yarpcerrors.IsNotFound(err) {
return "not_found"
}
switch err.(type) {
case *gocql.RequestErrReadFailure:
return "read_failure"
case *gocql.RequestErrWriteFailure:
return "write_failure"
case *gocql.RequestErrAlreadyExists:
return "already_exists"
case *gocql.RequestErrReadTimeout:
return "read_timeout"
case *gocql.RequestErrWriteTimeout:
return "write_timeout"
case *gocql.RequestErrUnavailable:
return "unavailable"
case *gocql.RequestErrFunctionFailure:
return "function_failure"
case *gocql.RequestErrUnprepared:
return "unprepared"
default:
return "unknown"
}
}
// buildResultRow is used to allocate memory for the row to be populated by
// Cassandra read operation based on what object fields are being read
func buildResultRow(e *base.Definition, columns []string) []interface{} {
results := make([]interface{}, len(columns))
timeType := reflect.ValueOf(time.Now())
gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now()))
for i, column := range columns {
// get the type of the field from the ColumnToType mapping for object
// That we we can allocate appropriate memory for this field
typ := e.ColumnToType[column]
switch typ.Kind() {
case reflect.String:
var value *string
results[i] = &value
case reflect.Int32, reflect.Uint32, reflect.Int:
// C* internally uses int and int64
var value *int
results[i] = &value
case reflect.Int64, reflect.Uint64:
// C* internally uses int and int64
var value *int64
results[i] = &value
case reflect.Bool:
var value *bool
results[i] = &value
case reflect.Slice:
var value *[]byte
results[i] = &value
case timeType.Kind():
var value *time.Time
results[i] = &value
case gocqlUUIDType.Kind():
var value *gocql.UUID
results[i] = &value
case reflect.Ptr:
// Special case for custom optional string type:
// string type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalString{}) {
var value *string
results[i] = &value
break
}
// Special case for custom optional int type:
// int64 type used in Cassandra
// converted to/from custom type in ORM layer
if typ == reflect.TypeOf(&base.OptionalUInt64{}) {
var value *int64
results[i] = &value
break
}
// for unrecognized pointer types, fall back to default logging
fallthrough
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{"type": typ.Kind(), "column": column}).
Infof("type not found")
}
}
return results
}
// getRowFromResult translates a row read from Cassandra into a list of
// base.Column to be interpreted by base store client
func getRowFromResult(
e *base.Definition, columnNames []string, columnVals []interface{},
) []base.Column {
row := make([]base.Column, 0, len(columnNames))
for i, columnName := range columnNames {
// construct a list of column objects from the lists of column names
// and values that were returned by the cassandra query
column := base.Column{
Name: columnName,
}
switch rv := columnVals[i].(type) {
case **int:
column.Value = *rv
case **int64:
column.Value = *rv
case **string:
column.Value = *rv
case **gocql.UUID:
column.Value = *rv
case **time.Time:
column.Value = *rv
case **bool:
column.Value = *rv
case **[]byte:
column.Value = *rv
default:
// This should only happen if we start using a new cassandra type
// without adding to the translation layer
log.WithFields(log.Fields{
"data": columnVals[i],
"column": columnName}).Infof("type not found")
}
row = append(row, column)
}
return row
}
// splitColumnNameValue is used to return list of column names and list of their
// corresponding value. Order is very important in this lists as they will be
// used separately when constructing the CQL query.
func splitColumnNameValue(row []base.Column) (
colNames []string, colValues []interface{}) {
// Split row into two lists of column names and column values.
// So for a location `i` in the list, the colNames[i] and colValues[i] will
// represent row[i]
for _, column := range row {
colNames = append(colNames, column.Name)
colValues = append(colValues, column.Value)
}
return colNames, colValues
}
// Create creates a new row in DB if it already doesn't exist. Uses CAS write.
func (c *cassandraConnector) CreateIfNotExists(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, useCasWrite)
}
// Create creates a new row in DB.
func (c *cassandraConnector) Create(
ctx context.Context,
e *base.Definition,
row []base.Column,
) error {
return c.create(ctx, e, row, !useCasWrite)
}
func (c *cassandraConnector) create(
ctx context.Context,
e *base.Definition,
row []base.Column,
casWrite bool,
) error | }
q := c.Session.Query(stmt, colValues...).WithContext(ctx)
if casWrite {
applied, err := q.MapScanCAS(map[string]interface{}{})
if err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
if !applied {
return yarpcerrors.AlreadyExistsErrorf("item already exists")
}
} else {
if err := q.Exec(); err != nil {
sendCounters(c.executeFailScope, e.Name, operation, err)
return err
}
}
sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency()))
sendCounters(c.executeSuccessScope, e.Name, operation, nil)
return nil
}
// buildSelectQuery builds a select query using base object and key columns
func (c *cassandraConnector) buildSelectQuery(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead []string,
) (*gocql.Query, error) {
// split keyCols into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
keyColNames, keyColValues := splitColumnNameValue(keyCols)
// Prepare select statement
stmt, err := SelectStmt(
Table(e.Name),
Columns(colNamesToRead),
Conditions(keyColNames),
)
if err != nil {
return nil, err
}
return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil
}
// Get fetches a record from DB using primary keys
func (c *cassandraConnector) Get(
ctx context.Context,
e *base.Definition,
keyCols []base.Column,
colNamesToRead ...string,
) ([]base.Column, error) {
if len(colNamesToRead) == 0 {
colNamesToRead = e.GetColumnsToRead()
}
q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead)
if err != nil {
return nil, err
}
// build a result row
result := buildResultRow(e, colNamesToRead)
if err := q.Scan(result...); err != nil {
if err == gocql.ErrNotFound {
err = yarpcerrors.NotFoundErrorf(err.Error())
}
sendCounters(c.executeFailScope, e.Name, | {
// split row into a list of names and values to compose query stmt using
// names and use values in the session query call, so the order needs to be
// maintained.
colNames, colValues := splitColumnNameValue(row)
// Prepare insert statement
stmt, err := InsertStmt(
Table(e.Name),
Columns(colNames),
Values(colValues),
IfNotExist(casWrite),
)
if err != nil {
return err
}
operation := create
if casWrite {
operation = cas | identifier_body |
SharedEnum.ts |
rePerSignatureLengthMax = 100,
}
export enum EStoryStack { // storyStack 表的枚举
story_stack_daily_times = 2045, // 剧本搜罗每天搜罗次数
story_stack_search_cost = 2046, // 剧本搜罗每次消耗物品
story_stack_cd_stage_cost = 2047, // 剧本搜罗每阶段搜罗冷却加速消耗资源类型和数量
story_stack_cd_stage_length = 2048, // 剧本搜罗阶段时长
story_stack_cd = 2050, // 剧本搜罗冷却时间
story_stack_daily_update = 2051, // 每日刷新时间
story_stack_cd_block_level = 2052, // 剧本搜罗冷却功能开放所需街区等级
}
export enum EStoryModule { // story 模块的枚举
noFinish = 0,
finish = 1,
hadGet = 2,
randomLength = 3,
shootTime = 5,
score = 8,
}
export enum EMovieState {
chooseScript = 2, // 选择剧本
renameFilm = 3, // 修改剧本名称
currentMarketInf = 4, // 当前市场反馈
chooseActor = 5, // 选择艺人
costView = 6, // 薪酬计算
yRShowView = 7, // 艺人展示
compatibility = 8, // 艺人对影片的契合度
filming = 9, // 正在拍摄中
chooseType = 10, // 选择类型
chooseTip = 11, // 选择后的提示
propaganda = 12, // 宣传
complete = 13, // 杀青
actorUpdate = 14, // 艺人熟练度
proficiency = 15, // 杀青对比情况
chooseTheater = 16, // 选择院线
noticeFeedBack = 17, // 媒体评价
SYPF = 18, // 首映票房
audienceReputation = 19, // 观众口碑
result = 20, // 上映结果
overMarket = 21, // 下映提示
Jiesuan = 22, // 结算分享界面
rewardTip = 23, // 奖励物品提示
contineTransceiver = 24, // 持续收益
}
export enum EGlobalId {
maxMovieNum = 9, // 最多可同时进行持续收益的电影数量
tempActor = 9999, // 临时演员id
firstActor = 9997, // 首部电影演员巨石强森
loyaltyCardLimit = 1035, // 忠诚卡增加赠送物品上限
}
export enum EMovieType {
}
export enum EBoxOfficeEvaluation { // 票房评价
normal = 1, // 一般
good = 2, // 良好
big_sell = 3, // 大卖
great_sell = 4, // 超卖
myth_sell = 5, // 神话
marvel_sell = 6, // 传奇
}
export enum EProgressBoxId {
manageOrder = 1, // 经营订单进度宝箱
dailyTask = 2, // 日常任务宝箱
cooperateTask = 3, // 合作任务宝箱
}
export enum EManageProduce { // 经营生产
line = 1, // 队列
save = 2, // 存储
time = 3, // 时间减免
output_initial = 4, // 每次产出
dollarIntervalTime = 11, // 美元生产线时间
baseProduceId = 100, // 生产纪念品的生产线ID,该生产线只产生美元
intervalTime = 300, // 间隔时间
}
export enum ERankListType {
SelfList = 1, // 自己的排名
ServerList = 2, // 全服排行榜
GroupList = 3, // 分组总票房
GroupMovieList = 4, // 分组影片票房
}
export enum EAchievementState { // 成就或者任务的状态 | finished = 3, // 完成
rewarded = 4, // 已领奖的
}
export enum EAchievementType { // 成就或者任务的类型
daily = 1, // 日常
achievement = 2, // 成就
story = 3, // 剧本
mainTask = 4, // 主线任务
}
export enum EMallItemLimitType { // 商店限购类型
daily = 1, // 每日
weekly = 2, // 每周
}
export enum EManageBusiness { // 经营事务
baseLine = 10, // 基础代办事务上限
interval = 180, // 间隔时长
}
export enum EManageVisit { // 经营探班
baseLine = 3, // 基础探班队列
overdueTime = 180, // 过期时长
baseIntervalTime = 300, // 基本间隔时间
}
export enum EManageOrder { // 经营订单
baseLength = 9, // 最多显示9个订单
baseTime = 1200, // 订单存活时间
delTime = 20, // 删除订单缓存时间
}
export enum EMailId { // 邮件ID枚举
cooperateApplyFail = 3, // 合作邀请失败
orderId = 4, // 订单过期邮件ID
partnerGive = 7, //伙伴赠送
firstCooperate = 8, // 首次建立合作奖励
firstAccountCooperate = 9, // 首次账号查找建立合作
delOrderId = 10, // 删除的订单邮件ID
rewardOrder = 11, //订单奖励过期
dailyTask = 12, // 日程任务未领取
cupPackage = 13, // 奖杯礼包
}
export enum ESecretaryType { // 秘书拥有类型
noHave = 0, // 未获取
had = 1, // 终生拥有
temporaryHad = 2, // 暂时拥有
}
export enum ESecretary { // 秘书表枚举
skillExp = 2, // 艺人技能训练经验增加N%
trainTime = 3, // 艺人培养时间减少N%
searchTime = 4, // 星探等待时间减少N%
starSearch = 5, // 每天额外星探次数增加N
cityTimes = 6, // 城市自动宣传次数
workTimes = 7, // 自动处理公务次数
visitTimes = 8, // 自动接待次数
secretaryExpStart = 9, // 秘书体验活动开始时间
secretaryExpEnd = 10, // 秘书体验活动结束时间
applicationDuration = 11, //许可证申请时长
applicationLicense = 12, //申请许可证物品类型
applicationLicenseNum = 13, // 申请许可证物品数量
freeGiftId = 1001, // 每日免费领取礼包ID
moneyGiftId = 2001, // 每日需要花费领取的礼包ID
}
export enum EManageMeet { // 经营会议枚举
baseLine = 3, // 基础探班队列
baseIntervalTime = 1200, // 基本间隔时间
}
export enum EOscarType {
man = 1,
woman = 2,
perform = 3,
art = 4,
publicPraise = 5,
bestBoxOffice = 6,
totalBoxOffice = 7,
movies = 8,
bigSell = 9,
}
export enum EGMRefreshType { // GM的刷新类型
dailySign = 0, // 日常签到
dailyTask = 1, // 日常任务
dailyMall = 2, // 商城每日限购道具
weeklyMall = 3, // 商城每周限购道具
manageProduce = 4, // 管理生产线cd的清除
sevenAddup = 5, // 七日累计登陆
findStory = 6, // 剧本搜罗清除
| none = 0, // 初始化
receivable = 1, // 可接
received = 2, // 已接 | random_line_split |
SharedEnum.ts | LimitType { // 商店限购类型
daily = 1, // 每日
weekly = 2, // 每周
}
export enum EManageBusiness { // 经营事务
baseLine = 10, // 基础代办事务上限
interval = 180, // 间隔时长
}
export enum EManageVisit { // 经营探班
baseLine = 3, // 基础探班队列
overdueTime = 180, // 过期时长
baseIntervalTime = 300, // 基本间隔时间
}
export enum EManageOrder { // 经营订单
baseLength = 9, // 最多显示9个订单
baseTime = 1200, // 订单存活时间
delTime = 20, // 删除订单缓存时间
}
export enum EMailId { // 邮件ID枚举
cooperateApplyFail = 3, // 合作邀请失败
orderId = 4, // 订单过期邮件ID
partnerGive = 7, //伙伴赠送
firstCooperate = 8, // 首次建立合作奖励
firstAccountCooperate = 9, // 首次账号查找建立合作
delOrderId = 10, // 删除的订单邮件ID
rewardOrder = 11, //订单奖励过期
dailyTask = 12, // 日程任务未领取
cupPackage = 13, // 奖杯礼包
}
export enum ESecretaryType { // 秘书拥有类型
noHave = 0, // 未获取
had = 1, // 终生拥有
temporaryHad = 2, // 暂时拥有
}
export enum ESecretary { // 秘书表枚举
skillExp = 2, // 艺人技能训练经验增加N%
trainTime = 3, // 艺人培养时间减少N%
searchTime = 4, // 星探等待时间减少N%
starSearch = 5, // 每天额外星探次数增加N
cityTimes = 6, // 城市自动宣传次数
workTimes = 7, // 自动处理公务次数
visitTimes = 8, // 自动接待次数
secretaryExpStart = 9, // 秘书体验活动开始时间
secretaryExpEnd = 10, // 秘书体验活动结束时间
applicationDuration = 11, //许可证申请时长
applicationLicense = 12, //申请许可证物品类型
applicationLicenseNum = 13, // 申请许可证物品数量
freeGiftId = 1001, // 每日免费领取礼包ID
moneyGiftId = 2001, // 每日需要花费领取的礼包ID
}
export enum EManageMeet { // 经营会议枚举
baseLine = 3, // 基础探班队列
baseIntervalTime = 1200, // 基本间隔时间
}
export enum EOscarType {
man = 1,
woman = 2,
perform = 3,
art = 4,
publicPraise = 5,
bestBoxOffice = 6,
totalBoxOffice = 7,
movies = 8,
bigSell = 9,
}
export enum EGMRefreshType { // GM的刷新类型
dailySign = 0, // 日常签到
dailyTask = 1, // 日常任务
dailyMall = 2, // 商城每日限购道具
weeklyMall = 3, // 商城每周限购道具
manageProduce = 4, // 管理生产线cd的清除
sevenAddup = 5, // 七日累计登陆
findStory = 6, // 剧本搜罗清除
}
export enum EItemUseType { // 物品使用类型
noReward = 0, // 无奖励
allReward = 1, // 获得reward中所有奖励
randomReward = 2, // 随机获得一项奖励
}
export enum EItemType { // 物品类型
randomActor = 16, // 随机获得一个艺人
}
export enum EGuideType {
guide = 1, // 引导
story = 2, // 剧情
}
export enum ENoticeType {
init = 1,
add = 2,
update = 4,
remove = 8,
}
export enum ECooperate {
taskLength = 6,
}
export enum EChatChannel {
private = 1, // 私人聊天
world = 2, // 世界聊天
company = 3, // 公司聊天
}
export enum ESystemMessageId {
getActor1 = 1001,
getActor2 = 1002,
movie = 1003,
updateName = 1004,
createCompany = 1005,
joinCompany = 1006,
}
export enum ESystemType {
userName = 1, // 玩家/公司 名称
oldName = 2, // 旧名称
actorStar = 3, // 艺人星级
actorName = 4, // 艺人名称
moiveName = 5, // 电影名称
blockLevel = 6, // 街道等级
boxOfficeEvaluation = 7, // 电影等级
groupName = 8, // 集团名称
}
export class ECommon { // 通用枚举
static MaxNameLenth = 20; // 名字最大长度
static DefaultName = "Brad Pitt"; // 默认名字
static MoveSpeed = 1.3; // 移动速度
static CanJump = false; // 开启跳跃
}
export class EChannel { // 渠道枚举
static facebook = "1";
static google = "2";
}
export class ProtoVersion { //工具自动生成的枚举,记录协议版本号
static versionCode = 10; //协议版本号数字表示
static versionName = "0.1.0"; //协议版本号字符表示
}
export class DBOType { //工具自动生成的枚举,枚举所有的存库对象
static UsrData = "UsrData";
static GameInfo = "GameInfo";
static GameInfoExt = "GameInfoExt";
static GMMsgLog = "GMMsgLog";
static OrderData = "OrderData";
static ItemData = "ItemData";
static StorySuitData = "StorySuitData";
static StoryData = "StoryData";
static ActorData = "ActorData";
static CityData = "CityData";
static ActorSkillEffect = "ActorSkillEffect";
static TempData = "TempData";
static MovieData = "MovieData";
static ManageOrderData = "ManageOrderData"; // 经营订单
static ProgressBoxData = "ProgressBoxData";
static ManageProduceData = "ManageProduceData"; // 经营生产
static MovieLicenceResetTimeData = "MovieLicenceResetTimeData";
static ManageBusinessData = "ManageBusinessData"; // 经营公务
static ManageVisitData = "ManageVisitData"; // 经营探班
static StoryMessage = "StoryMessage";
static RankListData = "RankListData";
static TaskData = "TaskData"; // 任务结构
static MallItemLimitData = "MallItemLimitData"; // 单个商品限购数据
static MailData = "MailData";
static ManageMeetingData = "ManageMeetingData"; // 经营会议
static DanmuData = "DanmuData";
static DailySign = "DailySign"; // 日常签到数据
static SecretaryData = "SecretaryData"; // 小秘书功能
static SevenAddUpData = "SevenAddUpData"; // 活动期间累计达到某种要求领取奖励
static ActortExerciseData = "ActortExerciseData"; // 艺人训练队列
static DailyEvent = "DailyEvent"; // 日程事件
static GuideData = "GuideData"; // 引导数据
static PlayerCooperateData = "PlayerCooperateData"; // 合作
static AccountCooperateData = "AccountCooperateData"; // 账号邀请合作数据
static AutoCooperateData = "AutoCooperateData"; // 公共邀请合作数据
static CooperateTaskData = "CooperateTaskData"; // 合作任务
static CooperateOrderData = "CooperateOrderData"; // 合作订单
static LevelGiftData = "LevelGiftData";
static GetGuideReward = | "GetGuid | identifier_name | |
main.rs | Status::new(Code::InvalidArgument, "invalid header name")
})?;
builder = builder.header(key, &header.value[..]);
}
if self.mock_network {
// Forward the network access to the controller.
warn!("mock network request");
warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now());
self.api.forward_network(req).await?;
Ok(Response::new(NetworkAccessResult::default()))
} else {
// Make the actual network access
let handle = tokio::spawn(async move {
let res = builder.send().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Aborted, "http request failed")
})?;
let status_code = res.status().as_u16() as u32;
let headers = res
.headers()
.iter()
.map(|(key, value)| KeyValuePair {
key: key.as_str().as_bytes().to_vec(),
value: value.as_bytes().to_vec(),
})
.collect::<Vec<_>>();
let data = res.bytes().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Unavailable, "error streaming response bytes")
})?;
Ok(Response::new(NetworkAccessResult {
status_code,
headers,
data: data.to_vec(),
}))
});
// Forward the network access to the controller.
self.api.forward_network(req).await?;
// Return the result of the HTTP request.
handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))?
}
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the tag corresponds to a valid param.
/// If the tag is valid and this is a stateless edge, only respond
/// succesfully if the module is trying to get the triggered data.
/// If the tag is valid and this is a stateful edge, endorse the data
/// with the host token and forward to the controller.
async fn get(
&self, req: Request<GetData>,
) -> Result<Response<GetDataResult>, Status> {
debug!("get");
// Validate the process is valid and has permissions to read the tag.
// No serializability guarantees from other requests from the same process.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
warn!("get: invalid token {}", req.process_token);
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if perms.is_triggered(&req.tag) {
// cached the triggered file
if req.lower != req.upper {
debug!("get: {} invalid triggered timestamps", req.process_token);
return Ok(Response::new(GetDataResult::default()))
} else if !self.pubsub_enabled {
debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token);
// fallthrough below
} else if let Some(data) = perms.read_triggered(&req.lower) {
debug!("get: {} reading triggered data", req.process_token);
return Ok(Response::new(GetDataResult {
timestamps: vec![req.lower],
data: vec![data],
}))
} else {
debug!("get: {} process was not triggered", req.process_token);
return Ok(Response::new(GetDataResult::default()))
}
} else if !perms.can_read(&req.tag) {
warn!("get: {} cannot read {}", req.process_token, req.tag);
return Err(Status::new(Code::Unauthenticated, "cannot read"));
}
}
// Forward the file access to the controller and return the result
debug!("get: {} forwarding tag={}", req.process_token, req.tag);
self.api.forward_get(req).await
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the process is writing to a valid tag.
/// If the tag is valid, endorse the data with the host token and | /// If the tag corresponds to sensor state (say maybe it starts with #
/// which is reserved for state tags), forward the request as a state
/// change instead.
async fn push(
&self, req: Request<PushData>,
) -> Result<Response<()>, Status> {
debug!("push");
// Validate the process is valid and has permissions to write the file.
// No serializability guarantees from other requests from the same process.
// Sanitizes the path.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if !perms.can_write(&req.tag) {
debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag);
return Ok(Response::new(()));
}
if state_tags::is_state_tag(&req.tag) {
Some(state_tags::parse_state_tag(&req.tag))
} else {
None
}
} else {
unreachable!()
};
if let Some((sensor, key)) = sensor_key {
// Forward as state change if the tag changes state.
debug!("push: {} forwarding state change tag={}", req.process_token, req.tag);
let req = StateChange {
host_token: String::new(),
process_token: req.process_token,
sensor_id: sensor,
key,
value: req.data,
};
self.api.forward_state(req).await
} else {
// Forward the file access to the controller and return the result
debug!("push: {} forwarding push tag={}", req.process_token, req.tag);
self.api.forward_push(req).await
}
}
}
impl Host {
/// Generate a new host with a random ID.
pub fn new(
base_path: PathBuf,
controller: &str,
cold_cache_enabled: bool,
warm_cache_enabled: bool,
pubsub_enabled: bool,
mock_network: bool,
) -> Self {
use rand::Rng;
let id: u32 = rand::thread_rng().gen();
assert!(cold_cache_enabled || !warm_cache_enabled);
// TODO: buffer size
Self {
id,
api: crate::net::KarlHostAPI::new(controller),
process_tokens: Arc::new(Mutex::new(HashMap::new())),
warm_processes: Arc::new(Mutex::new(HashMap::new())),
warm_cache_tx: None, // wish this didn't have to be wrapped
path_manager: Arc::new(PathManager::new(base_path, id)),
compute_lock: Arc::new(Mutex::new(())),
cold_cache_enabled,
warm_cache_enabled,
pubsub_enabled,
mock_network,
}
}
/// Spawns a background process that sends heartbeats to the controller
/// at the HEARTBEAT_INTERVAL.
///
/// The constructor creates a directory at the <KARL_PATH> if it does
/// not already exist. The working directory for any computation is at
/// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working
/// directory must be at <KARL_PATH>.
///
/// Parameters:
/// - port - The port to listen on.
/// - password - The password to register with the controller.
pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> {
self.api.register(self.id, port, password).await?;
let api = self.api.clone();
tokio::spawn(async move {
// Every HEARTBEAT_INTERVAL seconds, this process wakes up
// sends a heartbeat message to the controller.
loop {
tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await;
trace!("heartbeat");
let res = api.heartbeat().await;
if let Err(e) = res {
warn!("error sending heartbeat: {}", e);
};
}
});
// listener for spawning warm processes
let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100);
self.warm_cache_tx = Some(tx);
let host = self.clone();
tokio::spawn(async move {
loop {
let req: ComputeRequest = rx.recv().await.unwrap();
let is_warm = true;
Host::spawn_new_process(
host.clone(),
req,
is_warm,
TRIGGERED_KEY.to_string(), // special value
TRIGGERED_KEY.to_string(), // special value
).await;
}
});
Ok(())
}
async fn attach_warm_process(
&self,
req: &mut ComputeRequest,
) -> Option<Process | /// forward to the controller.
/// | random_line_split |
main.rs | Status::new(Code::InvalidArgument, "invalid header name")
})?;
builder = builder.header(key, &header.value[..]);
}
if self.mock_network {
// Forward the network access to the controller.
warn!("mock network request");
warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now());
self.api.forward_network(req).await?;
Ok(Response::new(NetworkAccessResult::default()))
} else {
// Make the actual network access
let handle = tokio::spawn(async move {
let res = builder.send().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Aborted, "http request failed")
})?;
let status_code = res.status().as_u16() as u32;
let headers = res
.headers()
.iter()
.map(|(key, value)| KeyValuePair {
key: key.as_str().as_bytes().to_vec(),
value: value.as_bytes().to_vec(),
})
.collect::<Vec<_>>();
let data = res.bytes().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Unavailable, "error streaming response bytes")
})?;
Ok(Response::new(NetworkAccessResult {
status_code,
headers,
data: data.to_vec(),
}))
});
// Forward the network access to the controller.
self.api.forward_network(req).await?;
// Return the result of the HTTP request.
handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))?
}
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the tag corresponds to a valid param.
/// If the tag is valid and this is a stateless edge, only respond
/// succesfully if the module is trying to get the triggered data.
/// If the tag is valid and this is a stateful edge, endorse the data
/// with the host token and forward to the controller.
async fn get(
&self, req: Request<GetData>,
) -> Result<Response<GetDataResult>, Status> {
debug!("get");
// Validate the process is valid and has permissions to read the tag.
// No serializability guarantees from other requests from the same process.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
warn!("get: invalid token {}", req.process_token);
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if perms.is_triggered(&req.tag) {
// cached the triggered file
if req.lower != req.upper {
debug!("get: {} invalid triggered timestamps", req.process_token);
return Ok(Response::new(GetDataResult::default()))
} else if !self.pubsub_enabled {
debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token);
// fallthrough below
} else if let Some(data) = perms.read_triggered(&req.lower) {
debug!("get: {} reading triggered data", req.process_token);
return Ok(Response::new(GetDataResult {
timestamps: vec![req.lower],
data: vec![data],
}))
} else {
debug!("get: {} process was not triggered", req.process_token);
return Ok(Response::new(GetDataResult::default()))
}
} else if !perms.can_read(&req.tag) {
warn!("get: {} cannot read {}", req.process_token, req.tag);
return Err(Status::new(Code::Unauthenticated, "cannot read"));
}
}
// Forward the file access to the controller and return the result
debug!("get: {} forwarding tag={}", req.process_token, req.tag);
self.api.forward_get(req).await
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the process is writing to a valid tag.
/// If the tag is valid, endorse the data with the host token and
/// forward to the controller.
///
/// If the tag corresponds to sensor state (say maybe it starts with #
/// which is reserved for state tags), forward the request as a state
/// change instead.
async fn | (
&self, req: Request<PushData>,
) -> Result<Response<()>, Status> {
debug!("push");
// Validate the process is valid and has permissions to write the file.
// No serializability guarantees from other requests from the same process.
// Sanitizes the path.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if !perms.can_write(&req.tag) {
debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag);
return Ok(Response::new(()));
}
if state_tags::is_state_tag(&req.tag) {
Some(state_tags::parse_state_tag(&req.tag))
} else {
None
}
} else {
unreachable!()
};
if let Some((sensor, key)) = sensor_key {
// Forward as state change if the tag changes state.
debug!("push: {} forwarding state change tag={}", req.process_token, req.tag);
let req = StateChange {
host_token: String::new(),
process_token: req.process_token,
sensor_id: sensor,
key,
value: req.data,
};
self.api.forward_state(req).await
} else {
// Forward the file access to the controller and return the result
debug!("push: {} forwarding push tag={}", req.process_token, req.tag);
self.api.forward_push(req).await
}
}
}
impl Host {
/// Generate a new host with a random ID.
pub fn new(
base_path: PathBuf,
controller: &str,
cold_cache_enabled: bool,
warm_cache_enabled: bool,
pubsub_enabled: bool,
mock_network: bool,
) -> Self {
use rand::Rng;
let id: u32 = rand::thread_rng().gen();
assert!(cold_cache_enabled || !warm_cache_enabled);
// TODO: buffer size
Self {
id,
api: crate::net::KarlHostAPI::new(controller),
process_tokens: Arc::new(Mutex::new(HashMap::new())),
warm_processes: Arc::new(Mutex::new(HashMap::new())),
warm_cache_tx: None, // wish this didn't have to be wrapped
path_manager: Arc::new(PathManager::new(base_path, id)),
compute_lock: Arc::new(Mutex::new(())),
cold_cache_enabled,
warm_cache_enabled,
pubsub_enabled,
mock_network,
}
}
/// Spawns a background process that sends heartbeats to the controller
/// at the HEARTBEAT_INTERVAL.
///
/// The constructor creates a directory at the <KARL_PATH> if it does
/// not already exist. The working directory for any computation is at
/// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working
/// directory must be at <KARL_PATH>.
///
/// Parameters:
/// - port - The port to listen on.
/// - password - The password to register with the controller.
pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> {
self.api.register(self.id, port, password).await?;
let api = self.api.clone();
tokio::spawn(async move {
// Every HEARTBEAT_INTERVAL seconds, this process wakes up
// sends a heartbeat message to the controller.
loop {
tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await;
trace!("heartbeat");
let res = api.heartbeat().await;
if let Err(e) = res {
warn!("error sending heartbeat: {}", e);
};
}
});
// listener for spawning warm processes
let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100);
self.warm_cache_tx = Some(tx);
let host = self.clone();
tokio::spawn(async move {
loop {
let req: ComputeRequest = rx.recv().await.unwrap();
let is_warm = true;
Host::spawn_new_process(
host.clone(),
req,
is_warm,
TRIGGERED_KEY.to_string(), // special value
TRIGGERED_KEY.to_string(), // special value
).await;
}
});
Ok(())
}
async fn attach_warm_process(
&self,
req: &mut ComputeRequest,
) -> Option< | push | identifier_name |
main.rs | Status::new(Code::InvalidArgument, "invalid header name")
})?;
builder = builder.header(key, &header.value[..]);
}
if self.mock_network {
// Forward the network access to the controller.
warn!("mock network request");
warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now());
self.api.forward_network(req).await?;
Ok(Response::new(NetworkAccessResult::default()))
} else {
// Make the actual network access
let handle = tokio::spawn(async move {
let res = builder.send().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Aborted, "http request failed")
})?;
let status_code = res.status().as_u16() as u32;
let headers = res
.headers()
.iter()
.map(|(key, value)| KeyValuePair {
key: key.as_str().as_bytes().to_vec(),
value: value.as_bytes().to_vec(),
})
.collect::<Vec<_>>();
let data = res.bytes().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Unavailable, "error streaming response bytes")
})?;
Ok(Response::new(NetworkAccessResult {
status_code,
headers,
data: data.to_vec(),
}))
});
// Forward the network access to the controller.
self.api.forward_network(req).await?;
// Return the result of the HTTP request.
handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))?
}
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the tag corresponds to a valid param.
/// If the tag is valid and this is a stateless edge, only respond
/// succesfully if the module is trying to get the triggered data.
/// If the tag is valid and this is a stateful edge, endorse the data
/// with the host token and forward to the controller.
async fn get(
&self, req: Request<GetData>,
) -> Result<Response<GetDataResult>, Status> {
debug!("get");
// Validate the process is valid and has permissions to read the tag.
// No serializability guarantees from other requests from the same process.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
warn!("get: invalid token {}", req.process_token);
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if perms.is_triggered(&req.tag) {
// cached the triggered file
if req.lower != req.upper {
debug!("get: {} invalid triggered timestamps", req.process_token);
return Ok(Response::new(GetDataResult::default()))
} else if !self.pubsub_enabled {
debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token);
// fallthrough below
} else if let Some(data) = perms.read_triggered(&req.lower) {
debug!("get: {} reading triggered data", req.process_token);
return Ok(Response::new(GetDataResult {
timestamps: vec![req.lower],
data: vec![data],
}))
} else {
debug!("get: {} process was not triggered", req.process_token);
return Ok(Response::new(GetDataResult::default()))
}
} else if !perms.can_read(&req.tag) {
warn!("get: {} cannot read {}", req.process_token, req.tag);
return Err(Status::new(Code::Unauthenticated, "cannot read"));
}
}
// Forward the file access to the controller and return the result
debug!("get: {} forwarding tag={}", req.process_token, req.tag);
self.api.forward_get(req).await
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the process is writing to a valid tag.
/// If the tag is valid, endorse the data with the host token and
/// forward to the controller.
///
/// If the tag corresponds to sensor state (say maybe it starts with #
/// which is reserved for state tags), forward the request as a state
/// change instead.
async fn push(
&self, req: Request<PushData>,
) -> Result<Response<()>, Status> | return Ok(Response::new(()));
}
if state_tags::is_state_tag(&req.tag) {
Some(state_tags::parse_state_tag(&req.tag))
} else {
None
}
} else {
unreachable!()
};
if let Some((sensor, key)) = sensor_key {
// Forward as state change if the tag changes state.
debug!("push: {} forwarding state change tag={}", req.process_token, req.tag);
let req = StateChange {
host_token: String::new(),
process_token: req.process_token,
sensor_id: sensor,
key,
value: req.data,
};
self.api.forward_state(req).await
} else {
// Forward the file access to the controller and return the result
debug!("push: {} forwarding push tag={}", req.process_token, req.tag);
self.api.forward_push(req).await
}
}
}
impl Host {
/// Generate a new host with a random ID.
pub fn new(
base_path: PathBuf,
controller: &str,
cold_cache_enabled: bool,
warm_cache_enabled: bool,
pubsub_enabled: bool,
mock_network: bool,
) -> Self {
use rand::Rng;
let id: u32 = rand::thread_rng().gen();
assert!(cold_cache_enabled || !warm_cache_enabled);
// TODO: buffer size
Self {
id,
api: crate::net::KarlHostAPI::new(controller),
process_tokens: Arc::new(Mutex::new(HashMap::new())),
warm_processes: Arc::new(Mutex::new(HashMap::new())),
warm_cache_tx: None, // wish this didn't have to be wrapped
path_manager: Arc::new(PathManager::new(base_path, id)),
compute_lock: Arc::new(Mutex::new(())),
cold_cache_enabled,
warm_cache_enabled,
pubsub_enabled,
mock_network,
}
}
/// Spawns a background process that sends heartbeats to the controller
/// at the HEARTBEAT_INTERVAL.
///
/// The constructor creates a directory at the <KARL_PATH> if it does
/// not already exist. The working directory for any computation is at
/// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working
/// directory must be at <KARL_PATH>.
///
/// Parameters:
/// - port - The port to listen on.
/// - password - The password to register with the controller.
pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> {
self.api.register(self.id, port, password).await?;
let api = self.api.clone();
tokio::spawn(async move {
// Every HEARTBEAT_INTERVAL seconds, this process wakes up
// sends a heartbeat message to the controller.
loop {
tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await;
trace!("heartbeat");
let res = api.heartbeat().await;
if let Err(e) = res {
warn!("error sending heartbeat: {}", e);
};
}
});
// listener for spawning warm processes
let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100);
self.warm_cache_tx = Some(tx);
let host = self.clone();
tokio::spawn(async move {
loop {
let req: ComputeRequest = rx.recv().await.unwrap();
let is_warm = true;
Host::spawn_new_process(
host.clone(),
req,
is_warm,
TRIGGERED_KEY.to_string(), // special value
TRIGGERED_KEY.to_string(), // special value
).await;
}
});
Ok(())
}
async fn attach_warm_process(
&self,
req: &mut ComputeRequest,
) -> Option< | {
debug!("push");
// Validate the process is valid and has permissions to write the file.
// No serializability guarantees from other requests from the same process.
// Sanitizes the path.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if !perms.can_write(&req.tag) {
debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag); | identifier_body |
main.rs | Status::new(Code::InvalidArgument, "invalid header name")
})?;
builder = builder.header(key, &header.value[..]);
}
if self.mock_network {
// Forward the network access to the controller.
warn!("mock network request");
warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now());
self.api.forward_network(req).await?;
Ok(Response::new(NetworkAccessResult::default()))
} else {
// Make the actual network access
let handle = tokio::spawn(async move {
let res = builder.send().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Aborted, "http request failed")
})?;
let status_code = res.status().as_u16() as u32;
let headers = res
.headers()
.iter()
.map(|(key, value)| KeyValuePair {
key: key.as_str().as_bytes().to_vec(),
value: value.as_bytes().to_vec(),
})
.collect::<Vec<_>>();
let data = res.bytes().await.map_err(|e| {
error!("{}", e);
Status::new(Code::Unavailable, "error streaming response bytes")
})?;
Ok(Response::new(NetworkAccessResult {
status_code,
headers,
data: data.to_vec(),
}))
});
// Forward the network access to the controller.
self.api.forward_network(req).await?;
// Return the result of the HTTP request.
handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))?
}
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the tag corresponds to a valid param.
/// If the tag is valid and this is a stateless edge, only respond
/// succesfully if the module is trying to get the triggered data.
/// If the tag is valid and this is a stateful edge, endorse the data
/// with the host token and forward to the controller.
async fn get(
&self, req: Request<GetData>,
) -> Result<Response<GetDataResult>, Status> {
debug!("get");
// Validate the process is valid and has permissions to read the tag.
// No serializability guarantees from other requests from the same process.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
warn!("get: invalid token {}", req.process_token);
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if perms.is_triggered(&req.tag) {
// cached the triggered file
if req.lower != req.upper {
debug!("get: {} invalid triggered timestamps", req.process_token);
return Ok(Response::new(GetDataResult::default()))
} else if !self.pubsub_enabled {
debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token);
// fallthrough below
} else if let Some(data) = perms.read_triggered(&req.lower) {
debug!("get: {} reading triggered data", req.process_token);
return Ok(Response::new(GetDataResult {
timestamps: vec![req.lower],
data: vec![data],
}))
} else {
debug!("get: {} process was not triggered", req.process_token);
return Ok(Response::new(GetDataResult::default()))
}
} else if !perms.can_read(&req.tag) {
warn!("get: {} cannot read {}", req.process_token, req.tag);
return Err(Status::new(Code::Unauthenticated, "cannot read"));
}
}
// Forward the file access to the controller and return the result
debug!("get: {} forwarding tag={}", req.process_token, req.tag);
self.api.forward_get(req).await
}
/// Validates the process is an existing process, and checks its
/// permissions to see that the process is writing to a valid tag.
/// If the tag is valid, endorse the data with the host token and
/// forward to the controller.
///
/// If the tag corresponds to sensor state (say maybe it starts with #
/// which is reserved for state tags), forward the request as a state
/// change instead.
async fn push(
&self, req: Request<PushData>,
) -> Result<Response<()>, Status> {
debug!("push");
// Validate the process is valid and has permissions to write the file.
// No serializability guarantees from other requests from the same process.
// Sanitizes the path.
let req = req.into_inner();
let rx = {
if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
perms.touch()
} else {
return Err(Status::new(Code::Unauthenticated, "invalid process token"));
}
};
if let Some(mut rx) = rx {
debug!("warm process awaiting...");
rx.recv().await;
}
let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
if !perms.can_write(&req.tag) {
debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag);
return Ok(Response::new(()));
}
if state_tags::is_state_tag(&req.tag) {
Some(state_tags::parse_state_tag(&req.tag))
} else {
None
}
} else {
unreachable!()
};
if let Some((sensor, key)) = sensor_key {
// Forward as state change if the tag changes state.
debug!("push: {} forwarding state change tag={}", req.process_token, req.tag);
let req = StateChange {
host_token: String::new(),
process_token: req.process_token,
sensor_id: sensor,
key,
value: req.data,
};
self.api.forward_state(req).await
} else |
}
}
impl Host {
/// Generate a new host with a random ID.
pub fn new(
base_path: PathBuf,
controller: &str,
cold_cache_enabled: bool,
warm_cache_enabled: bool,
pubsub_enabled: bool,
mock_network: bool,
) -> Self {
use rand::Rng;
let id: u32 = rand::thread_rng().gen();
assert!(cold_cache_enabled || !warm_cache_enabled);
// TODO: buffer size
Self {
id,
api: crate::net::KarlHostAPI::new(controller),
process_tokens: Arc::new(Mutex::new(HashMap::new())),
warm_processes: Arc::new(Mutex::new(HashMap::new())),
warm_cache_tx: None, // wish this didn't have to be wrapped
path_manager: Arc::new(PathManager::new(base_path, id)),
compute_lock: Arc::new(Mutex::new(())),
cold_cache_enabled,
warm_cache_enabled,
pubsub_enabled,
mock_network,
}
}
/// Spawns a background process that sends heartbeats to the controller
/// at the HEARTBEAT_INTERVAL.
///
/// The constructor creates a directory at the <KARL_PATH> if it does
/// not already exist. The working directory for any computation is at
/// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working
/// directory must be at <KARL_PATH>.
///
/// Parameters:
/// - port - The port to listen on.
/// - password - The password to register with the controller.
pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> {
self.api.register(self.id, port, password).await?;
let api = self.api.clone();
tokio::spawn(async move {
// Every HEARTBEAT_INTERVAL seconds, this process wakes up
// sends a heartbeat message to the controller.
loop {
tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await;
trace!("heartbeat");
let res = api.heartbeat().await;
if let Err(e) = res {
warn!("error sending heartbeat: {}", e);
};
}
});
// listener for spawning warm processes
let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100);
self.warm_cache_tx = Some(tx);
let host = self.clone();
tokio::spawn(async move {
loop {
let req: ComputeRequest = rx.recv().await.unwrap();
let is_warm = true;
Host::spawn_new_process(
host.clone(),
req,
is_warm,
TRIGGERED_KEY.to_string(), // special value
TRIGGERED_KEY.to_string(), // special value
).await;
}
});
Ok(())
}
async fn attach_warm_process(
&self,
req: &mut ComputeRequest,
) -> Option | {
// Forward the file access to the controller and return the result
debug!("push: {} forwarding push tag={}", req.process_token, req.tag);
self.api.forward_push(req).await
} | conditional_block |
parser.rs | when parsing vmem files.
#[derive(Clone, Debug, Error, PartialEq, Eq)]
pub enum ParseError {
/// Failure to parse an integer from hexadecimal.
#[error("failed to parse as hexadecimal integer")]
ParseInt(#[from] ParseIntError),
/// An opened comment was not closed.
#[error("unclosed comment")]
UnclosedComment,
/// An address was started with an '@' character, but no address value followed.
#[error("address is missing a value")]
AddrMissingValue,
/// Catch-all for any characters that don't belong in vmem files.
#[error("unknown character '{0}'")]
UnknownChar(char),
}
/// Representation of the possible tokens found in vmem files.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Token {
/// End of file.
Eof,
/// Address directive, e.g. `@123abc`.
Addr(u32),
/// Data value, e.g. `abc123`.
Value(u32),
/// Comments, e.g. `/* comment */` or `// comment`.
Comment,
/// Whitespace, including newlines.
Whitespace,
}
/// Some span of the input text representing a token.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Span {
token: Token,
len: usize,
}
/// Parser for vmem files.
pub struct VmemParser;
impl VmemParser {
/// Parse a complete vmem file from a string.
pub fn parse(mut s: &str) -> ParseResult<Vmem> {
// Build up the vmem file as sections.
let mut vmem = Vmem::default();
vmem.sections.push(Section::default());
loop {
// Parse a token from the input string, and move along by its span.
let Span { len, token } = Self::token(s)?;
s = &s[len..];
match token {
Token::Eof => break,
Token::Addr(addr) => {
// Add a new section to the `Vmem` at this address.
// Here we translate between a "word index" to a byte address.
vmem.sections.push(Section {
addr: addr * 4,
data: Vec::new(),
});
}
Token::Value(value) => {
// Add the value to the current (last added) section's data.
let section = vmem.sections.last_mut().unwrap();
section.data.push(value)
}
// Whitespace and comments are ignored.
Token::Whitespace => continue,
Token::Comment => continue,
}
}
Ok(vmem)
}
/// Parse a single token from the beginning of a string.
fn token(s: &str) -> ParseResult<Span> {
let parsers = [
Self::parse_eof,
Self::parse_addr,
Self::parse_value,
Self::parse_comment,
Self::parse_whitespace,
];
// Run each parser in order, stopping when one gets a matching parse.
let span = parsers.iter().find_map(|p| p(s).transpose());
// If no parsers succeeded, return an error.
match span {
Some(span) => span,
None => Err(ParseError::UnknownChar(s.chars().next().unwrap())),
}
}
/// Try to parse an EOF from the beginning of a string.
fn parse_eof(s: &str) -> ParseResult<Option<Span>> {
// Empty strings give a 0-length `Token::Eof` span.
match s.is_empty() {
true => Ok(Some(Span {
len: 0,
token: Token::Eof,
})),
false => Ok(None),
}
}
/// Try to parse an address from the beginning of a string.
fn parse_addr(s: &str) -> ParseResult<Option<Span>> {
// Check for the beginning '@' symbol.
let Some(addr) = s.strip_prefix('@') else {
return Ok(None);
};
// Find the length of the actual address string.
let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) {
Some(0) => return Err(ParseError::AddrMissingValue),
Some(len) => len,
None => addr.len(),
};
// Ensure the '@' is included in the span's length!
let len = '@'.len_utf8() + addr_len;
// Parse from hexadecimal.
let val = u32::from_str_radix(&addr[..addr_len], 16)?;
let token = Token::Addr(val);
let span = Span { token, len };
Ok(Some(span))
}
/// Try parse a value from the beginning of a string.
fn parse_value(s: &str) -> ParseResult<Option<Span>> {
// Check for hexadecimal characters in the input.
let len = match s.find(|c: char| !c.is_ascii_hexdigit()) {
Some(0) => return Ok(None),
Some(len) => len,
None => s.len(),
};
let val = u32::from_str_radix(&s[..len], 16)?;
let token = Token::Value(val);
let span = Span { token, len };
Ok(Some(span))
}
/// Try parse a comment from the beginning of a string.
fn parse_comment(s: &str) -> ParseResult<Option<Span>> {
// Look for commend identifiers and their closers.
let len = match s {
s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()),
s if s.starts_with("/*") => {
// `find` gives us the _start_ of the `*/`, so include its length as well.
s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len()
}
_ => return Ok(None),
};
let token = Token::Comment;
let span = Span { token, len };
Ok(Some(span))
}
/// Try to parse whitespace from the beginning of a string.
fn parse_whitespace(s: &str) -> ParseResult<Option<Span>> {
// Check for whitespace at the beginning of the input.
let len = match s.find(|c: char| !c.is_whitespace()) {
Some(0) => return Ok(None),
Some(len) => len,
None => s.len(),
};
let token = Token::Whitespace;
let span = Span { len, token };
Ok(Some(span))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn parse() {
let input = r#"
AB
// comment
CD EF
@42
12 /* comment */ 34
"#;
let expected = Vmem {
sections: vec![
Section {
addr: 0x00,
data: vec![0xAB, 0xCD, 0xEF],
},
Section {
addr: 0x108,
data: vec![0x12, 0x34],
},
],
};
assert_eq!(VmemParser::parse(input).unwrap(), expected);
}
#[test]
fn | () {
// Check we can pick out the correct token from a string:
let expected = [
("", Token::Eof, 0),
("@ff", Token::Addr(0xff), 3),
("ff", Token::Value(0xff), 2),
("// X", Token::Comment, 4),
("/* X */", Token::Comment, 7),
(" ", Token::Whitespace, 2),
];
for (s, token, len) in expected {
let span = Span { token, len };
assert_eq!(VmemParser::token(s), Ok(span));
}
// Unknown non-token:
assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X')));
}
#[test]
fn eof() {
// Not EOF:
assert_eq!(VmemParser::parse_eof(" ").unwrap(), None);
// EOF:
let expected = Some(Span {
len: 0,
token: Token::Eof,
});
assert_eq!(VmemParser::parse_eof("").unwrap(), expected);
}
#[test]
fn addr() {
// No address:
assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None);
let expected = Some(Span {
len: 9,
token: Token::Addr(0x0123abcd),
});
// Partially an address:
assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected);
// Entirely an address:
assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected);
// Lower-case hex characters:
assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected);
// u32 overflow:
assert!(VmemParser::parse_addr("@123456789").is_err());
// Missing address after '@':
assert!(VmemParser::parse_addr("@").is_err());
assert!(VmemParser::parse_addr("@ FF").is_err());
}
#[test]
fn value() {
// No value:
assert_eq!(VmemParser:: | token | identifier_name |
parser.rs | when parsing vmem files.
#[derive(Clone, Debug, Error, PartialEq, Eq)]
pub enum ParseError {
/// Failure to parse an integer from hexadecimal.
#[error("failed to parse as hexadecimal integer")]
ParseInt(#[from] ParseIntError),
/// An opened comment was not closed.
#[error("unclosed comment")]
UnclosedComment,
/// An address was started with an '@' character, but no address value followed.
#[error("address is missing a value")] | /// Catch-all for any characters that don't belong in vmem files.
#[error("unknown character '{0}'")]
UnknownChar(char),
}
/// Representation of the possible tokens found in vmem files.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Token {
/// End of file.
Eof,
/// Address directive, e.g. `@123abc`.
Addr(u32),
/// Data value, e.g. `abc123`.
Value(u32),
/// Comments, e.g. `/* comment */` or `// comment`.
Comment,
/// Whitespace, including newlines.
Whitespace,
}
/// Some span of the input text representing a token.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Span {
token: Token,
len: usize,
}
/// Parser for vmem files.
pub struct VmemParser;
impl VmemParser {
/// Parse a complete vmem file from a string.
pub fn parse(mut s: &str) -> ParseResult<Vmem> {
// Build up the vmem file as sections.
let mut vmem = Vmem::default();
vmem.sections.push(Section::default());
loop {
// Parse a token from the input string, and move along by its span.
let Span { len, token } = Self::token(s)?;
s = &s[len..];
match token {
Token::Eof => break,
Token::Addr(addr) => {
// Add a new section to the `Vmem` at this address.
// Here we translate between a "word index" to a byte address.
vmem.sections.push(Section {
addr: addr * 4,
data: Vec::new(),
});
}
Token::Value(value) => {
// Add the value to the current (last added) section's data.
let section = vmem.sections.last_mut().unwrap();
section.data.push(value)
}
// Whitespace and comments are ignored.
Token::Whitespace => continue,
Token::Comment => continue,
}
}
Ok(vmem)
}
/// Parse a single token from the beginning of a string.
fn token(s: &str) -> ParseResult<Span> {
let parsers = [
Self::parse_eof,
Self::parse_addr,
Self::parse_value,
Self::parse_comment,
Self::parse_whitespace,
];
// Run each parser in order, stopping when one gets a matching parse.
let span = parsers.iter().find_map(|p| p(s).transpose());
// If no parsers succeeded, return an error.
match span {
Some(span) => span,
None => Err(ParseError::UnknownChar(s.chars().next().unwrap())),
}
}
/// Try to parse an EOF from the beginning of a string.
fn parse_eof(s: &str) -> ParseResult<Option<Span>> {
// Empty strings give a 0-length `Token::Eof` span.
match s.is_empty() {
true => Ok(Some(Span {
len: 0,
token: Token::Eof,
})),
false => Ok(None),
}
}
/// Try to parse an address from the beginning of a string.
fn parse_addr(s: &str) -> ParseResult<Option<Span>> {
// Check for the beginning '@' symbol.
let Some(addr) = s.strip_prefix('@') else {
return Ok(None);
};
// Find the length of the actual address string.
let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) {
Some(0) => return Err(ParseError::AddrMissingValue),
Some(len) => len,
None => addr.len(),
};
// Ensure the '@' is included in the span's length!
let len = '@'.len_utf8() + addr_len;
// Parse from hexadecimal.
let val = u32::from_str_radix(&addr[..addr_len], 16)?;
let token = Token::Addr(val);
let span = Span { token, len };
Ok(Some(span))
}
/// Try parse a value from the beginning of a string.
fn parse_value(s: &str) -> ParseResult<Option<Span>> {
// Check for hexadecimal characters in the input.
let len = match s.find(|c: char| !c.is_ascii_hexdigit()) {
Some(0) => return Ok(None),
Some(len) => len,
None => s.len(),
};
let val = u32::from_str_radix(&s[..len], 16)?;
let token = Token::Value(val);
let span = Span { token, len };
Ok(Some(span))
}
/// Try parse a comment from the beginning of a string.
fn parse_comment(s: &str) -> ParseResult<Option<Span>> {
// Look for commend identifiers and their closers.
let len = match s {
s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()),
s if s.starts_with("/*") => {
// `find` gives us the _start_ of the `*/`, so include its length as well.
s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len()
}
_ => return Ok(None),
};
let token = Token::Comment;
let span = Span { token, len };
Ok(Some(span))
}
/// Try to parse whitespace from the beginning of a string.
fn parse_whitespace(s: &str) -> ParseResult<Option<Span>> {
// Check for whitespace at the beginning of the input.
let len = match s.find(|c: char| !c.is_whitespace()) {
Some(0) => return Ok(None),
Some(len) => len,
None => s.len(),
};
let token = Token::Whitespace;
let span = Span { len, token };
Ok(Some(span))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn parse() {
let input = r#"
AB
// comment
CD EF
@42
12 /* comment */ 34
"#;
let expected = Vmem {
sections: vec![
Section {
addr: 0x00,
data: vec![0xAB, 0xCD, 0xEF],
},
Section {
addr: 0x108,
data: vec![0x12, 0x34],
},
],
};
assert_eq!(VmemParser::parse(input).unwrap(), expected);
}
#[test]
fn token() {
// Check we can pick out the correct token from a string:
let expected = [
("", Token::Eof, 0),
("@ff", Token::Addr(0xff), 3),
("ff", Token::Value(0xff), 2),
("// X", Token::Comment, 4),
("/* X */", Token::Comment, 7),
(" ", Token::Whitespace, 2),
];
for (s, token, len) in expected {
let span = Span { token, len };
assert_eq!(VmemParser::token(s), Ok(span));
}
// Unknown non-token:
assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X')));
}
#[test]
fn eof() {
// Not EOF:
assert_eq!(VmemParser::parse_eof(" ").unwrap(), None);
// EOF:
let expected = Some(Span {
len: 0,
token: Token::Eof,
});
assert_eq!(VmemParser::parse_eof("").unwrap(), expected);
}
#[test]
fn addr() {
// No address:
assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None);
let expected = Some(Span {
len: 9,
token: Token::Addr(0x0123abcd),
});
// Partially an address:
assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected);
// Entirely an address:
assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected);
// Lower-case hex characters:
assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected);
// u32 overflow:
assert!(VmemParser::parse_addr("@123456789").is_err());
// Missing address after '@':
assert!(VmemParser::parse_addr("@").is_err());
assert!(VmemParser::parse_addr("@ FF").is_err());
}
#[test]
fn value() {
// No value:
assert_eq!(VmemParser::parse | AddrMissingValue,
| random_line_split |
parser.rs | parsing vmem files.
#[derive(Clone, Debug, Error, PartialEq, Eq)]
pub enum ParseError {
/// Failure to parse an integer from hexadecimal.
#[error("failed to parse as hexadecimal integer")]
ParseInt(#[from] ParseIntError),
/// An opened comment was not closed.
#[error("unclosed comment")]
UnclosedComment,
/// An address was started with an '@' character, but no address value followed.
#[error("address is missing a value")]
AddrMissingValue,
/// Catch-all for any characters that don't belong in vmem files.
#[error("unknown character '{0}'")]
UnknownChar(char),
}
/// Representation of the possible tokens found in vmem files.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Token {
/// End of file.
Eof,
/// Address directive, e.g. `@123abc`.
Addr(u32),
/// Data value, e.g. `abc123`.
Value(u32),
/// Comments, e.g. `/* comment */` or `// comment`.
Comment,
/// Whitespace, including newlines.
Whitespace,
}
/// Some span of the input text representing a token.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Span {
token: Token,
len: usize,
}
/// Parser for vmem files.
pub struct VmemParser;
impl VmemParser {
/// Parse a complete vmem file from a string.
pub fn parse(mut s: &str) -> ParseResult<Vmem> {
// Build up the vmem file as sections.
let mut vmem = Vmem::default();
vmem.sections.push(Section::default());
loop {
// Parse a token from the input string, and move along by its span.
let Span { len, token } = Self::token(s)?;
s = &s[len..];
match token {
Token::Eof => break,
Token::Addr(addr) => {
// Add a new section to the `Vmem` at this address.
// Here we translate between a "word index" to a byte address.
vmem.sections.push(Section {
addr: addr * 4,
data: Vec::new(),
});
}
Token::Value(value) => {
// Add the value to the current (last added) section's data.
let section = vmem.sections.last_mut().unwrap();
section.data.push(value)
}
// Whitespace and comments are ignored.
Token::Whitespace => continue,
Token::Comment => continue,
}
}
Ok(vmem)
}
/// Parse a single token from the beginning of a string.
fn token(s: &str) -> ParseResult<Span> {
let parsers = [
Self::parse_eof,
Self::parse_addr,
Self::parse_value,
Self::parse_comment,
Self::parse_whitespace,
];
// Run each parser in order, stopping when one gets a matching parse.
let span = parsers.iter().find_map(|p| p(s).transpose());
// If no parsers succeeded, return an error.
match span {
Some(span) => span,
None => Err(ParseError::UnknownChar(s.chars().next().unwrap())),
}
}
/// Try to parse an EOF from the beginning of a string.
fn parse_eof(s: &str) -> ParseResult<Option<Span>> {
// Empty strings give a 0-length `Token::Eof` span.
match s.is_empty() {
true => Ok(Some(Span {
len: 0,
token: Token::Eof,
})),
false => Ok(None),
}
}
/// Try to parse an address from the beginning of a string.
fn parse_addr(s: &str) -> ParseResult<Option<Span>> {
// Check for the beginning '@' symbol.
let Some(addr) = s.strip_prefix('@') else {
return Ok(None);
};
// Find the length of the actual address string.
let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) {
Some(0) => return Err(ParseError::AddrMissingValue),
Some(len) => len,
None => addr.len(),
};
// Ensure the '@' is included in the span's length!
let len = '@'.len_utf8() + addr_len;
// Parse from hexadecimal.
let val = u32::from_str_radix(&addr[..addr_len], 16)?;
let token = Token::Addr(val);
let span = Span { token, len };
Ok(Some(span))
}
/// Try parse a value from the beginning of a string.
fn parse_value(s: &str) -> ParseResult<Option<Span>> {
// Check for hexadecimal characters in the input.
let len = match s.find(|c: char| !c.is_ascii_hexdigit()) {
Some(0) => return Ok(None),
Some(len) => len,
None => s.len(),
};
let val = u32::from_str_radix(&s[..len], 16)?;
let token = Token::Value(val);
let span = Span { token, len };
Ok(Some(span))
}
/// Try parse a comment from the beginning of a string.
fn parse_comment(s: &str) -> ParseResult<Option<Span>> {
// Look for commend identifiers and their closers.
let len = match s {
s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()),
s if s.starts_with("/*") => {
// `find` gives us the _start_ of the `*/`, so include its length as well.
s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len()
}
_ => return Ok(None),
};
let token = Token::Comment;
let span = Span { token, len };
Ok(Some(span))
}
/// Try to parse whitespace from the beginning of a string.
fn parse_whitespace(s: &str) -> ParseResult<Option<Span>> |
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn parse() {
let input = r#"
AB
// comment
CD EF
@42
12 /* comment */ 34
"#;
let expected = Vmem {
sections: vec![
Section {
addr: 0x00,
data: vec![0xAB, 0xCD, 0xEF],
},
Section {
addr: 0x108,
data: vec![0x12, 0x34],
},
],
};
assert_eq!(VmemParser::parse(input).unwrap(), expected);
}
#[test]
fn token() {
// Check we can pick out the correct token from a string:
let expected = [
("", Token::Eof, 0),
("@ff", Token::Addr(0xff), 3),
("ff", Token::Value(0xff), 2),
("// X", Token::Comment, 4),
("/* X */", Token::Comment, 7),
(" ", Token::Whitespace, 2),
];
for (s, token, len) in expected {
let span = Span { token, len };
assert_eq!(VmemParser::token(s), Ok(span));
}
// Unknown non-token:
assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X')));
}
#[test]
fn eof() {
// Not EOF:
assert_eq!(VmemParser::parse_eof(" ").unwrap(), None);
// EOF:
let expected = Some(Span {
len: 0,
token: Token::Eof,
});
assert_eq!(VmemParser::parse_eof("").unwrap(), expected);
}
#[test]
fn addr() {
// No address:
assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None);
let expected = Some(Span {
len: 9,
token: Token::Addr(0x0123abcd),
});
// Partially an address:
assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected);
// Entirely an address:
assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected);
// Lower-case hex characters:
assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected);
// u32 overflow:
assert!(VmemParser::parse_addr("@123456789").is_err());
// Missing address after '@':
assert!(VmemParser::parse_addr("@").is_err());
assert!(VmemParser::parse_addr("@ FF").is_err());
}
#[test]
fn value() {
// No value:
assert_eq!(VmemParser:: | {
// Check for whitespace at the beginning of the input.
let len = match s.find(|c: char| !c.is_whitespace()) {
Some(0) => return Ok(None),
Some(len) => len,
None => s.len(),
};
let token = Token::Whitespace;
let span = Span { len, token };
Ok(Some(span))
} | identifier_body |
hCassandra_test.py | .01" \
"AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \
"AND comment = ''" \
"AND compaction = {'class': " \
"'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \
" 'min_threshold': '4'}" \
"AND compression = {'enabled': 'false'}" \
"AND crc_check_chance = 1.0" \
"AND dclocal_read_repair_chance = 0.1" \
"AND default_time_to_live = 0" \
"AND gc_grace_seconds = 864000" \
"AND max_index_interval = 2048" \
"AND memtable_flush_period_in_ms = 0" \
"AND min_index_interval = 128" \
"AND read_repair_chance = 0.0" \
"AND speculative_retry = '99PERCENTILE';"
l.info("Create standard1 Table")
# Create 'standard1' & 'counter1' default Tables
session.execute(table_create)
l.info('Succeeded to create keyspace1 and standard1 Table.')
# Create Trigger
trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger'
trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar
session.execute(trigger_cql)
except Exception as e:
l.error('FAILED to create trigger. Error: %s' % str(e))
def reset_db(self):
try:
ips = self.options.cluster_ips.split(',')
cluster = Cluster(ips)
l.debug("Connecting to Cassandra Cluster: [%s]" % (ips))
session = cluster.connect()
l.info("dropping [keyspace1] (default) keyspace...")
session.execute("DROP KEYSPACE keyspace1")
l.info('Succeeded to delete DB.')
except Exception as e:
l.error('Failed to reset Cassandra DB. Error: %s' % str(e))
def stop_and_delete_all_apps(self):
self.delete_all_launched_apps()
def result_parser(self):
result = {
'total ops': [], # Running total number of operations during the run.
'op/s': [], # Number of operations per second performed during the run.
'pk/s': [], # Number of partition operations per second performed during the run.
'row/s': 0, # Number of row operations per second performed during the run.
'mean': 0, # Average latency in milisecond for each operation during that run.
'med': [], # Median latency in miliseconds for each operation during that run.
'.95': [], # 95% of the time the latency was less than this number.
'.99': [], # 99% of the time the latency was less than this number.
'max': [], # Maximum latency in miliseconds.
'gc_num': 0, # Number of garbage collections.
'max_ms': [], # Longest garbage collection in miliseconds.
'sum_ms': 0, # Total of garbage collection in miliseconds.
'sdv_ms': [], # Standard deviation in miliseconds.
'mb': 0, # Size of the garbage collection in megabytes.
'op_time': [] # Total Operation Time per client
}
cassandra_results = {
'write': copy.deepcopy(result),
'read': copy.deepcopy(result)
}
# Get stats for Cassandra Stress Client
stats = self.get_app_stats(self.stress_client)
# num_clients = self.options.total_client_count
db_ops = ['write', 'read']
for client in stats.keys():
info = stats[client]
for db_op in db_ops:
if db_op in info:
try:
info[db_op] = ast.literal_eval(info[db_op])
cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions']))
cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate']))
cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate']))
cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile']))
cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile']))
cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count'])
cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)']))
cassandra_results[db_op]['max'].append(float(info[db_op]['latency max']))
cassandra_results[db_op]['med'].append(float(info[db_op]['latency median']))
cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', ''))
except Exception as e:
l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op]))
l.error("ERROR: %s" % str(e))
return cassandra_results
def launch_stress_client(self):
max_threads_per_client = 20
l.info("Launching the Cassandra Stress Client(s). Total clients = %s" % (self.options.total_client_count))
# Determine number of threads per Cassandra Stress Client
if self.options.total_client_count > max_threads_per_client:
# Calculating the number of apps we need to scale to
client_count = math.ceil(self.options.total_client_count / float(max_threads_per_client))
# Calculating the suitable number of threads we need to run in an app
threads_per_client = int(math.ceil(self.options.total_client_count / client_count))
else:
threads_per_client = self.options.total_client_count
l.debug("Number of Threads per Cassandra-Stress Client, set to: %s" % (threads_per_client))
self.create_binary_app(name=self.stress_client, app_script='./src/stress_client.py %s %s %s %s %s %s'
% (self.options.total_ops_count,
threads_per_client,
self.options.cluster_ips,
self.options.test_duration,
self.options.cl,
self.options.profile),
cpus=0.2, mem=600, ports=[0])
if self.options.total_client_count > max_threads_per_client:
l.info("Number of Cassandra-Stress Clients to launch = %s" % (client_count))
self.scale_and_verify_app(self.stress_client, client_count)
def delete_all_launched_apps(self):
l.info("Deleting Stress Clients")
self.delete_app(self.stress_client)
def simulate_node_failure(node_ips, max_duration, tests_completed):
"""
Simulate random cassandra node failure and 'rejoin' into cluster
"""
run = True
l.info("START Cassandra Node Failure Simulation. Entering.")
while run:
# If stress-tests are still running continue with node failure simulation
if not tests_completed.isSet():
# Select 'random' node from Cassandra Cluster
node_ip = select_random_node(node_ips)
# Determine delay before stopping cassandra node (to simulate failure / node down)
duration_secs = max_duration*60
time_next_stop = random.randint(1, duration_secs/4)
l.debug("STOP programmed in %s seconds" % time_next_stop)
# Wait
time.sleep(time_next_stop)
ssh_fail = False
# Stop Cassandra Node (simulate failure / stop the service)
stop_cmd = "sudo service cassandra stop"
l.debug("STOP Cassandra Node: %s"%node_ip)
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(str(node_ip))
l.debug("[Simulate Cassandra Node Failure] Connected to host: %s" % node_ip)
except paramiko.AuthenticationException as e:
l.error("Authentication failed when connecting to %s. ERROR: %s" % (node_ip, e))
ssh_fail = True
except:
l.error("Could not SSH to %s, waiting for it to start" % node_ip)
ssh_fail = True
if not ssh_fail:
# Send the command to STOP cassandra node
ssh.exec_command(stop_cmd)
# Determine delay before starting cassandra node (to simulate rejoin to the cluster)
time_next_rejoin = random.randint(1, duration_secs/4)
l.debug("START programmed in %s seconds" % time_next_rejoin)
time.sleep(time_next_rejoin)
# Start Cassandra Node (simulate rejoin / start the service)
start_cmd = "sudo service cassandra start"
l.debug("START Cassandra Node: %s"%node_ip)
# Send the command (non-blocking)
ssh.exec_command(start_cmd)
# Disconnect from the host
l.debug("Closing SSH connection to host: %s" % node_ip)
ssh.close()
run=False
else:
# Tests Complete has been signaled
run=False
l.info("END node failure simulation. Exiting.")
def select_random_node(cluster_ips):
| """
Select a random cassandra node from a list of IPs
"""
return random.choice(cluster_ips) | identifier_body | |
hCassandra_test.py | self.create_triggers()
# Launch Cassandra Stress-Client(s)
self.launch_stress_client()
# Rerun the test
res = self.rerun_test(self.options)
# Return Test Results
return res
def create_triggers(self):
try:
cluster_ips = self.options.cluster_ips.split(',')
cluster = Cluster(cluster_ips)
l.debug("Connecting to Cassandra Cluster: [%s]" % (cluster_ips))
session = cluster.connect()
l.info("Create keyspace [keyspace1]...")
# Create Keyspace
session.execute("CREATE KEYSPACE keyspace1 WITH replication = {'class': 'SimpleStrategy', "
"'replication_factor': '1'} AND durable_writes = true;")
l.info("Create tables [standard1] & [counter1]...")
table_create = "CREATE TABLE keyspace1.standard1 ( " \
"key blob PRIMARY KEY," \
"\"C0\" blob," \
"\"C1\" blob," \
"\"C2\" blob," \
"\"C3\" blob," \
"\"C3\" blob" \
") WITH COMPACT STORAGE" \
"AND bloom_filter_fp_chance = 0.01" \
"AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \
"AND comment = ''" \
"AND compaction = {'class': " \
"'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \
" 'min_threshold': '4'}" \
"AND compression = {'enabled': 'false'}" \
"AND crc_check_chance = 1.0" \
"AND dclocal_read_repair_chance = 0.1" \
"AND default_time_to_live = 0" \
"AND gc_grace_seconds = 864000" \
"AND max_index_interval = 2048" \
"AND memtable_flush_period_in_ms = 0" \
"AND min_index_interval = 128" \
"AND read_repair_chance = 0.0" \
"AND speculative_retry = '99PERCENTILE';"
l.info("Create standard1 Table")
# Create 'standard1' & 'counter1' default Tables
session.execute(table_create)
l.info('Succeeded to create keyspace1 and standard1 Table.')
# Create Trigger
trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger'
trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar
session.execute(trigger_cql)
except Exception as e:
l.error('FAILED to create trigger. Error: %s' % str(e))
def reset_db(self):
try:
ips = self.options.cluster_ips.split(',')
cluster = Cluster(ips)
l.debug("Connecting to Cassandra Cluster: [%s]" % (ips))
session = cluster.connect()
l.info("dropping [keyspace1] (default) keyspace...")
session.execute("DROP KEYSPACE keyspace1")
l.info('Succeeded to delete DB.')
except Exception as e:
l.error('Failed to reset Cassandra DB. Error: %s' % str(e))
def stop_and_delete_all_apps(self):
self.delete_all_launched_apps()
def result_parser(self):
result = {
'total ops': [], # Running total number of operations during the run.
'op/s': [], # Number of operations per second performed during the run.
'pk/s': [], # Number of partition operations per second performed during the run.
'row/s': 0, # Number of row operations per second performed during the run.
'mean': 0, # Average latency in milisecond for each operation during that run.
'med': [], # Median latency in miliseconds for each operation during that run.
'.95': [], # 95% of the time the latency was less than this number.
'.99': [], # 99% of the time the latency was less than this number.
'max': [], # Maximum latency in miliseconds.
'gc_num': 0, # Number of garbage collections.
'max_ms': [], # Longest garbage collection in miliseconds.
'sum_ms': 0, # Total of garbage collection in miliseconds.
'sdv_ms': [], # Standard deviation in miliseconds.
'mb': 0, # Size of the garbage collection in megabytes.
'op_time': [] # Total Operation Time per client
}
cassandra_results = {
'write': copy.deepcopy(result),
'read': copy.deepcopy(result)
}
# Get stats for Cassandra Stress Client
stats = self.get_app_stats(self.stress_client)
# num_clients = self.options.total_client_count
db_ops = ['write', 'read']
for client in stats.keys():
info = stats[client]
for db_op in db_ops:
if db_op in info:
try:
info[db_op] = ast.literal_eval(info[db_op])
cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions']))
cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate']))
cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate']))
cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile']))
cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile']))
cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count'])
cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)']))
cassandra_results[db_op]['max'].append(float(info[db_op]['latency max']))
cassandra_results[db_op]['med'].append(float(info[db_op]['latency median']))
cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', ''))
except Exception as e:
l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op]))
l.error("ERROR: %s" % str(e))
return cassandra_results
def launch_stress_client(self):
max_threads_per_client = 20
l.info("Launching the Cassandra Stress Client(s). Total clients = %s" % (self.options.total_client_count))
# Determine number of threads per Cassandra Stress Client
if self.options.total_client_count > max_threads_per_client:
# Calculating the number of apps we need to scale to
client_count = math.ceil(self.options.total_client_count / float(max_threads_per_client))
# Calculating the suitable number of threads we need to run in an app
threads_per_client = int(math.ceil(self.options.total_client_count / client_count))
else:
threads_per_client = self.options.total_client_count
l.debug("Number of Threads per Cassandra-Stress Client, set to: %s" % (threads_per_client))
self.create_binary_app(name=self.stress_client, app_script='./src/stress_client.py %s %s %s %s %s %s'
% (self.options.total_ops_count,
threads_per_client,
self.options.cluster_ips,
self.options.test_duration,
self.options.cl,
self.options.profile),
cpus=0.2, mem=600, ports=[0])
if self.options.total_client_count > max_threads_per_client:
l.info("Number of Cassandra-Stress Clients to launch = %s" % (client_count))
self.scale_and_verify_app(self.stress_client, client_count)
def delete_all_launched_apps(self):
l.info("Deleting Stress Clients")
self.delete_app(self.stress_client)
def simulate_node_failure(node_ips, max_duration, tests_completed):
"""
Simulate random cassandra node failure and 'rejoin' into cluster
"""
run = True
l.info("START Cassandra Node Failure Simulation. Entering.")
while run:
# If stress-tests are still running continue with node failure simulation
if not tests_completed.isSet():
# Select 'random' node from Cassandra Cluster
| node_ip = select_random_node(node_ips)
# Determine delay before stopping cassandra node (to simulate failure / node down)
duration_secs = max_duration*60
time_next_stop = random.randint(1, duration_secs/4)
l.debug("STOP programmed in %s seconds" % time_next_stop)
# Wait
time.sleep(time_next_stop)
ssh_fail = False
# Stop Cassandra Node (simulate failure / stop the service)
stop_cmd = "sudo service cassandra stop"
l.debug("STOP Cassandra Node: %s"%node_ip)
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(str(node_ip))
l.debug("[Simulate Cassandra Node Failure] Connected to host: %s" % node_ip)
except paramiko.AuthenticationException as e:
l.error("Authentication failed when connecting to %s. ERROR: %s" % (node_ip, e))
ssh_fail = True
except: | conditional_block | |
hCassandra_test.py | (HydraBase):
def __init__(self, options, runtest=True, mock=False):
self.options = options
self.config = ConfigParser()
HydraBase.__init__(self, 'CassandraStressTest', self.options, self.config, startappserver=runtest, mock=mock,
app_dirs=['src', 'hydra'])
self.stress_client = '/stress-client'
self.add_appid(self.stress_client)
if runtest:
self.run_test()
self.stop_appserver()
def rerun_test(self, options):
self.options = options
self.reset_all_app_stats(self.stress_client)
# Signal message sending
l.info("Sending signal to Cassandra Stress client to start sending all messages..")
# Force start-time for ALL clients +60 seconds from current time
start_time = datetime.now() + timedelta(seconds=60)
l.debug("Current Time: %s, Start Time: %s" % (datetime.now(), start_time))
task_list = self.all_task_ids[self.stress_client]
ha_list = []
for task_id in task_list:
info = self.apps[self.stress_client]['ip_port_map'][task_id]
port = info[0]
ip = info[1]
ha_stress = HAnalyser(ip, port, task_id)
# Signal ALL clients to start sending data, blocks until clients respond with "DONE" after sending all data
ha_stress.start_test(start_time=start_time)
ha_list.append(ha_stress)
l.info('Waiting for test(s) to end...')
if self.options.sim_failure:
l.debug("Simulate Cassandra Node Failure. Init.")
# Thread Event to indicate tests have been completed
tests_completed = threading.Event()
# Launch parallel Thread to simulate cassandra node failure.
l.debug("Launch separate thread to simulate node failure and rejoin.")
failure_thread = threading.Thread(target=simulate_node_failure, args=(self.options.cluster_ips.split(','),
self.options.test_duration, tests_completed))
failure_thread.start()
for idx, ha_stress in enumerate(ha_list):
l.debug('Waiting for task [%s] in [%s:%s] test to END. Iteration: %s' % (ha_stress.task_id, ha_stress.server_ip, ha_stress.port, idx))
ha_stress.wait_for_testend()
if self.options.sim_failure:
l.debug("ALL tests are COMPLETED.")
tests_completed.set()
l.info('Fetch App Stats')
self.fetch_app_stats(self.stress_client)
return self.result_parser()
def run_test(self, first_run=True):
# Get Mesos/Marathon Clients
self.start_init()
# Reset (drop) Cassandra DB for cassandra-stress tool default 'keyspace'
self.reset_db()
# Create Table(s) & Triggers for stress Test
# self.create_triggers()
# Launch Cassandra Stress-Client(s)
self.launch_stress_client()
# Rerun the test
res = self.rerun_test(self.options)
# Return Test Results
return res
def create_triggers(self):
try:
cluster_ips = self.options.cluster_ips.split(',')
cluster = Cluster(cluster_ips)
l.debug("Connecting to Cassandra Cluster: [%s]" % (cluster_ips))
session = cluster.connect()
l.info("Create keyspace [keyspace1]...")
# Create Keyspace
session.execute("CREATE KEYSPACE keyspace1 WITH replication = {'class': 'SimpleStrategy', "
"'replication_factor': '1'} AND durable_writes = true;")
l.info("Create tables [standard1] & [counter1]...")
table_create = "CREATE TABLE keyspace1.standard1 ( " \
"key blob PRIMARY KEY," \
"\"C0\" blob," \
"\"C1\" blob," \
"\"C2\" blob," \
"\"C3\" blob," \
"\"C3\" blob" \
") WITH COMPACT STORAGE" \
"AND bloom_filter_fp_chance = 0.01" \
"AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \
"AND comment = ''" \
"AND compaction = {'class': " \
"'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \
" 'min_threshold': '4'}" \
"AND compression = {'enabled': 'false'}" \
"AND crc_check_chance = 1.0" \
"AND dclocal_read_repair_chance = 0.1" \
"AND default_time_to_live = 0" \
"AND gc_grace_seconds = 864000" \
"AND max_index_interval = 2048" \
"AND memtable_flush_period_in_ms = 0" \
"AND min_index_interval = 128" \
"AND read_repair_chance = 0.0" \
"AND speculative_retry = '99PERCENTILE';"
l.info("Create standard1 Table")
# Create 'standard1' & 'counter1' default Tables
session.execute(table_create)
l.info('Succeeded to create keyspace1 and standard1 Table.')
# Create Trigger
trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger'
trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar
session.execute(trigger_cql)
except Exception as e:
l.error('FAILED to create trigger. Error: %s' % str(e))
def reset_db(self):
try:
ips = self.options.cluster_ips.split(',')
cluster = Cluster(ips)
l.debug("Connecting to Cassandra Cluster: [%s]" % (ips))
session = cluster.connect()
l.info("dropping [keyspace1] (default) keyspace...")
session.execute("DROP KEYSPACE keyspace1")
l.info('Succeeded to delete DB.')
except Exception as e:
l.error('Failed to reset Cassandra DB. Error: %s' % str(e))
def stop_and_delete_all_apps(self):
self.delete_all_launched_apps()
def result_parser(self):
result = {
'total ops': [], # Running total number of operations during the run.
'op/s': [], # Number of operations per second performed during the run.
'pk/s': [], # Number of partition operations per second performed during the run.
'row/s': 0, # Number of row operations per second performed during the run.
'mean': 0, # Average latency in milisecond for each operation during that run.
'med': [], # Median latency in miliseconds for each operation during that run.
'.95': [], # 95% of the time the latency was less than this number.
'.99': [], # 99% of the time the latency was less than this number.
'max': [], # Maximum latency in miliseconds.
'gc_num': 0, # Number of garbage collections.
'max_ms': [], # Longest garbage collection in miliseconds.
'sum_ms': 0, # Total of garbage collection in miliseconds.
'sdv_ms': [], # Standard deviation in miliseconds.
'mb': 0, # Size of the garbage collection in megabytes.
'op_time': [] # Total Operation Time per client
}
cassandra_results = {
'write': copy.deepcopy(result),
'read': copy.deepcopy(result)
}
# Get stats for Cassandra Stress Client
stats = self.get_app_stats(self.stress_client)
# num_clients = self.options.total_client_count
db_ops = ['write', 'read']
for client in stats.keys():
info = stats[client]
for db_op in db_ops:
if db_op in info:
try:
info[db_op] = ast.literal_eval(info[db_op])
cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions']))
cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate']))
cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate']))
cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile']))
cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile']))
cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count'])
cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)']))
cassandra_results[db_op]['max'].append(float(info[db_op]['latency max']))
cassandra_results[db_op]['med'].append(float(info[db_op]['latency median']))
cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', ''))
except Exception as e:
l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op]))
l.error("ERROR: %s" % str(e))
return cassandra_results
def launch_stress_client(self):
max_threads_per_client = | RunTestCassandra | identifier_name | |
hCassandra_test.py | l.info("Create keyspace [keyspace1]...")
# Create Keyspace
session.execute("CREATE KEYSPACE keyspace1 WITH replication = {'class': 'SimpleStrategy', "
"'replication_factor': '1'} AND durable_writes = true;")
l.info("Create tables [standard1] & [counter1]...")
table_create = "CREATE TABLE keyspace1.standard1 ( " \
"key blob PRIMARY KEY," \
"\"C0\" blob," \
"\"C1\" blob," \
"\"C2\" blob," \
"\"C3\" blob," \
"\"C3\" blob" \
") WITH COMPACT STORAGE" \
"AND bloom_filter_fp_chance = 0.01" \
"AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \
"AND comment = ''" \
"AND compaction = {'class': " \
"'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \
" 'min_threshold': '4'}" \
"AND compression = {'enabled': 'false'}" \
"AND crc_check_chance = 1.0" \
"AND dclocal_read_repair_chance = 0.1" \
"AND default_time_to_live = 0" \
"AND gc_grace_seconds = 864000" \
"AND max_index_interval = 2048" \
"AND memtable_flush_period_in_ms = 0" \
"AND min_index_interval = 128" \
"AND read_repair_chance = 0.0" \
"AND speculative_retry = '99PERCENTILE';"
l.info("Create standard1 Table")
# Create 'standard1' & 'counter1' default Tables
session.execute(table_create)
l.info('Succeeded to create keyspace1 and standard1 Table.')
# Create Trigger
trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger'
trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar
session.execute(trigger_cql)
except Exception as e:
l.error('FAILED to create trigger. Error: %s' % str(e))
def reset_db(self):
try:
ips = self.options.cluster_ips.split(',')
cluster = Cluster(ips)
l.debug("Connecting to Cassandra Cluster: [%s]" % (ips))
session = cluster.connect()
l.info("dropping [keyspace1] (default) keyspace...")
session.execute("DROP KEYSPACE keyspace1")
l.info('Succeeded to delete DB.')
except Exception as e:
l.error('Failed to reset Cassandra DB. Error: %s' % str(e))
def stop_and_delete_all_apps(self):
self.delete_all_launched_apps()
def result_parser(self):
result = {
'total ops': [], # Running total number of operations during the run.
'op/s': [], # Number of operations per second performed during the run.
'pk/s': [], # Number of partition operations per second performed during the run.
'row/s': 0, # Number of row operations per second performed during the run.
'mean': 0, # Average latency in milisecond for each operation during that run.
'med': [], # Median latency in miliseconds for each operation during that run.
'.95': [], # 95% of the time the latency was less than this number.
'.99': [], # 99% of the time the latency was less than this number.
'max': [], # Maximum latency in miliseconds.
'gc_num': 0, # Number of garbage collections.
'max_ms': [], # Longest garbage collection in miliseconds.
'sum_ms': 0, # Total of garbage collection in miliseconds.
'sdv_ms': [], # Standard deviation in miliseconds.
'mb': 0, # Size of the garbage collection in megabytes.
'op_time': [] # Total Operation Time per client
}
cassandra_results = {
'write': copy.deepcopy(result),
'read': copy.deepcopy(result)
}
# Get stats for Cassandra Stress Client
stats = self.get_app_stats(self.stress_client)
# num_clients = self.options.total_client_count
db_ops = ['write', 'read']
for client in stats.keys():
info = stats[client]
for db_op in db_ops:
if db_op in info:
try:
info[db_op] = ast.literal_eval(info[db_op])
cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions']))
cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate']))
cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate']))
cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile']))
cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile']))
cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count'])
cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)']))
cassandra_results[db_op]['max'].append(float(info[db_op]['latency max']))
cassandra_results[db_op]['med'].append(float(info[db_op]['latency median']))
cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', ''))
except Exception as e:
l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op]))
l.error("ERROR: %s" % str(e))
return cassandra_results
def launch_stress_client(self):
max_threads_per_client = 20
l.info("Launching the Cassandra Stress Client(s). Total clients = %s" % (self.options.total_client_count))
# Determine number of threads per Cassandra Stress Client
if self.options.total_client_count > max_threads_per_client:
# Calculating the number of apps we need to scale to
client_count = math.ceil(self.options.total_client_count / float(max_threads_per_client))
# Calculating the suitable number of threads we need to run in an app
threads_per_client = int(math.ceil(self.options.total_client_count / client_count))
else:
threads_per_client = self.options.total_client_count
l.debug("Number of Threads per Cassandra-Stress Client, set to: %s" % (threads_per_client))
self.create_binary_app(name=self.stress_client, app_script='./src/stress_client.py %s %s %s %s %s %s'
% (self.options.total_ops_count,
threads_per_client,
self.options.cluster_ips,
self.options.test_duration,
self.options.cl,
self.options.profile),
cpus=0.2, mem=600, ports=[0])
if self.options.total_client_count > max_threads_per_client:
l.info("Number of Cassandra-Stress Clients to launch = %s" % (client_count))
self.scale_and_verify_app(self.stress_client, client_count)
def delete_all_launched_apps(self):
l.info("Deleting Stress Clients")
self.delete_app(self.stress_client)
def simulate_node_failure(node_ips, max_duration, tests_completed):
"""
Simulate random cassandra node failure and 'rejoin' into cluster
"""
run = True
l.info("START Cassandra Node Failure Simulation. Entering.")
while run:
# If stress-tests are still running continue with node failure simulation
if not tests_completed.isSet():
# Select 'random' node from Cassandra Cluster
node_ip = select_random_node(node_ips)
# Determine delay before stopping cassandra node (to simulate failure / node down)
duration_secs = max_duration*60
time_next_stop = random.randint(1, duration_secs/4)
l.debug("STOP programmed in %s seconds" % time_next_stop)
# Wait
time.sleep(time_next_stop)
ssh_fail = False
# Stop Cassandra Node (simulate failure / stop the service)
stop_cmd = "sudo service cassandra stop"
l.debug("STOP Cassandra Node: %s"%node_ip)
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(str(node_ip))
l.debug("[Simulate Cassandra Node Failure] Connected to host: %s" % node_ip)
except paramiko.AuthenticationException as e:
l.error("Authentication failed when connecting to %s. ERROR: %s" % (node_ip, e))
ssh_fail = True
except:
l.error("Could not SSH to %s, waiting for it to start" % node_ip)
ssh_fail = True
if not ssh_fail:
# Send the command to STOP cassandra node | ssh.exec_command(stop_cmd)
# Determine delay before starting cassandra node (to simulate rejoin to the cluster)
time_next_rejoin = random.randint(1, duration_secs/4)
l.debug("START programmed in %s seconds" % time_next_rejoin) | random_line_split | |
main.rs | a",))
.add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",))
.add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",))
.add("AskWhen", vec!("rzt"))
.add("When", vec!("ryt"))
.add("AskHow", vec!("ryv"))
.add("AskWhere", vec!("rys"))
.add("Where", vec!("rzs"))
.add("Who", vec!("rr","rz","Rg",))
.add("AskWho", vec!("ry"))
.add("Conjunction", vec!("rzv","u","c","cc",))
.add("Preposition", vec!("r","uyy","udeng","p","udh",))
.add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",))
.add("AllModals", vec!("y","e","o",))
.add("PostFixModal", vec!("y"))
.add("PreFixModal", vec!("e"))
.add("Onomatopoeia", vec!("o"));
let new_library = HashMap::new();
Directionary {
library : new_library,
matcher : tag_matcher,
}
}
/// Loads the bundled ansj_seg dictionary from
/// `resources/ansj_seg-master/default.dic`, keeping only entries whose
/// frequency lies inside `[lowest_input, highest_input]` (each bound is
/// optional and defaults to the full `u32` range).
///
/// Each dictionary line has the form `word\ttag\tfrequency`. Malformed
/// lines and non-numeric frequencies are skipped instead of desyncing the
/// whole parse (the previous version flattened the file into one
/// tab-separated stream and tracked columns with an `i % 3` state machine,
/// so a single short line shifted every later column, and a bad frequency
/// panicked via `unwrap`).
///
/// Panics only if the dictionary file itself cannot be read.
pub fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary {
    use std::fs;
    // Default bounds: accept every representable frequency. (The old
    // default upper bound was i32::MAX, silently rejecting entries in
    // the top half of the u32 range.)
    let highest_frequency: u32 = highest_input.unwrap_or(u32::MAX);
    let lowest_frequency: u32 = lowest_input.unwrap_or(0);
    let mut directionary = Directionary::new();
    let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic")
        .expect("failed to open directionary file");
    for line in raw_bytes.lines() {
        // Column 1: word, column 2: tag, column 3: frequency.
        let mut fields = line.split('\t');
        let (word, tag, freq) = match (fields.next(), fields.next(), fields.next()) {
            (Some(w), Some(t), Some(f)) => (w, t, f),
            _ => continue, // skip empty / malformed lines
        };
        let frequency: u32 = match freq.trim().parse() {
            Ok(f) => f,
            Err(_) => continue, // skip entries with a non-numeric frequency
        };
        if frequency > highest_frequency || frequency < lowest_frequency {
            continue;
        }
        let mut word_entry = Word::from_literal(word);
        word_entry.set_tag(tag);
        directionary.add_a_word(&word_entry);
    }
    directionary
}
/// Picks a uniformly random word registered under the element category
/// `element` (e.g. "Noun", "AllVerbs").
///
/// Panics if the category is unknown or its word list is empty — both are
/// programming errors, mirroring the original behaviour.
pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String {
    // Single lookup with an explicit panic message instead of the old
    // match-then-unwrap two-step.
    let library_vec = self
        .library
        .get(element)
        .unwrap_or_else(|| panic!("failed to get element type {}", element));
    library_vec[resolver.get_pos(library_vec.len())].clone()
}
/// Files the word's literal under every element category its tag maps to.
/// Words whose tag matches no category are silently ignored.
pub fn add_a_word(&mut self, new_word : &Word) {
    if let Some(element_vec) = self.matcher.resolve(new_word.tag.clone()) {
        for element in element_vec {
            // Entry API: one hash lookup per category, creating the
            // bucket on first use (the old code looked each key up twice).
            self.library
                .entry(element)
                .or_default()
                .push(new_word.literal.clone());
        }
    }
}
}
/// Deterministic source of random choices; seeding it once makes the
/// generated text reproducible for a given seed.
#[derive(Debug)]
struct RandomResolver {
    rng : oorandom::Rand64,
}
impl RandomResolver {
    /// Creates a resolver whose random stream is fully determined by `seed`.
    pub fn from_seed(seed : u128) -> Self {
        RandomResolver { rng: oorandom::Rand64::new(seed) }
    }
    /// Samples an index according to `weights` (assumed normalized to sum
    /// to ~1.0). Returns the last valid index when floating-point rounding
    /// leaves the cumulative sum just below the drawn value — the previous
    /// version returned the out-of-bounds `weights.len()` in that case,
    /// which panics in callers that index with the result.
    fn resolve_pos(&mut self, weights : &[f64]) -> usize {
        let float_result = self.rng.rand_float();
        let mut sum = 0.0f64;
        for (i, w) in weights.iter().enumerate() {
            sum += w;
            if sum > float_result {
                return i;
            }
        }
        weights.len().saturating_sub(1)
    }
    /// Returns a uniformly random index in `0..size` (0 when `size` is 0).
    pub fn get_pos(&mut self, size : usize) -> usize {
        (size as f64 * self.rng.rand_float()) as usize
    }
}
/// One token of a sentence template: a placeholder to be filled from the
/// dictionary, a fixed word, or a punctuation character.
#[derive(Debug, Clone)]
enum SentanceItem {
    Element(String), // element category name, resolved to a Word later
    Word(String), // fixed literal text, emitted verbatim
    Icon(char), // punctuation or other single character
}
/// A node in a singly linked sentence template; `next` chains the following
/// token and `None` marks the end of the sentence.
#[derive(Debug, Clone)]
struct SentanceNode {
    item : SentanceItem,
    next : Option<Box<SentanceNode>>,
}
impl SentanceNode {
    /// Creates a placeholder node to be filled from the dictionary.
    pub fn element(element_name : &str) -> SentanceNode {
        SentanceNode {
            item: SentanceItem::Element(String::from(element_name)),
            next: None,
        }
    }
    /// Creates a punctuation node.
    pub fn icon(icon : char) -> SentanceNode {
        SentanceNode {
            item: SentanceItem::Icon(icon),
            next: None,
        }
    }
    /// Creates a fixed-word node.
    pub fn word(word : &str) -> SentanceNode {
        SentanceNode {
            item: SentanceItem::Word(String::from(word)),
            next: None,
        }
    }
    /// Appends `next` to this node; panics if a next node is already set.
    pub fn next(mut self, next : SentanceNode) -> Self {
        if self.next.is_some() {
            panic!("node {:?} already have a next node", &self);
        }
        self.next = Some(Box::new(next));
        self
    }
    /// Replaces an `Element` item with a word drawn from the dictionary;
    /// `Word` and `Icon` items are left untouched.
    fn resolve(&mut self, resolver : &mut RandomResolver, dict : &Directionary) {
        if let SentanceItem::Element(element) = &self.item {
            self.item = SentanceItem::Word(dict.find_a_word(element.as_str(), resolver));
        }
    }
    /// Resolves every node in the chain. Implemented iteratively so that
    /// very long sentences cannot overflow the stack (the previous version
    /// recursed once per node).
    pub fn resolve_sentance(&mut self, resolver : &mut RandomResolver, dict : &Directionary) {
        let mut node = self;
        loop {
            node.resolve(resolver, dict);
            match node.next.as_deref_mut() {
                Some(next) => node = next,
                None => break,
            }
        }
    }
    /// Consumes the chain and concatenates its words and icons into one
    /// string. Unresolved `Element` nodes contribute nothing. Iterative for
    /// the same stack-safety reason as `resolve_sentance`.
    pub fn to_string(self) -> String {
        let mut string = String::new();
        let mut current = Some(Box::new(self));
        while let Some(node) = current {
            match node.item {
                SentanceItem::Word(word) => string.push_str(word.as_str()),
                SentanceItem::Icon(icon) => string.push(icon),
                SentanceItem::Element(_) => {}
            }
            current = node.next;
        }
        string
    }
}
/// Weighted random sentence generator: pairs each sentence template with a
/// selection weight and owns the RNG and dictionary used to realize them.
#[derive(Debug)]
struct ComedyWriter {
    possibilitys : Vec<f64>, // selection weight per template; normalized by `write`
    sentances : Vec<Box<SentanceNode>>, // templates, parallel to `possibilitys`
    resolver : RandomResolver, // deterministic RNG for all random choices
    directionary : Directionary, // word lookup by element category
}
impl ComedyWriter {
    /// Builds a writer with a deterministic RNG and a dictionary loaded
    /// from the default word list, filtered to the given frequency window.
    pub fn from_seed(random_seed : u128,
        highest_frequency : Option<u32>,
        lowest_frequency : Option<u32>)
        -> ComedyWriter {
        ComedyWriter {
            possibilitys: Vec::new(),
            sentances: Vec::new(),
            resolver: RandomResolver::from_seed(random_seed),
            directionary: Directionary::from_default(highest_frequency, lowest_frequency),
        }
    }
    /// Registers a sentence template with a relative (unnormalized) weight.
    pub fn add_node(&mut self, sentance : SentanceNode, posssibility : f64) -> &mut Self {
        self.sentances.push(Box::new(sentance));
        self.possibilitys.push(posssibility);
        self
    }
    /// Rescales the weights in place so they sum to 1.0.
    /// NOTE(review): with no templates registered the sum is 0.0 and the
    /// division yields NaN, exactly as before — `write` would panic on the
    /// empty template list anyway.
    fn normalize(&mut self) {
        let sum: f64 = self.possibilitys.iter().sum();
        for chance in &mut self.possibilitys {
            *chance /= sum;
        }
    }
    /// Generates `number` sentences, sampling templates by weight, and
    /// returns them concatenated into one string.
    pub fn write(&mut self, number : u32) -> String {
        self.normalize();
        let mut article = String::new();
        for _ in 0..number {
            let pos = self.resolver.resolve_pos(&self.possibilitys);
            // Clone the template so the original stays unresolved and can
            // be sampled again with fresh random words.
            let mut sentance = self.sentances[pos].clone();
            sentance.resolve_sentance(&mut self.resolver, &self.directionary);
            article.push_str(sentance.to_string().as_str());
        }
        article
    }
}
fn main() {
let mut writer = ComedyWriter::from_seed(
65536, None, None);
writer
.add_node(sentance | !(
| identifier_name | |
main.rs | continue;
}
let hash_get = hash.get_mut(tag);
match hash_get {
None => {
let vec = vec!(last_word);
hash.insert(tag, vec);
}
Some(vec) => {
if vec.len() >= 10 {continue;}
vec.push(last_word);
}
}
}
println!("{:?}", hash);
}
#[test]
fn add_directionary() {
    // Smoke test: two words with different tags should land in their
    // matching element buckets of a fresh Directionary without panicking.
    let mut random_word = Word::from_literal("okay");
    random_word.set_tag("v");
    let mut random_wor2 = Word::from_literal("no");
    random_wor2.set_tag("l");
    let mut directionary = Directionary::new();
    directionary.add_a_word(&random_word);
    directionary.add_a_word(&random_wor2);
    println!("{:?}", directionary);
}
#[test]
fn create_directionary() {
    // Smoke test: loading the bundled dictionary with an upper frequency
    // bound of 500 must succeed. Requires the resource file on disk.
    let directionary = Directionary::from_default(None, Some(500));
    println!("generation finished!");
    println!("result {:?}", directionary);
}
#[test]
fn create_sentance() {
    // Builds the template "其实,<Noun>是<Noun>,你知道吗?" by hand-nesting
    // `next` calls, resolves both Noun placeholders from the full
    // dictionary, and prints the result. Requires the resource file.
    let mut resolver = RandomResolver::from_seed(64u128);
    let directionary = Directionary::from_default(None, None);
    let mut sentance =
        SentanceNode::word("其实")
        .next(
        SentanceNode::icon(',')
        .next(
        SentanceNode::element("Noun")
        .next(
        SentanceNode::word("是")
        .next(
        SentanceNode::element("Noun")
        .next(
        SentanceNode::icon(',')
        .next(
        SentanceNode::word("你知道吗?")
        ))))));
    sentance.resolve_sentance(&mut resolver, &directionary);
    let output = sentance.to_string();
    println!("result {}", output);
}
#[test]
fn multi_sentance() {
    // Clones one macro-built template 254 times, resolving each clone with
    // fresh random words; the original template stays unresolved.
    let mut resolver = RandomResolver::from_seed(1024u128);
    let directionary = Directionary::from_default(None, None);
    let generic_sentance = sentance!(
        [element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."]
    );
    for _ in 1..255 {
        let mut sentance = generic_sentance.clone();
        sentance.resolve_sentance(&mut resolver, &directionary);
        let output = sentance.to_string();
        println!("result {}", output);
    }
}
// Builds a linked `SentanceNode` chain from a sequence of
// `[constructor=argument]` items, e.g.
// `sentance!([word="a"][icon=','][element="Noun"])`.
// The first rule peels one item off the front and recurses on the rest;
// the second rule terminates the recursion with a single node.
#[macro_export]
macro_rules! sentance {
    ([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+
    ) => {
        SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+))
    };
    ([$function:ident=$string:expr]) => {
        SentanceNode::$function($string)
    };
}
/// A dictionary entry: a literal string paired with its part-of-speech tag.
#[derive(Debug)]
struct Word {
    pub tag : String,
    pub literal : String,
}
impl Word {
    /// Builds a `Word` from its literal text, leaving the tag empty.
    pub fn from_literal(init_literal : &str) -> Word {
        Word {
            tag: String::new(),
            literal: init_literal.to_owned(),
        }
    }
    /// Replaces the word's part-of-speech tag.
    pub fn set_tag(&mut self, new_tag : &str) {
        self.tag = new_tag.to_owned();
    }
}
/// Maps grammatical element categories (e.g. "Location", "AllVerbs") to the
/// part-of-speech tags (e.g. "ns", "v") that belong to them. One tag may
/// belong to several categories.
#[derive(Debug)]
struct TagMatcher {
    pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>,
}
impl TagMatcher {
    /// Creates an empty matcher.
    pub fn new() -> TagMatcher {
        TagMatcher { matchers_pool: std::collections::BTreeMap::new() }
    }
    /// Registers `matchers` as tags belonging to category `tag`, creating
    /// the category on first use. Uses the entry API for a single map
    /// lookup (the previous version looked the key up twice).
    fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self {
        self.matchers_pool
            .entry(String::from(tag))
            .or_default()
            .extend(matchers.into_iter().map(String::from));
        self
    }
    /// Returns every category whose tag list contains `tag`, in category
    /// name order (BTreeMap iteration order), or `None` if there is none.
    pub fn resolve(&self, tag : String) -> Option<Vec<String>> {
        let matches: Vec<String> = self
            .matchers_pool
            .iter()
            .filter(|(_, matchers)| matchers.iter().any(|m| *m == tag))
            .map(|(element, _)| element.clone())
            .collect();
        if matches.is_empty() { None } else { Some(matches) }
    }
}
/// The generator's lexicon: word literals grouped by grammatical element
/// category, plus the matcher that decides which categories a tagged word
/// belongs to.
#[derive(Debug)]
struct Directionary {
    // verbs : Vec<String>,
    // nouns : Vec<String>,
    // advs : Vec<String>,
    // adjs : Vec<String>,
    library : std::collections::HashMap<String, Vec<String>>, // element category -> word literals
    matcher : TagMatcher, // element category -> accepted POS tags
}
impl Directionary {
/// Builds an empty dictionary together with the full element-category ->
/// POS-tag table (ansj/ICTCLAS-style tags, as used by the bundled
/// `default.dic`). Word lists start empty and are filled by `add_a_word`.
pub fn new() -> Directionary {
    use std::collections::HashMap;
    // TODO FINISH THIS
    let tag_matcher = TagMatcher::new()
        .add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",))
        .add("Name", vec!("nr","nba","nrfg","nrf","nrj",))
        .add("Time", vec!("tg","t","Mg"))
        .add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",))
        .add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",))
        .add("Numeral",vec!("m"))
        .add("Quantifier", vec!("qv","q","qt",))
        .add("IndependentVerb", vec!("vl",))
        .add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",))
        .add("IntranstiveVerb", vec!("vg","uguo","v","vf",))
        .add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",))
        .add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",))
        .add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",))
        .add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",))
        .add("AskWhen", vec!("rzt"))
        .add("When", vec!("ryt"))
        .add("AskHow", vec!("ryv"))
        .add("AskWhere", vec!("rys"))
        .add("Where", vec!("rzs"))
        .add("Who", vec!("rr","rz","Rg",))
        .add("AskWho", vec!("ry"))
        .add("Conjunction", vec!("rzv","u","c","cc",))
        .add("Preposition", vec!("r","uyy","udeng","p","udh",))
        .add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",))
        .add("AllModals", vec!("y","e","o",))
        .add("PostFixModal", vec!("y"))
        .add("PreFixModal", vec!("e"))
        .add("Onomatopoeia", vec!("o"));
    // Word buckets are created lazily as words are added.
    let new_library = HashMap::new();
    Directionary {
        library : new_library,
        matcher : tag_matcher,
    }
}
pub fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary {
// TODO
use std::fs;
let highest_frequency : u32 = match highest_input {
Some(frequency) => frequency,
None => 2147483647,
};
let lowest_frequency : u32 = match lowest_input {
Some(frequency) => frequency,
None=> 0,
};
let mut directionary = Directionary::new();
let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file");
let filterd_bytes = raw_bytes.replace(&['\n'][..], "\t");
let seperate_words : Vec<&str> = filter | i = i + 1;
// 奇数列为word, 偶数列为tag
if i % 2 != 0 {
last_word = tag; | random_line_split | |
main.rs | _sentance = sentance!(
[element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."]
);
for _ in 1..255 {
let mut sentance = generic_sentance.clone();
sentance.resolve_sentance(&mut resolver, &directionary);
let output = sentance.to_string();
println!("result {}", output);
}
}
#[macro_export]
macro_rules! sentance {
([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+
) => {
SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+))
};
([$function:ident=$string:expr]) => {
SentanceNode::$function($string)
};
}
/// A dictionary entry: a literal string paired with its part-of-speech tag.
#[derive(Debug)]
struct Word {
    pub tag : String,
    pub literal : String,
}
impl Word {
    /// Builds a `Word` from its literal text, leaving the tag empty.
    pub fn from_literal(init_literal : &str) -> Word {
        Word {
            tag: String::new(),
            literal: init_literal.to_owned(),
        }
    }
    /// Replaces the word's part-of-speech tag.
    pub fn set_tag(&mut self, new_tag : &str) {
        self.tag = new_tag.to_owned();
    }
}
/// Maps grammatical element categories (e.g. "Location", "AllVerbs") to the
/// part-of-speech tags (e.g. "ns", "v") that belong to them. One tag may
/// belong to several categories.
#[derive(Debug)]
struct TagMatcher {
    pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>,
}
impl TagMatcher {
    /// Creates an empty matcher.
    pub fn new() -> TagMatcher {
        TagMatcher { matchers_pool: std::collections::BTreeMap::new() }
    }
    /// Registers `matchers` as tags belonging to category `tag`, creating
    /// the category on first use. Uses the entry API for a single map
    /// lookup (the previous version looked the key up twice).
    fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self {
        self.matchers_pool
            .entry(String::from(tag))
            .or_default()
            .extend(matchers.into_iter().map(String::from));
        self
    }
    /// Returns every category whose tag list contains `tag`, in category
    /// name order (BTreeMap iteration order), or `None` if there is none.
    pub fn resolve(&self, tag : String) -> Option<Vec<String>> {
        let matches: Vec<String> = self
            .matchers_pool
            .iter()
            .filter(|(_, matchers)| matchers.iter().any(|m| *m == tag))
            .map(|(element, _)| element.clone())
            .collect();
        if matches.is_empty() { None } else { Some(matches) }
    }
}
#[derive(Debug)]
struct Directionary {
// verbs : Vec<String>,
// nouns : Vec<String>,
// advs : Vec<String>,
// adjs : Vec<String>,
library : std::collections::HashMap<String, Vec<String>>,
matcher : TagMatcher,
}
impl Directionary {
pub fn new() -> Directionary {
use std::collections::HashMap;
// TODO FINISH THIS
let tag_matcher = TagMatcher::new()
.add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",))
.add("Name", vec!("nr","nba","nrfg","nrf","nrj",))
.add("Time", vec!("tg","t","Mg"))
.add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",))
.add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",))
.add("Numeral",vec!("m"))
.add("Quantifier", vec!("qv","q","qt",))
.add("IndependentVerb", vec!("vl",))
.add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",))
.add("IntranstiveVerb", vec!("vg","uguo","v","vf",))
.add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",))
.add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",))
.add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",))
.add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",))
.add("AskWhen", vec!("rzt"))
.add("When", vec!("ryt"))
.add("AskHow", vec!("ryv"))
.add("AskWhere", vec!("rys"))
.add("Where", vec!("rzs"))
.add("Who", vec!("rr","rz","Rg",))
.add("AskWho", vec!("ry"))
.add("Conjunction", vec!("rzv","u","c","cc",))
.add("Preposition", vec!("r","uyy","udeng","p","udh",))
.add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",))
.add("AllModals", vec!("y","e","o",))
.add("PostFixModal", vec!("y"))
.add("PreFixModal", vec!("e"))
.add("Onomatopoeia", vec!("o"));
let new_library = HashMap::new();
Directionary {
library : new_library,
matcher : tag_matcher,
}
}
/// Loads the bundled ansj_seg dictionary from
/// `resources/ansj_seg-master/default.dic`, keeping only entries whose
/// frequency lies inside `[lowest_input, highest_input]` (each bound is
/// optional and defaults to the full `u32` range).
///
/// Each dictionary line has the form `word\ttag\tfrequency`. Malformed
/// lines and non-numeric frequencies are skipped instead of desyncing the
/// whole parse (the previous version flattened the file into one
/// tab-separated stream and tracked columns with an `i % 3` state machine,
/// so a single short line shifted every later column, and a bad frequency
/// panicked via `unwrap`).
///
/// Panics only if the dictionary file itself cannot be read.
pub fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary {
    use std::fs;
    // Default bounds: accept every representable frequency. (The old
    // default upper bound was i32::MAX, silently rejecting entries in
    // the top half of the u32 range.)
    let highest_frequency: u32 = highest_input.unwrap_or(u32::MAX);
    let lowest_frequency: u32 = lowest_input.unwrap_or(0);
    let mut directionary = Directionary::new();
    let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic")
        .expect("failed to open directionary file");
    for line in raw_bytes.lines() {
        // Column 1: word, column 2: tag, column 3: frequency.
        let mut fields = line.split('\t');
        let (word, tag, freq) = match (fields.next(), fields.next(), fields.next()) {
            (Some(w), Some(t), Some(f)) => (w, t, f),
            _ => continue, // skip empty / malformed lines
        };
        let frequency: u32 = match freq.trim().parse() {
            Ok(f) => f,
            Err(_) => continue, // skip entries with a non-numeric frequency
        };
        if frequency > highest_frequency || frequency < lowest_frequency {
            continue;
        }
        let mut word_entry = Word::from_literal(word);
        word_entry.set_tag(tag);
        directionary.add_a_word(&word_entry);
    }
    directionary
}
/// Picks a uniformly random word registered under the element category
/// `element` (e.g. "Noun", "AllVerbs").
///
/// Panics if the category is unknown or its word list is empty — both are
/// programming errors, mirroring the original behaviour.
pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String {
    // Single lookup with an explicit panic message instead of the old
    // match-then-unwrap two-step.
    let library_vec = self
        .library
        .get(element)
        .unwrap_or_else(|| panic!("failed to get element type {}", element));
    library_vec[resolver.get_pos(library_vec.len())].clone()
}
pub fn add_a_word(&mut self, new_word : &Word) {
let matcher_result = self.matcher.resolve(new_word.tag.clone());
// print!("word :{:?}, result :{:?}", | &new_word, matcher_result);
match matcher_result {
Some(element_vec) => {
for element in element_vec{
let library_result = self.library.get_mut(element.as_str());
match library_result {
Some(ele_vec) => {
ele_vec.push(new_word.literal.clone());
}
None => {
self.library.insert(element.clone(), vec!(new_word.literal.clone()));
}
}
}
}
None => {}
}
}
}
| identifier_body | |
users.py | from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import CreatesApiKeysMixin
from galaxy.web.base.controller import CreatesUsersMixin
from galaxy.web.base.controller import UsesTagsMixin
log = logging.getLogger( __name__ )
class UserAPIController( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ):
def __init__(self, app):
    # Wire up the user manager plus the (de)serializers shared by all
    # endpoints of this controller.
    super(UserAPIController, self).__init__(app)
    self.user_manager = users.UserManager(app)
    self.user_serializer = users.UserSerializer( app )
    self.user_deserializer = users.UserDeserializer( app )
@expose_api
def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ):
"""
GET /api/users
GET /api/users/deleted
Displays a collection (list) of users.
:param deleted: (optional) If true, show deleted users
:type deleted: bool
:param f_email: (optional) An email address to filter on. (Non-admin
users can only use this if ``expose_user_email`` is ``True`` in
galaxy.ini)
:type f_email: str
:param f_name: (optional) A username to filter on. (Non-admin users
can only use this if ``expose_user_name`` is ``True`` in
galaxy.ini)
:type f_name: str
:param f_any: (optional) Filter on username OR email. (Non-admin users
can use this, the email filter and username filter will
only be active if their corresponding ``expose_user_*`` is
``True`` in galaxy.ini)
:type f_any: str
"""
rval = []
query = trans.sa_session.query( trans.app.model.User )
deleted = util.string_as_bool( deleted )
if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email):
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) )
if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name):
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) )
if f_any:
if trans.user_is_admin():
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
else:
if trans.app.config.expose_user_email and trans.app.config.expose_user_name:
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
elif trans.app.config.expose_user_email:
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_any) )
elif trans.app.config.expose_user_name:
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) )
if deleted:
query = query.filter( trans.app.model.User.table.c.deleted == true() )
# only admins can see deleted users
if not trans.user_is_admin():
return []
else:
query = query.filter( trans.app.model.User.table.c.deleted == false() )
# special case: user can see only their own user
# special case2: if the galaxy admin has specified that other user email/names are
# exposed, we don't want special case #1
if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
return [item]
for user in query:
item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
# If NOT configured to expose_email, do not expose email UNLESS the user is self, or
# the user is an admin
if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin():
del item['username']
if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin():
del item['email']
# TODO: move into api_values
rval.append( item )
return rval
@expose_api_anonymous
def show( self, trans, id, deleted='False', **kwd ):
    """
    GET /api/users/{encoded_user_id}
    GET /api/users/deleted/{encoded_user_id}
    GET /api/users/current
    Displays information about a user.

    :param id: encoded user id, or the literal string "current" for the
        calling (possibly anonymous) user
    :type id: str
    :param deleted: if true, look the user up among deleted users
    :type deleted: bool

    :returns: detailed serialized view of the user
    :raises RequestParameterInvalidException: if the id is unknown or the
        caller is not permitted to view that user
    """
    deleted = util.string_as_bool( deleted )
    try:
        # user is requesting data about themselves
        if id == "current":
            # ...and is anonymous - return usage and quota (if any)
            if not trans.user:
                item = self.anon_user_api_value( trans )
                return item
            # ...and is logged in - return full
            else:
                user = trans.user
        else:
            user = self.get_user( trans, id, deleted=deleted )
            # check that the user is requesting themselves (and they aren't del'd) unless admin
            if not trans.user_is_admin():
                assert trans.user == user
                assert not user.deleted
    # NOTE(review): the bare except also masks programming errors here;
    # every failure surfaces as "invalid user id".
    except:
        raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id )
    return self.user_serializer.serialize_to_view(user, view='detailed')
@expose_api
def create( self, trans, payload, **kwd ):
    """
    POST /api/users
    Creates a new Galaxy user.

    :param payload: either ``{'remote_user_email': ...}`` (remote-user
        instances) or ``{'username': ..., 'email': ..., 'password': ...}``
    :type payload: dict

    :returns: 'element' view of the created user
    :raises ConfigDoesNotAllowException: if user creation is disabled
    :raises RequestParameterInvalidException: if email, password or
        username fail validation
    :raises NotImplemented: for non-admin callers on non-remote-user instances
    """
    if not trans.app.config.allow_user_creation and not trans.user_is_admin():
        raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' )
    if trans.app.config.use_remote_user and trans.user_is_admin():
        user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] )
    elif trans.user_is_admin():
        username = payload[ 'username' ]
        email = payload[ 'email' ]
        password = payload[ 'password' ]
        # Collect all validation failures into a single newline-joined message.
        message = "\n".join( [ validate_email( trans, email ),
                               validate_password( trans, password, password ),
                               validate_publicname( trans, username ) ] ).rstrip()
        if message:
            raise exceptions.RequestParameterInvalidException( message )
        else:
            user = self.create_user( trans=trans, email=email, username=username, password=password )
    else:
        raise exceptions.NotImplemented()
    item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
                                                        'total_disk_usage': float } )
    return item
@expose_api
@web.require_admin
def api_key( self, trans, user_id, **kwd ):
    """
    POST /api/users/{encoded_user_id}/api_key
    Creates a new API key for specified user.

    :param user_id: encoded id of the target user
    :type user_id: str

    :returns: the newly generated API key
    """
    user = self.get_user( trans, user_id )
    key = self.create_api_key( trans, user )
    return key
@expose_api
def update( self, trans, id, payload, **kwd ):
    """
    update( self, trans, id, payload, **kwd )
    * PUT /api/users/{id}
        updates the values for the item with the given ``id``

    :type id: str
    :param id: the encoded id of the item to update
    :type payload: dict
    :param payload: a dictionary of new attribute values

    :rtype: dict
    :returns: an error object if an error occurred or a dictionary containing
        the serialized item after any changes
    :raises InsufficientPermissionsException: if a non-admin tries to
        update a user other than themselves
    """
    current_user = trans.user
    user_to_update = self.user_manager.by_id( self.decode_id( id ) )

    # only allow updating other users if they're admin
    editing_someone_else = current_user != user_to_update
    is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user )
    if editing_someone_else and not is_admin:
        raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id )

    # Deserializer applies (and validates) the attribute changes in place.
    self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans )
    return self.user_serializer.serialize_to_view( user_to_update, view='detailed' )
@expose_api
@web.require_admin
def delete( self, trans, id, **kwd ):
"""
DELETE /api/users/{id}
delete the user with the given ``id``
:param id: the encoded id of the user to delete
:type id: str
:param purge: (optional) if True, purge the user
:type purge: bool
"""
if not trans.app.config.allow_user_deletion:
raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' )
purge = util.string_as_bool(kwd.get('purge', False))
if purge:
raise exceptions.NotImplemented('P | from galaxy.security.validate_user_input import validate_email
from galaxy.security.validate_user_input import validate_password
from galaxy.security.validate_user_input import validate_publicname
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous | random_line_split | |
users.py | name
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import CreatesApiKeysMixin
from galaxy.web.base.controller import CreatesUsersMixin
from galaxy.web.base.controller import UsesTagsMixin
log = logging.getLogger( __name__ )
class UserAPIController( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ):
def __init__(self, app):
super(UserAPIController, self).__init__(app)
self.user_manager = users.UserManager(app)
self.user_serializer = users.UserSerializer( app )
self.user_deserializer = users.UserDeserializer( app )
@expose_api
def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ):
"""
GET /api/users
GET /api/users/deleted
Displays a collection (list) of users.
:param deleted: (optional) If true, show deleted users
:type deleted: bool
:param f_email: (optional) An email address to filter on. (Non-admin
users can only use this if ``expose_user_email`` is ``True`` in
galaxy.ini)
:type f_email: str
:param f_name: (optional) A username to filter on. (Non-admin users
can only use this if ``expose_user_name`` is ``True`` in
galaxy.ini)
:type f_name: str
:param f_any: (optional) Filter on username OR email. (Non-admin users
can use this, the email filter and username filter will
only be active if their corresponding ``expose_user_*`` is
``True`` in galaxy.ini)
:type f_any: str
"""
rval = []
query = trans.sa_session.query( trans.app.model.User )
deleted = util.string_as_bool( deleted )
if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email):
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) )
if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name):
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) )
if f_any:
if trans.user_is_admin():
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
else:
if trans.app.config.expose_user_email and trans.app.config.expose_user_name:
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
elif trans.app.config.expose_user_email:
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_any) )
elif trans.app.config.expose_user_name:
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) )
if deleted:
query = query.filter( trans.app.model.User.table.c.deleted == true() )
# only admins can see deleted users
if not trans.user_is_admin():
return []
else:
query = query.filter( trans.app.model.User.table.c.deleted == false() )
# special case: user can see only their own user
# special case2: if the galaxy admin has specified that other user email/names are
# exposed, we don't want special case #1
if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
return [item]
for user in query:
item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
# If NOT configured to expose_email, do not expose email UNLESS the user is self, or
# the user is an admin
if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin():
del item['username']
if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin():
del item['email']
# TODO: move into api_values
rval.append( item )
return rval
@expose_api_anonymous
def show( self, trans, id, deleted='False', **kwd ):
"""
GET /api/users/{encoded_user_id}
GET /api/users/deleted/{encoded_user_id}
GET /api/users/current
Displays information about a user.
"""
deleted = util.string_as_bool( deleted )
try:
# user is requesting data about themselves
if id == "current":
# ...and is anonymous - return usage and quota (if any)
if not trans.user:
|
# ...and is logged in - return full
else:
user = trans.user
else:
user = self.get_user( trans, id, deleted=deleted )
# check that the user is requesting themselves (and they aren't del'd) unless admin
if not trans.user_is_admin():
assert trans.user == user
assert not user.deleted
except:
raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id )
return self.user_serializer.serialize_to_view(user, view='detailed')
@expose_api
def create( self, trans, payload, **kwd ):
"""
POST /api/users
Creates a new Galaxy user.
"""
if not trans.app.config.allow_user_creation and not trans.user_is_admin():
raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' )
if trans.app.config.use_remote_user and trans.user_is_admin():
user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] )
elif trans.user_is_admin():
username = payload[ 'username' ]
email = payload[ 'email' ]
password = payload[ 'password' ]
message = "\n".join( [ validate_email( trans, email ),
validate_password( trans, password, password ),
validate_publicname( trans, username ) ] ).rstrip()
if message:
raise exceptions.RequestParameterInvalidException( message )
else:
user = self.create_user( trans=trans, email=email, username=username, password=password )
else:
raise exceptions.NotImplemented()
item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
return item
@expose_api
@web.require_admin
def api_key( self, trans, user_id, **kwd ):
"""
POST /api/users/{encoded_user_id}/api_key
Creates a new API key for specified user.
"""
user = self.get_user( trans, user_id )
key = self.create_api_key( trans, user )
return key
@expose_api
def update( self, trans, id, payload, **kwd ):
"""
update( self, trans, id, payload, **kwd )
* PUT /api/users/{id}
updates the values for the item with the given ``id``
:type id: str
:param id: the encoded id of the item to update
:type payload: dict
:param payload: a dictionary of new attribute values
:rtype: dict
:returns: an error object if an error occurred or a dictionary containing
the serialized item after any changes
"""
current_user = trans.user
user_to_update = self.user_manager.by_id( self.decode_id( id ) )
# only allow updating other users if they're admin
editing_someone_else = current_user != user_to_update
is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user )
if editing_someone_else and not is_admin:
raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id )
self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans )
return self.user_serializer.serialize_to_view( user_to_update, view='detailed' )
@expose_api
@web.require_admin
def delete( self, trans, id, **kwd ):
"""
DELETE /api/users/{id}
delete the user with the given ``id``
:param id: the encoded id of the user to delete
:type id: str
:param purge: (optional) if True, purge the user
:type purge: bool
"""
if not trans.app.config.allow_user_deletion:
raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' )
purge = util.string_as_bool(kwd.get('purge', False))
if purge:
raise exceptions.NotImplemented('Purge option has not been implemented yet')
user = self.get_user(trans, id)
self.user_manager.delete(user)
return self | item = self.anon_user_api_value( trans )
return item | conditional_block |
users.py | name
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import CreatesApiKeysMixin
from galaxy.web.base.controller import CreatesUsersMixin
from galaxy.web.base.controller import UsesTagsMixin
log = logging.getLogger( __name__ )
class | ( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ):
def __init__(self, app):
super(UserAPIController, self).__init__(app)
self.user_manager = users.UserManager(app)
self.user_serializer = users.UserSerializer( app )
self.user_deserializer = users.UserDeserializer( app )
@expose_api
def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ):
"""
GET /api/users
GET /api/users/deleted
Displays a collection (list) of users.
:param deleted: (optional) If true, show deleted users
:type deleted: bool
:param f_email: (optional) An email address to filter on. (Non-admin
users can only use this if ``expose_user_email`` is ``True`` in
galaxy.ini)
:type f_email: str
:param f_name: (optional) A username to filter on. (Non-admin users
can only use this if ``expose_user_name`` is ``True`` in
galaxy.ini)
:type f_name: str
:param f_any: (optional) Filter on username OR email. (Non-admin users
can use this, the email filter and username filter will
only be active if their corresponding ``expose_user_*`` is
``True`` in galaxy.ini)
:type f_any: str
"""
rval = []
query = trans.sa_session.query( trans.app.model.User )
deleted = util.string_as_bool( deleted )
if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email):
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) )
if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name):
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) )
if f_any:
if trans.user_is_admin():
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
else:
if trans.app.config.expose_user_email and trans.app.config.expose_user_name:
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
elif trans.app.config.expose_user_email:
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_any) )
elif trans.app.config.expose_user_name:
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) )
if deleted:
query = query.filter( trans.app.model.User.table.c.deleted == true() )
# only admins can see deleted users
if not trans.user_is_admin():
return []
else:
query = query.filter( trans.app.model.User.table.c.deleted == false() )
# special case: user can see only their own user
# special case2: if the galaxy admin has specified that other user email/names are
# exposed, we don't want special case #1
if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
return [item]
for user in query:
item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
# If NOT configured to expose_email, do not expose email UNLESS the user is self, or
# the user is an admin
if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin():
del item['username']
if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin():
del item['email']
# TODO: move into api_values
rval.append( item )
return rval
@expose_api_anonymous
def show( self, trans, id, deleted='False', **kwd ):
"""
GET /api/users/{encoded_user_id}
GET /api/users/deleted/{encoded_user_id}
GET /api/users/current
Displays information about a user.
"""
deleted = util.string_as_bool( deleted )
try:
# user is requesting data about themselves
if id == "current":
# ...and is anonymous - return usage and quota (if any)
if not trans.user:
item = self.anon_user_api_value( trans )
return item
# ...and is logged in - return full
else:
user = trans.user
else:
user = self.get_user( trans, id, deleted=deleted )
# check that the user is requesting themselves (and they aren't del'd) unless admin
if not trans.user_is_admin():
assert trans.user == user
assert not user.deleted
except:
raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id )
return self.user_serializer.serialize_to_view(user, view='detailed')
@expose_api
def create( self, trans, payload, **kwd ):
"""
POST /api/users
Creates a new Galaxy user.
"""
if not trans.app.config.allow_user_creation and not trans.user_is_admin():
raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' )
if trans.app.config.use_remote_user and trans.user_is_admin():
user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] )
elif trans.user_is_admin():
username = payload[ 'username' ]
email = payload[ 'email' ]
password = payload[ 'password' ]
message = "\n".join( [ validate_email( trans, email ),
validate_password( trans, password, password ),
validate_publicname( trans, username ) ] ).rstrip()
if message:
raise exceptions.RequestParameterInvalidException( message )
else:
user = self.create_user( trans=trans, email=email, username=username, password=password )
else:
raise exceptions.NotImplemented()
item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
return item
@expose_api
@web.require_admin
def api_key( self, trans, user_id, **kwd ):
"""
POST /api/users/{encoded_user_id}/api_key
Creates a new API key for specified user.
"""
user = self.get_user( trans, user_id )
key = self.create_api_key( trans, user )
return key
@expose_api
def update( self, trans, id, payload, **kwd ):
"""
update( self, trans, id, payload, **kwd )
* PUT /api/users/{id}
updates the values for the item with the given ``id``
:type id: str
:param id: the encoded id of the item to update
:type payload: dict
:param payload: a dictionary of new attribute values
:rtype: dict
:returns: an error object if an error occurred or a dictionary containing
the serialized item after any changes
"""
current_user = trans.user
user_to_update = self.user_manager.by_id( self.decode_id( id ) )
# only allow updating other users if they're admin
editing_someone_else = current_user != user_to_update
is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user )
if editing_someone_else and not is_admin:
raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id )
self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans )
return self.user_serializer.serialize_to_view( user_to_update, view='detailed' )
@expose_api
@web.require_admin
def delete( self, trans, id, **kwd ):
"""
DELETE /api/users/{id}
delete the user with the given ``id``
:param id: the encoded id of the user to delete
:type id: str
:param purge: (optional) if True, purge the user
:type purge: bool
"""
if not trans.app.config.allow_user_deletion:
raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' )
purge = util.string_as_bool(kwd.get('purge', False))
if purge:
raise exceptions.NotImplemented('Purge option has not been implemented yet')
user = self.get_user(trans, id)
self.user_manager.delete(user)
return | UserAPIController | identifier_name |
users.py | expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import CreatesApiKeysMixin
from galaxy.web.base.controller import CreatesUsersMixin
from galaxy.web.base.controller import UsesTagsMixin
log = logging.getLogger( __name__ )
class UserAPIController( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ):
def __init__(self, app):
super(UserAPIController, self).__init__(app)
self.user_manager = users.UserManager(app)
self.user_serializer = users.UserSerializer( app )
self.user_deserializer = users.UserDeserializer( app )
@expose_api
def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ):
"""
GET /api/users
GET /api/users/deleted
Displays a collection (list) of users.
:param deleted: (optional) If true, show deleted users
:type deleted: bool
:param f_email: (optional) An email address to filter on. (Non-admin
users can only use this if ``expose_user_email`` is ``True`` in
galaxy.ini)
:type f_email: str
:param f_name: (optional) A username to filter on. (Non-admin users
can only use this if ``expose_user_name`` is ``True`` in
galaxy.ini)
:type f_name: str
:param f_any: (optional) Filter on username OR email. (Non-admin users
can use this, the email filter and username filter will
only be active if their corresponding ``expose_user_*`` is
``True`` in galaxy.ini)
:type f_any: str
"""
rval = []
query = trans.sa_session.query( trans.app.model.User )
deleted = util.string_as_bool( deleted )
if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email):
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) )
if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name):
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) )
if f_any:
if trans.user_is_admin():
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
else:
if trans.app.config.expose_user_email and trans.app.config.expose_user_name:
query = query.filter(or_(
trans.app.model.User.email.like("%%%s%%" % f_any),
trans.app.model.User.username.like("%%%s%%" % f_any)
))
elif trans.app.config.expose_user_email:
query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_any) )
elif trans.app.config.expose_user_name:
query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) )
if deleted:
query = query.filter( trans.app.model.User.table.c.deleted == true() )
# only admins can see deleted users
if not trans.user_is_admin():
return []
else:
query = query.filter( trans.app.model.User.table.c.deleted == false() )
# special case: user can see only their own user
# special case2: if the galaxy admin has specified that other user email/names are
# exposed, we don't want special case #1
if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
return [item]
for user in query:
item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
# If NOT configured to expose_email, do not expose email UNLESS the user is self, or
# the user is an admin
if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin():
del item['username']
if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin():
del item['email']
# TODO: move into api_values
rval.append( item )
return rval
@expose_api_anonymous
def show( self, trans, id, deleted='False', **kwd ):
"""
GET /api/users/{encoded_user_id}
GET /api/users/deleted/{encoded_user_id}
GET /api/users/current
Displays information about a user.
"""
deleted = util.string_as_bool( deleted )
try:
# user is requesting data about themselves
if id == "current":
# ...and is anonymous - return usage and quota (if any)
if not trans.user:
item = self.anon_user_api_value( trans )
return item
# ...and is logged in - return full
else:
user = trans.user
else:
user = self.get_user( trans, id, deleted=deleted )
# check that the user is requesting themselves (and they aren't del'd) unless admin
if not trans.user_is_admin():
assert trans.user == user
assert not user.deleted
except:
raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id )
return self.user_serializer.serialize_to_view(user, view='detailed')
@expose_api
def create( self, trans, payload, **kwd ):
"""
POST /api/users
Creates a new Galaxy user.
"""
if not trans.app.config.allow_user_creation and not trans.user_is_admin():
raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' )
if trans.app.config.use_remote_user and trans.user_is_admin():
user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] )
elif trans.user_is_admin():
username = payload[ 'username' ]
email = payload[ 'email' ]
password = payload[ 'password' ]
message = "\n".join( [ validate_email( trans, email ),
validate_password( trans, password, password ),
validate_publicname( trans, username ) ] ).rstrip()
if message:
raise exceptions.RequestParameterInvalidException( message )
else:
user = self.create_user( trans=trans, email=email, username=username, password=password )
else:
raise exceptions.NotImplemented()
item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
return item
@expose_api
@web.require_admin
def api_key( self, trans, user_id, **kwd ):
"""
POST /api/users/{encoded_user_id}/api_key
Creates a new API key for specified user.
"""
user = self.get_user( trans, user_id )
key = self.create_api_key( trans, user )
return key
@expose_api
def update( self, trans, id, payload, **kwd ):
"""
update( self, trans, id, payload, **kwd )
* PUT /api/users/{id}
updates the values for the item with the given ``id``
:type id: str
:param id: the encoded id of the item to update
:type payload: dict
:param payload: a dictionary of new attribute values
:rtype: dict
:returns: an error object if an error occurred or a dictionary containing
the serialized item after any changes
"""
current_user = trans.user
user_to_update = self.user_manager.by_id( self.decode_id( id ) )
# only allow updating other users if they're admin
editing_someone_else = current_user != user_to_update
is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user )
if editing_someone_else and not is_admin:
raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id )
self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans )
return self.user_serializer.serialize_to_view( user_to_update, view='detailed' )
@expose_api
@web.require_admin
def delete( self, trans, id, **kwd ):
| """
DELETE /api/users/{id}
delete the user with the given ``id``
:param id: the encoded id of the user to delete
:type id: str
:param purge: (optional) if True, purge the user
:type purge: bool
"""
if not trans.app.config.allow_user_deletion:
raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' )
purge = util.string_as_bool(kwd.get('purge', False))
if purge:
raise exceptions.NotImplemented('Purge option has not been implemented yet')
user = self.get_user(trans, id)
self.user_manager.delete(user)
return self.user_serializer.serialize_to_view(user, view='detailed') | identifier_body | |
requestpool.go | indicates that the batch cannot be increased further by calling again with the same arguments.
func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) {
rp.lock.Lock()
defer rp.lock.Unlock()
if check {
if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) {
return nil, false
}
}
count := minInt(rp.fifo.Len(), maxCount)
var totalSize uint64
batch = make([][]byte, 0, count)
element := rp.fifo.Front()
for i := 0; i < count; i++ {
req := element.Value.(*requestItem).request
reqLen := uint64(len(req))
if totalSize+reqLen > maxSizeBytes {
rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB",
len(batch), totalSize, maxSizeBytes)
return batch, true
}
batch = append(batch, req)
totalSize += reqLen
element = element.Next()
}
fullS := totalSize >= maxSizeBytes
fullC := len(batch) == maxCount
full = fullS || fullC
if len(batch) > 0 {
rp.logger.Debugf("Returning batch of %d requests totalling %dB",
len(batch), totalSize)
}
return batch, full
}
// Prune removes requests for which the given predicate returns error.
func (rp *Pool) Prune(predicate func([]byte) error) {
reqVec, infoVec := rp.copyRequests()
var numPruned int
for i, req := range reqVec {
err := predicate(req)
if err == nil {
continue
}
if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil {
rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr)
} else {
rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err)
numPruned++
}
}
rp.logger.Debugf("Pruned %d requests", numPruned)
}
func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) {
rp.lock.Lock()
defer rp.lock.Unlock()
requestVec = make([][]byte, len(rp.existMap))
infoVec = make([]types.RequestInfo, len(rp.existMap))
var i int
for info, item := range rp.existMap {
infoVec[i] = info
requestVec[i] = item.Value.(*requestItem).request
i++
}
return
}
// RemoveRequest removes the given request from the pool.
func (rp *Pool) RemoveRequest(requestInfo types.RequestInfo) error {
rp.lock.Lock()
defer rp.lock.Unlock()
element, exist := rp.existMap[requestInfo]
if !exist {
rp.moveToDelSlice(requestInfo)
errStr := fmt.Sprintf("request %s is not in the pool at remove time", requestInfo)
rp.logger.Debugf(errStr)
return fmt.Errorf(errStr)
}
rp.deleteRequest(element, requestInfo)
rp.sizeBytes -= uint64(len(element.Value.(*requestItem).request))
return nil
}
func (rp *Pool) deleteRequest(element *list.Element, requestInfo types.RequestInfo) {
item := element.Value.(*requestItem)
item.timeout.Stop()
rp.fifo.Remove(element)
rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len()))
rp.metrics.LatencyOfRequestPool.Observe(time.Since(item.additionTimestamp).Seconds())
delete(rp.existMap, requestInfo)
rp.moveToDelSlice(requestInfo)
rp.logger.Infof("Removed request %s from request pool", requestInfo)
rp.semaphore.Release(1)
if len(rp.existMap) != rp.fifo.Len() {
rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len())
}
}
func (rp *Pool) moveToDelSlice(requestInfo types.RequestInfo) {
_, exist := rp.delMap[requestInfo]
if exist {
return
}
rp.delMap[requestInfo] = struct{}{}
rp.delSlice = append(rp.delSlice, requestInfo)
}
func (rp *Pool) eraseFromDelSlice() {
rp.lock.RLock()
l := len(rp.delSlice)
rp.lock.RUnlock()
if l <= defaultSizeOfDelElements {
return
}
rp.lock.Lock()
defer rp.lock.Unlock()
n := len(rp.delSlice) - defaultSizeOfDelElements
for _, r := range rp.delSlice[:n] {
delete(rp.delMap, r)
}
rp.delSlice = rp.delSlice[n:]
}
// Close removes all the requests, stops all the timeout timers.
func (rp *Pool) Close() {
rp.lock.Lock()
defer rp.lock.Unlock()
rp.closed = true
for requestInfo, element := range rp.existMap {
rp.deleteRequest(element, requestInfo)
}
rp.cancel()
}
// StopTimers stops all the timeout timers attached to the pending requests, and marks the pool as "stopped".
// This which prevents submission of new requests, and renewal of timeouts by timer go-routines that where running
// at the time of the call to StopTimers().
func (rp *Pool) StopTimers() {
rp.lock.Lock()
defer rp.lock.Unlock()
rp.stopped = true
for _, element := range rp.existMap {
item := element.Value.(*requestItem)
item.timeout.Stop()
}
rp.logger.Debugf("Stopped all timers: size=%d", len(rp.existMap))
}
// RestartTimers restarts all the timeout timers attached to the pending requests, as RequestForwardTimeout, and re-allows
// submission of new requests.
func (rp *Pool) RestartTimers() {
rp.lock.Lock()
defer rp.lock.Unlock()
rp.stopped = false
for reqInfo, element := range rp.existMap {
item := element.Value.(*requestItem)
item.timeout.Stop()
ri := reqInfo
to := time.AfterFunc(
rp.options.ForwardTimeout,
func() { rp.onRequestTO(item.request, ri) },
)
item.timeout = to
}
rp.logger.Debugf("Restarted all timers: size=%d", len(rp.existMap))
}
// called by the goroutine spawned by time.AfterFunc
func (rp *Pool) onRequestTO(request []byte, reqInfo types.RequestInfo) {
rp.lock.Lock()
element, contains := rp.existMap[reqInfo]
if !contains {
rp.lock.Unlock()
rp.logger.Debugf("Request %s no longer in pool", reqInfo)
return
}
if rp.closed || rp.stopped {
rp.lock.Unlock()
rp.logger.Debugf("Pool stopped, will NOT start a leader-forwarding timeout")
return
}
// start a second timeout
item := element.Value.(*requestItem)
item.timeout = time.AfterFunc(
rp.options.ComplainTimeout,
func() { rp.onLeaderFwdRequestTO(request, reqInfo) },
)
rp.logger.Debugf("Request %s; started a leader-forwarding timeout: %s", reqInfo, rp.options.ComplainTimeout)
rp.lock.Unlock()
// may take time, in case Comm channel to leader is full; hence w/o the lock.
rp.logger.Debugf("Request %s timeout expired, going to send to leader", reqInfo)
rp.metrics.CountOfLeaderForwardRequest.Add(1)
rp.timeoutHandler.OnRequestTimeout(request, reqInfo)
}
// called by the goroutine spawned by time.AfterFunc
func (rp *Pool) onLeaderFwdRequestTO(request []byte, reqInfo types.RequestInfo) {
rp.lock.Lock()
element, contains := rp.existMap[reqInfo]
if !contains {
rp.lock.Unlock()
rp.logger.Debugf("Request %s no longer in pool", reqInfo)
return
}
if rp.closed || rp.stopped {
rp.lock.Unlock()
rp.logger.Debugf("Pool stopped, will NOT start auto-remove timeout")
return
}
// start a third timeout
item := element.Value.(*requestItem)
item.timeout = time.AfterFunc(
rp.options.AutoRemoveTimeout,
func() { rp.onAutoRemoveTO(reqInfo) },
)
rp.logger.Debugf("Request %s; started auto-remove timeout: %s", reqInfo, rp.options.AutoRemoveTimeout)
rp.lock.Unlock()
// may take time, in case Comm channel is full; hence w/o the lock.
rp.logger.Debugf("Request %s leader-forwarding timeout expired, going to complain on leader", reqInfo)
rp.metrics.CountTimeoutTwoStep.Add(1)
rp.timeoutHandler.OnLeaderFwdRequestTimeout(request, reqInfo)
}
// called by the goroutine spawned by time.AfterFunc
func (rp *Pool) onAutoRemoveTO(reqInfo types.RequestInfo) | {
rp.logger.Debugf("Request %s auto-remove timeout expired, going to remove from pool", reqInfo)
if err := rp.RemoveRequest(reqInfo); err != nil {
rp.logger.Errorf("Removal of request %s failed; error: %s", reqInfo, err)
return
}
rp.metrics.CountOfDeleteRequestPool.Add(1)
rp.timeoutHandler.OnAutoRemoveTimeout(reqInfo)
} | identifier_body | |
requestpool.go | for a semaphore with a lock, as it will prevent draining the pool.
if err := rp.semaphore.Acquire(ctx, 1); err != nil {
rp.metrics.CountOfFailAddRequestToPool.With(
rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonSemaphoreAcquireFail)...,
).Add(1)
return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo)
}
reqCopy := append(make([]byte, 0), request...)
rp.lock.Lock()
defer rp.lock.Unlock()
if _, existsEl := rp.existMap[reqInfo]; existsEl {
rp.semaphore.Release(1)
rp.logger.Debugf("request %s has been already added to the pool", reqInfo)
return ErrReqAlreadyExists
}
if _, deleteEl := rp.delMap[reqInfo]; deleteEl {
rp.semaphore.Release(1)
rp.logger.Debugf("request %s has been already processed", reqInfo)
return ErrReqAlreadyProcessed
}
to := time.AfterFunc(
rp.options.ForwardTimeout,
func() { rp.onRequestTO(reqCopy, reqInfo) },
)
if rp.stopped {
rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo)
to.Stop()
}
reqItem := &requestItem{
request: reqCopy,
timeout: to,
additionTimestamp: time.Now(),
}
element := rp.fifo.PushBack(reqItem)
rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len()))
rp.metrics.CountOfRequestPoolAll.Add(1)
rp.existMap[reqInfo] = element
if len(rp.existMap) != rp.fifo.Len() {
rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len())
}
rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout)
// notify that a request was submitted
select {
case rp.submittedChan <- struct{}{}:
default:
}
rp.sizeBytes += uint64(len(element.Value.(*requestItem).request))
return nil
}
// Size returns the number of requests currently residing the pool
func (rp *Pool) Size() int {
rp.lock.Lock()
defer rp.lock.Unlock()
return len(rp.existMap)
}
// NextRequests returns the next requests to be batched.
// It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice.
// Return variable full indicates that the batch cannot be increased further by calling again with the same arguments.
func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) {
rp.lock.Lock()
defer rp.lock.Unlock()
if check {
if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) {
return nil, false
}
}
count := minInt(rp.fifo.Len(), maxCount)
var totalSize uint64
batch = make([][]byte, 0, count)
element := rp.fifo.Front()
for i := 0; i < count; i++ {
req := element.Value.(*requestItem).request
reqLen := uint64(len(req))
if totalSize+reqLen > maxSizeBytes {
rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB",
len(batch), totalSize, maxSizeBytes)
return batch, true
}
batch = append(batch, req)
totalSize += reqLen
element = element.Next()
}
fullS := totalSize >= maxSizeBytes
fullC := len(batch) == maxCount
full = fullS || fullC
if len(batch) > 0 {
rp.logger.Debugf("Returning batch of %d requests totalling %dB",
len(batch), totalSize)
}
return batch, full
}
// Prune removes requests for which the given predicate returns error.
func (rp *Pool) Prune(predicate func([]byte) error) {
reqVec, infoVec := rp.copyRequests()
var numPruned int
for i, req := range reqVec {
err := predicate(req)
if err == nil {
continue
}
if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil {
rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr)
} else {
rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err)
numPruned++
}
}
rp.logger.Debugf("Pruned %d requests", numPruned)
}
func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) {
rp.lock.Lock()
defer rp.lock.Unlock()
requestVec = make([][]byte, len(rp.existMap))
infoVec = make([]types.RequestInfo, len(rp.existMap))
var i int
for info, item := range rp.existMap {
infoVec[i] = info
requestVec[i] = item.Value.(*requestItem).request
i++
}
return
}
// RemoveRequest removes the given request from the pool.
func (rp *Pool) RemoveRequest(requestInfo types.RequestInfo) error {
rp.lock.Lock()
defer rp.lock.Unlock()
element, exist := rp.existMap[requestInfo]
if !exist {
rp.moveToDelSlice(requestInfo)
errStr := fmt.Sprintf("request %s is not in the pool at remove time", requestInfo)
rp.logger.Debugf(errStr)
return fmt.Errorf(errStr)
}
rp.deleteRequest(element, requestInfo)
rp.sizeBytes -= uint64(len(element.Value.(*requestItem).request))
return nil
}
func (rp *Pool) deleteRequest(element *list.Element, requestInfo types.RequestInfo) {
item := element.Value.(*requestItem)
item.timeout.Stop()
rp.fifo.Remove(element)
rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len()))
rp.metrics.LatencyOfRequestPool.Observe(time.Since(item.additionTimestamp).Seconds())
delete(rp.existMap, requestInfo)
rp.moveToDelSlice(requestInfo)
rp.logger.Infof("Removed request %s from request pool", requestInfo)
rp.semaphore.Release(1)
if len(rp.existMap) != rp.fifo.Len() {
rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len())
}
}
func (rp *Pool) moveToDelSlice(requestInfo types.RequestInfo) {
_, exist := rp.delMap[requestInfo]
if exist {
return
}
rp.delMap[requestInfo] = struct{}{}
rp.delSlice = append(rp.delSlice, requestInfo)
}
func (rp *Pool) eraseFromDelSlice() {
rp.lock.RLock()
l := len(rp.delSlice)
rp.lock.RUnlock()
if l <= defaultSizeOfDelElements {
return
}
rp.lock.Lock()
defer rp.lock.Unlock()
n := len(rp.delSlice) - defaultSizeOfDelElements
for _, r := range rp.delSlice[:n] {
delete(rp.delMap, r)
}
rp.delSlice = rp.delSlice[n:]
}
// Close removes all the requests, stops all the timeout timers.
func (rp *Pool) Close() {
rp.lock.Lock()
defer rp.lock.Unlock()
rp.closed = true
for requestInfo, element := range rp.existMap {
rp.deleteRequest(element, requestInfo)
}
rp.cancel()
}
// StopTimers stops all the timeout timers attached to the pending requests, and marks the pool as "stopped".
// This which prevents submission of new requests, and renewal of timeouts by timer go-routines that where running
// at the time of the call to StopTimers().
func (rp *Pool) StopTimers() {
rp.lock.Lock()
defer rp.lock.Unlock()
rp.stopped = true
for _, element := range rp.existMap {
item := element.Value.(*requestItem)
item.timeout.Stop()
}
rp.logger.Debugf("Stopped all timers: size=%d", len(rp.existMap))
}
// RestartTimers restarts all the timeout timers attached to the pending requests, as RequestForwardTimeout, and re-allows
// submission of new requests.
func (rp *Pool) RestartTimers() {
rp.lock.Lock()
defer rp.lock.Unlock()
rp.stopped = false
for reqInfo, element := range rp.existMap {
item := element.Value.(*requestItem)
item.timeout.Stop()
ri := reqInfo
to := time.AfterFunc(
rp.options.ForwardTimeout,
func() { rp.onRequestTO(item.request, ri) },
)
item.timeout = to
}
rp.logger.Debugf("Restarted all timers: size=%d", len(rp.existMap))
}
// called by the goroutine spawned by time.AfterFunc
func (rp *Pool) onRequestTO(request []byte, reqInfo types.RequestInfo) {
rp.lock.Lock()
element, contains := rp.existMap[reqInfo]
if !contains {
rp.lock.Unlock()
rp.logger.Debugf("Request %s no longer in pool", reqInfo)
return
}
| random_line_split | ||
requestpool.go | ForwardTimeout time.Duration
ComplainTimeout time.Duration
AutoRemoveTimeout time.Duration
RequestMaxBytes uint64
SubmitTimeout time.Duration
Metrics *api.MetricsRequestPool
}
// NewPool constructs new requests pool
func NewPool(log api.Logger, inspector api.RequestInspector, th RequestTimeoutHandler, options PoolOptions, submittedChan chan struct{}) *Pool {
if options.ForwardTimeout == 0 {
options.ForwardTimeout = defaultRequestTimeout
}
if options.ComplainTimeout == 0 {
options.ComplainTimeout = defaultRequestTimeout
}
if options.AutoRemoveTimeout == 0 {
options.AutoRemoveTimeout = defaultRequestTimeout
}
if options.RequestMaxBytes == 0 {
options.RequestMaxBytes = defaultMaxBytes
}
if options.SubmitTimeout == 0 {
options.SubmitTimeout = defaultRequestTimeout
}
if options.Metrics == nil {
options.Metrics = api.NewMetricsRequestPool(&disabled.Provider{})
}
ctx, cancel := context.WithCancel(context.Background())
rp := &Pool{
cancel: cancel,
timeoutHandler: th,
logger: log,
metrics: options.Metrics,
inspector: inspector,
fifo: list.New(),
semaphore: semaphore.NewWeighted(options.QueueSize),
existMap: make(map[types.RequestInfo]*list.Element),
options: options,
submittedChan: submittedChan,
delMap: make(map[types.RequestInfo]struct{}),
delSlice: make([]types.RequestInfo, 0, defaultSizeOfDelElements),
}
go func() {
tic := time.NewTicker(defaultEraseTimeout)
for {
select {
case <-tic.C:
rp.eraseFromDelSlice()
case <-ctx.Done():
tic.Stop()
return
}
}
}()
return rp
}
// ChangeTimeouts changes the timeout of the pool
func (rp *Pool) ChangeTimeouts(th RequestTimeoutHandler, options PoolOptions) {
rp.lock.Lock()
defer rp.lock.Unlock()
if !rp.stopped {
rp.logger.Errorf("Trying to change timeouts but the pool is not stopped")
return
}
if options.ForwardTimeout == 0 {
options.ForwardTimeout = defaultRequestTimeout
}
if options.ComplainTimeout == 0 {
options.ComplainTimeout = defaultRequestTimeout
}
if options.AutoRemoveTimeout == 0 {
options.AutoRemoveTimeout = defaultRequestTimeout
}
rp.options.ForwardTimeout = options.ForwardTimeout
rp.options.ComplainTimeout = options.ComplainTimeout
rp.options.AutoRemoveTimeout = options.AutoRemoveTimeout
rp.timeoutHandler = th
rp.logger.Debugf("Changed pool timeouts")
}
func (rp *Pool) isClosed() bool {
rp.lock.Lock()
defer rp.lock.Unlock()
return rp.closed
}
// Submit a request into the pool, returns an error when request is already in the pool
func (rp *Pool) Submit(request []byte) error {
reqInfo := rp.inspector.RequestID(request)
if rp.isClosed() {
return errors.Errorf("pool closed, request rejected: %s", reqInfo)
}
if uint64(len(request)) > rp.options.RequestMaxBytes {
rp.metrics.CountOfFailAddRequestToPool.With(
rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonRequestMaxBytes)...,
).Add(1)
return fmt.Errorf(
"submitted request (%d) is bigger than request max bytes (%d)",
len(request),
rp.options.RequestMaxBytes,
)
}
rp.lock.RLock()
_, alreadyExists := rp.existMap[reqInfo]
_, alreadyDelete := rp.delMap[reqInfo]
rp.lock.RUnlock()
if alreadyExists {
rp.logger.Debugf("request %s already exists in the pool", reqInfo)
return ErrReqAlreadyExists
}
if alreadyDelete {
rp.logger.Debugf("request %s already processed", reqInfo)
return ErrReqAlreadyProcessed
}
ctx, cancel := context.WithTimeout(context.Background(), rp.options.SubmitTimeout)
defer cancel()
// do not wait for a semaphore with a lock, as it will prevent draining the pool.
if err := rp.semaphore.Acquire(ctx, 1); err != nil {
rp.metrics.CountOfFailAddRequestToPool.With(
rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonSemaphoreAcquireFail)...,
).Add(1)
return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo)
}
reqCopy := append(make([]byte, 0), request...)
rp.lock.Lock()
defer rp.lock.Unlock()
if _, existsEl := rp.existMap[reqInfo]; existsEl {
rp.semaphore.Release(1)
rp.logger.Debugf("request %s has been already added to the pool", reqInfo)
return ErrReqAlreadyExists
}
if _, deleteEl := rp.delMap[reqInfo]; deleteEl {
rp.semaphore.Release(1)
rp.logger.Debugf("request %s has been already processed", reqInfo)
return ErrReqAlreadyProcessed
}
to := time.AfterFunc(
rp.options.ForwardTimeout,
func() { rp.onRequestTO(reqCopy, reqInfo) },
)
if rp.stopped {
rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo)
to.Stop()
}
reqItem := &requestItem{
request: reqCopy,
timeout: to,
additionTimestamp: time.Now(),
}
element := rp.fifo.PushBack(reqItem)
rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len()))
rp.metrics.CountOfRequestPoolAll.Add(1)
rp.existMap[reqInfo] = element
if len(rp.existMap) != rp.fifo.Len() {
rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len())
}
rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout)
// notify that a request was submitted
select {
case rp.submittedChan <- struct{}{}:
default:
}
rp.sizeBytes += uint64(len(element.Value.(*requestItem).request))
return nil
}
// Size returns the number of requests currently residing the pool
func (rp *Pool) Size() int {
rp.lock.Lock()
defer rp.lock.Unlock()
return len(rp.existMap)
}
// NextRequests returns the next requests to be batched.
// It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice.
// Return variable full indicates that the batch cannot be increased further by calling again with the same arguments.
func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) {
rp.lock.Lock()
defer rp.lock.Unlock()
if check {
if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) |
}
count := minInt(rp.fifo.Len(), maxCount)
var totalSize uint64
batch = make([][]byte, 0, count)
element := rp.fifo.Front()
for i := 0; i < count; i++ {
req := element.Value.(*requestItem).request
reqLen := uint64(len(req))
if totalSize+reqLen > maxSizeBytes {
rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB",
len(batch), totalSize, maxSizeBytes)
return batch, true
}
batch = append(batch, req)
totalSize += reqLen
element = element.Next()
}
fullS := totalSize >= maxSizeBytes
fullC := len(batch) == maxCount
full = fullS || fullC
if len(batch) > 0 {
rp.logger.Debugf("Returning batch of %d requests totalling %dB",
len(batch), totalSize)
}
return batch, full
}
// Prune removes requests for which the given predicate returns error.
func (rp *Pool) Prune(predicate func([]byte) error) {
reqVec, infoVec := rp.copyRequests()
var numPruned int
for i, req := range reqVec {
err := predicate(req)
if err == nil {
continue
}
if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil {
rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr)
} else {
rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err)
numPruned++
}
}
rp.logger.Debugf("Pruned %d requests", numPruned)
}
func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) {
rp.lock.Lock()
defer rp.lock.Unlock()
requestVec = make([][]byte, len(rp.existMap))
infoVec = make([]types.RequestInfo, len(rp.existMap))
var i int
for info, item := range rp.existMap {
infoVec[i] = info
requestVec[i] | {
return nil, false
} | conditional_block |
requestpool.go | ForwardTimeout time.Duration
ComplainTimeout time.Duration
AutoRemoveTimeout time.Duration
RequestMaxBytes uint64
SubmitTimeout time.Duration
Metrics *api.MetricsRequestPool
}
// NewPool constructs new requests pool
func NewPool(log api.Logger, inspector api.RequestInspector, th RequestTimeoutHandler, options PoolOptions, submittedChan chan struct{}) *Pool {
if options.ForwardTimeout == 0 {
options.ForwardTimeout = defaultRequestTimeout
}
if options.ComplainTimeout == 0 {
options.ComplainTimeout = defaultRequestTimeout
}
if options.AutoRemoveTimeout == 0 {
options.AutoRemoveTimeout = defaultRequestTimeout
}
if options.RequestMaxBytes == 0 {
options.RequestMaxBytes = defaultMaxBytes
}
if options.SubmitTimeout == 0 {
options.SubmitTimeout = defaultRequestTimeout
}
if options.Metrics == nil {
options.Metrics = api.NewMetricsRequestPool(&disabled.Provider{})
}
ctx, cancel := context.WithCancel(context.Background())
rp := &Pool{
cancel: cancel,
timeoutHandler: th,
logger: log,
metrics: options.Metrics,
inspector: inspector,
fifo: list.New(),
semaphore: semaphore.NewWeighted(options.QueueSize),
existMap: make(map[types.RequestInfo]*list.Element),
options: options,
submittedChan: submittedChan,
delMap: make(map[types.RequestInfo]struct{}),
delSlice: make([]types.RequestInfo, 0, defaultSizeOfDelElements),
}
go func() {
tic := time.NewTicker(defaultEraseTimeout)
for {
select {
case <-tic.C:
rp.eraseFromDelSlice()
case <-ctx.Done():
tic.Stop()
return
}
}
}()
return rp
}
// ChangeTimeouts changes the timeout of the pool
func (rp *Pool) ChangeTimeouts(th RequestTimeoutHandler, options PoolOptions) {
rp.lock.Lock()
defer rp.lock.Unlock()
if !rp.stopped {
rp.logger.Errorf("Trying to change timeouts but the pool is not stopped")
return
}
if options.ForwardTimeout == 0 {
options.ForwardTimeout = defaultRequestTimeout
}
if options.ComplainTimeout == 0 {
options.ComplainTimeout = defaultRequestTimeout
}
if options.AutoRemoveTimeout == 0 {
options.AutoRemoveTimeout = defaultRequestTimeout
}
rp.options.ForwardTimeout = options.ForwardTimeout
rp.options.ComplainTimeout = options.ComplainTimeout
rp.options.AutoRemoveTimeout = options.AutoRemoveTimeout
rp.timeoutHandler = th
rp.logger.Debugf("Changed pool timeouts")
}
func (rp *Pool) isClosed() bool {
rp.lock.Lock()
defer rp.lock.Unlock()
return rp.closed
}
// Submit a request into the pool, returns an error when request is already in the pool
func (rp *Pool) Submit(request []byte) error {
reqInfo := rp.inspector.RequestID(request)
if rp.isClosed() {
return errors.Errorf("pool closed, request rejected: %s", reqInfo)
}
if uint64(len(request)) > rp.options.RequestMaxBytes {
rp.metrics.CountOfFailAddRequestToPool.With(
rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonRequestMaxBytes)...,
).Add(1)
return fmt.Errorf(
"submitted request (%d) is bigger than request max bytes (%d)",
len(request),
rp.options.RequestMaxBytes,
)
}
rp.lock.RLock()
_, alreadyExists := rp.existMap[reqInfo]
_, alreadyDelete := rp.delMap[reqInfo]
rp.lock.RUnlock()
if alreadyExists {
rp.logger.Debugf("request %s already exists in the pool", reqInfo)
return ErrReqAlreadyExists
}
if alreadyDelete {
rp.logger.Debugf("request %s already processed", reqInfo)
return ErrReqAlreadyProcessed
}
ctx, cancel := context.WithTimeout(context.Background(), rp.options.SubmitTimeout)
defer cancel()
// do not wait for a semaphore with a lock, as it will prevent draining the pool.
if err := rp.semaphore.Acquire(ctx, 1); err != nil {
rp.metrics.CountOfFailAddRequestToPool.With(
rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonSemaphoreAcquireFail)...,
).Add(1)
return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo)
}
reqCopy := append(make([]byte, 0), request...)
rp.lock.Lock()
defer rp.lock.Unlock()
if _, existsEl := rp.existMap[reqInfo]; existsEl {
rp.semaphore.Release(1)
rp.logger.Debugf("request %s has been already added to the pool", reqInfo)
return ErrReqAlreadyExists
}
if _, deleteEl := rp.delMap[reqInfo]; deleteEl {
rp.semaphore.Release(1)
rp.logger.Debugf("request %s has been already processed", reqInfo)
return ErrReqAlreadyProcessed
}
to := time.AfterFunc(
rp.options.ForwardTimeout,
func() { rp.onRequestTO(reqCopy, reqInfo) },
)
if rp.stopped {
rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo)
to.Stop()
}
reqItem := &requestItem{
request: reqCopy,
timeout: to,
additionTimestamp: time.Now(),
}
element := rp.fifo.PushBack(reqItem)
rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len()))
rp.metrics.CountOfRequestPoolAll.Add(1)
rp.existMap[reqInfo] = element
if len(rp.existMap) != rp.fifo.Len() {
rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len())
}
rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout)
// notify that a request was submitted
select {
case rp.submittedChan <- struct{}{}:
default:
}
rp.sizeBytes += uint64(len(element.Value.(*requestItem).request))
return nil
}
// Size returns the number of requests currently residing the pool
func (rp *Pool) | () int {
rp.lock.Lock()
defer rp.lock.Unlock()
return len(rp.existMap)
}
// NextRequests returns the next requests to be batched.
// It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice.
// Return variable full indicates that the batch cannot be increased further by calling again with the same arguments.
func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) {
rp.lock.Lock()
defer rp.lock.Unlock()
if check {
if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) {
return nil, false
}
}
count := minInt(rp.fifo.Len(), maxCount)
var totalSize uint64
batch = make([][]byte, 0, count)
element := rp.fifo.Front()
for i := 0; i < count; i++ {
req := element.Value.(*requestItem).request
reqLen := uint64(len(req))
if totalSize+reqLen > maxSizeBytes {
rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB",
len(batch), totalSize, maxSizeBytes)
return batch, true
}
batch = append(batch, req)
totalSize += reqLen
element = element.Next()
}
fullS := totalSize >= maxSizeBytes
fullC := len(batch) == maxCount
full = fullS || fullC
if len(batch) > 0 {
rp.logger.Debugf("Returning batch of %d requests totalling %dB",
len(batch), totalSize)
}
return batch, full
}
// Prune removes requests for which the given predicate returns error.
func (rp *Pool) Prune(predicate func([]byte) error) {
reqVec, infoVec := rp.copyRequests()
var numPruned int
for i, req := range reqVec {
err := predicate(req)
if err == nil {
continue
}
if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil {
rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr)
} else {
rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err)
numPruned++
}
}
rp.logger.Debugf("Pruned %d requests", numPruned)
}
func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) {
rp.lock.Lock()
defer rp.lock.Unlock()
requestVec = make([][]byte, len(rp.existMap))
infoVec = make([]types.RequestInfo, len(rp.existMap))
var i int
for info, item := range rp.existMap {
infoVec[i] = info
requestVec[i] = | Size | identifier_name |
crawl_stations_data_and_update_tb.py | is set no other '
'argument is considered.'
)
parser.add_argument(
'-i', '--input-stations-file', dest='input_stations_file', type=is_valid_file, required=False, default=None,
help='File with a list of desired INMET stations (one name per row). Fetch all available stations data ' +
'if no file provided'
)
parser.add_argument(
'-s', '--start-date', dest='start_date', type=is_valid_date, required=False, default=None,
help='Date in which data fetch will start on DD-MM-YYYY format. If no date provided, mostRecentUpdate ' +
'attribute on ThingsBoard will be considered for each station.'
)
parser.add_argument(
'-e', '--end-date', dest='end_date', type=is_valid_date, required=False, default=None,
help='Date in which data fetch will stop on DD-MM-YYYY format. If no date provided, the day of today ' +
'will be considered for each station.'
)
return parser
def get_current_stations(cfg_params):
|
def get_station(cfg_params, station_name):
current_device_id = ''
# first get the device id
while True:
try:
api_response = device_controller_api_inst.get_tenant_device_using_get(station_name)
current_device_id = api_response.id.id
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
# TODO: create device when it is not found? ask Professor Dr. Goncalves
print("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e)
break
# second get the device from device id
try:
devices = device_controller_api_inst.get_devices_by_ids_using_get(current_device_id)
except ApiException as e:
print("Exception when calling DeviceControllerApi->get_devices_by_ids_using_get: %s\n" % e)
return devices[0]
def get_station_token(station_id):
# get device token
device_token = ''
while True:
try:
api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(station_id)
device_token = api_response.credentials_id
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print(
"Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e)
break
return device_token
# API version
'''
def get_station_attributes(station_token):
client_keys = 'url,mostRecentData'
station_attributes = ''
try:
api_response = device_api_controller_api_inst.get_device_attributes_using_get(station_token, shared_keys=client_keys)
print(api_response)
except ApiException as e:
print("Exception when calling DeviceApiControllerApi->get_device_attributes_using_get: %s\n" % e)
return station_attributes
'''
# requests version
def get_station_attributes(station_token):
client_keys = 'url,mostRecentData'
url = 'http://'+ cfg_params['tb_api_access']['host'] + '/api/v1/' + station_token + '/attributes?clientKeys=' + client_keys
r = requests.get(url)
return r
def set_station_attributes(station_token, attributes):
# set station attributes
while True:
try:
api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e)
break
def format_data(rawData):
single_str = ''
for line in rawData:
single_str += line
single_str = single_str.replace('\r\n', '')
single_str = single_str.replace(' ', '')
single_str = single_str.replace('\t', '')
data = single_str.split('<br>')
data = data[:-1]
return data
def run_crawler(start_date, end_date, url):
# define time period and create session
form = {
'dtaini': start_date.strftime("%d/%m/%Y"),
'dtafim': end_date.strftime("%d/%m/%Y"),
'aleaValue': 'NDgyOA==',
'aleaNum': '4828'
}
encondedForm = urllib.urlencode(form)
head = {
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post(url, data=encondedForm, headers=head)
# get session cookie and get data from site
cookie = r.headers["Set-Cookie"]
head = {
'Cookie': cookie
}
fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php'
r = requests.get(fixed_url, headers=head)
formatted_data = format_data(r)
return formatted_data
def load_station_data(station_token, station_data):
# load station data
reader = csv.reader(station_data)
keys = reader.next()
# iterate over data collects
for i, row_of_values in enumerate(reader, start = 0):
current_data = dict(zip(keys, row_of_values))
most_recent_data = ''
# get date from the most recent data for attribute update
if i == 0:
most_recent_data = current_data['data'].replace('/','-')
# convert current datetime to timestamp
date = current_data['data'].split('/')
time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0)
ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000
json_temp = {'unavailable_data': ''}
# adjust data types
for key, value in current_data.iteritems():
if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']:
try:
json_temp[key] = int(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min',
'temp_inst',
'pressao_max', 'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min',
'pto_orvalho_inst']:
try:
json_temp[key] = float(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
# clean last character from json unavailable_data key
if json_temp['unavailable_data'] != '':
json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1]
# swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel
wind_direction = ''
wind_speed = ''
if 'vento_vel' in json_temp:
wind_direction = json_temp['vento_vel']
json_temp.pop('vento_vel')
if 'vento_direcao' in json_temp:
wind_speed = json_temp['vento_direcao']
json_temp.pop('vento_direcao')
if wind_direction != '':
json_temp['vento_direcao'] = wind_direction
if wind_speed != '':
json_temp['vento_vel'] = wind_speed
# write data to thingsboard
# 1 - format json
json_data = {}
json_data['values'] = json_temp
json_data['ts'] = ts_utc
# 2 - write data
while True:
try:
api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e)
break
# update mostRecentData attribute
json_data = {}
json_data = {'mostRecentData':most_recent_data}
set_station_attributes(station_token, json_data)
pass
def walkdir(folder):
# walk through each files in a directory
| relation_search_parameters = swagger_client.RelationsSearchParameters(
root_id=cfg_params['tb_entities_access']['root_asset_id'], root_type='ASSET', direction='FROM', max_level=0)
query = swagger_client.DeviceSearchQuery(device_types=['automatic-station'], parameters=relation_search_parameters,
relation_type='Contains')
query.parameters = relation_search_parameters
while True:
try:
stations_list = device_controller_api_inst.find_by_query_using_post1(query)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceControllerApi->find_by_query_using_post1: %s\n" % e)
break
return stations_list | identifier_body |
crawl_stations_data_and_update_tb.py | is set no other '
'argument is considered.'
)
parser.add_argument(
'-i', '--input-stations-file', dest='input_stations_file', type=is_valid_file, required=False, default=None,
help='File with a list of desired INMET stations (one name per row). Fetch all available stations data ' +
'if no file provided'
)
parser.add_argument(
'-s', '--start-date', dest='start_date', type=is_valid_date, required=False, default=None,
help='Date in which data fetch will start on DD-MM-YYYY format. If no date provided, mostRecentUpdate ' +
'attribute on ThingsBoard will be considered for each station.'
)
parser.add_argument(
'-e', '--end-date', dest='end_date', type=is_valid_date, required=False, default=None,
help='Date in which data fetch will stop on DD-MM-YYYY format. If no date provided, the day of today ' +
'will be considered for each station.'
)
return parser
def get_current_stations(cfg_params):
relation_search_parameters = swagger_client.RelationsSearchParameters(
root_id=cfg_params['tb_entities_access']['root_asset_id'], root_type='ASSET', direction='FROM', max_level=0)
query = swagger_client.DeviceSearchQuery(device_types=['automatic-station'], parameters=relation_search_parameters,
relation_type='Contains')
query.parameters = relation_search_parameters
while True:
try:
stations_list = device_controller_api_inst.find_by_query_using_post1(query)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceControllerApi->find_by_query_using_post1: %s\n" % e)
break
return stations_list
def get_station(cfg_params, station_name):
current_device_id = ''
# first get the device id
while True:
try:
api_response = device_controller_api_inst.get_tenant_device_using_get(station_name)
current_device_id = api_response.id.id
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
# TODO: create device when it is not found? ask Professor Dr. Goncalves
print("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e)
break
# second get the device from device id
try:
devices = device_controller_api_inst.get_devices_by_ids_using_get(current_device_id)
except ApiException as e:
print("Exception when calling DeviceControllerApi->get_devices_by_ids_using_get: %s\n" % e)
return devices[0]
def get_station_token(station_id):
# get device token
device_token = ''
while True:
try:
api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(station_id)
device_token = api_response.credentials_id
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print(
"Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e)
break
return device_token
# API version
'''
def get_station_attributes(station_token):
client_keys = 'url,mostRecentData'
station_attributes = ''
try:
api_response = device_api_controller_api_inst.get_device_attributes_using_get(station_token, shared_keys=client_keys)
print(api_response)
except ApiException as e:
print("Exception when calling DeviceApiControllerApi->get_device_attributes_using_get: %s\n" % e)
return station_attributes
'''
# requests version
def get_station_attributes(station_token):
client_keys = 'url,mostRecentData'
url = 'http://'+ cfg_params['tb_api_access']['host'] + '/api/v1/' + station_token + '/attributes?clientKeys=' + client_keys
r = requests.get(url)
return r
def set_station_attributes(station_token, attributes):
# set station attributes
while True:
try:
api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e)
break
def format_data(rawData):
single_str = ''
for line in rawData:
single_str += line
single_str = single_str.replace('\r\n', '')
single_str = single_str.replace(' ', '')
single_str = single_str.replace('\t', '')
data = single_str.split('<br>')
data = data[:-1]
return data
def run_crawler(start_date, end_date, url):
# define time period and create session
form = {
'dtaini': start_date.strftime("%d/%m/%Y"),
'dtafim': end_date.strftime("%d/%m/%Y"),
'aleaValue': 'NDgyOA==',
'aleaNum': '4828'
}
encondedForm = urllib.urlencode(form)
head = {
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post(url, data=encondedForm, headers=head)
# get session cookie and get data from site
cookie = r.headers["Set-Cookie"]
head = {
'Cookie': cookie
}
fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php'
r = requests.get(fixed_url, headers=head)
formatted_data = format_data(r)
return formatted_data
def load_station_data(station_token, station_data):
# load station data
reader = csv.reader(station_data)
keys = reader.next()
# iterate over data collects
for i, row_of_values in enumerate(reader, start = 0):
current_data = dict(zip(keys, row_of_values))
most_recent_data = ''
# get date from the most recent data for attribute update | ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000
json_temp = {'unavailable_data': ''}
# adjust data types
for key, value in current_data.iteritems():
if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']:
try:
json_temp[key] = int(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min',
'temp_inst',
'pressao_max', 'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min',
'pto_orvalho_inst']:
try:
json_temp[key] = float(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
# clean last character from json unavailable_data key
if json_temp['unavailable_data'] != '':
json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1]
# swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel
wind_direction = ''
wind_speed = ''
if 'vento_vel' in json_temp:
wind_direction = json_temp['vento_vel']
json_temp.pop('vento_vel')
if 'vento_direcao' in json_temp:
wind_speed = json_temp['vento_direcao']
json_temp.pop('vento_direcao')
if wind_direction != '':
json_temp['vento_direcao'] = wind_direction
if wind_speed != '':
json_temp['vento_vel'] = wind_speed
# write data to thingsboard
# 1 - format json
json_data = {}
json_data['values'] = json_temp
json_data['ts'] = ts_utc
# 2 - write data
while True:
try:
api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e)
break
# update mostRecentData attribute
json_data = {}
json_data = {'mostRecentData':most_recent_data}
set_station_attributes(station_token, json_data)
pass
def walkdir(folder):
# walk through each files in a directory
for | if i == 0:
most_recent_data = current_data['data'].replace('/','-')
# convert current datetime to timestamp
date = current_data['data'].split('/')
time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0) | random_line_split |
crawl_stations_data_and_update_tb.py | r
def set_station_attributes(station_token, attributes):
# set station attributes
while True:
try:
api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e)
break
def format_data(rawData):
single_str = ''
for line in rawData:
single_str += line
single_str = single_str.replace('\r\n', '')
single_str = single_str.replace(' ', '')
single_str = single_str.replace('\t', '')
data = single_str.split('<br>')
data = data[:-1]
return data
def run_crawler(start_date, end_date, url):
# define time period and create session
form = {
'dtaini': start_date.strftime("%d/%m/%Y"),
'dtafim': end_date.strftime("%d/%m/%Y"),
'aleaValue': 'NDgyOA==',
'aleaNum': '4828'
}
encondedForm = urllib.urlencode(form)
head = {
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post(url, data=encondedForm, headers=head)
# get session cookie and get data from site
cookie = r.headers["Set-Cookie"]
head = {
'Cookie': cookie
}
fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php'
r = requests.get(fixed_url, headers=head)
formatted_data = format_data(r)
return formatted_data
def load_station_data(station_token, station_data):
# load station data
reader = csv.reader(station_data)
keys = reader.next()
# iterate over data collects
for i, row_of_values in enumerate(reader, start = 0):
current_data = dict(zip(keys, row_of_values))
most_recent_data = ''
# get date from the most recent data for attribute update
if i == 0:
most_recent_data = current_data['data'].replace('/','-')
# convert current datetime to timestamp
date = current_data['data'].split('/')
time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0)
ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000
json_temp = {'unavailable_data': ''}
# adjust data types
for key, value in current_data.iteritems():
if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']:
try:
json_temp[key] = int(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min',
'temp_inst',
'pressao_max', 'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min',
'pto_orvalho_inst']:
try:
json_temp[key] = float(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
# clean last character from json unavailable_data key
if json_temp['unavailable_data'] != '':
json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1]
# swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel
wind_direction = ''
wind_speed = ''
if 'vento_vel' in json_temp:
wind_direction = json_temp['vento_vel']
json_temp.pop('vento_vel')
if 'vento_direcao' in json_temp:
wind_speed = json_temp['vento_direcao']
json_temp.pop('vento_direcao')
if wind_direction != '':
json_temp['vento_direcao'] = wind_direction
if wind_speed != '':
json_temp['vento_vel'] = wind_speed
# write data to thingsboard
# 1 - format json
json_data = {}
json_data['values'] = json_temp
json_data['ts'] = ts_utc
# 2 - write data
while True:
try:
api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e)
break
# update mostRecentData attribute
json_data = {}
json_data = {'mostRecentData':most_recent_data}
set_station_attributes(station_token, json_data)
pass
def walkdir(folder):
# walk through each files in a directory
for dirpath, dirs, files in os.walk(folder):
for filename in files:
if filename.endswith(".html"):
yield os.path.abspath(os.path.join(dirpath, filename))
def send_data_from_file(file_path):
    """Read one crawled station-data HTML file and push its telemetry to ThingsBoard.

    file_path -- path to a file whose basename ends with '-<station_code>.html';
    the station code is recovered from the file name.
    """
    # Use a context manager so the handle is always closed (the original left
    # the file open), and avoid shadowing the Python 2 builtin name `file`.
    with open(file_path, 'r') as data_file:
        formatted_data = format_data(data_file)
    # get station code from the file name: '<anything>-<code>.html'
    station_code = file_path.split('.')[0].split('-')[-1]
    # 1 - get device id from station code; retry while the session token is expired
    current_device_id = ""
    while True:
        try:
            api_response = device_controller_api_inst.get_tenant_device_using_get(station_code)
            current_device_id = api_response.id.id
        except ApiException as e:
            if json.loads(e.body)['message'] == 'Token has expired':
                renew_token(configuration)
                continue
            else:
                tqdm.write("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e)
        break
    # 2 - get device token from device id; same retry-on-expired-token pattern
    current_device_token = ""
    while True:
        try:
            # getDeviceCredentialsByDeviceId
            api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(current_device_id)
            current_device_token = api_response.credentials_id
        except ApiException as e:
            if json.loads(e.body)['message'] == 'Token has expired':
                renew_token(configuration)
                continue
            else:
                tqdm.write(
                    "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e)
        break
    load_station_data(current_device_token, formatted_data)
# function that iterates over all folders
def iterate_over_all_files(root_path):
    """Send the data from every .html file under root_path, showing a progress bar."""
    # compute the total number of files (walkdir is a generator, so count lazily
    # instead of hand-rolling a counting loop)
    file_counter = sum(1 for _ in walkdir(root_path))
    # iterates over all files
    with tqdm(total=file_counter, unit='files') as pbar:
        for file_path in walkdir(root_path):
            send_data_from_file(file_path)
            pbar.set_postfix(file=file_path, refresh=False)
            pbar.update()
def main():
'''
run_crawler(datetime.today(), datetime.today(),
'http://www.inmet.gov.br/sonabra/pg_dspDadosCodigo_sim.php?QTMwMQ==')
'''
parser = create_parser()
args = parser.parse_args()
stations = []
# verify if there is a path with input files
if args.input_data_path:
file_counter = 0
for filename in walkdir(args.input_data_path):
file_counter += 1
# iterates over all files
with tqdm(total=file_counter, unit='files') as pbar:
for filename in walkdir(args.input_data_path):
send_data_from_file(filename)
pbar.set_postfix(file=filename, refresh=False)
pbar.update()
else:
# verify if there is a file with a list of stations
if args.input_stations_file:
# if so, read file to a list
file_content = args.input_stations_file.readlines()
file_content = [x.strip() for x in file_content]
# query defined stations
for station_name in file_content:
stations.append(get_station(cfg_params, station_name))
else:
# query all stations
stations = get_current_stations(cfg_params)
# set progress bar
# with tqdm(total=len(stations), unit='stations') as pbar:
# iterates over all stations
for station in stations:
# get station access token
station_token = get_station_token(station.id.id)
# get station attributes
station_attributes = get_station_attributes(station_token)
# verify if there is a start date
if not args.start_date:
# verify device mostRecentData to define start_date
# if mostRecentData is empty define start_date to 365 days before today
if station_attributes['mostRecentData'] == '':
start_date = datetime.today() - timedelta(days=365)
else:
| start_date = station_attributes['mostRecentData'] | conditional_block | |
crawl_stations_data_and_update_tb.py | set no other '
'argument is considered.'
)
parser.add_argument(
'-i', '--input-stations-file', dest='input_stations_file', type=is_valid_file, required=False, default=None,
help='File with a list of desired INMET stations (one name per row). Fetch all available stations data ' +
'if no file provided'
)
parser.add_argument(
'-s', '--start-date', dest='start_date', type=is_valid_date, required=False, default=None,
help='Date in which data fetch will start on DD-MM-YYYY format. If no date provided, mostRecentUpdate ' +
'attribute on ThingsBoard will be considered for each station.'
)
parser.add_argument(
'-e', '--end-date', dest='end_date', type=is_valid_date, required=False, default=None,
help='Date in which data fetch will stop on DD-MM-YYYY format. If no date provided, the day of today ' +
'will be considered for each station.'
)
return parser
def get_current_stations(cfg_params):
    """Query ThingsBoard for all 'automatic-station' devices related to the root asset.

    Returns the device list from the API, or an empty list when the query fails
    for a reason other than an expired token.
    """
    relation_search_parameters = swagger_client.RelationsSearchParameters(
        root_id=cfg_params['tb_entities_access']['root_asset_id'], root_type='ASSET', direction='FROM', max_level=0)
    # parameters is passed directly to the constructor; the original also
    # re-assigned query.parameters afterwards, which was redundant.
    query = swagger_client.DeviceSearchQuery(device_types=['automatic-station'], parameters=relation_search_parameters,
                                             relation_type='Contains')
    # Initialize so the return below never raises NameError when the API call
    # fails on the non-token path (the original left stations_list unbound there).
    stations_list = []
    while True:
        try:
            stations_list = device_controller_api_inst.find_by_query_using_post1(query)
        except ApiException as e:
            if json.loads(e.body)['message'] == 'Token has expired':
                renew_token(configuration)
                continue
            else:
                print("Exception when calling DeviceControllerApi->find_by_query_using_post1: %s\n" % e)
        break
    return stations_list
def get_station(cfg_params, station_name):
    """Look up a single ThingsBoard device by station name.

    Returns the device object, or None when the device cannot be fetched.
    """
    current_device_id = ''
    # first get the device id; retry while the session token is expired
    while True:
        try:
            api_response = device_controller_api_inst.get_tenant_device_using_get(station_name)
            current_device_id = api_response.id.id
        except ApiException as e:
            if json.loads(e.body)['message'] == 'Token has expired':
                renew_token(configuration)
                continue
            else:
                # TODO: create device when it is not found? ask Professor Dr. Goncalves
                print("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e)
        break
    # second get the device from device id
    try:
        devices = device_controller_api_inst.get_devices_by_ids_using_get(current_device_id)
    except ApiException as e:
        print("Exception when calling DeviceControllerApi->get_devices_by_ids_using_get: %s\n" % e)
        # The original fell through to `devices[0]` here, raising NameError;
        # return None instead so the caller can decide how to handle the failure.
        return None
    # guard against an empty result list (the original would raise IndexError)
    return devices[0] if devices else None
def get_station_token(station_id):
    """Fetch the access token (credentials id) for the device identified by station_id."""
    device_token = ''
    finished = False
    while not finished:
        try:
            credentials = device_controller_api_inst.get_device_credentials_by_device_id_using_get(station_id)
            device_token = credentials.credentials_id
            finished = True
        except ApiException as e:
            if json.loads(e.body)['message'] == 'Token has expired':
                # expired session token: renew it and retry the same request
                renew_token(configuration)
            else:
                print(
                    "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e)
                finished = True
    return device_token
# API version
'''
def get_station_attributes(station_token):
client_keys = 'url,mostRecentData'
station_attributes = ''
try:
api_response = device_api_controller_api_inst.get_device_attributes_using_get(station_token, shared_keys=client_keys)
print(api_response)
except ApiException as e:
print("Exception when calling DeviceApiControllerApi->get_device_attributes_using_get: %s\n" % e)
return station_attributes
'''
# requests version
def get_station_attributes(station_token):
    """Fetch the 'url' and 'mostRecentData' client attributes for a station.

    NOTE(review): this returns the raw requests.Response object, but the
    caller in main() indexes the result like a dict
    (station_attributes['mostRecentData']). It likely should return r.json()
    (possibly with a nested key such as 'client') -- confirm against the
    ThingsBoard device attributes HTTP API before relying on this.
    """
    client_keys = 'url,mostRecentData'
    # ThingsBoard HTTP device API attributes endpoint, filtered by clientKeys
    url = 'http://'+ cfg_params['tb_api_access']['host'] + '/api/v1/' + station_token + '/attributes?clientKeys=' + client_keys
    r = requests.get(url)
    return r
def set_station_attributes(station_token, attributes):
    """Post the given attributes dict to the station identified by station_token."""
    finished = False
    while not finished:
        try:
            api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes)
            finished = True
        except ApiException as e:
            if json.loads(e.body)['message'] == 'Token has expired':
                # expired session token: renew it and retry the same request
                renew_token(configuration)
            else:
                print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e)
                finished = True
def format_data(rawData):
    """Collapse the crawled lines into one string, strip whitespace noise,
    and split on '<br>' into a list of data rows.

    Every row ends with '<br>', so the final (empty) split element is dropped.
    """
    single_str = ''.join(rawData)
    # remove line breaks, spaces and tabs left over from the HTML source
    for noise in ('\r\n', ' ', '\t'):
        single_str = single_str.replace(noise, '')
    return single_str.split('<br>')[:-1]
def run_crawler(start_date, end_date, url):
    """Fetch INMET station data for the period [start_date, end_date].

    start_date / end_date -- datetime objects, sent to the form as DD/MM/YYYY.
    url -- station-specific form URL used to open the session.
    Returns the fetched data formatted by format_data().
    """
    # define time period and create session
    form = {
        'dtaini': start_date.strftime("%d/%m/%Y"),
        'dtafim': end_date.strftime("%d/%m/%Y"),
        # fixed challenge values expected by the INMET form
        'aleaValue': 'NDgyOA==',
        'aleaNum': '4828'
    }
    # NOTE(review): urllib.urlencode is Python 2 only (urllib.parse.urlencode in Py3)
    encondedForm = urllib.urlencode(form)
    head = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    r = requests.post(url, data=encondedForm, headers=head)
    # get session cookie and get data from site
    cookie = r.headers["Set-Cookie"]
    head = {
        'Cookie': cookie
    }
    fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php'
    r = requests.get(fixed_url, headers=head)
    # NOTE(review): format_data iterates its argument; iterating a requests
    # Response yields content chunks, not decoded lines -- confirm intended.
    formatted_data = format_data(r)
    return formatted_data
def | (station_token, station_data):
# load station data
reader = csv.reader(station_data)
keys = reader.next()
# iterate over data collects
for i, row_of_values in enumerate(reader, start = 0):
current_data = dict(zip(keys, row_of_values))
most_recent_data = ''
# get date from the most recent data for attribute update
if i == 0:
most_recent_data = current_data['data'].replace('/','-')
# convert current datetime to timestamp
date = current_data['data'].split('/')
time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0)
ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000
json_temp = {'unavailable_data': ''}
# adjust data types
for key, value in current_data.iteritems():
if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']:
try:
json_temp[key] = int(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min',
'temp_inst',
'pressao_max', 'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min',
'pto_orvalho_inst']:
try:
json_temp[key] = float(current_data[key])
except ValueError:
json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',')
current_data[key] = '-'
continue
# clean last character from json unavailable_data key
if json_temp['unavailable_data'] != '':
json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1]
# swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel
wind_direction = ''
wind_speed = ''
if 'vento_vel' in json_temp:
wind_direction = json_temp['vento_vel']
json_temp.pop('vento_vel')
if 'vento_direcao' in json_temp:
wind_speed = json_temp['vento_direcao']
json_temp.pop('vento_direcao')
if wind_direction != '':
json_temp['vento_direcao'] = wind_direction
if wind_speed != '':
json_temp['vento_vel'] = wind_speed
# write data to thingsboard
# 1 - format json
json_data = {}
json_data['values'] = json_temp
json_data['ts'] = ts_utc
# 2 - write data
while True:
try:
api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data)
except ApiException as e:
if (json.loads(e.body)['message'] == 'Token has expired'):
renew_token(configuration)
continue
else:
print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e)
break
# update mostRecentData attribute
json_data = {}
json_data = {'mostRecentData':most_recent_data}
set_station_attributes(station_token, json_data)
pass
def walkdir(folder):
# walk through each files in a directory
| load_station_data | identifier_name |
virtio_constants.rs | OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* VirtIO Header, located in BAR 0.
*/
pub const VIRTIO_PCI_HOST_FEATURES: u64 = 0; /* host's supported features (32bit, RO)*/
pub const VIRTIO_PCI_GUEST_FEATURES: u64 = 4; /* guest's supported features (32, RW) */
pub const VIRTIO_PCI_QUEUE_PFN: u64 = 8; /* physical address of VQ (32, RW) */
pub const VIRTIO_PCI_QUEUE_NUM: u64 = 12; /* number of ring entries (16, RO) */
pub const VIRTIO_PCI_QUEUE_SEL: u64 = 14; /* current VQ selection (16, RW) */
pub const VIRTIO_PCI_QUEUE_NOTIFY: u64 = 16; /* notify host regarding VQ (16, RW) */
pub const VIRTIO_PCI_STATUS: u64 = 18; /* device status register (8, RW) */
pub const VIRTIO_PCI_ISR: u64 = 19; /* interrupt status register, reading also clears the register (8, RO) */
/* Only if MSIX is enabled: */
pub const VIRTIO_MSI_CONFIG_VECTOR: u64 = 20; /* configuration change vector (16, RW) */
pub const VIRTIO_MSI_QUEUE_VECTOR: u64 = 22; /* vector for selected VQ notifications (16, RW) */
/* Status byte for guest to report progress. */
pub const VIRTIO_CONFIG_STATUS_RESET: u8 = 0x00;
pub const VIRTIO_CONFIG_STATUS_ACK: u8 = 0x01;
pub const VIRTIO_CONFIG_STATUS_DRIVER: u8 = 0x02;
pub const VIRTIO_CONFIG_STATUS_DRIVER_OK: u8 = 0x04;
pub const VIRTIO_CONFIG_STATUS_FEATURES_OK: u8 = 0x08;
pub const VIRTIO_CONFIG_STATUS_FAILED: u8 = 0x80;
/*
* How many bits to shift physical queue address written to QUEUE_PFN.
* 12 is historical, and due to x86 page size.
*/
pub const VIRTIO_PCI_QUEUE_ADDR_SHIFT: usize = 12;
/* This marks a buffer as continuing via the next field. */
pub const VIRTQ_DESC_F_NEXT: u16 = 1;
/* This marks a buffer as write-only (otherwise read-only). */
pub const VIRTQ_DESC_F_WRITE: u16 = 2;
/* This means the buffer contains a list of buffer descriptors. */
pub const VIRTQ_DESC_F_INDIRECT: u16 = 4;
/* The feature bitmap for virtio net */
pub const VIRTIO_NET_F_CSUM: usize = 0; /* Host handles pkts w/ partial csum */
pub const VIRTIO_NET_F_GUEST_CSUM: usize = 1; /* Guest handles pkts w/ partial csum */
pub const VIRTIO_NET_F_MTU: usize = 3; /* Initial MTU advice. */
pub const VIRTIO_NET_F_MAC: usize = 5; /* Host has given MAC address. */
pub const VIRTIO_NET_F_GUEST_TSO4: usize = 7; /* Guest can handle TSOv4 in. */
pub const VIRTIO_NET_F_GUEST_TSO6: usize = 8; /* Guest can handle TSOv6 in. */
pub const VIRTIO_NET_F_GUEST_ECN: usize = 9; /* Guest can handle TSO[6] w/ ECN in. */
pub const VIRTIO_NET_F_GUEST_UFO: usize = 10; /* Guest can handle UFO in. */
pub const VIRTIO_NET_F_HOST_TSO4: usize = 11; /* Host can handle TSOv4 in. */
pub const VIRTIO_NET_F_HOST_TSO6: usize = 12; /* Host can handle TSOv6 in. */
pub const VIRTIO_NET_F_HOST_ECN: usize = 13; /* Host can handle TSO[6] w/ ECN in. */
pub const VIRTIO_NET_F_HOST_UFO: usize = 14; /* Host can handle UFO in. */
pub const VIRTIO_NET_F_MRG_RXBUF: usize = 15; /* Host can merge receive buffers. */ | pub const VIRTIO_NET_F_STATUS: usize = 16; /* virtio_net_config.status available */
pub const VIRTIO_NET_F_CTRL_VQ: usize = 17; /* Control channel available */
pub const VIRTIO_NET_F_CTRL_RX: usize = 18; /* Control channel RX mode support */
pub const VIRTIO_NET_F_CTRL_VLAN: usize = 19; /* Control channel VLAN filtering */
pub const VIRTIO_NET_F_CTRL_RX_EXTRA: usize = 20; /* Extra RX mode control support */
pub const VIRTIO_NET_F_GUEST_ANNOUNCE: usize = 21; /* Guest can announce device on the network */
pub const VIRTIO_NET_F_MQ: usize = 22; /* Device supports Receive Flow Steering */
pub const VIRTIO_NET_F_CTRL_MAC_ADDR: usize = 23; /* Set MAC address */
/* Do we get callbacks when the ring is completely used, even if we've suppressed them? */
pub const VIRTIO_F_NOTIFY_ON_EMPTY: usize = 24;
/* Can the device handle any descriptor layout? */
pub const VIRTIO_F_ANY_LAYOUT: usize = 27;
/* We support indirect buffer descriptors */
pub const VIRTIO_RING_F_INDIRECT_DESC: usize = 28;
pub const VIRTIO_F_VERSION_1: usize = 32;
pub const VIRTIO_F_IOMMU_PLATFORM: usize = 33;
/**
* Control the RX mode, ie. promiscuous, allmulti, etc...
* All commands require an "out" sg entry containing a 1 byte
* state value, zero = disable, non-zero = enable. Commands
* 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
* Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
*/
pub const VIRTIO_NET_CTRL_RX: u8 = 0;
pub const VIRTIO_NET_CTRL_RX_PROMISC: u8 = 0;
pub const VIRTIO_NET_CTRL_RX_ALLMULTI: u8 = 1;
pub const VIRTIO_NET_CTRL_RX_ALLUNI: u8 = 2;
pub const VIRTIO_NET_CTRL_RX_NOMULTI: u8 = 3;
pub const VIRTIO_NET_CTRL_RX_NOUNI: u8 = 4;
pub const VIRTIO_NET_CTRL_RX_NOBCAST: u8 = 5;
pub const VIRTIO_NET_OK: u8 = 0;
pub const VIRTIO_NET_ERR: u8 = 1;
pub const VIRTIO_MAX_CTRL_DATA: usize = 2048;
/**
* This is the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header.
*/
#[repr(C)]
pub struct virtio_net_hdr {
pub flags: u8,
pub gso_type: u8,
pub hdr_len: u16, // Ethernet + IP + tcp/udp hdrs
pub gso_size: u16, // Bytes to append to hdr_len per frame
pub csum_start: u16, // Position to start checksumming from
pub csum_offset: u16, // Offset after that to place checksum
}
pub const VIRTIO_NET_HDR_F_NEEDS_CSUM: u8 = 1; /**< Use csum_start,csum_offset*/
pub const VIRTIO_NET_HDR_F_DATA_VALID: u8 = 2; /**< Checksum is valid */
pub const VIRTIO_NET_HDR_GSO_NONE: u8 = 0; /**< Not a GSO frame */
pub const VIRTIO_NET_HDR_GSO_TCPV4: u8 = 1; /**< GSO frame, IPv4 TCP (TSO) */
pub const VIRTIO_NET_HDR_GSO_UDP: u8 = 3; /**< GSO frame, IPv4 UDP (UFO) */
pub const VIRTIO_NET_HDR_GSO_TCPV6: u8 = 4; /**< GSO frame, IPv6 TCP */
pub const VIRTIO_NET_HDR_GSO_ECN: u8 = 0x80; /**< TCP has ECN set */
/* The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
* optimization. Guest will still kick if it's out of buffers. */
pub const VIRTQ_USED_F_NO_NOTIFY: u16 = 1;
/* The Guest uses this in avail->flags to advise | random_line_split | |
virtio_constants.rs | 12;
/* This marks a buffer as continuing via the next field. */
pub const VIRTQ_DESC_F_NEXT: u16 = 1;
/* This marks a buffer as write-only (otherwise read-only). */
pub const VIRTQ_DESC_F_WRITE: u16 = 2;
/* This means the buffer contains a list of buffer descriptors. */
pub const VIRTQ_DESC_F_INDIRECT: u16 = 4;
/* The feature bitmap for virtio net */
pub const VIRTIO_NET_F_CSUM: usize = 0; /* Host handles pkts w/ partial csum */
pub const VIRTIO_NET_F_GUEST_CSUM: usize = 1; /* Guest handles pkts w/ partial csum */
pub const VIRTIO_NET_F_MTU: usize = 3; /* Initial MTU advice. */
pub const VIRTIO_NET_F_MAC: usize = 5; /* Host has given MAC address. */
pub const VIRTIO_NET_F_GUEST_TSO4: usize = 7; /* Guest can handle TSOv4 in. */
pub const VIRTIO_NET_F_GUEST_TSO6: usize = 8; /* Guest can handle TSOv6 in. */
pub const VIRTIO_NET_F_GUEST_ECN: usize = 9; /* Guest can handle TSO[6] w/ ECN in. */
pub const VIRTIO_NET_F_GUEST_UFO: usize = 10; /* Guest can handle UFO in. */
pub const VIRTIO_NET_F_HOST_TSO4: usize = 11; /* Host can handle TSOv4 in. */
pub const VIRTIO_NET_F_HOST_TSO6: usize = 12; /* Host can handle TSOv6 in. */
pub const VIRTIO_NET_F_HOST_ECN: usize = 13; /* Host can handle TSO[6] w/ ECN in. */
pub const VIRTIO_NET_F_HOST_UFO: usize = 14; /* Host can handle UFO in. */
pub const VIRTIO_NET_F_MRG_RXBUF: usize = 15; /* Host can merge receive buffers. */
pub const VIRTIO_NET_F_STATUS: usize = 16; /* virtio_net_config.status available */
pub const VIRTIO_NET_F_CTRL_VQ: usize = 17; /* Control channel available */
pub const VIRTIO_NET_F_CTRL_RX: usize = 18; /* Control channel RX mode support */
pub const VIRTIO_NET_F_CTRL_VLAN: usize = 19; /* Control channel VLAN filtering */
pub const VIRTIO_NET_F_CTRL_RX_EXTRA: usize = 20; /* Extra RX mode control support */
pub const VIRTIO_NET_F_GUEST_ANNOUNCE: usize = 21; /* Guest can announce device on the network */
pub const VIRTIO_NET_F_MQ: usize = 22; /* Device supports Receive Flow Steering */
pub const VIRTIO_NET_F_CTRL_MAC_ADDR: usize = 23; /* Set MAC address */
/* Do we get callbacks when the ring is completely used, even if we've suppressed them? */
pub const VIRTIO_F_NOTIFY_ON_EMPTY: usize = 24;
/* Can the device handle any descriptor layout? */
pub const VIRTIO_F_ANY_LAYOUT: usize = 27;
/* We support indirect buffer descriptors */
pub const VIRTIO_RING_F_INDIRECT_DESC: usize = 28;
pub const VIRTIO_F_VERSION_1: usize = 32;
pub const VIRTIO_F_IOMMU_PLATFORM: usize = 33;
/**
* Control the RX mode, ie. promiscuous, allmulti, etc...
* All commands require an "out" sg entry containing a 1 byte
* state value, zero = disable, non-zero = enable. Commands
* 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
* Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
*/
pub const VIRTIO_NET_CTRL_RX: u8 = 0;
pub const VIRTIO_NET_CTRL_RX_PROMISC: u8 = 0;
pub const VIRTIO_NET_CTRL_RX_ALLMULTI: u8 = 1;
pub const VIRTIO_NET_CTRL_RX_ALLUNI: u8 = 2;
pub const VIRTIO_NET_CTRL_RX_NOMULTI: u8 = 3;
pub const VIRTIO_NET_CTRL_RX_NOUNI: u8 = 4;
pub const VIRTIO_NET_CTRL_RX_NOBCAST: u8 = 5;
pub const VIRTIO_NET_OK: u8 = 0;
pub const VIRTIO_NET_ERR: u8 = 1;
pub const VIRTIO_MAX_CTRL_DATA: usize = 2048;
/**
 * This is the first element of the scatter-gather list. If you don't
 * specify GSO or CSUM features, you can simply ignore the header.
 */
#[repr(C)]
pub struct virtio_net_hdr {
    pub flags: u8,    // VIRTIO_NET_HDR_F_* bits (NEEDS_CSUM / DATA_VALID)
    pub gso_type: u8, // one of the VIRTIO_NET_HDR_GSO_* values
    pub hdr_len: u16, // Ethernet + IP + tcp/udp hdrs
    pub gso_size: u16, // Bytes to append to hdr_len per frame
    pub csum_start: u16, // Position to start checksumming from
    pub csum_offset: u16, // Offset after that to place checksum
}
pub const VIRTIO_NET_HDR_F_NEEDS_CSUM: u8 = 1; /**< Use csum_start,csum_offset*/
pub const VIRTIO_NET_HDR_F_DATA_VALID: u8 = 2; /**< Checksum is valid */
pub const VIRTIO_NET_HDR_GSO_NONE: u8 = 0; /**< Not a GSO frame */
pub const VIRTIO_NET_HDR_GSO_TCPV4: u8 = 1; /**< GSO frame, IPv4 TCP (TSO) */
pub const VIRTIO_NET_HDR_GSO_UDP: u8 = 3; /**< GSO frame, IPv4 UDP (UFO) */
pub const VIRTIO_NET_HDR_GSO_TCPV6: u8 = 4; /**< GSO frame, IPv6 TCP */
pub const VIRTIO_NET_HDR_GSO_ECN: u8 = 0x80; /**< TCP has ECN set */
/* The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
* optimization. Guest will still kick if it's out of buffers. */
pub const VIRTQ_USED_F_NO_NOTIFY: u16 = 1;
/* The Guest uses this in avail->flags to advise the Host: don't
* interrupt me when you consume a buffer. It's unreliable, so it's
* simply an optimization. */
pub const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 1;
use std::num::Wrapping;
/* VirtIO ring descriptors: 16 bytes.
 * These can chain together via "next". */
#[repr(C)]
#[derive(Default)]
pub struct VirtqDesc {
    // NOTE(review): the "16 bytes" comment above only holds when `usize` is
    // 8 bytes (64-bit targets); confirm whether this should be an explicit u64.
    pub addr: usize, /* Address (guest-physical). */
    pub len: u32, /* Length. */
    pub flags: u16, /* The flags as indicated above (VIRTQ_DESC_F_*). */
    pub next: u16, /* We chain unused descriptors via this. */
}
/// The "available" ring header. The actual ring entries (descriptor-table
/// indices) are stored directly after this struct in memory and reached
/// through the `Ring` impl's raw-pointer accessors.
#[repr(C)]
pub struct VirtqAvail {
    pub flags: u16,         // e.g. VIRTQ_AVAIL_F_NO_INTERRUPT
    pub idx: Wrapping<u16>, // free-running index; Wrapping makes the mod-2^16 arithmetic explicit
    pub ring: [u16; 0],     // zero-length tail marking where the entries begin
}
/// The "used" ring header. The `VirtqUsedElem` entries are stored directly
/// after this struct in memory and reached through the `Ring` impl's
/// raw-pointer accessors.
#[repr(C)]
pub struct VirtqUsed {
    pub flags: u16,         // e.g. VIRTQ_USED_F_NO_NOTIFY
    pub idx: Wrapping<u16>, // free-running index; wraps mod 2^16
    pub ring: [VirtqUsedElem; 0], // zero-length tail marking where the entries begin
}
/// One entry of the used ring: which descriptor chain the device consumed
/// and how many bytes it wrote into it.
#[repr(C)]
#[derive(Clone, Default)]
pub struct VirtqUsedElem {
    /* Index of start of used descriptor chain. */
    pub id: u16,
    // Explicit padding: together with `id` this occupies 32 bits --
    // presumably mirroring a 32-bit id field in the device layout (TODO confirm).
    pub _padding: u16,
    /* Total length of the descriptor chain which was written to. */
    pub len: u32,
}
/// Common accessor for the zero-length `ring` tail arrays of `VirtqAvail`
/// and `VirtqUsed`: exposes raw pointers to the first entry, which lives
/// in memory immediately past the fixed struct header.
pub trait Ring {
    /// Type of one ring entry.
    type Element;
    /// Raw read-only pointer to the start of the ring storage.
    fn ring(&self) -> *const Self::Element;
    /// Raw mutable pointer to the start of the ring storage.
    fn ring_mut(&mut self) -> *mut Self::Element;
}
// The available ring's entries are descriptor-table indices (u16).
impl Ring for VirtqAvail {
    type Element = u16;
    // Pointer to the first element stored past the struct header.
    fn ring(&self) -> *const u16 {
        self.ring.as_ptr()
    }
    fn ring_mut(&mut self) -> *mut u16 {
        self.ring.as_mut_ptr()
    }
}
// The used ring's entries are (id, len) records written by the device.
impl Ring for VirtqUsed {
    type Element = VirtqUsedElem;
    // Pointer to the first element stored past the struct header.
    fn ring(&self) -> *const VirtqUsedElem {
        self.ring.as_ptr()
    }
    fn ring_mut(&mut self) -> *mut VirtqUsedElem {
        self.ring.as_mut_ptr()
    }
}
/// A virtio-net control-queue message: a class/command byte pair, the
/// command-specific payload `T`, and an `ack` byte -- presumably written
/// by the device as VIRTIO_NET_OK / VIRTIO_NET_ERR (confirm on the device side).
#[repr(C)]
#[derive(Debug)]
pub struct VirtioNetCtrl<T: VirtioNetCtrlCommand> {
    pub class: u8,   // command class, e.g. VIRTIO_NET_CTRL_RX
    pub command: u8, // command within the class, e.g. VIRTIO_NET_CTRL_RX_PROMISC
    pub command_data: T,
    pub ack: u8,
}
impl<T: VirtioNetCtrlCommand> From<T> for VirtioNetCtrl<T> {
fn | from | identifier_name | |
debugging_errors.py | errors occur when Python was expecting something to be have a certain indentation level but instead encountered something different. Remember, Python cares about whitespace, so if you fail to adhere to what Python is expecting, you will get an `IndentationError`.
# will produce a syntax error
# and specifically an indentation error
my_list = [1, 2]
for value in my_list:
print(value)
In the above example, Python again gives you a readout about what it was expecting and where you appear to have gone wrong, letting you know that it encountered an `IndentationError` as it expected `print(value)` to be indented within your `for` loop.
For `SyntaxError`s and `IndentationError`s, the rest of your code may very well be fine. Once you fix the structure or syntax of your code, the error will likely be resolved.
## Exceptions
Unlike syntax errors, exceptions are errors that occur due to the specific code you tried to execute. In these cases, the syntax or structure of your code looks fine, but an error occurred when Python tried to execute your code, resulting in an error.
<div class="alert alert-success">
Exceptions are errors that occur when a code is executed.
</div>
### ZeroDivisionError
A `ZeroDivisionError` occurs when you try to divide by zero.
Sometimes this will be very obvious, such as if you directly try to divide by zero, as Python does not know how to divide by zero.
# produces ZeroDivisionError
1 / 0
However, more likely you'll encounter a `ZeroDivisionError` when looping through a list or using a conditional. In these cases, you'll have to dig through your code and how you tried to execute that code to determine where your code tried to divide by zero.
For example, in the following cell, you see code that is syntactically fine. However, when the loop gets to the third index in my_list, the `temp = val / (val - 4)` attempts to divide by zero, leading Python to return a `ZeroDivisionError`
# produces a ZeroDivisionError
running_sum = 0
my_list = [1, 2, 3, 4, 5]
for val in my_list:
if val % 2 == 0:
temp = val / (val - 4)
running_sum += temp
### NameError
A `NameError` occurs when you try to access a name that Python does not know.
For example, if you define a variable with the name `variable` and then try to access `varaible` (`variable` with a typo), you will receive a `NameError`.
# Define a variable
variable = 12
# If you typo a name, you will get a NameError
varaible
Whenever you see a `NameError`, consider whether you've misspelled or mistyped something. Look through your code carefully, as these mistakes can sometimes be hard to spot visually.
And, while it's annoying, it's helpful that Python doesn't just _guess_ that you _meant_ 'variable'....because sometimes Python would guess wrong. It's better for Python to just give us the error.
Finally, you'll also get a `NameError` if you try to use the equality operator (`==`) when you meant to use the assignment operator (`=`). Here, since `new_variable` hasn't yet been defined, when Python tries to determine if it is equal to `1`, a `NameError` is returned, as `new_variable` does not exist.
# You also get a name error if you try to use the wrong operator for assignment
new_variable == 1
### IndexError
Similarly, an `IndexError` occurs when you try to access an index that doesn't exist.
For example, the following list has three elements. If you try to access an element at index position 5, you'll receive an `IndexError` with a note that the index is out of range:
my_list = [1, 2, 3]
my_list[5]
Note that this applies to any collection where indexing applies, such as tuples, dictionaries, or strings.
If you try to access the value for a key that does not exist in a dictionary, for example, you will again receive an Error. Here, it is specifically a `KeyError`.
# Relatedly, 'KeyError' occurs if you ask for a dictionary key that doesn't exist
my_dictionary = {'name1' : 1, 'name2' : 2}
my_dictionary['name3']
### ValueError
A `ValueError` occurs when you try to use an illegal value for something.
For example, if you try to make an integer out of a string, you'll receive a `ValueError`:
int('cat')
### TypeError
Finally, a `TypeError` occurs when you try to operate on a variable in a way that Python is unable to interpret given its type.
For example, `+` concatenates strings and adds integers. When you try to combine those two types of variables, Python is unable to determine whether it should concatenate or add. As such, a `TypeError` is returned.
'a_string' + 12
## Stack Trace
The **stack trace** is a log of what Python did as it went through your code. This gets printed out when Python runs into an error.
running_sum = 0
my_list = [1, 2, 3, 4, 5]
for val in my_list:
if val % 2 == 0:
temp = val / (val - 4)
#+= allows you to add the value on the right to the variable on the left
# and assign it to the variable on the left
running_sum += temp
# equivalent to:
# running_sum = running_sum + temp
Sometimes these get really complex. With practice, you'll get better at interpreting these traces, but for now notice that there is often either an arrow (`---->`) or a caret (`^`) indicating at which line or at which point in your code Python encountered the error. Focusing at these points in the error message and reading the error message at the end (i.e. `ZeroDivisionError: division by zero` in the example above) is a good place to start when trying to decipher what the error means.
As you're learning, do your very best to read the message and try to understand it. It can be tempting to be overwhelmed and ignore these if you don't understand the error at first glance. Spending a few seconds longer to understand it can save you a lot of time in the long run.
## Try / Except |
### `try`/`except` Block
The general structure of a `try`/`except` block is as follows:
```python
try:
# Tries to do this code
pass # pass just says is not an operation; carry on
except:
# If there is an error (an exception), keep going and do this instead
pass
```
For example, if, we wanted to ask the user to input a number, we could do so using `input()`. The string inside the `input` function ('Please type a number: ') is what is displayed to the left of the box where the user enters their input. Whatever the user types will, in the code example below, be stored in the variable `my_num`. In the example below, if the user were to type 'shannon', the code would print `my_num is: shannon`. And, that doesn't make a ton of sense...since 'shannon' is not a number. Fortunately, we can use a `try`/`except` block to handle this and only accept the input from the user once it is, in fact, a number.
# Example: we want to get an input number from the user
my_num = input('Please type a number: ')
print('\nmy_num is: ', my_num)
To build up to this, we first need to think about the logic. We want to *first* `try` to get the `input` from the user. And, we want to typecast (meaning specify the type that input should be) that input into an integer using `int()`. However, what if we input the string 'shannon' again? As we saw above `int('shannon')` would raise an exception.
# this raises an error
int('shannon')
This is where `try`/`except` comes in handy! We can first `try` to run the code in the `try` code block; however, *if an exception is raised* (which will happen if we type 'shannon' as input), the code in the `except` block will execute instead.
# with a string as input
try:
int(input('Please type an integer: '))
except:
print('nahhh')
In the above example, we see that rather than raising a `ValueError`, instead, the code in the `except` block has executed instead, printing 'nahhh'.
Note that if the user input a number, like 29, for example, the `except` block would never execute, since the `try` block would not have raised an exception, as we see here, where 'nahhh' is not printed |
While *syntax errors* will necessarily fail, *exceptions* do not necessarily have to lead to breaking the program - they can be programmatically dealt with, using `try` and `except`. A `try`/`except` block allows you to try some code. If, in attempting to execute that code, an exeption is encountered, Python will instead execute the code in the `except` code block. | random_line_split |
debugging_errors.py |
Well, to answer this question you could consider the following call of the function:
example_function([1, 2, 3, 4])
The above example of the function executes and returns the value 14, the sum of all the values in the input list plus the value stored in the fourth position of the input list.
But, what about the following function call:
example_function([1, 2, 3])
Here, we get an `IndexError`, as there is no fourth index of the input list. Python runs into an error when trying to execute the following line of the function: `special_value = input_list[3]`
So, the successful execute of the function is dependent upon the input provided.
Let's look at one more example:
example_function(['s', 'h', 'a', 'n'])
The following example will also error; however, this time it's a `TypeError` rather than an `IndexError`. This occurs when Python tries to execute the `running_sum = running_sum + item` line of the function. In this case, a string is attempted to be added to an integer, which is not something Python can do.
Thus, another error.
Being able to adapt your code so that it does what you want it to do requires that you both understand these errors *and* know how to fix it. So, let's get started understanding what each of these errors means.
## Errors
Errors are enountered when the code you've tried to execute is unable to run. These interruptions can occur for a number of different reasons. We'll explore each of these now.
<div class="alert alert-success">
<b>Errors</b> are problems with code definition or execution that interrupt running Python code.
</div>
### Syntax Errors
Syntax errors occur when the code you've written fails to follow the rules of Python. It will fail under any and all circumstances. These include `SyntaxErrors` and `IndentationErrors`.
<div class="alert alert-success">
Syntax & Indentation Errors reflect code that doesn't follow Python structure, and will necessarily fail.
</div>
### Syntax Errors
For example, if you try to execute the following conditional statement, it will fail. This will happen regardless of the code you include within your conditional or what conditional you specify becuase a colon is missing after your `if` statement.
Notice in the output that Python does it's best to point you in the right direction using a `^` to highlight where in your code Python encoungered an error. And, Python lets you know this is a `SyntaxError`, so you're clued into the fact that there's something wrong with your code's structure.
# will produce a syntax error
if True
print('Yep.')
### Indentation Errors
Indentation errors occur when Python was expecting something to be have a certain indentation level but instead encountered something different. Remember, Python cares about whitespace, so if you fail to adhere to what Python is expecting, you will get an `IndentationError`.
# will produce a syntax error
# and specifically an indentation error
my_list = [1, 2]
for value in my_list:
print(value)
In the above example, Python again gives you a readout about what it was expecting and where you appear to have gone wrong, letting you know that it encountered an `IndentationError` as it expected `print(value)` to be indented within your `for` loop.
For `SyntaxError`s and `IndentationError`s, the rest of your code may very well be fine. Once you fix the structure or syntax of your code, the error will likely be resolved.
## Exceptions
Unlike syntax errors, exceptions are errors that occur due to the specific code you tried to execute. In these cases, the syntax or structure of your code looks fine, but an error occurred when Python tried to execute your code, resulting in an error.
<div class="alert alert-success">
Exceptions are errors that occur when a code is executed.
</div>
### ZeroDivisionError
A `ZeroDivisionError` occurs when you try to divide by zero.
Sometimes this will be very obvious, such as if you directly try to divide by zero, as Python does not know how to divide by zero.
# produces ZeroDivisionError
1 / 0
However, more likely you'll encounter a `ZeroDivisionError` when looping through a list or using a conditional. In these cases, you'll have to dig through your code and how you tried to execute that code to determine where your code tried to divide by zero.
For example, in the following cell, you see code that is syntactically fine. However, when the loop gets to the third index in my_list, the `temp = val / (val - 4)` attempts to divide by zero, leading Python to return a `ZeroDivisionError`
# produces a ZeroDivisionError
running_sum = 0
my_list = [1, 2, 3, 4, 5]
for val in my_list:
if val % 2 == 0:
temp = val / (val - 4)
running_sum += temp
### NameError
A `NameError` occurs when you try to access a name that Python does not know.
For example, if you define a variable with the name `variable` and then try to access `varaible` (`variable` with a typo), you will receive a `NameError`.
# Define a variable
variable = 12
# If you typo a name, you will get a NameError
varaible
Whenever you see a `NameError`, consider whether you've misspelled or mistyped something. Look through your code carefully as they can somteimes be hard to spot visually.
And, while it's annoying, it's helpful that Python doesn't just _guess_ that you _meant_ 'variable'....because sometimes Python would guess wrong. It's better for Python to just give us the error.
Finally, you'll also get a `NamerError` if you try to use the equality operator (`==`) when you meant to use the assignment operator (`=`). Here, since `new_variable` hasn't yet been defined, when Python tries to determine if it is equl to `1`, a `NameError` is returned, as `new_variable` does not exist.
# You also get a name error if you try to use the wrong operator for assignment
new_variable == 1
### IndexError
Similarly, an `IndexError` occurs when you try to access an index that doesn't exist.
For example, the following list has three elements, if you try to access the fourth element (index position 5), you'll recieve an `IndexError` with a note that the index is out of range:
my_list = [1, 2, 3]
my_list[5]
Note that this applies to any collection where indexing applies, such as tuples, dictionaries, or strings.
If you try to access the value for a key that does not exist in a dictionary, for example, you will again receive an Error. Here, it is specifically a `KeyError`.
# Relatedly, 'KeyError' occurs if you ask for a dictionary key that doesn't exist
my_dictionary = {'name1' : 1, 'name2' : 2}
my_dictionary['name3']
### ValueError
A `ValueError` occurs when you try to use an illegal value for something.
For example, if you try to make an integer out of a string, you'll receive a `ValueError`:
int('cat')
### TypeError
Finally, a `TypeError` occurs when you try to operate on a variable in a way that Python is unabe to interpret given its type.
For example, `+` concatenates strings and adds integers. When you try to combine those two types of variables, Python is unable to determine whether it shoudl concatenate or add. As such, a `TypeError` is returned.
'a_string' + 12
## Stack Trace
The **stack trace** is a log of what Python did as it went through your code. This ets printed out if Python runs into an error.
running_sum = 0
my_list = [1, 2, 3, 4, 5]
for val in my_list:
if val % 2 == 0:
temp = val / (val - 4)
#+= allows you to add the value on the right to the variable on the left
# and assign it to the variable on the left
running_sum += temp
# equivalent to:
# running_sum = running_sum + temp
Sometimes these get really complex. With practice, you'll get better at interpreting these traces, but for now notice that there is often either an arrow (`---->`) or a caret (`^`) indicating at which line or at which point in your code Python encountered the error. Focusing at these points in the error message and reading the error message at the end (i.e. `ZeroDivisionError: division by zero` in the example above) is a good place to start when trying to decipher what the error means.
As you're learning, do your very best to read the message and try to understand it. It can be tempting to be overwhelmed and ignore these if you don't understand the error at first glance. Spending a few seconds longer to understand it can save you a lot of time in the long run.
## Try / Except
While *syntax errors* will necessarily fail, *exceptions* do not necessarily have to lead to breaking the program - they can | running_sum = 0
for item in input_list:
running_sum = running_sum + item
special_value = input_list[3]
return running_sum + special_value | conditional_block | |
main.rs | u64, frames: u64) -> u64 {
buffer_const_size(align) + buffer_frame_size(align) * frames
}
const fn uniform_offset(index: usize, align: u64) -> u64 {
buffer_const_size(align) + buffer_frame_size(align) * index as u64
}
const fn per_instance_offset(index: usize, align: u64) -> u64 {
uniform_offset(index, align) + UNIFORM_SIZE
}
#[derive(Debug, Default)]
struct MeshRenderPipelineDesc;
#[derive(Debug)]
struct MeshRenderPipeline<B: hal::Backend> {
align: u64,
buffer: Escape<Buffer<B>>,
sets: Vec<Escape<DescriptorSet<B>>>,
}
struct ProfileTimer {
label: std::string::String,
start: std::time::Instant,
}
impl ProfileTimer {
fn start(label: &str) -> Self {
ProfileTimer {
label: label.into(),
start: std::time::Instant::now(),
}
}
}
// impl Drop for ProfileTimer {
// fn drop(&mut self) {
// println!("{}: {:?}", self.label, self.start.elapsed());
// }
// }
impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc
where
B: hal::Backend,
{
type Pipeline = MeshRenderPipeline<B>;
fn load_shader_set(
&self,
factory: &mut Factory<B>,
_scene: &Scene<B>,
) -> rendy_shader::ShaderSet<B> {
SHADERS.build(factory, Default::default()).unwrap()
}
fn vertices(
&self,
) -> Vec<(
Vec<hal::pso::Element<hal::format::Format>>,
hal::pso::ElemStride,
hal::pso::VertexInputRate,
)> {
return vec![
SHADER_REFLECTION
.attributes(&["position"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex),
SHADER_REFLECTION
.attributes(&["translate", "dir"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)),
SHADER_REFLECTION
.attributes(&["color", "pad"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)),
];
}
fn layout(&self) -> Layout {
return SHADER_REFLECTION.layout().unwrap();
}
fn build<'a>(
self,
ctx: &GraphContext<B>,
factory: &mut Factory<B>,
_queue: QueueId,
scene: &Scene<B>,
buffers: Vec<NodeBuffer>,
images: Vec<NodeImage>,
set_layouts: &[Handle<DescriptorSetLayout<B>>],
) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> {
assert!(buffers.is_empty());
assert!(images.is_empty());
assert_eq!(set_layouts.len(), 1);
let frames = ctx.frames_in_flight as _;
let align = factory
.physical()
.limits()
.min_uniform_buffer_offset_alignment;
let mut buffer = factory
.create_buffer(
BufferInfo {
size: buffer_size(align, frames) as u64,
usage: hal::buffer::Usage::UNIFORM
| hal::buffer::Usage::INDIRECT
| hal::buffer::Usage::VERTEX,
},
Dynamic,
)
.unwrap();
let mut sets = Vec::new();
for index in 0..frames {
unsafe {
let set = factory
.create_descriptor_set(set_layouts[0].clone())
.unwrap();
factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite {
set: set.raw(),
binding: 0,
array_offset: 0,
descriptors: Some(hal::pso::Descriptor::Buffer(
buffer.raw(),
Some(uniform_offset(index as usize, align))
..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE),
)),
}));
sets.push(set);
}
}
if !scene.per_instance_const.is_empty() |
Ok(MeshRenderPipeline {
align,
buffer,
sets,
})
}
}
fn model_transform() -> nalgebra::Matrix4<f32> {
let rot = nalgebra::UnitQuaternion::identity();
nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into()
}
fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] {
let z_pos = nalgebra::UnitQuaternion::identity();
let z_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 0.0, -1.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(-1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let y_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 1.0, 0.0),
&Vector3::new(0.0, 0.0, 1.0),
);
let y_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, -1.0, 0.0),
&Vector3::new(0.0, 0.0, -1.0),
);
// let unit = 0.125;
let unit = 0.125;
let scale = 0.125;
[
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale)
.into(),
]
}
impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B>
where
B: hal::Backend,
{
type Desc = MeshRenderPipelineDesc;
fn prepare(
&mut self,
factory: &Factory<B>,
_queue: QueueId,
_set_layouts: &[Handle<DescriptorSetLayout<B>>],
index: usize,
scene: &Scene<B>,
) -> PrepareResult {
let pt = ProfileTimer::start("prepare");
// println!("index: {}", index);
// println!(
// "upload uniform {}: {}",
// index,
// std::mem::size_of::<UniformArgs>()
// );
unsafe {
factory
.upload_visible_buffer(
&mut self.buffer,
uniform_offset(index, self.align),
&[UniformArgs {
// proj: scene.camera.proj.to_homogeneous(),
proj: scene.camera.proj,
view: scene.camera.view.to_homogeneous(),
model: model_transform2(),
}],
)
.unwrap()
};
// {
// let per_instance = &scene.per_instance[..];
// println!(
// "upload dyn {}: {}",
// index,
// // std::mem::size_of::<PerInstance>() * scene.per_instance.len(),
// std::mem::size_of_val(per_instance)
// );
// }
if !scene.per_instance.is_empty() {
unsafe {
factory
.upload_visible_buffer(
&mut self.buffer,
per_instance_offset(index, self.align),
&scene.per_instance[..],
)
.unwrap()
};
}
PrepareResult::DrawReuse
}
fn draw(
&mut self,
layout: &B::Pipeline | {
// println!(
// "upload const: {}",
// std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len()
// );
unsafe {
factory
.upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..])
.expect("update const buffer failed")
};
} | conditional_block |
main.rs | .limits()
.min_uniform_buffer_offset_alignment;
let mut buffer = factory
.create_buffer(
BufferInfo {
size: buffer_size(align, frames) as u64,
usage: hal::buffer::Usage::UNIFORM
| hal::buffer::Usage::INDIRECT
| hal::buffer::Usage::VERTEX,
},
Dynamic,
)
.unwrap();
let mut sets = Vec::new();
for index in 0..frames {
unsafe {
let set = factory
.create_descriptor_set(set_layouts[0].clone())
.unwrap();
factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite {
set: set.raw(),
binding: 0,
array_offset: 0,
descriptors: Some(hal::pso::Descriptor::Buffer(
buffer.raw(),
Some(uniform_offset(index as usize, align))
..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE),
)),
}));
sets.push(set);
}
}
if !scene.per_instance_const.is_empty() {
// println!(
// "upload const: {}",
// std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len()
// );
unsafe {
factory
.upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..])
.expect("update const buffer failed")
};
}
Ok(MeshRenderPipeline {
align,
buffer,
sets,
})
}
}
fn model_transform() -> nalgebra::Matrix4<f32> {
let rot = nalgebra::UnitQuaternion::identity();
nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into()
}
fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] {
let z_pos = nalgebra::UnitQuaternion::identity();
let z_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 0.0, -1.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(-1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let y_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 1.0, 0.0),
&Vector3::new(0.0, 0.0, 1.0),
);
let y_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, -1.0, 0.0),
&Vector3::new(0.0, 0.0, -1.0),
);
// let unit = 0.125;
let unit = 0.125;
let scale = 0.125;
[
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale)
.into(),
]
}
impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B>
where
B: hal::Backend,
{
type Desc = MeshRenderPipelineDesc;
fn prepare(
&mut self,
factory: &Factory<B>,
_queue: QueueId,
_set_layouts: &[Handle<DescriptorSetLayout<B>>],
index: usize,
scene: &Scene<B>,
) -> PrepareResult {
let pt = ProfileTimer::start("prepare");
// println!("index: {}", index);
// println!(
// "upload uniform {}: {}",
// index,
// std::mem::size_of::<UniformArgs>()
// );
unsafe {
factory
.upload_visible_buffer(
&mut self.buffer,
uniform_offset(index, self.align),
&[UniformArgs {
// proj: scene.camera.proj.to_homogeneous(),
proj: scene.camera.proj,
view: scene.camera.view.to_homogeneous(),
model: model_transform2(),
}],
)
.unwrap()
};
// {
// let per_instance = &scene.per_instance[..];
// println!(
// "upload dyn {}: {}",
// index,
// // std::mem::size_of::<PerInstance>() * scene.per_instance.len(),
// std::mem::size_of_val(per_instance)
// );
// }
if !scene.per_instance.is_empty() {
unsafe {
factory
.upload_visible_buffer(
&mut self.buffer,
per_instance_offset(index, self.align),
&scene.per_instance[..],
)
.unwrap()
};
}
PrepareResult::DrawReuse
}
fn draw(
&mut self,
layout: &B::PipelineLayout,
mut encoder: RenderPassEncoder<'_, B>,
index: usize,
scene: &Scene<B>,
) {
println!("draw");
unsafe {
encoder.bind_graphics_descriptor_sets(
layout,
0,
Some(self.sets[index].raw()),
std::iter::empty(),
);
let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()];
scene
.object_mesh
.as_ref()
.unwrap()
.bind(0, &vertex, &mut encoder)
.unwrap();
encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0)));
encoder.bind_vertex_buffers(
2,
std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))),
);
encoder.draw_indexed(
0..scene.object_mesh.as_ref().unwrap().len(),
0 as i32,
0..scene.per_instance.len() as u32,
)
}
}
fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {}
}
fn main() {
env_logger::Builder::from_default_env()
.filter_module("meshes", log::LevelFilter::Trace)
.init();
let mut event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_inner_size((960, 640).into())
.with_title("Rendy example");
let config: Config = Default::default();
let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap();
rendy::with_any_windowed_rendy!((rendy)
use back; (mut factory, mut families, surface, window) => {
let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new();
let size = window.inner_size().to_physical(window.hidpi_factor());
let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1);
let aspect = size.width / size.height;
let depth = graph_builder.create_image(
window_kind,
1,
hal::format::Format::D32Sfloat,
Some(hal::command::ClearValue {
depth_stencil: hal::command::ClearDepthStencil {
depth: 1.0,
stencil: 0,
},
}),
);
let pass = graph_builder.add_node(
MeshRenderPipeline::builder()
.into_subpass()
.with_color_surface()
.with_depth_stencil(depth)
.into_pass()
.with_surface(
surface,
hal::window::Extent2D {
width: size.width as _,
height: size.height as _,
},
Some(hal::command::ClearValue {
color: hal::command::ClearColor {
float32: [0.5, 0.5, 1.0, 1.0],
},
}),
),
);
let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file");
| random_line_split | ||
main.rs |
const fn buffer_const_size(align: u64) -> u64 {
align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align)
}
const fn buffer_frame_size(align: u64) -> u64 {
align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align)
}
const fn buffer_size(align: u64, frames: u64) -> u64 {
buffer_const_size(align) + buffer_frame_size(align) * frames
}
const fn uniform_offset(index: usize, align: u64) -> u64 {
buffer_const_size(align) + buffer_frame_size(align) * index as u64
}
const fn per_instance_offset(index: usize, align: u64) -> u64 {
uniform_offset(index, align) + UNIFORM_SIZE
}
#[derive(Debug, Default)]
struct MeshRenderPipelineDesc;
#[derive(Debug)]
struct MeshRenderPipeline<B: hal::Backend> {
align: u64,
buffer: Escape<Buffer<B>>,
sets: Vec<Escape<DescriptorSet<B>>>,
}
struct ProfileTimer {
label: std::string::String,
start: std::time::Instant,
}
impl ProfileTimer {
fn start(label: &str) -> Self {
ProfileTimer {
label: label.into(),
start: std::time::Instant::now(),
}
}
}
// impl Drop for ProfileTimer {
// fn drop(&mut self) {
// println!("{}: {:?}", self.label, self.start.elapsed());
// }
// }
impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc
where
B: hal::Backend,
{
type Pipeline = MeshRenderPipeline<B>;
fn load_shader_set(
&self,
factory: &mut Factory<B>,
_scene: &Scene<B>,
) -> rendy_shader::ShaderSet<B> {
SHADERS.build(factory, Default::default()).unwrap()
}
fn vertices(
&self,
) -> Vec<(
Vec<hal::pso::Element<hal::format::Format>>,
hal::pso::ElemStride,
hal::pso::VertexInputRate,
)> {
return vec![
SHADER_REFLECTION
.attributes(&["position"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex),
SHADER_REFLECTION
.attributes(&["translate", "dir"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)),
SHADER_REFLECTION
.attributes(&["color", "pad"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)),
];
}
fn layout(&self) -> Layout {
return SHADER_REFLECTION.layout().unwrap();
}
fn build<'a>(
self,
ctx: &GraphContext<B>,
factory: &mut Factory<B>,
_queue: QueueId,
scene: &Scene<B>,
buffers: Vec<NodeBuffer>,
images: Vec<NodeImage>,
set_layouts: &[Handle<DescriptorSetLayout<B>>],
) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> {
assert!(buffers.is_empty());
assert!(images.is_empty());
assert_eq!(set_layouts.len(), 1);
let frames = ctx.frames_in_flight as _;
let align = factory
.physical()
.limits()
.min_uniform_buffer_offset_alignment;
let mut buffer = factory
.create_buffer(
BufferInfo {
size: buffer_size(align, frames) as u64,
usage: hal::buffer::Usage::UNIFORM
| hal::buffer::Usage::INDIRECT
| hal::buffer::Usage::VERTEX,
},
Dynamic,
)
.unwrap();
let mut sets = Vec::new();
for index in 0..frames {
unsafe {
let set = factory
.create_descriptor_set(set_layouts[0].clone())
.unwrap();
factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite {
set: set.raw(),
binding: 0,
array_offset: 0,
descriptors: Some(hal::pso::Descriptor::Buffer(
buffer.raw(),
Some(uniform_offset(index as usize, align))
..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE),
)),
}));
sets.push(set);
}
}
if !scene.per_instance_const.is_empty() {
// println!(
// "upload const: {}",
// std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len()
// );
unsafe {
factory
.upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..])
.expect("update const buffer failed")
};
}
Ok(MeshRenderPipeline {
align,
buffer,
sets,
})
}
}
fn model_transform() -> nalgebra::Matrix4<f32> {
let rot = nalgebra::UnitQuaternion::identity();
nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into()
}
fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] {
let z_pos = nalgebra::UnitQuaternion::identity();
let z_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 0.0, -1.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(-1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let y_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 1.0, 0.0),
&Vector3::new(0.0, 0.0, 1.0),
);
let y_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, -1.0, 0.0),
&Vector3::new(0.0, 0.0, -1.0),
);
// let unit = 0.125;
let unit = 0.125;
let scale = 0.125;
[
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale)
.into(),
]
}
impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B>
where
B: hal::Backend,
{
type Desc = MeshRenderPipelineDesc;
fn prepare(
&mut self,
factory: &Factory<B>,
_queue: QueueId,
_set_layouts: &[Handle<DescriptorSetLayout<B>>],
index: usize,
scene: &Scene<B>,
) -> PrepareResult {
let pt = ProfileTimer::start("prepare");
// println!("index: {}", index);
// println!(
// "upload uniform {}: {}",
// index,
// std::mem::size_of::<UniformArgs>()
// );
unsafe {
factory
.upload_visible_buffer(
&mut self.buffer,
uniform_offset(index, self.align),
&[UniformArgs {
// proj: scene.camera.proj.to_homogeneous(),
proj: scene.camera.proj,
view: scene.camera.view.to_homogeneous(),
model: model_transform2(),
}],
)
.unwrap()
};
// {
// let per_instance = &scene.per_instance[..];
// println!(
// "upload dyn {}: {}",
// index,
// // std::mem::size_of::<PerInstance>() * scene.per_instance.len(),
// std::mem | {
((s - 1) / align + 1) * align
} | identifier_body | |
main.rs | : u64, frames: u64) -> u64 {
buffer_const_size(align) + buffer_frame_size(align) * frames
}
const fn | (index: usize, align: u64) -> u64 {
buffer_const_size(align) + buffer_frame_size(align) * index as u64
}
const fn per_instance_offset(index: usize, align: u64) -> u64 {
uniform_offset(index, align) + UNIFORM_SIZE
}
#[derive(Debug, Default)]
struct MeshRenderPipelineDesc;
#[derive(Debug)]
struct MeshRenderPipeline<B: hal::Backend> {
align: u64,
buffer: Escape<Buffer<B>>,
sets: Vec<Escape<DescriptorSet<B>>>,
}
struct ProfileTimer {
label: std::string::String,
start: std::time::Instant,
}
impl ProfileTimer {
fn start(label: &str) -> Self {
ProfileTimer {
label: label.into(),
start: std::time::Instant::now(),
}
}
}
// impl Drop for ProfileTimer {
// fn drop(&mut self) {
// println!("{}: {:?}", self.label, self.start.elapsed());
// }
// }
impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc
where
B: hal::Backend,
{
type Pipeline = MeshRenderPipeline<B>;
fn load_shader_set(
&self,
factory: &mut Factory<B>,
_scene: &Scene<B>,
) -> rendy_shader::ShaderSet<B> {
SHADERS.build(factory, Default::default()).unwrap()
}
fn vertices(
&self,
) -> Vec<(
Vec<hal::pso::Element<hal::format::Format>>,
hal::pso::ElemStride,
hal::pso::VertexInputRate,
)> {
return vec![
SHADER_REFLECTION
.attributes(&["position"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex),
SHADER_REFLECTION
.attributes(&["translate", "dir"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)),
SHADER_REFLECTION
.attributes(&["color", "pad"])
.unwrap()
.gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)),
];
}
fn layout(&self) -> Layout {
return SHADER_REFLECTION.layout().unwrap();
}
fn build<'a>(
self,
ctx: &GraphContext<B>,
factory: &mut Factory<B>,
_queue: QueueId,
scene: &Scene<B>,
buffers: Vec<NodeBuffer>,
images: Vec<NodeImage>,
set_layouts: &[Handle<DescriptorSetLayout<B>>],
) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> {
assert!(buffers.is_empty());
assert!(images.is_empty());
assert_eq!(set_layouts.len(), 1);
let frames = ctx.frames_in_flight as _;
let align = factory
.physical()
.limits()
.min_uniform_buffer_offset_alignment;
let mut buffer = factory
.create_buffer(
BufferInfo {
size: buffer_size(align, frames) as u64,
usage: hal::buffer::Usage::UNIFORM
| hal::buffer::Usage::INDIRECT
| hal::buffer::Usage::VERTEX,
},
Dynamic,
)
.unwrap();
let mut sets = Vec::new();
for index in 0..frames {
unsafe {
let set = factory
.create_descriptor_set(set_layouts[0].clone())
.unwrap();
factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite {
set: set.raw(),
binding: 0,
array_offset: 0,
descriptors: Some(hal::pso::Descriptor::Buffer(
buffer.raw(),
Some(uniform_offset(index as usize, align))
..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE),
)),
}));
sets.push(set);
}
}
if !scene.per_instance_const.is_empty() {
// println!(
// "upload const: {}",
// std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len()
// );
unsafe {
factory
.upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..])
.expect("update const buffer failed")
};
}
Ok(MeshRenderPipeline {
align,
buffer,
sets,
})
}
}
fn model_transform() -> nalgebra::Matrix4<f32> {
let rot = nalgebra::UnitQuaternion::identity();
nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into()
}
fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] {
let z_pos = nalgebra::UnitQuaternion::identity();
let z_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 0.0, -1.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let x_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(-1.0, 0.0, 0.0),
&Vector3::new(0.0, 1.0, 0.0),
);
let y_pos = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, 1.0, 0.0),
&Vector3::new(0.0, 0.0, 1.0),
);
let y_neg = nalgebra::UnitQuaternion::face_towards(
&Vector3::new(0.0, -1.0, 0.0),
&Vector3::new(0.0, 0.0, -1.0),
);
// let unit = 0.125;
let unit = 0.125;
let scale = 0.125;
[
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale)
.into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(),
nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale)
.into(),
]
}
impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B>
where
B: hal::Backend,
{
type Desc = MeshRenderPipelineDesc;
fn prepare(
&mut self,
factory: &Factory<B>,
_queue: QueueId,
_set_layouts: &[Handle<DescriptorSetLayout<B>>],
index: usize,
scene: &Scene<B>,
) -> PrepareResult {
let pt = ProfileTimer::start("prepare");
// println!("index: {}", index);
// println!(
// "upload uniform {}: {}",
// index,
// std::mem::size_of::<UniformArgs>()
// );
unsafe {
factory
.upload_visible_buffer(
&mut self.buffer,
uniform_offset(index, self.align),
&[UniformArgs {
// proj: scene.camera.proj.to_homogeneous(),
proj: scene.camera.proj,
view: scene.camera.view.to_homogeneous(),
model: model_transform2(),
}],
)
.unwrap()
};
// {
// let per_instance = &scene.per_instance[..];
// println!(
// "upload dyn {}: {}",
// index,
// // std::mem::size_of::<PerInstance>() * scene.per_instance.len(),
// std::mem::size_of_val(per_instance)
// );
// }
if !scene.per_instance.is_empty() {
unsafe {
factory
.upload_visible_buffer(
&mut self.buffer,
per_instance_offset(index, self.align),
&scene.per_instance[..],
)
.unwrap()
};
}
PrepareResult::DrawReuse
}
fn draw(
&mut self,
layout: &B::Pipeline | uniform_offset | identifier_name |
catalog.rs | new();
map.insert(TIMESTAMP_COLUMN, ts_column);
map.insert(1, source_column);
self.ensure_columns(map)
}
pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
let root = root.as_ref().to_path_buf();
let meta = root.join(CATALOG_METADATA);
Catalog::deserialize(&meta, &root)
}
pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref()))
}
pub fn columns(&self) -> &ColumnMap {
&self.colmap
}
#[cfg(feature = "validate_append")]
fn validate_append(&self, data: &Append) -> bool {
let ts_len = data.ts.len();
data.data.iter().all(|(_col, fragment)| {
// check fragment length for dense blocks
if !fragment.is_sparse() {
if ts_len != fragment.len() {
error!("Dense append fragment has different length than ts");
return false;
}
} else {
if ts_len > fragment.len() {
error!("Sparse append fragment longer than ts");
return false;
}
if fragment.iter().any(|(idx, _)| idx >= ts_len) {
error!("Sparse append fragment has index greater than ts length");
return false;
}
}
true
})
}
pub fn append(&self, data: &Append) -> Result<usize> {
if data.is_empty() {
bail!("Provided Append contains no data");
}
#[cfg(feature = "validate_append")]
{
if !self.validate_append(&data) {
bail!("Provided Append is not consistent");
}
}
// dispatch to proper PartitionGroup
if let Some(pg) = self.groups.get(&data.source_id) {
pg.append(&self, &data)
} else {
bail!("No PartitionGroup found for source_id = {}", data.source_id);
}
}
pub fn scan(&self, scan: &Scan) -> Result<ScanResult> {
let all_groups = if scan.groups.is_some() {
None
} else {
Some(self.groups.keys().cloned().collect::<Vec<_>>())
};
if scan.groups.is_some() {
scan.groups.as_ref().unwrap()
} else {
all_groups.as_ref().unwrap()
}
.chunks(2)
.map(|group| {
group
.par_iter()
.filter_map(|pgid| self.groups.get(pgid))
.map(|pg| pg.scan(&self, &scan))
// todo: this would potentially be better with some short-circuiting combinator
// instead
// need to bench with collect_into()
.reduce(
|| Ok(ScanResult::merge_identity()),
|a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
},
)
})
.fold(Ok(ScanResult::merge_identity()), |a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
})
}
pub fn flush(&self) -> Result<()> {
// TODO: add dirty flag
let meta = self.data_root.join(CATALOG_METADATA);
for pg in self.groups.values() {
pg.flush()?
}
Catalog::serialize(self, &meta)
}
/// Extend internal column map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a column type.
/// Use this feature with great caution.
pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> {
self.colmap.extend(type_map);
Ok(())
}
/// Adds a column to the catalog. It verifies that catalog does not already contain:
/// a) column with the given id, or
/// b) column with the given name.
/// This function takes all-or-nothing approach:
/// either all columns are added, or no changes are applied.
pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> {
for (id, column) in column_map.iter() {
info!(
"Adding column {}:{:?} with id {}",
column.name, column.ty, id
);
if self.colmap.contains_key(id) {
bail!("Column Id already exists {}", *id);
}
if self.colmap.values().any(|col| col.name == column.name) {
bail!("Column Name already exists '{}'", column.name);
}
}
self.ensure_columns(column_map)
}
/// Extend internal index map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a index type.
/// Also, the index' support for a given column is not checked.
/// Use this feature with great caution.
pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
self.indexes.extend(&*index_map);
Ok(())
}
/// Adds index to the catalog. Verifies that catalog does not already contain:
/// a) index for a column with the given id, or
/// b) index for a column with the given name.
/// This function takes all-or-nothing approach:
/// either all indexes are added, or no changes are applied.
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
for (id, index) in index_map.iter() {
let column = self
.colmap
.get(id)
.ok_or_else(|| err_msg(format!("column not found {}", id)))?;
info!(
"Adding index {:?} for column {}[{}]:{:?}",
index, column.name, id, column.ty
);
if self.indexes.contains_key(id) {
bail!("Index already exists {}", *id);
}
}
self.ensure_indexes(index_map)
}
/// Fetch the first non-occupied column index
///
/// todo: rethink this approach (max() every time)
pub fn next_id(&self) -> usize {
let default = 0;
*self.colmap.keys().max().unwrap_or(&default) + 1
}
/// Calculate an empty partition's capacity for given column set
pub(super) fn space_for_blocks<'iter>(
&self,
indices: impl Iterator<Item = &'iter ColumnId>,
) -> usize {
use crate::params::BLOCK_SIZE;
indices
.filter_map(|col_id| {
if let Some(column) = self.colmap.get(col_id) {
Some(BLOCK_SIZE / column.size_of())
} else {
None
}
})
.min()
// the default shouldn't ever happen, as there always should be ts block
// but in case it happens, this will return 0
// which in turn will cause new partition to be used
.unwrap_or_default()
}
pub(crate) fn ensure_group(
&mut self,
source_id: SourceId,
) -> Result<&mut PartitionGroup<'cat>> {
let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root);
Ok(self.groups.entry(source_id).or_insert_with(|| {
// this shouldn't fail in general
let root = PartitionGroupManager::new(data_root, source_id)
.with_context(|_| "Failed to create group manager")
.unwrap();
let pg = PartitionGroup::new(&root, source_id)
.with_context(|_| "Unable to create partition group")
.unwrap();
pg.flush().unwrap();
pg
}))
}
/// Add new partition group with given source id
pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> {
let _ = self.ensure_group(source_id)?;
Ok(())
}
fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>>
where
P: AsRef<Path>,
I: IntoIterator<Item = SourceId>,
{
ids.into_iter()
.map(|source_id| {
let path = PartitionGroupManager::new(&root, source_id).with_context(|_| {
format!(
"Unable to obtain data directory for partition group {}",
source_id
)
})?;
let partition_group = PartitionGroup::with_data(path)
.with_context(|_| format!("Unable to read partition group {:?}", source_id))?;
Ok((source_id, partition_group))
})
.collect()
}
fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> {
let meta = meta.as_ref();
let group_metas = Vec::from_iter(catalog.groups.keys());
let data = (catalog, group_metas);
serialize!(file meta, &data)
.with_context(|_| "Failed to serialize catalog metadata")
.map_err(|e| e.into())
}
fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> { | let meta = meta.as_ref();
if !meta.exists() { | random_line_split | |
catalog.rs | _METADATA);
if meta.exists() {
bail!("Catalog metadata already exists {:?}", meta);
}
let mut catalog = Catalog {
colmap: Default::default(),
groups: Default::default(),
indexes: Default::default(),
data_root: root,
};
catalog.ensure_default_columns()?;
Ok(catalog)
}
fn ensure_default_columns(&mut self) -> Result<()> {
let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp");
let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id");
let mut map = HashMap::new();
map.insert(TIMESTAMP_COLUMN, ts_column);
map.insert(1, source_column);
self.ensure_columns(map)
}
pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
let root = root.as_ref().to_path_buf();
let meta = root.join(CATALOG_METADATA);
Catalog::deserialize(&meta, &root)
}
pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref()))
}
pub fn columns(&self) -> &ColumnMap {
&self.colmap
}
#[cfg(feature = "validate_append")]
fn validate_append(&self, data: &Append) -> bool {
let ts_len = data.ts.len();
data.data.iter().all(|(_col, fragment)| {
// check fragment length for dense blocks
if !fragment.is_sparse() {
if ts_len != fragment.len() {
error!("Dense append fragment has different length than ts");
return false;
}
} else {
if ts_len > fragment.len() {
error!("Sparse append fragment longer than ts");
return false;
}
if fragment.iter().any(|(idx, _)| idx >= ts_len) {
error!("Sparse append fragment has index greater than ts length");
return false;
}
}
true
})
}
pub fn append(&self, data: &Append) -> Result<usize> {
if data.is_empty() {
bail!("Provided Append contains no data");
}
#[cfg(feature = "validate_append")]
{
if !self.validate_append(&data) {
bail!("Provided Append is not consistent");
}
}
// dispatch to proper PartitionGroup
if let Some(pg) = self.groups.get(&data.source_id) {
pg.append(&self, &data)
} else {
bail!("No PartitionGroup found for source_id = {}", data.source_id);
}
}
pub fn scan(&self, scan: &Scan) -> Result<ScanResult> {
let all_groups = if scan.groups.is_some() {
None
} else {
Some(self.groups.keys().cloned().collect::<Vec<_>>())
};
if scan.groups.is_some() {
scan.groups.as_ref().unwrap()
} else {
all_groups.as_ref().unwrap()
}
.chunks(2)
.map(|group| {
group
.par_iter()
.filter_map(|pgid| self.groups.get(pgid))
.map(|pg| pg.scan(&self, &scan))
// todo: this would potentially be better with some short-circuiting combinator
// instead
// need to bench with collect_into()
.reduce(
|| Ok(ScanResult::merge_identity()),
|a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
},
)
})
.fold(Ok(ScanResult::merge_identity()), |a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
})
}
pub fn flush(&self) -> Result<()> {
// TODO: add dirty flag
let meta = self.data_root.join(CATALOG_METADATA);
for pg in self.groups.values() {
pg.flush()?
}
Catalog::serialize(self, &meta)
}
/// Extend internal column map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a column type.
/// Use this feature with great caution.
pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> {
self.colmap.extend(type_map);
Ok(())
}
/// Adds a column to the catalog. It verifies that catalog does not already contain:
/// a) column with the given id, or
/// b) column with the given name.
/// This function takes all-or-nothing approach:
/// either all columns are added, or no changes are applied.
pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> {
for (id, column) in column_map.iter() {
info!(
"Adding column {}:{:?} with id {}",
column.name, column.ty, id
);
if self.colmap.contains_key(id) {
bail!("Column Id already exists {}", *id);
}
if self.colmap.values().any(|col| col.name == column.name) {
bail!("Column Name already exists '{}'", column.name);
}
}
self.ensure_columns(column_map)
}
/// Extend internal index map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a index type.
/// Also, the index' support for a given column is not checked.
/// Use this feature with great caution.
pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
self.indexes.extend(&*index_map);
Ok(())
}
/// Adds index to the catalog. Verifies that catalog does not already contain:
/// a) index for a column with the given id, or
/// b) index for a column with the given name.
/// This function takes all-or-nothing approach:
/// either all indexes are added, or no changes are applied.
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
for (id, index) in index_map.iter() {
let column = self
.colmap
.get(id)
.ok_or_else(|| err_msg(format!("column not found {}", id)))?;
info!(
"Adding index {:?} for column {}[{}]:{:?}",
index, column.name, id, column.ty
);
if self.indexes.contains_key(id) {
bail!("Index already exists {}", *id);
}
}
self.ensure_indexes(index_map)
}
/// Fetch the first non-occupied column index
///
/// todo: rethink this approach (max() every time)
pub fn next_id(&self) -> usize {
let default = 0;
*self.colmap.keys().max().unwrap_or(&default) + 1
}
/// Calculate an empty partition's capacity for given column set
pub(super) fn space_for_blocks<'iter>(
&self,
indices: impl Iterator<Item = &'iter ColumnId>,
) -> usize {
use crate::params::BLOCK_SIZE;
indices
.filter_map(|col_id| {
if let Some(column) = self.colmap.get(col_id) {
Some(BLOCK_SIZE / column.size_of())
} else {
None
}
})
.min()
// the default shouldn't ever happen, as there always should be ts block
// but in case it happens, this will return 0
// which in turn will cause new partition to be used
.unwrap_or_default()
}
pub(crate) fn ensure_group(
&mut self,
source_id: SourceId,
) -> Result<&mut PartitionGroup<'cat>> {
let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root);
Ok(self.groups.entry(source_id).or_insert_with(|| {
// this shouldn't fail in general
let root = PartitionGroupManager::new(data_root, source_id)
.with_context(|_| "Failed to create group manager")
.unwrap();
let pg = PartitionGroup::new(&root, source_id)
.with_context(|_| "Unable to create partition group")
.unwrap();
pg.flush().unwrap();
pg
}))
}
/// Add new partition group with given source id
pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> |
fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>>
where
P: AsRef<Path>,
I: IntoIterator<Item = SourceId>,
{
ids.into_iter()
.map(|source_id| {
let path = PartitionGroupManager::new(&root, source_id).with_context(|_| {
format!(
"Unable to obtain data directory for partition group {}",
source_id
)
})?;
let partition_group = PartitionGroup::with_data(path)
.with_context(|_| format!("Unable to read partition group {:?}", source_id))?;
Ok((source_id, partition_group))
})
| {
let _ = self.ensure_group(source_id)?;
Ok(())
} | identifier_body |
catalog.rs | _METADATA);
if meta.exists() {
bail!("Catalog metadata already exists {:?}", meta);
}
let mut catalog = Catalog {
colmap: Default::default(),
groups: Default::default(),
indexes: Default::default(),
data_root: root,
};
catalog.ensure_default_columns()?;
Ok(catalog)
}
fn | (&mut self) -> Result<()> {
let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp");
let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id");
let mut map = HashMap::new();
map.insert(TIMESTAMP_COLUMN, ts_column);
map.insert(1, source_column);
self.ensure_columns(map)
}
pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
let root = root.as_ref().to_path_buf();
let meta = root.join(CATALOG_METADATA);
Catalog::deserialize(&meta, &root)
}
pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref()))
}
pub fn columns(&self) -> &ColumnMap {
&self.colmap
}
#[cfg(feature = "validate_append")]
fn validate_append(&self, data: &Append) -> bool {
let ts_len = data.ts.len();
data.data.iter().all(|(_col, fragment)| {
// check fragment length for dense blocks
if !fragment.is_sparse() {
if ts_len != fragment.len() {
error!("Dense append fragment has different length than ts");
return false;
}
} else {
if ts_len > fragment.len() {
error!("Sparse append fragment longer than ts");
return false;
}
if fragment.iter().any(|(idx, _)| idx >= ts_len) {
error!("Sparse append fragment has index greater than ts length");
return false;
}
}
true
})
}
pub fn append(&self, data: &Append) -> Result<usize> {
if data.is_empty() {
bail!("Provided Append contains no data");
}
#[cfg(feature = "validate_append")]
{
if !self.validate_append(&data) {
bail!("Provided Append is not consistent");
}
}
// dispatch to proper PartitionGroup
if let Some(pg) = self.groups.get(&data.source_id) {
pg.append(&self, &data)
} else {
bail!("No PartitionGroup found for source_id = {}", data.source_id);
}
}
pub fn scan(&self, scan: &Scan) -> Result<ScanResult> {
let all_groups = if scan.groups.is_some() {
None
} else {
Some(self.groups.keys().cloned().collect::<Vec<_>>())
};
if scan.groups.is_some() {
scan.groups.as_ref().unwrap()
} else {
all_groups.as_ref().unwrap()
}
.chunks(2)
.map(|group| {
group
.par_iter()
.filter_map(|pgid| self.groups.get(pgid))
.map(|pg| pg.scan(&self, &scan))
// todo: this would potentially be better with some short-circuiting combinator
// instead
// need to bench with collect_into()
.reduce(
|| Ok(ScanResult::merge_identity()),
|a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
},
)
})
.fold(Ok(ScanResult::merge_identity()), |a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
})
}
pub fn flush(&self) -> Result<()> {
// TODO: add dirty flag
let meta = self.data_root.join(CATALOG_METADATA);
for pg in self.groups.values() {
pg.flush()?
}
Catalog::serialize(self, &meta)
}
/// Extend internal column map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a column type.
/// Use this feature with great caution.
pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> {
self.colmap.extend(type_map);
Ok(())
}
/// Adds a column to the catalog. It verifies that catalog does not already contain:
/// a) column with the given id, or
/// b) column with the given name.
/// This function takes all-or-nothing approach:
/// either all columns are added, or no changes are applied.
pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> {
for (id, column) in column_map.iter() {
info!(
"Adding column {}:{:?} with id {}",
column.name, column.ty, id
);
if self.colmap.contains_key(id) {
bail!("Column Id already exists {}", *id);
}
if self.colmap.values().any(|col| col.name == column.name) {
bail!("Column Name already exists '{}'", column.name);
}
}
self.ensure_columns(column_map)
}
/// Extend internal index map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a index type.
/// Also, the index' support for a given column is not checked.
/// Use this feature with great caution.
pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
self.indexes.extend(&*index_map);
Ok(())
}
/// Adds index to the catalog. Verifies that catalog does not already contain:
/// a) index for a column with the given id, or
/// b) index for a column with the given name.
/// This function takes all-or-nothing approach:
/// either all indexes are added, or no changes are applied.
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
for (id, index) in index_map.iter() {
let column = self
.colmap
.get(id)
.ok_or_else(|| err_msg(format!("column not found {}", id)))?;
info!(
"Adding index {:?} for column {}[{}]:{:?}",
index, column.name, id, column.ty
);
if self.indexes.contains_key(id) {
bail!("Index already exists {}", *id);
}
}
self.ensure_indexes(index_map)
}
/// Fetch the first non-occupied column index
///
/// todo: rethink this approach (max() every time)
pub fn next_id(&self) -> usize {
let default = 0;
*self.colmap.keys().max().unwrap_or(&default) + 1
}
/// Calculate an empty partition's capacity for given column set
pub(super) fn space_for_blocks<'iter>(
&self,
indices: impl Iterator<Item = &'iter ColumnId>,
) -> usize {
use crate::params::BLOCK_SIZE;
indices
.filter_map(|col_id| {
if let Some(column) = self.colmap.get(col_id) {
Some(BLOCK_SIZE / column.size_of())
} else {
None
}
})
.min()
// the default shouldn't ever happen, as there always should be ts block
// but in case it happens, this will return 0
// which in turn will cause new partition to be used
.unwrap_or_default()
}
pub(crate) fn ensure_group(
&mut self,
source_id: SourceId,
) -> Result<&mut PartitionGroup<'cat>> {
let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root);
Ok(self.groups.entry(source_id).or_insert_with(|| {
// this shouldn't fail in general
let root = PartitionGroupManager::new(data_root, source_id)
.with_context(|_| "Failed to create group manager")
.unwrap();
let pg = PartitionGroup::new(&root, source_id)
.with_context(|_| "Unable to create partition group")
.unwrap();
pg.flush().unwrap();
pg
}))
}
/// Add new partition group with given source id
pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> {
let _ = self.ensure_group(source_id)?;
Ok(())
}
fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>>
where
P: AsRef<Path>,
I: IntoIterator<Item = SourceId>,
{
ids.into_iter()
.map(|source_id| {
let path = PartitionGroupManager::new(&root, source_id).with_context(|_| {
format!(
"Unable to obtain data directory for partition group {}",
source_id
)
})?;
let partition_group = PartitionGroup::with_data(path)
.with_context(|_| format!("Unable to read partition group {:?}", source_id))?;
Ok((source_id, partition_group))
})
. | ensure_default_columns | identifier_name |
catalog.rs | _METADATA);
if meta.exists() {
bail!("Catalog metadata already exists {:?}", meta);
}
let mut catalog = Catalog {
colmap: Default::default(),
groups: Default::default(),
indexes: Default::default(),
data_root: root,
};
catalog.ensure_default_columns()?;
Ok(catalog)
}
fn ensure_default_columns(&mut self) -> Result<()> {
let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp");
let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id");
let mut map = HashMap::new();
map.insert(TIMESTAMP_COLUMN, ts_column);
map.insert(1, source_column);
self.ensure_columns(map)
}
pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
let root = root.as_ref().to_path_buf();
let meta = root.join(CATALOG_METADATA);
Catalog::deserialize(&meta, &root)
}
pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> {
Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref()))
}
pub fn columns(&self) -> &ColumnMap {
&self.colmap
}
#[cfg(feature = "validate_append")]
fn validate_append(&self, data: &Append) -> bool {
let ts_len = data.ts.len();
data.data.iter().all(|(_col, fragment)| {
// check fragment length for dense blocks
if !fragment.is_sparse() {
if ts_len != fragment.len() {
error!("Dense append fragment has different length than ts");
return false;
}
} else {
if ts_len > fragment.len() {
error!("Sparse append fragment longer than ts");
return false;
}
if fragment.iter().any(|(idx, _)| idx >= ts_len) {
error!("Sparse append fragment has index greater than ts length");
return false;
}
}
true
})
}
pub fn append(&self, data: &Append) -> Result<usize> {
if data.is_empty() {
bail!("Provided Append contains no data");
}
#[cfg(feature = "validate_append")]
{
if !self.validate_append(&data) {
bail!("Provided Append is not consistent");
}
}
// dispatch to proper PartitionGroup
if let Some(pg) = self.groups.get(&data.source_id) {
pg.append(&self, &data)
} else {
bail!("No PartitionGroup found for source_id = {}", data.source_id);
}
}
pub fn scan(&self, scan: &Scan) -> Result<ScanResult> {
let all_groups = if scan.groups.is_some() {
None
} else {
Some(self.groups.keys().cloned().collect::<Vec<_>>())
};
if scan.groups.is_some() {
scan.groups.as_ref().unwrap()
} else {
all_groups.as_ref().unwrap()
}
.chunks(2)
.map(|group| {
group
.par_iter()
.filter_map(|pgid| self.groups.get(pgid))
.map(|pg| pg.scan(&self, &scan))
// todo: this would potentially be better with some short-circuiting combinator
// instead
// need to bench with collect_into()
.reduce(
|| Ok(ScanResult::merge_identity()),
|a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
},
)
})
.fold(Ok(ScanResult::merge_identity()), |a, b| {
let mut a = a?;
let b = b?;
a.merge(b)?;
Ok(a)
})
}
pub fn flush(&self) -> Result<()> {
// TODO: add dirty flag
let meta = self.data_root.join(CATALOG_METADATA);
for pg in self.groups.values() {
pg.flush()?
}
Catalog::serialize(self, &meta)
}
/// Extend internal column map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a column type.
/// Use this feature with great caution.
pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> {
self.colmap.extend(type_map);
Ok(())
}
/// Adds a column to the catalog. It verifies that catalog does not already contain:
/// a) column with the given id, or
/// b) column with the given name.
/// This function takes all-or-nothing approach:
/// either all columns are added, or no changes are applied.
pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> {
for (id, column) in column_map.iter() {
info!(
"Adding column {}:{:?} with id {}",
column.name, column.ty, id
);
if self.colmap.contains_key(id) {
bail!("Column Id already exists {}", *id);
}
if self.colmap.values().any(|col| col.name == column.name) |
}
self.ensure_columns(column_map)
}
/// Extend internal index map without any sanitization checks.
///
/// This function uses `std::iter::Extend` internally,
/// so it allows redefinition of a index type.
/// Also, the index' support for a given column is not checked.
/// Use this feature with great caution.
pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
self.indexes.extend(&*index_map);
Ok(())
}
/// Adds index to the catalog. Verifies that catalog does not already contain:
/// a) index for a column with the given id, or
/// b) index for a column with the given name.
/// This function takes all-or-nothing approach:
/// either all indexes are added, or no changes are applied.
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> {
for (id, index) in index_map.iter() {
let column = self
.colmap
.get(id)
.ok_or_else(|| err_msg(format!("column not found {}", id)))?;
info!(
"Adding index {:?} for column {}[{}]:{:?}",
index, column.name, id, column.ty
);
if self.indexes.contains_key(id) {
bail!("Index already exists {}", *id);
}
}
self.ensure_indexes(index_map)
}
/// Fetch the first non-occupied column index
///
/// todo: rethink this approach (max() every time)
pub fn next_id(&self) -> usize {
let default = 0;
*self.colmap.keys().max().unwrap_or(&default) + 1
}
/// Calculate an empty partition's capacity for given column set
pub(super) fn space_for_blocks<'iter>(
&self,
indices: impl Iterator<Item = &'iter ColumnId>,
) -> usize {
use crate::params::BLOCK_SIZE;
indices
.filter_map(|col_id| {
if let Some(column) = self.colmap.get(col_id) {
Some(BLOCK_SIZE / column.size_of())
} else {
None
}
})
.min()
// the default shouldn't ever happen, as there always should be ts block
// but in case it happens, this will return 0
// which in turn will cause new partition to be used
.unwrap_or_default()
}
pub(crate) fn ensure_group(
&mut self,
source_id: SourceId,
) -> Result<&mut PartitionGroup<'cat>> {
let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root);
Ok(self.groups.entry(source_id).or_insert_with(|| {
// this shouldn't fail in general
let root = PartitionGroupManager::new(data_root, source_id)
.with_context(|_| "Failed to create group manager")
.unwrap();
let pg = PartitionGroup::new(&root, source_id)
.with_context(|_| "Unable to create partition group")
.unwrap();
pg.flush().unwrap();
pg
}))
}
/// Add new partition group with given source id
pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> {
let _ = self.ensure_group(source_id)?;
Ok(())
}
fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>>
where
P: AsRef<Path>,
I: IntoIterator<Item = SourceId>,
{
ids.into_iter()
.map(|source_id| {
let path = PartitionGroupManager::new(&root, source_id).with_context(|_| {
format!(
"Unable to obtain data directory for partition group {}",
source_id
)
})?;
let partition_group = PartitionGroup::with_data(path)
.with_context(|_| format!("Unable to read partition group {:?}", source_id))?;
Ok((source_id, partition_group))
})
| {
bail!("Column Name already exists '{}'", column.name);
} | conditional_block |
ViewTrajectories.py | "GREEN", "SALMON", "VIOLET"]
CurrentColor = [0]
def GetColor():
color = colorlist[CurrentColor[0]]
CurrentColor[0] += 1
if CurrentColor[0] > len(colorlist):
CurrentColor[0] = 0
return color
def EVT_NEW_FRAME_EVENT( window, function ):
window.Connect( -1, -1, NEW_FRAME_EVENT, function )
class FrameEvent(wx.PyEvent):
def __init__(self):
wx.PyEvent.__init__(self)
self.SetEventType(NEW_FRAME_EVENT)
class DrawFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
## Set up the MenuBar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
file_menu.Append(ID_OPEN, "&Open map","Open a bna file")
wx.EVT_MENU(self, ID_OPEN, self.Open_bna)
file_menu.AppendSeparator()
file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory")
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing")
wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie)
file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory")
wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie)
file_menu.AppendSeparator()
file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program")
wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit)
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
MenuBar.Append(file_menu, "&File")
view_menu = wx.Menu()
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window")
wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit)
view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback")
wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate)
MenuBar.Append(view_menu, "&View")
help_menu = wx.Menu()
help_menu.Append(ID_ABOUT_MENU, "&About",
"More information About this program")
wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout)
MenuBar.Append(help_menu, "&Help")
self.SetMenuBar(MenuBar)
self.CreateStatusBar()
self.SetStatusText("")
wx.EVT_CLOSE(self, self.OnCloseWindow)
# Add the Canvas
self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500),
ProjectionFun = 'FlatEarth',
Debug = 0,
#BackgroundColor = "DARK SLATE BLUE")
BackgroundColor = "WHITE",
#UseBackground = 1,
).Canvas
self.Canvas = NavCanvas.Canvas
self.Canvas.NumBetweenBlits = 20
tb = self.NavCanvas.ToolBar
tb.AddSeparator()
RewindButton = wx.Button(tb, -1, "Rewind")
tb.AddControl(RewindButton)
wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind)
StopButton = wx.Button(tb, -1, "Stop")
tb.AddControl(StopButton)
wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop)
PlayButton = wx.Button(tb, -1, "Play")
tb.AddControl(PlayButton)
wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play)
tb.Realize()
self.Show(True)
self.LE_movie = None
self.LEsObjects = []
self.TimeStep = 0
self.FrameDelay = 10 # milliseconds
self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN)
self.Timer = wx.PyTimer(self.ShowFrame)
return None
def Open_bna(self, event):
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.LoadMap(filename)
def LoadMap(self, filename):
self.Canvas.Clear()
try:
shorelines = hazmat.read_bna(filename,polytype = "PolygonSet")
for shoreline in shorelines:
self.Canvas.AddPolygon(shoreline,
LineWidth = 1,
LineColor = "Black",
FillColor = "Brown",
FillStyle = 'Solid',
Foreground = 0)
self.Canvas.ZoomToBB()
except:
dlg = wx.MessageDialog(self, 'There was something wrong with the selected map file',
'View Trajectories', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def Load_Movie(self, event):
import glob
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
(self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename)
wx.GetApp().Yield()
return True
else:
return None
def Run_Movie(self, event):
if self.Load_Movie(None):
if self.LEsObjects:
self.Canvas.RemoveObjects(self.LEsObjects)
self.LEsObjects = []
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1))
CurrentColor[0] = 1
self.ReRun_Movie(None)
def RunOnTop_Movie(self, event):
if self.Load_Movie(None):
for object in self.LEsObjects:
object.PutInBackground()
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) )
self.ReRun_Movie(None)
def ReRun_Movie(self, event):
if not self.LE_movie:
self.Run_Movie(None)
else:
self.Play(None)
## def UpdateThread(self):
## try:
## while hasattr(self, 'event') and not self.event.isSet():
## wx.PostEvent(self, FrameEvent())
## self.event.wait(self.FrameDelay)
## except wx.PyDeadObjectError: # BUG: we were destroyed
## return
def Running(self):
"""Returns true if the animation is running"""
return self.Timer.IsRunning()
def Play(self,event):
"""Start the animation"""
if not self.Running():
if self.LE_movie:
#self.event.clear()
#thread = threading.Thread(target = self.UpdateThread)
#thread.start()
self.Timer.Start(self.FrameDelay)
else:
self.Run_Movie(None)
def Stop(self,event):
self.Timer.Stop()
def ShowFrame(self):
if self.TimeStep < len(self.LE_movie):
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
# this sets the data for the next frame
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.Canvas.Draw()
self.TimeStep += 1
wx.GetApp().Yield(True)
else:
self.Timer.Stop()
def Rewind(self,event):
self.TimeStep = 0
if self.LE_movie:
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
self.Canvas.Draw()
def OnAbout(self, event):
dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n"
"the use of the FloatCanvas\n",
"About Me", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def ZoomToFit(self,event):
self.Canvas.ZoomToBB()
def Clear(self,event = None):
self.Canvas.Clear()
self.Canvas.Draw()
def OnQuit(self,event):
self.Close(True)
def OnCloseWindow(self, event):
self.Destroy()
def RunMovie(self,event = None):
import RandomArray
start = clock()
shift = RandomArray.randint(0,0,(2,))
NumFrames = 50
for i in range(NumFrames):
points = self.LEs.Points
shift = RandomArray.randint(-5,5,(2,))
points += shift
self.LEs.SetPoints(points)
self.Canvas.Draw()
print "running the movie took %f seconds to disply %i frames"%((clock() - start),NumFrames)
def SetFrameRate(self,event):
dlg = wx.TextEntryDialog(self,
'Please set the time between frames in milliseconds',
'ViewTrajectories',
"%i"%self.FrameDelay)
dlg.SetValue("%i"%self.FrameDelay)
if dlg.ShowModal() == wx.ID_OK:
try:
self.FrameDelay = int(dlg.GetValue())
except:
pass | dlg.Destroy()
class TrajectoryViewer(wx.App):
""" | random_line_split | |
ViewTrajectories.py | IT_MENU = wx.NewId()
ID_DRAWTEST_MENU = wx.NewId()
ID_DRAWMAP_MENU = wx.NewId()
ID_CLEAR_MENU = wx.NewId()
ID_SET_FRAMERATE_MENU = wx.NewId()
ID_OPEN = wx.NewId()
ID_RUN_MOVIE = wx.NewId()
ID_RUNONTOP_MOVIE = wx.NewId()
ID_RERUN_MOVIE = wx.NewId()
ID_PAUSE_BUTTON = wx.NewId()
colorlist = ["BLACK", "RED", "CYAN", "GREEN", "SALMON", "VIOLET"]
CurrentColor = [0]
def GetColor():
color = colorlist[CurrentColor[0]]
CurrentColor[0] += 1
if CurrentColor[0] > len(colorlist):
CurrentColor[0] = 0
return color
def EVT_NEW_FRAME_EVENT( window, function ):
window.Connect( -1, -1, NEW_FRAME_EVENT, function )
class FrameEvent(wx.PyEvent):
def __init__(self):
wx.PyEvent.__init__(self)
self.SetEventType(NEW_FRAME_EVENT)
class DrawFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
## Set up the MenuBar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
file_menu.Append(ID_OPEN, "&Open map","Open a bna file")
wx.EVT_MENU(self, ID_OPEN, self.Open_bna)
file_menu.AppendSeparator()
file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory")
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing")
wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie)
file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory")
wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie)
file_menu.AppendSeparator()
file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program")
wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit)
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
MenuBar.Append(file_menu, "&File")
view_menu = wx.Menu()
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window")
wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit)
view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback")
wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate)
MenuBar.Append(view_menu, "&View")
help_menu = wx.Menu()
help_menu.Append(ID_ABOUT_MENU, "&About",
"More information About this program")
wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout)
MenuBar.Append(help_menu, "&Help")
self.SetMenuBar(MenuBar)
self.CreateStatusBar()
self.SetStatusText("")
wx.EVT_CLOSE(self, self.OnCloseWindow)
# Add the Canvas
self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500),
ProjectionFun = 'FlatEarth',
Debug = 0,
#BackgroundColor = "DARK SLATE BLUE")
BackgroundColor = "WHITE",
#UseBackground = 1,
).Canvas
self.Canvas = NavCanvas.Canvas
self.Canvas.NumBetweenBlits = 20
tb = self.NavCanvas.ToolBar
tb.AddSeparator()
RewindButton = wx.Button(tb, -1, "Rewind")
tb.AddControl(RewindButton)
wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind)
StopButton = wx.Button(tb, -1, "Stop")
tb.AddControl(StopButton)
wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop)
PlayButton = wx.Button(tb, -1, "Play")
tb.AddControl(PlayButton)
wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play)
tb.Realize()
self.Show(True)
self.LE_movie = None
self.LEsObjects = []
self.TimeStep = 0
self.FrameDelay = 10 # milliseconds
self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN)
self.Timer = wx.PyTimer(self.ShowFrame)
return None
def Open_bna(self, event):
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.LoadMap(filename)
def LoadMap(self, filename):
self.Canvas.Clear()
try:
shorelines = hazmat.read_bna(filename,polytype = "PolygonSet")
for shoreline in shorelines:
self.Canvas.AddPolygon(shoreline,
LineWidth = 1,
LineColor = "Black",
FillColor = "Brown",
FillStyle = 'Solid',
Foreground = 0)
self.Canvas.ZoomToBB()
except:
dlg = wx.MessageDialog(self, 'There was something wrong with the selected map file',
'View Trajectories', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def Load_Movie(self, event):
import glob
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
(self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename)
wx.GetApp().Yield()
return True
else:
return None
def Run_Movie(self, event):
if self.Load_Movie(None):
if self.LEsObjects:
self.Canvas.RemoveObjects(self.LEsObjects)
self.LEsObjects = []
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1))
CurrentColor[0] = 1
self.ReRun_Movie(None)
def RunOnTop_Movie(self, event):
if self.Load_Movie(None):
for object in self.LEsObjects:
object.PutInBackground()
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) )
self.ReRun_Movie(None)
def ReRun_Movie(self, event):
if not self.LE_movie:
self.Run_Movie(None)
else:
self.Play(None)
## def UpdateThread(self):
## try:
## while hasattr(self, 'event') and not self.event.isSet():
## wx.PostEvent(self, FrameEvent())
## self.event.wait(self.FrameDelay)
## except wx.PyDeadObjectError: # BUG: we were destroyed
## return
def Running(self):
"""Returns true if the animation is running"""
return self.Timer.IsRunning()
def Play(self,event):
"""Start the animation"""
if not self.Running():
if self.LE_movie:
#self.event.clear()
#thread = threading.Thread(target = self.UpdateThread)
#thread.start()
self.Timer.Start(self.FrameDelay)
else:
self.Run_Movie(None)
def Stop(self,event):
self.Timer.Stop()
def ShowFrame(self):
if self.TimeStep < len(self.LE_movie):
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
# this sets the data for the next frame
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.Canvas.Draw()
self.TimeStep += 1
wx.GetApp().Yield(True)
else:
self.Timer.Stop()
def Rewind(self,event):
self.TimeStep = 0
if self.LE_movie:
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
self.Canvas.Draw()
def OnAbout(self, event):
dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n"
"the use of the FloatCanvas\n",
"About Me", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def ZoomToFit(self,event):
self.Canvas.ZoomToBB()
def Clear(self,event = None):
self.Canvas.Clear()
self.Canvas.Draw()
def | (self,event):
self.Close(True)
def OnCloseWindow(self, event):
self.Destroy()
def RunMovie(self,event = None):
import RandomArray
start = clock()
shift = RandomArray.randint(0,0,(2,))
NumFrames = 50
for i in range(NumFrames):
points = self.LEs.Points
shift = RandomArray.randint(-5,5,(2,))
points += shift
self.LEs.SetPoints(points)
self.Canvas.Draw()
print "running the movie took %f seconds to disply % | OnQuit | identifier_name |
ViewTrajectories.py | IT_MENU = wx.NewId()
ID_DRAWTEST_MENU = wx.NewId()
ID_DRAWMAP_MENU = wx.NewId()
ID_CLEAR_MENU = wx.NewId()
ID_SET_FRAMERATE_MENU = wx.NewId()
ID_OPEN = wx.NewId()
ID_RUN_MOVIE = wx.NewId()
ID_RUNONTOP_MOVIE = wx.NewId()
ID_RERUN_MOVIE = wx.NewId()
ID_PAUSE_BUTTON = wx.NewId()
colorlist = ["BLACK", "RED", "CYAN", "GREEN", "SALMON", "VIOLET"]
CurrentColor = [0]
def GetColor():
color = colorlist[CurrentColor[0]]
CurrentColor[0] += 1
if CurrentColor[0] > len(colorlist):
CurrentColor[0] = 0
return color
def EVT_NEW_FRAME_EVENT( window, function ):
window.Connect( -1, -1, NEW_FRAME_EVENT, function )
class FrameEvent(wx.PyEvent):
def __init__(self):
wx.PyEvent.__init__(self)
self.SetEventType(NEW_FRAME_EVENT)
class DrawFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
## Set up the MenuBar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
file_menu.Append(ID_OPEN, "&Open map","Open a bna file")
wx.EVT_MENU(self, ID_OPEN, self.Open_bna)
file_menu.AppendSeparator()
file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory")
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing")
wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie)
file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory")
wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie)
file_menu.AppendSeparator()
file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program")
wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit)
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
MenuBar.Append(file_menu, "&File")
view_menu = wx.Menu()
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window")
wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit)
view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback")
wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate)
MenuBar.Append(view_menu, "&View")
help_menu = wx.Menu()
help_menu.Append(ID_ABOUT_MENU, "&About",
"More information About this program")
wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout)
MenuBar.Append(help_menu, "&Help")
self.SetMenuBar(MenuBar)
self.CreateStatusBar()
self.SetStatusText("")
wx.EVT_CLOSE(self, self.OnCloseWindow)
# Add the Canvas
self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500),
ProjectionFun = 'FlatEarth',
Debug = 0,
#BackgroundColor = "DARK SLATE BLUE")
BackgroundColor = "WHITE",
#UseBackground = 1,
).Canvas
self.Canvas = NavCanvas.Canvas
self.Canvas.NumBetweenBlits = 20
tb = self.NavCanvas.ToolBar
tb.AddSeparator()
RewindButton = wx.Button(tb, -1, "Rewind")
tb.AddControl(RewindButton)
wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind)
StopButton = wx.Button(tb, -1, "Stop")
tb.AddControl(StopButton)
wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop)
PlayButton = wx.Button(tb, -1, "Play")
tb.AddControl(PlayButton)
wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play)
tb.Realize()
self.Show(True)
self.LE_movie = None
self.LEsObjects = []
self.TimeStep = 0
self.FrameDelay = 10 # milliseconds
self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN)
self.Timer = wx.PyTimer(self.ShowFrame)
return None
def Open_bna(self, event):
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.LoadMap(filename)
def LoadMap(self, filename):
self.Canvas.Clear()
try:
shorelines = hazmat.read_bna(filename,polytype = "PolygonSet")
for shoreline in shorelines:
self.Canvas.AddPolygon(shoreline,
LineWidth = 1,
LineColor = "Black",
FillColor = "Brown",
FillStyle = 'Solid',
Foreground = 0)
self.Canvas.ZoomToBB()
except:
dlg = wx.MessageDialog(self, 'There was something wrong with the selected map file',
'View Trajectories', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def Load_Movie(self, event):
import glob
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
(self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename)
wx.GetApp().Yield()
return True
else:
return None
def Run_Movie(self, event):
if self.Load_Movie(None):
if self.LEsObjects:
self.Canvas.RemoveObjects(self.LEsObjects)
self.LEsObjects = []
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1))
CurrentColor[0] = 1
self.ReRun_Movie(None)
def RunOnTop_Movie(self, event):
if self.Load_Movie(None):
for object in self.LEsObjects:
object.PutInBackground()
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) )
self.ReRun_Movie(None)
def ReRun_Movie(self, event):
if not self.LE_movie:
self.Run_Movie(None)
else:
self.Play(None)
## def UpdateThread(self):
## try:
## while hasattr(self, 'event') and not self.event.isSet():
## wx.PostEvent(self, FrameEvent())
## self.event.wait(self.FrameDelay)
## except wx.PyDeadObjectError: # BUG: we were destroyed
## return
def Running(self):
"""Returns true if the animation is running"""
return self.Timer.IsRunning()
def Play(self,event):
"""Start the animation"""
if not self.Running():
if self.LE_movie:
#self.event.clear()
#thread = threading.Thread(target = self.UpdateThread)
#thread.start()
self.Timer.Start(self.FrameDelay)
else:
self.Run_Movie(None)
def Stop(self,event):
self.Timer.Stop()
def ShowFrame(self):
if self.TimeStep < len(self.LE_movie):
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
# this sets the data for the next frame
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.Canvas.Draw()
self.TimeStep += 1
wx.GetApp().Yield(True)
else:
self.Timer.Stop()
def Rewind(self,event):
self.TimeStep = 0
if self.LE_movie:
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
self.Canvas.Draw()
def OnAbout(self, event):
dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n"
"the use of the FloatCanvas\n",
"About Me", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def ZoomToFit(self,event):
self.Canvas.ZoomToBB()
def Clear(self,event = None):
self.Canvas.Clear()
self.Canvas.Draw()
def OnQuit(self,event):
self.Close(True)
def OnCloseWindow(self, event):
self.Destroy()
def RunMovie(self,event = None):
import RandomArray
start = clock()
shift = RandomArray.randint(0,0,(2,))
NumFrames = 50
for i in range(NumFrames):
|
print "running the movie took %f seconds to disply % | points = self.LEs.Points
shift = RandomArray.randint(-5,5,(2,))
points += shift
self.LEs.SetPoints(points)
self.Canvas.Draw() | conditional_block |
ViewTrajectories.py | _FRAME_EVENT( window, function ):
window.Connect( -1, -1, NEW_FRAME_EVENT, function )
class FrameEvent(wx.PyEvent):
def __init__(self):
wx.PyEvent.__init__(self)
self.SetEventType(NEW_FRAME_EVENT)
class DrawFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
## Set up the MenuBar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
file_menu.Append(ID_OPEN, "&Open map","Open a bna file")
wx.EVT_MENU(self, ID_OPEN, self.Open_bna)
file_menu.AppendSeparator()
file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory")
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing")
wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie)
file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory")
wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie)
file_menu.AppendSeparator()
file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program")
wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit)
wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
MenuBar.Append(file_menu, "&File")
view_menu = wx.Menu()
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window")
wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit)
view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback")
wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate)
MenuBar.Append(view_menu, "&View")
help_menu = wx.Menu()
help_menu.Append(ID_ABOUT_MENU, "&About",
"More information About this program")
wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout)
MenuBar.Append(help_menu, "&Help")
self.SetMenuBar(MenuBar)
self.CreateStatusBar()
self.SetStatusText("")
wx.EVT_CLOSE(self, self.OnCloseWindow)
# Add the Canvas
self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500),
ProjectionFun = 'FlatEarth',
Debug = 0,
#BackgroundColor = "DARK SLATE BLUE")
BackgroundColor = "WHITE",
#UseBackground = 1,
).Canvas
self.Canvas = NavCanvas.Canvas
self.Canvas.NumBetweenBlits = 20
tb = self.NavCanvas.ToolBar
tb.AddSeparator()
RewindButton = wx.Button(tb, -1, "Rewind")
tb.AddControl(RewindButton)
wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind)
StopButton = wx.Button(tb, -1, "Stop")
tb.AddControl(StopButton)
wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop)
PlayButton = wx.Button(tb, -1, "Play")
tb.AddControl(PlayButton)
wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play)
tb.Realize()
self.Show(True)
self.LE_movie = None
self.LEsObjects = []
self.TimeStep = 0
self.FrameDelay = 10 # milliseconds
self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN)
self.Timer = wx.PyTimer(self.ShowFrame)
return None
def Open_bna(self, event):
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.LoadMap(filename)
def LoadMap(self, filename):
self.Canvas.Clear()
try:
shorelines = hazmat.read_bna(filename,polytype = "PolygonSet")
for shoreline in shorelines:
self.Canvas.AddPolygon(shoreline,
LineWidth = 1,
LineColor = "Black",
FillColor = "Brown",
FillStyle = 'Solid',
Foreground = 0)
self.Canvas.ZoomToBB()
except:
dlg = wx.MessageDialog(self, 'There was something wrong with the selected map file',
'View Trajectories', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def Load_Movie(self, event):
import glob
dlg = self.FileDialog
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
(self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename)
wx.GetApp().Yield()
return True
else:
return None
def Run_Movie(self, event):
if self.Load_Movie(None):
if self.LEsObjects:
self.Canvas.RemoveObjects(self.LEsObjects)
self.LEsObjects = []
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1))
CurrentColor[0] = 1
self.ReRun_Movie(None)
def RunOnTop_Movie(self, event):
if self.Load_Movie(None):
for object in self.LEsObjects:
object.PutInBackground()
self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) )
self.ReRun_Movie(None)
def ReRun_Movie(self, event):
if not self.LE_movie:
self.Run_Movie(None)
else:
self.Play(None)
## def UpdateThread(self):
## try:
## while hasattr(self, 'event') and not self.event.isSet():
## wx.PostEvent(self, FrameEvent())
## self.event.wait(self.FrameDelay)
## except wx.PyDeadObjectError: # BUG: we were destroyed
## return
def Running(self):
"""Returns true if the animation is running"""
return self.Timer.IsRunning()
def Play(self,event):
"""Start the animation"""
if not self.Running():
if self.LE_movie:
#self.event.clear()
#thread = threading.Thread(target = self.UpdateThread)
#thread.start()
self.Timer.Start(self.FrameDelay)
else:
self.Run_Movie(None)
def Stop(self,event):
self.Timer.Stop()
def ShowFrame(self):
if self.TimeStep < len(self.LE_movie):
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
# this sets the data for the next frame
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.Canvas.Draw()
self.TimeStep += 1
wx.GetApp().Yield(True)
else:
self.Timer.Stop()
def Rewind(self,event):
self.TimeStep = 0
if self.LE_movie:
self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
self.Canvas.Draw()
def OnAbout(self, event):
dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n"
"the use of the FloatCanvas\n",
"About Me", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def ZoomToFit(self,event):
self.Canvas.ZoomToBB()
def Clear(self,event = None):
self.Canvas.Clear()
self.Canvas.Draw()
def OnQuit(self,event):
self.Close(True)
def OnCloseWindow(self, event):
self.Destroy()
def RunMovie(self,event = None):
import RandomArray
start = clock()
shift = RandomArray.randint(0,0,(2,))
NumFrames = 50
for i in range(NumFrames):
points = self.LEs.Points
shift = RandomArray.randint(-5,5,(2,))
points += shift
self.LEs.SetPoints(points)
self.Canvas.Draw()
print "running the movie took %f seconds to disply %i frames"%((clock() - start),NumFrames)
def SetFrameRate(self,event):
dlg = wx.TextEntryDialog(self,
'Please set the time between frames in milliseconds',
'ViewTrajectories',
"%i"%self.FrameDelay)
dlg.SetValue("%i"%self.FrameDelay)
if dlg.ShowModal() == wx.ID_OK:
try:
self.FrameDelay = int(dlg.GetValue())
except:
pass
dlg.Destroy()
class TrajectoryViewer(wx.App):
"""
Any bugs, comments, feedback, questions, and especially code are welcome:
-Chris Barker
Chris.Barker@noaa.gov
"""
def OnInit(self):
| frame = DrawFrame(None, title="Trajectory Viewer", size=(700,700))
self.SetTopWindow(frame)
return True | identifier_body | |
cluster_instances.js | </b>";
}
if ("SLAVE" == value) {
if (row.slaveof) {
return "└";
}
}
return "";
}
function instanceStatusFormatter(value) {
if ("STARTED" == value) {
return "运行中";
}
if ("STOPPED" == value) {
return "已停止";
}
return "未运行";
}
function instanceTypeFormatter(value) {
if (value.indexOf("SENTINEL") != -1) {
return "sentinel节点";
} else {
return "redis节点";
}
}
function dateTimeFormatter(value) {
return new Date(value).format("yyyy-MM-dd HH:mm:ss");
}
// 存储当前行的样式,用于区分每组主从
var classIndex = 2;
function rowStyle(row, index) {
var classes = ['active', 'success', 'info', 'warning', 'danger'];
if (row.status == "IDLE") {
return {
classes: 'info'
};
}
if (row.status == "STARTED") {
if (row.role == 'MASTER') {
// success info两个来回切换
classIndex = 3 - classIndex;
}
return {
classes: classes[classIndex]
};
}
if (row.status == "STOPPED") {
return {
classes: 'danger'
};
}
return {};
}
function monitorStatus() {
var table = $('#redisServerTable');
var data = table.bootstrapTable('getData');
if (data.length > 0) {
$.ajax({
type: "PATCH",
url: "/clusters/" + data[0].clusterId + "/instances",
complete: function(XMLHttpRequest, textStatus) {
switch(XMLHttpRequest.status) {
case 205:
table.bootstrapTable('refresh');
break;
default:
break;
}
}
});
}
}
function startRedisServer() {
sendSelectedServerInfo('startup', '启动');
}
function stopRedisServer() {
sendSelectedServerInfo('shutdown', '停止');
}
function sendSelectedServerInfo(command, commandTip) {
if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) {
return;
}
var table = $('#redisServerTable');
var hasError = false;
var servers = $.map(table.bootstrapTable('getSelections'), fun | type: "PATCH",
url: "/" + command + "/instances",
data: JSON.stringify(servers),
contentType: "application/json",
success: function () {
table.bootstrapTable('refresh');
unblockUI();
}
});
}
function delClusterNodes() {
var table = $('#redisServerTable');
var servers = $.map(table.bootstrapTable('getSelections'), function (row) {
return hierarchicalRow(row);
});
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
;
if (!
confirm("您确认进行删除吗?")) {
return;
}
if (!checkWhetherAllowedRemove(servers)) {
return;
}
table.bootstrapTable('showLoading');
$.ajax({
type: "POST",
url: "redis/cluster/delClusterNodes",
data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
success: function (response) {
table.bootstrapTable('hideLoading');
table.bootstrapTable('refresh', {data: response});
}
});
}
function checkWhetherAllowedRemove(servers) {
var result = false;
$.ajax({
type: "POST",
url: "redis/cluster/getNodesInfo",
data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
async: false,
success: function (data) {
if (!data || data.length == 0) {
alert("未找到匹配的节点,请重试或检查主机状态!");
return;
}
var msg = "";
for (var i = 0; i < data.length; i++) {
node = data[i];
if (node.slotRanges.length) {
msg += "[" + node.ip + ":" + node.port + "],";
}
}
if (msg != "") {
msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!";
alert(msg);
} else {
result = true;
}
},
error: function (e) {
alert("检测主机状态失败!");
}
});
return result;
}
/**
* 主从切换
*/
function failover() {
var table = $('#redisServerTable');
var servers = table.bootstrapTable('getSelections');
if (servers.length != 1) {
alert("您只能选取一个节点执行此操作!");
return;
}
if (servers[0].role != 'SLAVE') {
alert("此操作只能在slave节点上进行,请重新选取slave节点!");
return;
}
if (!confirm("确定执行主从切换操作?")) {
return;
}
blockUI("正在执行操作");
$.ajax({
type: "PATCH",
url: "/failover/instances/" + servers[0].id,
success: function (response) {
table.bootstrapTable('refresh');
},
error: function (e) {
alert(e.responseJSON.data);
},
complete: function(e) {
unblockUI();
}
});
}
function openSlaveOfModal() {
var selectedRows = $('#redisServerTable').bootstrapTable('getSelections');
if (selectedRows.length != 1) {
alert("请选中一项进行修改!");
return;
}
var row = selectedRows[0];
if (row.redisVersion == 'redis-sentinel') {
alert("该节点为sentinel节点,无法配置主从关系!");
return;
}
var allRows = $('#redisServerTable').bootstrapTable('getData');
var isWithSentinel = false;
for (i = 0; i < allRows.length; i++) {
if (allRows[i].redisVersion == 'redis-sentinel') {
isWithSentinel = true;
break;
}
}
if (isWithSentinel == true) {
if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) {
return;
}
}
$('#slaveOf_serverInstanceId').val(row.id);
$('#slaveOf_clusterId').val(row.clusterId);
$('#slaveInfo').val(row.ip + ":" + row.port);
$('#slaveOfModal').modal('show');
}
function configSlaveOf() {
var confirmMsg =
"您确认按照如下配置调整主从关系吗?" + "\r" +
"从节点:" + $('#slaveInfo').val() + "\r";
var type = $("input[type='radio'][name='slaveof_type']:checked").val();
if (type == 'OTHER_INST') {
if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) {
alert("请输入正确的主节点HOST、PORT信息");
return;
}
var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val();
if (masterURI == $('#slaveInfo').val()) {
alert("主从节点信息相同,请调整主节点信息");
return;
}
confirmMsg +=
"主节点:" + masterURI + "\r" +
"主节点访问密码:" + $('#masterPassword').val();
} else {
confirmMsg += "主节点: NO ONE";
}
if (!confirm(confirmMsg)) {
return;
}
var slaveOfInfo = {};
var type;
if (type == 'OTHER_INST') {
slaveOfInfo.ip = $("#masterHost").val();
slaveOfInfo.port = $("#masterPort").val();
slaveOfInfo.password = $("#masterPassword").val();
type = "PATCH";
} else {
type = "DELETE";
}
// blockUI first
blockUI("正在保存配置");
var table = $('#redisServerTable');
table.bootstrapTable('showLoading');
$.ajax({
type: type,
url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(),
data: JSON.stringify(slaveOfInfo),
contentType: 'application/json',
success: function (response) {
alert("配置主从关系成功!");
table.bootstrapTable('refresh');
},
error: function (e) {
alert("配置主从关系失败!");
},
complete: function (e) {
table.bootstrapTable('hideLoading');
unblockUI();
}
});
$('#slaveOfModal').modal('hide');
}
function centerModals() {
$('.modal').each(function (i) {
var $clone = $( | ction (row) {
if (row["status"].toLowerCase().indexOf(command) != -1) {
hasError = true;
return;
}
return row.id;
});
if (hasError) {
alert("无法重复操作已开启或停止的服务器!");
return;
}
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
blockUI("正在执行操作");
$.ajax({ | identifier_body |
cluster_instances.js | 节点</b>";
}
if ("SLAVE" == value) {
if (row.slaveof) {
return "└";
}
}
return "";
}
function instanceStatusFormatter(value) {
if ("STARTED" == value) {
return "运行中";
}
if ("STOPPED" == value) {
return "已停止";
}
return "未运行";
}
function instanceTypeFormatter(value) {
if (value.indexOf("SENTINEL") != -1) {
return "sentinel节点";
} else {
return "redis节点";
}
}
function dateTimeFormatter(value) {
return new Date(value).format("yyyy-MM-dd HH:mm:ss");
}
// 存储当前行的样式,用于区分每组主从
var classIndex = 2;
function rowStyle(row, index) {
var classes = ['active', 'success', 'info', 'warning', 'danger'];
if (row.status == "IDLE") {
return {
classes: 'info'
};
}
if (row.status == "STARTED") {
if (row.role == 'MASTER') {
// success info两个来回切换
classIndex = 3 - classIndex;
}
return {
classes: classes[classIndex]
};
}
if (row.status == "STOPPED") {
return {
classes: 'danger'
};
}
return {};
}
function monitorStatus() {
var table = $('#redisServerTable');
var data = table.bootstrapTable('getData');
if (data.length > 0) {
$.ajax({
type: "PATCH",
url: "/clusters/" + data[0].clusterId + "/instances",
complete: function(XMLHttpRequest, textStatus) {
switch(XMLHttpRequest.status) {
case 205:
table.bootstrapTable('refresh');
break;
default:
break;
}
}
});
}
}
function startRedisServer() {
sendSelectedServerInfo('startup', '启动');
}
function stopRedisServer() {
sendSelectedServerInfo('shutdown', '停止');
}
function sendSelectedServerInfo(command, commandTip) {
if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) {
return;
}
var table = $('#redisServerTable');
var hasError = false;
var servers = $.map(table.bootstrapTable('getSelections'), function (row) {
if (row["status"].toLowerCase().indexOf(command) != -1) {
hasError = true;
return;
}
return row.id;
});
if (hasError) {
alert("无法重复操作已开启或停止的服务器!");
return;
}
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
blockUI("正在执行操作");
$.ajax({
type: "PATCH",
url: "/" + command + "/instances",
data: JSON.stringify(servers),
contentType: "application/json",
success: function () {
table.bootstrapTable('refresh');
unblockUI();
}
});
}
function delClusterNodes() {
var table = $('#redisServerTable');
var servers = $.map(table.bootstrapTable('getSelections'), function (row) {
return hierarchicalRow(row);
});
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
;
if (!confirm("您确认进行删除吗?")) {
return;
}
if (!checkWhetherAllowedRemove(servers)) {
return;
}
table.bootstrapTable('showLoading');
$.ajax({
type: "POST",
url: "redis/cluster/delClusterNodes",
data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
success: function (response) {
table.bootstrapTable('hideLoading');
table.bootstrapTable('refresh', {data: response});
}
});
}
function checkWhetherAllowedRemove(servers) {
var result = false;
$.ajax({
type: "POST",
url: "redis/cluster/getNodesInfo", | success: function (data) {
if (!data || data.length == 0) {
alert("未找到匹配的节点,请重试或检查主机状态!");
return;
}
var msg = "";
for (var i = 0; i < data.length; i++) {
node = data[i];
if (node.slotRanges.length) {
msg += "[" + node.ip + ":" + node.port + "],";
}
}
if (msg != "") {
msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!";
alert(msg);
} else {
result = true;
}
},
error: function (e) {
alert("检测主机状态失败!");
}
});
return result;
}
/**
* 主从切换
*/
function failover() {
var table = $('#redisServerTable');
var servers = table.bootstrapTable('getSelections');
if (servers.length != 1) {
alert("您只能选取一个节点执行此操作!");
return;
}
if (servers[0].role != 'SLAVE') {
alert("此操作只能在slave节点上进行,请重新选取slave节点!");
return;
}
if (!confirm("确定执行主从切换操作?")) {
return;
}
blockUI("正在执行操作");
$.ajax({
type: "PATCH",
url: "/failover/instances/" + servers[0].id,
success: function (response) {
table.bootstrapTable('refresh');
},
error: function (e) {
alert(e.responseJSON.data);
},
complete: function(e) {
unblockUI();
}
});
}
function openSlaveOfModal() {
var selectedRows = $('#redisServerTable').bootstrapTable('getSelections');
if (selectedRows.length != 1) {
alert("请选中一项进行修改!");
return;
}
var row = selectedRows[0];
if (row.redisVersion == 'redis-sentinel') {
alert("该节点为sentinel节点,无法配置主从关系!");
return;
}
var allRows = $('#redisServerTable').bootstrapTable('getData');
var isWithSentinel = false;
for (i = 0; i < allRows.length; i++) {
if (allRows[i].redisVersion == 'redis-sentinel') {
isWithSentinel = true;
break;
}
}
if (isWithSentinel == true) {
if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) {
return;
}
}
$('#slaveOf_serverInstanceId').val(row.id);
$('#slaveOf_clusterId').val(row.clusterId);
$('#slaveInfo').val(row.ip + ":" + row.port);
$('#slaveOfModal').modal('show');
}
function configSlaveOf() {
var confirmMsg =
"您确认按照如下配置调整主从关系吗?" + "\r" +
"从节点:" + $('#slaveInfo').val() + "\r";
var type = $("input[type='radio'][name='slaveof_type']:checked").val();
if (type == 'OTHER_INST') {
if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) {
alert("请输入正确的主节点HOST、PORT信息");
return;
}
var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val();
if (masterURI == $('#slaveInfo').val()) {
alert("主从节点信息相同,请调整主节点信息");
return;
}
confirmMsg +=
"主节点:" + masterURI + "\r" +
"主节点访问密码:" + $('#masterPassword').val();
} else {
confirmMsg += "主节点: NO ONE";
}
if (!confirm(confirmMsg)) {
return;
}
var slaveOfInfo = {};
var type;
if (type == 'OTHER_INST') {
slaveOfInfo.ip = $("#masterHost").val();
slaveOfInfo.port = $("#masterPort").val();
slaveOfInfo.password = $("#masterPassword").val();
type = "PATCH";
} else {
type = "DELETE";
}
// blockUI first
blockUI("正在保存配置");
var table = $('#redisServerTable');
table.bootstrapTable('showLoading');
$.ajax({
type: type,
url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(),
data: JSON.stringify(slaveOfInfo),
contentType: 'application/json',
success: function (response) {
alert("配置主从关系成功!");
table.bootstrapTable('refresh');
},
error: function (e) {
alert("配置主从关系失败!");
},
complete: function (e) {
table.bootstrapTable('hideLoading');
unblockUI();
}
});
$('#slaveOfModal').modal('hide');
}
function centerModals() {
$('.modal').each(function (i) {
var $clone = $(this | data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
async: false, | random_line_split |
cluster_instances.js | </b>";
}
if ("SLAVE" == value) {
if (row.slaveof) {
return "└";
}
}
return "";
}
function instanceStatusFormatter(value) {
if ("STARTED" == value) {
return "运行中";
}
if ("STOPPED" == value) {
return "已停止";
}
return "未运行";
}
function instanceTypeFormatter(value) {
if (value.indexOf("SENTINEL") != -1) {
return "sentinel节点";
} else {
return "redis节点";
}
}
function dateTimeFormatter(value) {
return new Date(value).format("yyyy-MM-dd HH:mm:ss");
}
// 存储当前行的样式,用于区分每组主从
var classIndex = 2;
function rowStyle(row, index) {
var classes = ['active', 'success', 'info', 'warning', 'danger'];
if (row.status == "IDLE") {
return {
classes: 'info'
};
}
if (row.status == "STARTED") {
if (row.role == 'MASTER') {
// success info两个来回切换
classIndex = 3 - classIndex;
}
return {
classes: classes[classIndex]
};
}
if (row.status == "STOPPED") {
return {
classes: 'danger'
};
}
return {};
}
function monitorStatus() {
var table = $('#redisServerTable');
var data = table.bootstrapTable('getData');
if (data.length > 0) {
$.ajax({
type: "PATCH",
url: "/clusters/" + data[0].clusterId + "/instances",
complete: function(XMLHttpRequest, textStatus) {
switch(XMLHttpRequest.status) {
case 205:
table.bootstrapTable('refresh');
break;
default:
break;
}
}
});
}
}
function startRedisServer() {
sendSelectedServerInfo('startup', '启动');
}
function stopRedisServer() {
sendSelectedServerInfo('shutdown', '停止');
}
function sendSelectedServerInfo(command, commandTip) {
if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) {
return;
}
var table = $('#redisServerTable');
var hasError = false;
var servers = $.map(table.bootstrapTable('getSelections'), function (row) {
if (row["status"].toLowerCase().indexOf(command) != -1) {
hasError = true;
return;
}
return row.id;
});
if (hasError) {
alert("无法重复操作已开启或停止的服务器!");
return;
}
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
blockUI("正在执行操作");
$.ajax({
type: "PATCH",
url: "/" + command + "/instances",
data: JSON.stringify(servers),
contentType: "application/json",
success: function () {
table.bootstrapTable('refresh');
unblockUI();
}
});
}
function delClusterNodes() {
var table = $('#redisServerTable');
var servers = $.map(table.bootstrapTable('getSelections'), function (row) {
return hierarchicalRow(row);
});
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
;
if (!confirm("您确认进行删除吗?")) {
return;
}
if (!checkWhetherAllowedRemove(servers)) {
return;
}
table.bootstrapTable('showLoading');
$.ajax({
type: "POST",
url: "redis/cluster/delClusterNodes",
data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
success: function (response) {
table.bootstrapTable('hideLoading');
table.bootstrapTable('refresh', {data: response});
}
});
}
function checkWhetherAllowedRemove(servers) {
var result = false;
$.ajax({
type: "POST",
url: "redis/cluster/getNodesInfo",
data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
async: false,
success: function (data) {
if (!data | alert("未找到匹配的节点,请重试或检查主机状态!");
return;
}
var msg = "";
for (var i = 0; i < data.length; i++) {
node = data[i];
if (node.slotRanges.length) {
msg += "[" + node.ip + ":" + node.port + "],";
}
}
if (msg != "") {
msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!";
alert(msg);
} else {
result = true;
}
},
error: function (e) {
alert("检测主机状态失败!");
}
});
return result;
}
/**
* 主从切换
*/
function failover() {
var table = $('#redisServerTable');
var servers = table.bootstrapTable('getSelections');
if (servers.length != 1) {
alert("您只能选取一个节点执行此操作!");
return;
}
if (servers[0].role != 'SLAVE') {
alert("此操作只能在slave节点上进行,请重新选取slave节点!");
return;
}
if (!confirm("确定执行主从切换操作?")) {
return;
}
blockUI("正在执行操作");
$.ajax({
type: "PATCH",
url: "/failover/instances/" + servers[0].id,
success: function (response) {
table.bootstrapTable('refresh');
},
error: function (e) {
alert(e.responseJSON.data);
},
complete: function(e) {
unblockUI();
}
});
}
function openSlaveOfModal() {
var selectedRows = $('#redisServerTable').bootstrapTable('getSelections');
if (selectedRows.length != 1) {
alert("请选中一项进行修改!");
return;
}
var row = selectedRows[0];
if (row.redisVersion == 'redis-sentinel') {
alert("该节点为sentinel节点,无法配置主从关系!");
return;
}
var allRows = $('#redisServerTable').bootstrapTable('getData');
var isWithSentinel = false;
for (i = 0; i < allRows.length; i++) {
if (allRows[i].redisVersion == 'redis-sentinel') {
isWithSentinel = true;
break;
}
}
if (isWithSentinel == true) {
if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) {
return;
}
}
$('#slaveOf_serverInstanceId').val(row.id);
$('#slaveOf_clusterId').val(row.clusterId);
$('#slaveInfo').val(row.ip + ":" + row.port);
$('#slaveOfModal').modal('show');
}
function configSlaveOf() {
var confirmMsg =
"您确认按照如下配置调整主从关系吗?" + "\r" +
"从节点:" + $('#slaveInfo').val() + "\r";
var type = $("input[type='radio'][name='slaveof_type']:checked").val();
if (type == 'OTHER_INST') {
if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) {
alert("请输入正确的主节点HOST、PORT信息");
return;
}
var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val();
if (masterURI == $('#slaveInfo').val()) {
alert("主从节点信息相同,请调整主节点信息");
return;
}
confirmMsg +=
"主节点:" + masterURI + "\r" +
"主节点访问密码:" + $('#masterPassword').val();
} else {
confirmMsg += "主节点: NO ONE";
}
if (!confirm(confirmMsg)) {
return;
}
var slaveOfInfo = {};
var type;
if (type == 'OTHER_INST') {
slaveOfInfo.ip = $("#masterHost").val();
slaveOfInfo.port = $("#masterPort").val();
slaveOfInfo.password = $("#masterPassword").val();
type = "PATCH";
} else {
type = "DELETE";
}
// blockUI first
blockUI("正在保存配置");
var table = $('#redisServerTable');
table.bootstrapTable('showLoading');
$.ajax({
type: type,
url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(),
data: JSON.stringify(slaveOfInfo),
contentType: 'application/json',
success: function (response) {
alert("配置主从关系成功!");
table.bootstrapTable('refresh');
},
error: function (e) {
alert("配置主从关系失败!");
},
complete: function (e) {
table.bootstrapTable('hideLoading');
unblockUI();
}
});
$('#slaveOfModal').modal('hide');
}
function centerModals() {
$('.modal').each(function (i) {
var $clone = $( | || data.length == 0) {
| identifier_name |
cluster_instances.js | ) {
if ("MASTER" == value) {
return "<b>主节点</b>";
}
if ("SLAVE" == value) {
if (row.slaveof) {
return "└";
}
}
return "";
}
function instanceStatusFormatter(value) {
if ("STARTED" == value) {
return "运行中";
}
if ("STOPPED" == value) {
return "已停止";
}
return "未运行";
}
function instanceTypeFormatter(value) {
if (value.indexOf("SENTINEL") != -1) {
return "sentinel节点";
} else {
return "redis节点";
}
}
function dateTimeFormatter(value) {
return new Date(value).format("yyyy-MM-dd HH:mm:ss");
}
// 存储当前行的样式,用于区分每组主从
var classIndex = 2;
function rowStyle(row, index) {
var classes = ['active', 'success', 'info', 'warning', 'danger'];
if (row.status == "IDLE") {
return {
classes: 'info'
};
}
if (row.status == "STARTED") {
if (row.role == 'MASTER') {
// success info两个来回切换
classIndex = 3 - classIndex;
}
return {
classes: classes[classIndex]
};
}
if (row.status == "STOPPED") {
return {
classes: 'danger'
};
}
return {};
}
function monitorStatus() {
var table = $('#redisServerTable');
var data = table.bootstrapTable('getData');
if (data.length > 0) {
$.ajax({
type: "PATCH",
url: "/clusters/" + data[0].clusterId + "/instances",
complete: function(XMLHttpRequest, textStatus) {
switch(XMLHttpRequest.status) {
case 205:
table.bootstrapTable('refresh');
break;
default:
break;
}
}
});
}
}
function startRedisServer() {
sendSelectedServerInfo('startup', '启动');
}
function stopRedisServer() {
sendSelectedServerInfo('shutdown', '停止');
}
function sendSelectedServerInfo(command, commandTip) {
if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) {
return;
}
var table = $('#redisServerTable');
var hasError = false;
var servers = $.map(table.bootstrapTable('getSelections'), function (row) {
if (row["status"].toLowerCase().indexOf(command) != -1) {
hasError = true;
return;
}
return row.id;
});
if (hasError) {
alert("无法重复操作已开启或停止的服务器!");
return;
}
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
blockUI("正在执行操作");
$.ajax({
type: "PATCH",
url: "/" + command + "/instances",
data: JSON.stringify(servers),
contentType: "application/json",
success: function () {
table.bootstrapTable('refresh');
unblockUI();
}
});
}
function delClusterNodes() {
var table = $('#redisServerTable');
var servers = $.map(table.bootstrapTable('getSelections'), function (row) {
return hierarchicalRow(row);
});
if (servers.length < 1) {
alert("请至少选择一项!");
return;
}
;
if (!confirm("您确认进行删除吗?")) {
return;
}
if (!checkWhetherAllowedRemove(servers)) {
return;
}
table.bootstrapTable('showLoading');
$.ajax({
type: "POST",
url: "redis/cluster/delClusterNodes",
data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
success: function (response) {
table.bootstrapTable('hideLoading');
table.bootstrapTable('refresh', {data: response});
}
});
}
function checkWhetherAllowedRemove(servers) {
var result = false;
$.ajax({
type: "POST",
url: "redis/cluster/getNodesInfo",
data: JSON.stringify(servers),
dataType: "json",
contentType: "application/json",
async: false,
success: function (data) {
if (!data || data.length == 0) {
alert("未找到匹配的节点,请重试或检查主机状态!");
return;
}
var msg = "";
for (var i = 0; i < data.length; i++) {
node = data[i];
if (node.slotRanges.length) {
msg += "[" + node.ip + ":" + node.port + "],";
}
}
if (msg != "") {
msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!";
alert(msg);
} else {
result = true;
}
},
error: function (e) {
alert("检测主机状态失败!");
}
});
return result;
}
/**
* 主从切换
*/
function failover() {
var table = $('#redisServerTable');
var servers = table.bootstrapTable('getSelections');
if (servers.length != 1) {
alert("您只能选取一个节点执行此操作!");
return;
}
if (servers[0].role != 'SLAVE') {
alert("此操作只能在slave节点上进行,请重新选取slave节点!");
return;
}
if (!confirm("确定执行主从切换操作?")) {
return;
}
blockUI("正在执行操作");
$.ajax({
type: "PATCH",
url: "/failover/instances/" + servers[0].id,
success: function (response) {
table.bootstrapTable('refresh');
},
error: function (e) {
alert(e.responseJSON.data);
},
complete: function(e) {
unblockUI();
}
});
}
function openSlaveOfModal() {
var selectedRows = $('#redisServerTable').bootstrapTable('getSelections');
if (selectedRows.length != 1) {
alert("请选中一项进行修改!");
return;
}
var row = selectedRows[0];
if (row.redisVersion == 'redis-sentinel') {
alert("该节点为sentinel节点,无法配置主从关系!");
return;
}
var allRows = $('#redisServerTable').bootstrapTable('getData');
var isWithSentinel = false;
for (i = 0; i < allRows.length; i++) {
if (allRows[i].redisVersion == 'redis-sentinel') {
isWithSentinel = true;
break;
}
}
if (isWithSentinel == true) {
if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) {
return;
}
}
$('#slaveOf_serverInstanceId').val(row.id);
$('#slaveOf_clusterId').val(row.clusterId);
$('#slaveInfo').val(row.ip + ":" + row.port);
$('#slaveOfModal').modal('show');
}
function configSlaveOf() {
var confirmMsg =
"您确认按照如下配置调整主从关系吗?" + "\r" +
"从节点:" + $('#slaveInfo').val() + "\r";
var type = $("input[type='radio'][name='slaveof_type']:checked").val();
if (type == 'OTHER_INST') {
if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) {
alert("请输入正确的主节点HOST、PORT信息");
return;
}
var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val();
if (masterURI == $('#slaveInfo').val()) {
alert("主从节点信息相同,请调整主节点信息");
return;
}
confirmMsg +=
"主节点:" + masterURI + "\r" +
"主节点访问密码:" + $('#masterPassword').val();
} else {
confirmMsg += "主节点: NO ONE";
}
if (!confirm(confirmMsg)) {
return;
}
var slaveOfInfo = {};
var type;
if (type == 'OTHER_INST') {
slaveOfInfo.ip = $("#masterHost").val();
slaveOfInfo.port = $("#masterPort").val();
slaveOfInfo.password = $("#masterPassword").val();
type = "PATCH";
} else {
type = "DELETE";
}
// blockUI first
blockUI("正在保存配置");
var table = $('#redisServerTable');
table.bootstrapTable('showLoading');
$.ajax({
type: type,
url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(),
data: JSON.stringify(slaveOfInfo),
contentType: 'application/json',
success: function (response) {
alert("配置主从关系成功!");
table.bootstrapTable('refresh');
},
error: function (e) {
alert("配置主从关系失败!");
},
complete: function (e) {
| Password").val('');
}
}
function cacheSizeFormatter(value) {
return value + 'GB';
}
function instanceRoleFormatter(value, row, index, field | conditional_block | |
debug-trace-player.ts | // Tracks all the data slots which have been touched during the current step.
private dirtyMask: boolean[] = [];
// Tracks all the data slots which hold function return values.
private returnValues: boolean[] = [];
// Tracks line numbers that have breakpoints set on them.
private breakpointLines: Set<number> = new Set();
/** Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */
private check(result: boolean): void {
if (!result) {
throw new Error('check failed');
}
}
/** Copies trace info from the JSON number array into a TraceInfo struct. */
private getTraceInfo(position: number): TraceInfo {
this.check(position < this.trace!.trace.length);
this.check(this.trace!.trace[position][0] in TraceOp);
const info: TraceInfo = {
op: this.trace!.trace[position][0] as TraceOp,
data: this.trace!.trace[position].slice(1),
};
return info;
}
/** Resets playback to the start of the trace. Breakpoints are not cleared. */
public reset(trace: DebugTrace | null): void {
const nslots = trace?.slots?.length ?? 0;
const globalStackFrame: StackFrame = {
func: -1,
line: -1,
displayMask: Array<boolean>(nslots).map(() => false),
};
this.trace = trace;
this.cursor = 0;
this.slots = [];
this.stack = [globalStackFrame];
this.dirtyMask = Array<boolean>(nslots).map(() => false);
this.returnValues = Array<boolean>(nslots).map(() => false);
if (trace !== null) {
this.slots = trace.slots.map(
(): Slot => ({
value: 0,
scope: Infinity,
writeTime: 0,
})
);
this.returnValues = trace.slots.map(
(slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0
);
// Build a map holding the number of times each line is reached.
this.lineNumbers.clear();
trace.trace.forEach((_, traceIdx: number) => {
const info: TraceInfo = this.getTraceInfo(traceIdx);
if (info.op === TraceOp.Line) {
const lineNumber = info.data[0];
const lineCount = this.lineNumbers.get(lineNumber) ?? 0;
this.lineNumbers.set(lineNumber, lineCount + 1);
}
});
}
}
/** Advances the simulation to the next Line op. */
public step(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
break;
}
}
}
/**
* Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs.
* Breakpoints will also stop the simulation even if we haven't reached an Exit.
*/
public stepOver(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
const canEscapeFromThisStackDepth =
this.stack.length <= initialStackDepth;
if (this.execute(this.cursor++)) {
if (canEscapeFromThisStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/**
* Advances the simulation until we exit from the current stack frame.
* Breakpoints will also stop the simulation even if we haven't left the stack frame.
*/
public stepOut(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
const hasEscapedFromInitialStackDepth =
this.stack.length < initialStackDepth;
if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/** Advances the simulation until we hit a breakpoint, or the trace completes. */
public run(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
if (this.atBreakpoint()) {
break;
}
}
}
}
/**
* Cleans up temporary state between steps, such as the dirty mask and function return values.
*/
private tidyState(): void {
this.dirtyMask.fill(false);
const stackTop = this.stack[this.stack.length - 1];
this.returnValues.forEach((_, slotIdx: number) => {
stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx];
});
}
/** Returns true if we have reached the end of the trace. */
public traceHasCompleted(): boolean {
return this.trace == null || this.cursor >= this.trace.trace.length;
}
/** Reports the position of the cursor "read head" within the array of trace instructions. */
public getCursor(): number {
return this.cursor;
}
/** Returns true if the current line has a breakpoint set on it. */
public | (): boolean {
return this.breakpointLines.has(this.getCurrentLine());
}
/** Replaces all current breakpoints with a new set of them. */
public setBreakpoints(breakpointLines: Set<number>): void {
this.breakpointLines = breakpointLines;
}
/** Returns the current set of lines which have a breakpoint. */
public getBreakpoints(): Set<number> {
return this.breakpointLines;
}
/** Adds a breakpoint to a line (if one doesn't exist). */
public addBreakpoint(line: number): void {
this.breakpointLines.add(line);
}
/** Removes a breakpoint from a line (if one exists). */
public removeBreakpoint(line: number): void {
this.breakpointLines.delete(line);
}
/** Retrieves the current line. */
public getCurrentLine(): number {
this.check(this.stack.length > 0);
return this.stack[this.stack.length - 1].line;
}
/** Retrieves the current line for a given stack frame. */
public getCurrentLineInStackFrame(stackFrameIndex: number): number {
// The first entry on the stack is the "global" frame before we enter main, so offset our index
// by one to account for it.
++stackFrameIndex;
this.check(stackFrameIndex > 0);
this.check(stackFrameIndex < this.stack.length);
return this.stack[stackFrameIndex].line;
}
/**
* Returns every line number reached inside this debug trace, along with the remaining number of
* times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice.
*/
public getLineNumbersReached(): Map<number, number> {
return this.lineNumbers;
}
/** Returns the call stack as an array of FunctionInfo indices. */
public getCallStack(): number[] {
this.check(this.stack.length > 0);
return this.stack.slice(1).map((frame: StackFrame) => frame.func);
}
/** Returns the size of the call stack. */
public getStackDepth(): number {
this.check(this.stack.length > 0);
return this.stack.length - 1;
}
/** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */
public getSlotComponentSuffix(slotIndex: number): string {
const slot: SlotInfo = this.trace!.slots[slotIndex];
if (slot.rows > 1) {
return `[${Math.floor(slot.index / slot.rows)}][${
slot.index % slot.rows
}]`;
}
if (slot.columns > 1) {
switch (slot.index) {
case 0:
return '.x';
case 1:
return '.y';
case 2:
return '.z';
case 3:
return '.w';
default:
return '[???]';
}
}
return '';
}
/** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */
private interpretValueBits(
slotIdx: number,
valueBits: number
): number | boolean {
const bitArray: Int32Array = new Int32Array(1);
bitArray[0] = valueBits;
switch (this.trace!.slots[slotIdx].kind) {
case NumberKind.Float:
return new Float32Array(bitArray.buffer)[0];
case NumberKind.Unsigned:
return new Uint32Array(bitArray.buffer)[0];
case NumberKind.Boolean:
return valueBits !== 0;
case NumberKind.Signed:
return valueBits;
default:
return valueBits;
}
}
/** Returns a vector of the indices and values of each slot that is enabled in `bits`. */
private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] {
this.check(displayMask.length === this.slots.length);
let vars: VariableData[] = [];
displayMask.forEach((_, slot: number) => {
if (displayMask[slot]) {
const varData: VariableData = {
slotIndex: slot,
dirty: this.dirtyMask[slot],
value: this.interpretValueBits(slot, this.slots[slot].value),
};
vars.push(varData);
}
});
// Order the variable list so that the most recently-written | atBreakpoint | identifier_name |
debug-trace-player.ts | // Tracks all the data slots which have been touched during the current step.
private dirtyMask: boolean[] = [];
// Tracks all the data slots which hold function return values.
private returnValues: boolean[] = [];
// Tracks line numbers that have breakpoints set on them.
private breakpointLines: Set<number> = new Set();
/** Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */
private check(result: boolean): void {
if (!result) {
throw new Error('check failed');
}
}
/** Copies trace info from the JSON number array into a TraceInfo struct. */
private getTraceInfo(position: number): TraceInfo {
this.check(position < this.trace!.trace.length);
this.check(this.trace!.trace[position][0] in TraceOp);
const info: TraceInfo = {
op: this.trace!.trace[position][0] as TraceOp,
data: this.trace!.trace[position].slice(1),
};
return info;
}
/** Resets playback to the start of the trace. Breakpoints are not cleared. */
public reset(trace: DebugTrace | null): void {
const nslots = trace?.slots?.length ?? 0;
const globalStackFrame: StackFrame = {
func: -1,
line: -1,
displayMask: Array<boolean>(nslots).map(() => false),
};
this.trace = trace;
this.cursor = 0;
this.slots = [];
this.stack = [globalStackFrame];
this.dirtyMask = Array<boolean>(nslots).map(() => false);
this.returnValues = Array<boolean>(nslots).map(() => false);
if (trace !== null) {
this.slots = trace.slots.map(
(): Slot => ({
value: 0,
scope: Infinity,
writeTime: 0,
})
);
this.returnValues = trace.slots.map(
(slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0
);
// Build a map holding the number of times each line is reached.
this.lineNumbers.clear();
trace.trace.forEach((_, traceIdx: number) => {
const info: TraceInfo = this.getTraceInfo(traceIdx);
if (info.op === TraceOp.Line) {
const lineNumber = info.data[0];
const lineCount = this.lineNumbers.get(lineNumber) ?? 0;
this.lineNumbers.set(lineNumber, lineCount + 1);
}
});
}
}
/** Advances the simulation to the next Line op. */
public step(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
break;
}
}
}
/**
* Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs.
* Breakpoints will also stop the simulation even if we haven't reached an Exit.
*/
public stepOver(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
const canEscapeFromThisStackDepth =
this.stack.length <= initialStackDepth;
if (this.execute(this.cursor++)) {
if (canEscapeFromThisStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/**
* Advances the simulation until we exit from the current stack frame.
* Breakpoints will also stop the simulation even if we haven't left the stack frame.
*/
public stepOut(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
const hasEscapedFromInitialStackDepth =
this.stack.length < initialStackDepth;
if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/** Advances the simulation until we hit a breakpoint, or the trace completes. */
public run(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
if (this.atBreakpoint()) {
break;
}
}
}
}
/**
* Cleans up temporary state between steps, such as the dirty mask and function return values.
*/
private tidyState(): void {
this.dirtyMask.fill(false);
const stackTop = this.stack[this.stack.length - 1];
this.returnValues.forEach((_, slotIdx: number) => {
stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx];
});
}
/** Returns true if we have reached the end of the trace. */
public traceHasCompleted(): boolean {
return this.trace == null || this.cursor >= this.trace.trace.length;
}
/** Reports the position of the cursor "read head" within the array of trace instructions. */
public getCursor(): number {
return this.cursor;
}
/** Returns true if the current line has a breakpoint set on it. */
public atBreakpoint(): boolean {
return this.breakpointLines.has(this.getCurrentLine());
}
/** Replaces all current breakpoints with a new set of them. */
public setBreakpoints(breakpointLines: Set<number>): void {
this.breakpointLines = breakpointLines;
}
/** Returns the current set of lines which have a breakpoint. */
public getBreakpoints(): Set<number> {
return this.breakpointLines;
}
/** Adds a breakpoint to a line (if one doesn't exist). */
public addBreakpoint(line: number): void {
this.breakpointLines.add(line);
}
/** Removes a breakpoint from a line (if one exists). */
public removeBreakpoint(line: number): void {
this.breakpointLines.delete(line);
}
/** Retrieves the current line. */
public getCurrentLine(): number {
this.check(this.stack.length > 0);
return this.stack[this.stack.length - 1].line;
}
/** Retrieves the current line for a given stack frame. */
public getCurrentLineInStackFrame(stackFrameIndex: number): number {
// The first entry on the stack is the "global" frame before we enter main, so offset our index
// by one to account for it.
++stackFrameIndex;
this.check(stackFrameIndex > 0);
this.check(stackFrameIndex < this.stack.length);
return this.stack[stackFrameIndex].line;
}
/**
* Returns every line number reached inside this debug trace, along with the remaining number of
* times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice.
*/
public getLineNumbersReached(): Map<number, number> {
return this.lineNumbers;
}
/** Returns the call stack as an array of FunctionInfo indices. */
public getCallStack(): number[] {
this.check(this.stack.length > 0);
return this.stack.slice(1).map((frame: StackFrame) => frame.func);
}
/** Returns the size of the call stack. */
public getStackDepth(): number |
/** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */
public getSlotComponentSuffix(slotIndex: number): string {
const slot: SlotInfo = this.trace!.slots[slotIndex];
if (slot.rows > 1) {
return `[${Math.floor(slot.index / slot.rows)}][${
slot.index % slot.rows
}]`;
}
if (slot.columns > 1) {
switch (slot.index) {
case 0:
return '.x';
case 1:
return '.y';
case 2:
return '.z';
case 3:
return '.w';
default:
return '[???]';
}
}
return '';
}
/** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */
private interpretValueBits(
slotIdx: number,
valueBits: number
): number | boolean {
const bitArray: Int32Array = new Int32Array(1);
bitArray[0] = valueBits;
switch (this.trace!.slots[slotIdx].kind) {
case NumberKind.Float:
return new Float32Array(bitArray.buffer)[0];
case NumberKind.Unsigned:
return new Uint32Array(bitArray.buffer)[0];
case NumberKind.Boolean:
return valueBits !== 0;
case NumberKind.Signed:
return valueBits;
default:
return valueBits;
}
}
/** Returns a vector of the indices and values of each slot that is enabled in `bits`. */
private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] {
this.check(displayMask.length === this.slots.length);
let vars: VariableData[] = [];
displayMask.forEach((_, slot: number) => {
if (displayMask[slot]) {
const varData: VariableData = {
slotIndex: slot,
dirty: this.dirtyMask[slot],
value: this.interpretValueBits(slot, this.slots[slot].value),
};
vars.push(varData);
}
});
// Order the variable list so that the most recently | {
this.check(this.stack.length > 0);
return this.stack.length - 1;
} | identifier_body |
debug-trace-player.ts | Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */
private check(result: boolean): void {
if (!result) {
throw new Error('check failed');
}
}
/** Copies trace info from the JSON number array into a TraceInfo struct. */
private getTraceInfo(position: number): TraceInfo {
this.check(position < this.trace!.trace.length);
this.check(this.trace!.trace[position][0] in TraceOp);
const info: TraceInfo = {
op: this.trace!.trace[position][0] as TraceOp,
data: this.trace!.trace[position].slice(1),
};
return info;
}
/** Resets playback to the start of the trace. Breakpoints are not cleared. */
public reset(trace: DebugTrace | null): void {
const nslots = trace?.slots?.length ?? 0;
const globalStackFrame: StackFrame = {
func: -1,
line: -1,
displayMask: Array<boolean>(nslots).map(() => false),
};
this.trace = trace;
this.cursor = 0;
this.slots = [];
this.stack = [globalStackFrame];
this.dirtyMask = Array<boolean>(nslots).map(() => false);
this.returnValues = Array<boolean>(nslots).map(() => false);
if (trace !== null) {
this.slots = trace.slots.map(
(): Slot => ({
value: 0,
scope: Infinity,
writeTime: 0,
})
);
this.returnValues = trace.slots.map(
(slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0
);
// Build a map holding the number of times each line is reached.
this.lineNumbers.clear();
trace.trace.forEach((_, traceIdx: number) => {
const info: TraceInfo = this.getTraceInfo(traceIdx);
if (info.op === TraceOp.Line) {
const lineNumber = info.data[0];
const lineCount = this.lineNumbers.get(lineNumber) ?? 0;
this.lineNumbers.set(lineNumber, lineCount + 1);
}
});
}
}
/** Advances the simulation to the next Line op. */
public step(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
break;
}
}
}
/**
* Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs.
* Breakpoints will also stop the simulation even if we haven't reached an Exit.
*/
public stepOver(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
const canEscapeFromThisStackDepth =
this.stack.length <= initialStackDepth;
if (this.execute(this.cursor++)) {
if (canEscapeFromThisStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/**
* Advances the simulation until we exit from the current stack frame.
* Breakpoints will also stop the simulation even if we haven't left the stack frame.
*/
public stepOut(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
const hasEscapedFromInitialStackDepth =
this.stack.length < initialStackDepth;
if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/** Advances the simulation until we hit a breakpoint, or the trace completes. */
public run(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
if (this.atBreakpoint()) {
break;
}
}
}
}
/**
* Cleans up temporary state between steps, such as the dirty mask and function return values.
*/
private tidyState(): void {
this.dirtyMask.fill(false);
const stackTop = this.stack[this.stack.length - 1];
this.returnValues.forEach((_, slotIdx: number) => {
stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx];
});
}
/** Returns true if we have reached the end of the trace. */
public traceHasCompleted(): boolean {
return this.trace == null || this.cursor >= this.trace.trace.length;
}
/** Reports the position of the cursor "read head" within the array of trace instructions. */
public getCursor(): number {
return this.cursor;
}
/** Returns true if the current line has a breakpoint set on it. */
public atBreakpoint(): boolean {
return this.breakpointLines.has(this.getCurrentLine());
}
/** Replaces all current breakpoints with a new set of them. */
public setBreakpoints(breakpointLines: Set<number>): void {
this.breakpointLines = breakpointLines;
}
/** Returns the current set of lines which have a breakpoint. */
public getBreakpoints(): Set<number> {
return this.breakpointLines;
}
/** Adds a breakpoint to a line (if one doesn't exist). */
public addBreakpoint(line: number): void {
this.breakpointLines.add(line);
}
/** Removes a breakpoint from a line (if one exists). */
public removeBreakpoint(line: number): void {
this.breakpointLines.delete(line);
}
/** Retrieves the current line. */
public getCurrentLine(): number {
this.check(this.stack.length > 0);
return this.stack[this.stack.length - 1].line;
}
/** Retrieves the current line for a given stack frame. */
public getCurrentLineInStackFrame(stackFrameIndex: number): number {
// The first entry on the stack is the "global" frame before we enter main, so offset our index
// by one to account for it.
++stackFrameIndex;
this.check(stackFrameIndex > 0);
this.check(stackFrameIndex < this.stack.length);
return this.stack[stackFrameIndex].line;
}
/**
* Returns every line number reached inside this debug trace, along with the remaining number of
* times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice.
*/
public getLineNumbersReached(): Map<number, number> {
return this.lineNumbers;
}
/** Returns the call stack as an array of FunctionInfo indices. */
public getCallStack(): number[] {
this.check(this.stack.length > 0);
return this.stack.slice(1).map((frame: StackFrame) => frame.func);
}
/** Returns the size of the call stack. */
public getStackDepth(): number {
this.check(this.stack.length > 0);
return this.stack.length - 1;
}
/** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */
public getSlotComponentSuffix(slotIndex: number): string {
const slot: SlotInfo = this.trace!.slots[slotIndex];
if (slot.rows > 1) {
return `[${Math.floor(slot.index / slot.rows)}][${
slot.index % slot.rows
}]`;
}
if (slot.columns > 1) {
switch (slot.index) {
case 0:
return '.x';
case 1:
return '.y';
case 2:
return '.z';
case 3:
return '.w';
default:
return '[???]';
}
}
return '';
}
/** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */
private interpretValueBits(
slotIdx: number,
valueBits: number
): number | boolean {
const bitArray: Int32Array = new Int32Array(1);
bitArray[0] = valueBits;
switch (this.trace!.slots[slotIdx].kind) {
case NumberKind.Float:
return new Float32Array(bitArray.buffer)[0];
case NumberKind.Unsigned:
return new Uint32Array(bitArray.buffer)[0];
case NumberKind.Boolean:
return valueBits !== 0;
case NumberKind.Signed:
return valueBits;
default:
return valueBits;
}
}
/** Returns a vector of the indices and values of each slot that is enabled in `bits`. */
private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] {
this.check(displayMask.length === this.slots.length);
let vars: VariableData[] = [];
displayMask.forEach((_, slot: number) => {
if (displayMask[slot]) {
const varData: VariableData = {
slotIndex: slot,
dirty: this.dirtyMask[slot],
value: this.interpretValueBits(slot, this.slots[slot].value),
};
vars.push(varData);
}
});
// Order the variable list so that the most recently-written variables are shown at the top.
vars = vars.sort((a: VariableData, b: VariableData) => {
// Order by descending write-time.
const delta =
this.slots[b.slotIndex].writeTime - this.slots[a.slotIndex].writeTime;
if (delta !== 0) | {
return delta;
} | conditional_block | |
debug-trace-player.ts | ();
/** Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */
private check(result: boolean): void {
if (!result) {
throw new Error('check failed');
}
}
/** Copies trace info from the JSON number array into a TraceInfo struct. */
private getTraceInfo(position: number): TraceInfo {
this.check(position < this.trace!.trace.length);
this.check(this.trace!.trace[position][0] in TraceOp);
const info: TraceInfo = {
op: this.trace!.trace[position][0] as TraceOp,
data: this.trace!.trace[position].slice(1),
};
return info;
}
/** Resets playback to the start of the trace. Breakpoints are not cleared. */
public reset(trace: DebugTrace | null): void {
const nslots = trace?.slots?.length ?? 0;
const globalStackFrame: StackFrame = {
func: -1,
line: -1,
displayMask: Array<boolean>(nslots).map(() => false),
};
this.trace = trace;
this.cursor = 0;
this.slots = [];
this.stack = [globalStackFrame];
this.dirtyMask = Array<boolean>(nslots).map(() => false);
this.returnValues = Array<boolean>(nslots).map(() => false);
if (trace !== null) {
this.slots = trace.slots.map(
(): Slot => ({
value: 0,
scope: Infinity,
writeTime: 0,
})
);
this.returnValues = trace.slots.map(
(slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0
);
// Build a map holding the number of times each line is reached.
this.lineNumbers.clear();
trace.trace.forEach((_, traceIdx: number) => {
const info: TraceInfo = this.getTraceInfo(traceIdx);
if (info.op === TraceOp.Line) {
const lineNumber = info.data[0];
const lineCount = this.lineNumbers.get(lineNumber) ?? 0;
this.lineNumbers.set(lineNumber, lineCount + 1);
}
});
}
}
/** Advances the simulation to the next Line op. */
public step(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
break;
}
}
}
/**
* Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs.
* Breakpoints will also stop the simulation even if we haven't reached an Exit.
*/
public stepOver(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
const canEscapeFromThisStackDepth =
this.stack.length <= initialStackDepth;
if (this.execute(this.cursor++)) {
if (canEscapeFromThisStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/**
* Advances the simulation until we exit from the current stack frame.
* Breakpoints will also stop the simulation even if we haven't left the stack frame.
*/
public stepOut(): void {
this.tidyState();
const initialStackDepth = this.stack.length;
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
const hasEscapedFromInitialStackDepth =
this.stack.length < initialStackDepth;
if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) {
break;
}
}
}
}
/** Advances the simulation until we hit a breakpoint, or the trace completes. */
public run(): void {
this.tidyState();
while (!this.traceHasCompleted()) {
if (this.execute(this.cursor++)) {
if (this.atBreakpoint()) {
break;
}
}
}
}
/**
* Cleans up temporary state between steps, such as the dirty mask and function return values.
*/
private tidyState(): void {
this.dirtyMask.fill(false);
const stackTop = this.stack[this.stack.length - 1];
this.returnValues.forEach((_, slotIdx: number) => {
stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx];
});
}
/** Returns true if we have reached the end of the trace. */
public traceHasCompleted(): boolean {
return this.trace == null || this.cursor >= this.trace.trace.length;
}
/** Reports the position of the cursor "read head" within the array of trace instructions. */
public getCursor(): number {
return this.cursor;
}
/** Returns true if the current line has a breakpoint set on it. */
public atBreakpoint(): boolean {
return this.breakpointLines.has(this.getCurrentLine());
}
/** Replaces all current breakpoints with a new set of them. */
public setBreakpoints(breakpointLines: Set<number>): void {
this.breakpointLines = breakpointLines;
}
/** Returns the current set of lines which have a breakpoint. */
public getBreakpoints(): Set<number> {
return this.breakpointLines;
}
/** Adds a breakpoint to a line (if one doesn't exist). */
public addBreakpoint(line: number): void {
this.breakpointLines.add(line);
}
/** Removes a breakpoint from a line (if one exists). */
public removeBreakpoint(line: number): void {
this.breakpointLines.delete(line);
}
/** Retrieves the current line. */
public getCurrentLine(): number {
this.check(this.stack.length > 0);
return this.stack[this.stack.length - 1].line;
}
/** Retrieves the current line for a given stack frame. */
public getCurrentLineInStackFrame(stackFrameIndex: number): number {
// The first entry on the stack is the "global" frame before we enter main, so offset our index
// by one to account for it.
++stackFrameIndex;
this.check(stackFrameIndex > 0);
this.check(stackFrameIndex < this.stack.length);
return this.stack[stackFrameIndex].line;
}
/**
* Returns every line number reached inside this debug trace, along with the remaining number of
* times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice.
*/
public getLineNumbersReached(): Map<number, number> {
return this.lineNumbers;
}
/** Returns the call stack as an array of FunctionInfo indices. */
public getCallStack(): number[] {
this.check(this.stack.length > 0);
return this.stack.slice(1).map((frame: StackFrame) => frame.func);
}
/** Returns the size of the call stack. */
public getStackDepth(): number {
this.check(this.stack.length > 0);
return this.stack.length - 1;
}
/** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */
public getSlotComponentSuffix(slotIndex: number): string {
const slot: SlotInfo = this.trace!.slots[slotIndex];
if (slot.rows > 1) {
return `[${Math.floor(slot.index / slot.rows)}][${
slot.index % slot.rows
}]`;
}
if (slot.columns > 1) {
switch (slot.index) {
case 0:
return '.x';
case 1:
return '.y';
case 2:
return '.z';
case 3:
return '.w';
default:
return '[???]';
}
}
return '';
}
/** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */
private interpretValueBits(
slotIdx: number,
valueBits: number
): number | boolean {
const bitArray: Int32Array = new Int32Array(1);
bitArray[0] = valueBits;
switch (this.trace!.slots[slotIdx].kind) {
case NumberKind.Float:
return new Float32Array(bitArray.buffer)[0];
case NumberKind.Unsigned:
return new Uint32Array(bitArray.buffer)[0];
case NumberKind.Boolean:
return valueBits !== 0;
case NumberKind.Signed:
return valueBits;
default:
return valueBits;
}
}
/** Returns a vector of the indices and values of each slot that is enabled in `bits`. */
private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] {
this.check(displayMask.length === this.slots.length);
let vars: VariableData[] = [];
displayMask.forEach((_, slot: number) => {
if (displayMask[slot]) {
const varData: VariableData = {
slotIndex: slot,
dirty: this.dirtyMask[slot],
value: this.interpretValueBits(slot, this.slots[slot].value),
};
vars.push(varData);
}
});
// Order the variable list so that the most recently-written variables are shown at the top.
vars = vars.sort((a: VariableData, b: VariableData) => {
// Order by descending write-time.
const delta =
this.slots[b.slotIndex].writeTime - this.slots[a.slotIndex].writeTime; | if (delta !== 0) {
return delta; | random_line_split | |
lstm_predictor.py |
def add_input_layer(self,):
l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size)
# Ws (in_size, cell_size)
Ws_in = self._weight_variable([self.input_size, self.cell_size])
# bs (cell_size, )
bs_in = self._bias_variable([self.cell_size,])
# l_in_y = (batch * n_steps, cell_size)
with tf.name_scope('Wx_plus_b'):
l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
# reshape l_in_y ==> (batch, n_steps, cell_size)
self.l_in_y = tf.reshape(l_in_y, [self.batch_size, -1, self.cell_size], name='2_3D')
def add_cell(self):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)
# lstm_cell = tf.contrib.rnn.MultiRNNCell(
# [lstm_cell() for _ in range(3)], state_is_tuple=True)
with tf.name_scope('initial_state'):
self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
def add_output_layer(self):
# shape = (batch * steps, cell_size)
l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
Ws_out = self._weight_variable([self.cell_size, self.output_size])
bs_out = self._bias_variable([self.output_size, ])
# shape = (batch * steps, output_size)
with tf.name_scope('Wx_plus_b'):
outputs = tf.matmul(l_out_x, Ws_out) + bs_out
self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size])
def compute_cost(self):
if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred, [-1], name='reshape_pred')],
[tf.reshape(self.ys, [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
elif self.x_vx_mode == 'x_vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,0], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)+tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
else:
pass
with tf.name_scope('average_cost'):
self.cost = tf.div(
tf.reduce_sum(losses, name='losses_sum'),
self.batch_size,
name='average_cost')
tf.summary.scalar('cost', self.cost)
@staticmethod
def ms_error(labels, logits):
# return tf.reduce_sum(tf.square(tf.subtract(labels, logits)))
return tf.square(tf.subtract(labels, logits))
def _weight_variable(self, shape, name='weights'):
initializer = tf.random_normal_initializer(mean=0., stddev=1.,)
return tf.get_variable(shape=shape, initializer=initializer, name=name)
def _bias_variable(self, shape, name='biases'):
initializer = tf.constant_initializer(0.1)
return tf.get_variable(name=name, shape=shape, initializer=initializer)
class LSTMPredictor(BasePredictor):
def __init__(self, args):
BasePredictor.__init__(self, args)
self.x_vx_mode = args.x_vx_mode
self.lstm_predictor_model = args.lstm_predictor_model
self.Tx = 50 #args.n_steps
self.M = 100 #args.batch_size
self.n_a = 16
self.lr = args.learning_rate
self.WIDTH = 500.
self.HIGHT = 500.
self.bboxes = []
self.samples = []
self.x_all = []
self.vx_all = []
if self.x_vx_mode == 'x':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'vx':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'x_vx':
self.n_x = 3
self.n_y = 2
else:
pass
self.build_model()
def get_test_batch(self, X_test, batch_size):
X_test = np.asarray(X_test)
X_test_batch = np.tile(X_test, (batch_size, 1, 1))
# print('X_test_batch shape:',X_test_batch.shape)
return X_test_batch
def extract_bbox(self, b):
'''
"top": 597.832580566406,
"right": 880.870239257812,
"bot": 739.836364746094,
"left": 686.165344238281
'''
# h = (b['bot'] - b['top'] + 1.) / self.HIGHT
# w = (b['right'] - b['left'] + 1.) / self.WIDTH
h = (b[3] - b[1] + 1.) / self.HIGHT
w = (b[2] - b[0] + 1.) / self.WIDTH
area = h * w
# x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot'])
# return [h, w, area, x]
# return [h, w, area, area_dao]
return [h, w, area]
def extract_sample(self, filename, time_file):
with open(filename) as fin:
gts = json.loads(fin.read())['frame_data']
# t_samples = [extract_bbox(e['ref_bbox'])] for e in gts]
t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts]
# t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v
t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v
times = []
with open(time_file, 'r') as fin:
for line in fin.readlines():
times.append(float(line))
# add vx as new feature
tvx = []
for i in range(len(times)):
if i == 0:
tvx.append(0)
else:
tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1]))
# tvx[0] = tvx[1]
# for i in range(len(times)):
# t_samples[i].append(tvx[i])
return t_samples, t_targets
def build_model(self):
config = tf.ConfigProto(allow_soft_placement=True)
g2 = tf.Graph()
self.sess2 = tf.Session(config=config, graph=g2)
with self.sess2.as_default():
with g2.as_default():
LR = tf.Variable(self | self.n_steps = n_steps
self.input_size = input_size
self.output_size = output_size
self.cell_size = cell_size
self.batch_size = batch_size
self.learning_rate = LR
self.x_vx_mode = x_vx_mode
with tf.name_scope('inputs'):
self.xs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='xs')
self.ys = tf.placeholder(tf.float32, [batch_size, None, output_size], name='ys')
with tf.variable_scope('in_hidden'):
self.add_input_layer()
with tf.variable_scope('LSTM_cell'):
self.add_cell()
with tf.variable_scope('out_hidden'):
self.add_output_layer()
with tf.name_scope('cost'):
self.compute_cost()
with tf.name_scope('train'):
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost) | identifier_body | |
lstm_predictor.py | (tf.float32, [batch_size, None, input_size], name='xs')
self.ys = tf.placeholder(tf.float32, [batch_size, None, output_size], name='ys')
with tf.variable_scope('in_hidden'):
self.add_input_layer()
with tf.variable_scope('LSTM_cell'):
self.add_cell()
with tf.variable_scope('out_hidden'):
self.add_output_layer()
with tf.name_scope('cost'):
self.compute_cost()
with tf.name_scope('train'):
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost)
def add_input_layer(self,):
l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size)
# Ws (in_size, cell_size)
Ws_in = self._weight_variable([self.input_size, self.cell_size])
# bs (cell_size, )
bs_in = self._bias_variable([self.cell_size,])
# l_in_y = (batch * n_steps, cell_size)
with tf.name_scope('Wx_plus_b'):
l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
# reshape l_in_y ==> (batch, n_steps, cell_size)
self.l_in_y = tf.reshape(l_in_y, [self.batch_size, -1, self.cell_size], name='2_3D')
def add_cell(self):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)
# lstm_cell = tf.contrib.rnn.MultiRNNCell(
# [lstm_cell() for _ in range(3)], state_is_tuple=True)
with tf.name_scope('initial_state'):
self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
def add_output_layer(self):
# shape = (batch * steps, cell_size)
l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
Ws_out = self._weight_variable([self.cell_size, self.output_size])
bs_out = self._bias_variable([self.output_size, ])
# shape = (batch * steps, output_size)
with tf.name_scope('Wx_plus_b'):
outputs = tf.matmul(l_out_x, Ws_out) + bs_out
self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size])
def compute_cost(self):
if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred, [-1], name='reshape_pred')],
[tf.reshape(self.ys, [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
elif self.x_vx_mode == 'x_vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,0], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)+tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
else:
pass
with tf.name_scope('average_cost'):
self.cost = tf.div(
tf.reduce_sum(losses, name='losses_sum'),
self.batch_size,
name='average_cost')
tf.summary.scalar('cost', self.cost)
@staticmethod
def ms_error(labels, logits):
# return tf.reduce_sum(tf.square(tf.subtract(labels, logits)))
return tf.square(tf.subtract(labels, logits))
def _weight_variable(self, shape, name='weights'):
initializer = tf.random_normal_initializer(mean=0., stddev=1.,)
return tf.get_variable(shape=shape, initializer=initializer, name=name)
def _bias_variable(self, shape, name='biases'):
initializer = tf.constant_initializer(0.1)
return tf.get_variable(name=name, shape=shape, initializer=initializer)
class LSTMPredictor(BasePredictor):
def __init__(self, args):
BasePredictor.__init__(self, args)
self.x_vx_mode = args.x_vx_mode
self.lstm_predictor_model = args.lstm_predictor_model
self.Tx = 50 #args.n_steps
self.M = 100 #args.batch_size
self.n_a = 16
self.lr = args.learning_rate
self.WIDTH = 500.
self.HIGHT = 500.
self.bboxes = []
self.samples = []
self.x_all = []
self.vx_all = []
if self.x_vx_mode == 'x':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'vx':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'x_vx':
self.n_x = 3
self.n_y = 2
else:
pass
self.build_model()
def get_test_batch(self, X_test, batch_size):
X_test = np.asarray(X_test)
X_test_batch = np.tile(X_test, (batch_size, 1, 1))
# print('X_test_batch shape:',X_test_batch.shape)
return X_test_batch
def extract_bbox(self, b):
'''
"top": 597.832580566406,
"right": 880.870239257812,
"bot": 739.836364746094,
"left": 686.165344238281
'''
# h = (b['bot'] - b['top'] + 1.) / self.HIGHT
# w = (b['right'] - b['left'] + 1.) / self.WIDTH
h = (b[3] - b[1] + 1.) / self.HIGHT | # return [h, w, area, x]
# return [h, w, area, area_dao]
return [h, w, area]
def extract_sample(self, filename, time_file):
with open(filename) as fin:
gts = json.loads(fin.read())['frame_data']
# t_samples = [extract_bbox(e['ref_bbox'])] for e in gts]
t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts]
# t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v
t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v
times = []
with open(time_file, 'r') as fin:
for line in fin.readlines():
times.append(float(line))
# add vx as new feature
tvx = []
for i in range(len(times)):
if i == 0:
tvx.append(0)
else:
tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1]))
# tvx[0] = tvx[1]
# for i in range(len(times)):
# t_samples[i].append(tvx[i])
return t_samples, t_targets
def build_model(self):
config = tf.ConfigProto(allow_soft_placement=True)
g2 = tf.Graph()
self.sess2 = tf.Session(config=config, graph=g2)
with self.sess2.as_default():
with g2.as_default():
LR = tf.Variable(self.lr, trainable=False)
self.model = LSTMRNN(self.Tx, self.n_x, self.n_y, self.n_a, self.M, LR, self.x_vx_mode)
saver = tf.train.Saver()
saved_path = os.path.join(self.lstm_predictor_model,self.x_vx_mode,self.x_vx_mode+'_lstm')
| w = (b[2] - b[0] + 1.) / self.WIDTH
area = h * w
# x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot'])
| random_line_split |
lstm_predictor.py | )
with tf.name_scope('initial_state'):
self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
def add_output_layer(self):
# shape = (batch * steps, cell_size)
l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
Ws_out = self._weight_variable([self.cell_size, self.output_size])
bs_out = self._bias_variable([self.output_size, ])
# shape = (batch * steps, output_size)
with tf.name_scope('Wx_plus_b'):
outputs = tf.matmul(l_out_x, Ws_out) + bs_out
self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size])
def compute_cost(self):
if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred, [-1], name='reshape_pred')],
[tf.reshape(self.ys, [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
elif self.x_vx_mode == 'x_vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,0], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)+tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
else:
pass
with tf.name_scope('average_cost'):
self.cost = tf.div(
tf.reduce_sum(losses, name='losses_sum'),
self.batch_size,
name='average_cost')
tf.summary.scalar('cost', self.cost)
@staticmethod
def ms_error(labels, logits):
# return tf.reduce_sum(tf.square(tf.subtract(labels, logits)))
return tf.square(tf.subtract(labels, logits))
def _weight_variable(self, shape, name='weights'):
initializer = tf.random_normal_initializer(mean=0., stddev=1.,)
return tf.get_variable(shape=shape, initializer=initializer, name=name)
def _bias_variable(self, shape, name='biases'):
initializer = tf.constant_initializer(0.1)
return tf.get_variable(name=name, shape=shape, initializer=initializer)
class LSTMPredictor(BasePredictor):
def __init__(self, args):
BasePredictor.__init__(self, args)
self.x_vx_mode = args.x_vx_mode
self.lstm_predictor_model = args.lstm_predictor_model
self.Tx = 50 #args.n_steps
self.M = 100 #args.batch_size
self.n_a = 16
self.lr = args.learning_rate
self.WIDTH = 500.
self.HIGHT = 500.
self.bboxes = []
self.samples = []
self.x_all = []
self.vx_all = []
if self.x_vx_mode == 'x':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'vx':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'x_vx':
self.n_x = 3
self.n_y = 2
else:
pass
self.build_model()
def get_test_batch(self, X_test, batch_size):
X_test = np.asarray(X_test)
X_test_batch = np.tile(X_test, (batch_size, 1, 1))
# print('X_test_batch shape:',X_test_batch.shape)
return X_test_batch
def extract_bbox(self, b):
'''
"top": 597.832580566406,
"right": 880.870239257812,
"bot": 739.836364746094,
"left": 686.165344238281
'''
# h = (b['bot'] - b['top'] + 1.) / self.HIGHT
# w = (b['right'] - b['left'] + 1.) / self.WIDTH
h = (b[3] - b[1] + 1.) / self.HIGHT
w = (b[2] - b[0] + 1.) / self.WIDTH
area = h * w
# x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot'])
# return [h, w, area, x]
# return [h, w, area, area_dao]
return [h, w, area]
def extract_sample(self, filename, time_file):
with open(filename) as fin:
gts = json.loads(fin.read())['frame_data']
# t_samples = [extract_bbox(e['ref_bbox'])] for e in gts]
t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts]
# t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v
t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v
times = []
with open(time_file, 'r') as fin:
for line in fin.readlines():
times.append(float(line))
# add vx as new feature
tvx = []
for i in range(len(times)):
if i == 0:
tvx.append(0)
else:
tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1]))
# tvx[0] = tvx[1]
# for i in range(len(times)):
# t_samples[i].append(tvx[i])
return t_samples, t_targets
def build_model(self):
config = tf.ConfigProto(allow_soft_placement=True)
g2 = tf.Graph()
self.sess2 = tf.Session(config=config, graph=g2)
with self.sess2.as_default():
with g2.as_default():
LR = tf.Variable(self.lr, trainable=False)
self.model = LSTMRNN(self.Tx, self.n_x, self.n_y, self.n_a, self.M, LR, self.x_vx_mode)
saver = tf.train.Saver()
saved_path = os.path.join(self.lstm_predictor_model,self.x_vx_mode,self.x_vx_mode+'_lstm')
saver.restore(self.sess2, saved_path+'_20000')
def predict(self, bbox, time, fid):
bbox = bbox[:4]
self.bboxes.append(bbox)
self.samples.append(self.extract_bbox(bbox))
test_batch = self.get_test_batch(self.samples, self.M)
feed_dict = {
self.model.xs: test_batch,
}
test_pred = self.sess2.run(self.model.pred, feed_dict=feed_dict).astype(np.float32)
x = test_pred[0,:,0][-1]
vx= test_pred[0,:,1][-1]
# x, vx = self.lstm_process(bbox,fid)
cur_pred = {
'fid': fid,
'vx': float(vx),
'x': float(x),
# 'ref_bbox': {
# 'top': float(bbox[0]), 'left': float(bbox[1]),
# 'bot': float(bbox[2]), 'right': float(bbox[3])
# }
}
self.result.append(cur_pred)
if fid == 500:
for i, x in enumerate(test_pred[0,:,0]):
self.result[i]['x'] = x
for i, vx in enumerate(test_pred[0,:,1]):
self.result[i]['vx'] = vx
return cur_pred
def to_json(self, filename):
print('Save prediction to ', filename)
with open(filename, 'w') as fout:
data = {'frame_data': self.result}
print('data:', data)
print("Ready save to json...")
# json.dump(data, fout, indent=4, ensure_ascii=False)
json.dump(data, fout)
print("Saved to json.")
def | lstm_process | identifier_name | |
lstm_predictor.py | self.l_in_y = tf.reshape(l_in_y, [self.batch_size, -1, self.cell_size], name='2_3D')
def add_cell(self):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)
# lstm_cell = tf.contrib.rnn.MultiRNNCell(
# [lstm_cell() for _ in range(3)], state_is_tuple=True)
with tf.name_scope('initial_state'):
self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
def add_output_layer(self):
# shape = (batch * steps, cell_size)
l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
Ws_out = self._weight_variable([self.cell_size, self.output_size])
bs_out = self._bias_variable([self.output_size, ])
# shape = (batch * steps, output_size)
with tf.name_scope('Wx_plus_b'):
outputs = tf.matmul(l_out_x, Ws_out) + bs_out
self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size])
def compute_cost(self):
if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred, [-1], name='reshape_pred')],
[tf.reshape(self.ys, [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
elif self.x_vx_mode == 'x_vx':
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,0], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)+tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')],
[tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
else:
pass
with tf.name_scope('average_cost'):
self.cost = tf.div(
tf.reduce_sum(losses, name='losses_sum'),
self.batch_size,
name='average_cost')
tf.summary.scalar('cost', self.cost)
@staticmethod
def ms_error(labels, logits):
# return tf.reduce_sum(tf.square(tf.subtract(labels, logits)))
return tf.square(tf.subtract(labels, logits))
def _weight_variable(self, shape, name='weights'):
initializer = tf.random_normal_initializer(mean=0., stddev=1.,)
return tf.get_variable(shape=shape, initializer=initializer, name=name)
def _bias_variable(self, shape, name='biases'):
initializer = tf.constant_initializer(0.1)
return tf.get_variable(name=name, shape=shape, initializer=initializer)
class LSTMPredictor(BasePredictor):
def __init__(self, args):
BasePredictor.__init__(self, args)
self.x_vx_mode = args.x_vx_mode
self.lstm_predictor_model = args.lstm_predictor_model
self.Tx = 50 #args.n_steps
self.M = 100 #args.batch_size
self.n_a = 16
self.lr = args.learning_rate
self.WIDTH = 500.
self.HIGHT = 500.
self.bboxes = []
self.samples = []
self.x_all = []
self.vx_all = []
if self.x_vx_mode == 'x':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'vx':
self.n_x = 3
self.n_y = 1
elif self.x_vx_mode == 'x_vx':
self.n_x = 3
self.n_y = 2
else:
pass
self.build_model()
def get_test_batch(self, X_test, batch_size):
X_test = np.asarray(X_test)
X_test_batch = np.tile(X_test, (batch_size, 1, 1))
# print('X_test_batch shape:',X_test_batch.shape)
return X_test_batch
def extract_bbox(self, b):
'''
"top": 597.832580566406,
"right": 880.870239257812,
"bot": 739.836364746094,
"left": 686.165344238281
'''
# h = (b['bot'] - b['top'] + 1.) / self.HIGHT
# w = (b['right'] - b['left'] + 1.) / self.WIDTH
h = (b[3] - b[1] + 1.) / self.HIGHT
w = (b[2] - b[0] + 1.) / self.WIDTH
area = h * w
# x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot'])
# return [h, w, area, x]
# return [h, w, area, area_dao]
return [h, w, area]
def extract_sample(self, filename, time_file):
with open(filename) as fin:
gts = json.loads(fin.read())['frame_data']
# t_samples = [extract_bbox(e['ref_bbox'])] for e in gts]
t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts]
# t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v
t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v
times = []
with open(time_file, 'r') as fin:
for line in fin.readlines():
times.append(float(line))
# add vx as new feature
tvx = []
for i in range(len(times)):
if i == 0:
tvx.append(0)
else:
tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1]))
# tvx[0] = tvx[1]
# for i in range(len(times)):
# t_samples[i].append(tvx[i])
return t_samples, t_targets
def build_model(self):
config = tf.ConfigProto(allow_soft_placement=True)
g2 = tf.Graph()
self.sess2 = tf.Session(config=config, graph=g2)
with self.sess2.as_default():
with g2.as_default():
LR = tf.Variable(self.lr, trainable=False)
self.model = LSTMRNN(self.Tx, self.n_x, self.n_y, self.n_a, self.M, LR, self.x_vx_mode)
saver = tf.train.Saver()
saved_path = os.path.join(self.lstm_predictor_model,self.x_vx_mode,self.x_vx_mode+'_lstm')
saver.restore(self.sess2, saved_path+'_20000')
def predict(self, bbox, time, fid):
bbox = bbox[:4]
self.bboxes.append(bbox)
self.samples.append(self.extract_bbox(bbox))
test_batch = self.get_test_batch(self.samples, self.M)
feed_dict = {
self.model.xs: test_batch,
}
test_pred = self.sess2.run(self.model.pred, feed_dict=feed_dict).astype(np.float32)
x = test_pred[0,:,0][-1]
vx= test_pred[0,:,1][-1]
# x, vx = self.lstm_process(bbox,fid)
cur_pred = {
'fid': fid,
'vx': float(vx),
'x': float(x),
# 'ref_bbox': {
# 'top': float(bbox[0]), 'left': float(bbox[1]),
# 'bot': float(bbox[2]), 'right': float(bbox[3])
# }
}
self.result.append(cur_pred)
if fid == 500:
for i, x in enumerate(test_pred[0,:,0]):
self.result[i]['x'] = x
for i, vx in enumerate(test_pred[0,:,1]):
| self.result[i]['vx'] = vx | conditional_block | |
credentials.rs | 8, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4,
162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123,
231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60,
145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137,
70, 139, 79, 193, 26, 18, 182, 150,
];
assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]);
}
impl Credentials {
/// Create a [`Credentials`] object by parsing a google-service-account json string
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json" and
/// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds
/// the file content during compile time. This avoids and http or io calls.
///
/// ```
/// use firestore_db_and_auth::{Credentials};
/// use firestore_db_and_auth::jwt::JWKSet;
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
///
/// You need two JWKS files for this crate to work:
/// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com
/// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email}
pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> {
let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Create a [`Credentials`] object by reading and parsing a google-service-account json file.
///
/// This is a convenience method, that reads in the given credentials file and acts otherwise the same as
/// the [`Credentials::new`] method.
pub fn from_file(credential_file: &str) -> Result<Self, Error> {
let f = BufReader::new(File::open(credential_file)?);
let mut credentials: Credentials = serde_json::from_reader(f)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Adds public-key JWKs to a credentials instance and returns it.
///
/// This method will also verify that the given JWKs files allow verification of Google access tokens.
/// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`].
pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> {
self.add_jwks_public_keys(jwks);
self.verify()?;
Ok(self)
}
/// The public keys to verify generated tokens will be downloaded, for the given service account as well as
/// for "securetoken@system.gserviceaccount.com".
/// Do not use this option if additional downloads are not desired,
/// for example in cloud functions that require fast cold boot start times.
///
/// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on.
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json".
///
/// ```no_run
/// use firestore_db_and_auth::{Credentials};
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .download_jwkset()?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
pub fn download_jwkset(mut self) -> Result<Credentials, Error> {
self.download_google_jwks()?;
self.verify()?;
Ok(self)
}
/// Verifies that creating access tokens is possible with the given credentials and public keys.
/// Returns an empty result type on success.
pub fn verify(&self) -> Result<(), Error> {
let access_token = create_jwt_encoded(
&self,
Some(["admin"].iter()),
Duration::hours(1),
Some(self.client_id.clone()),
None,
JWT_AUDIENCE_IDENTITY,
)?;
verify_access_token(&self, &access_token)?;
Ok(())
}
/// Find the secret in the jwt set that matches the given key id, if any.
/// Used for jws validation
pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> {
self.keys.pub_key.get(kid).and_then(|f| Some(f.clone()))
}
/// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens.
///
/// Example:
///
/// ```
/// use firestore_db_and_auth::credentials::Credentials;
/// use firestore_db_and_auth::JWKSet;
///
/// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?;
/// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?);
/// c.compute_secret()?;
/// c.verify()?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) {
for entry in jwkset.keys.iter() {
if !entry.headers.key_id.is_some() {
continue;
}
let key_id = entry.headers.key_id.as_ref().unwrap().to_owned();
self.keys
.pub_key
.insert(key_id, Arc::new(entry.ne.jws_public_key_secret()));
}
}
/// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys,
/// this method will download one for your google service account and one for the oauth related
/// securetoken@system.gserviceaccount.com service account.
pub fn download_google_jwks(&mut self) -> Result<(), Error> {
let jwks = download_google_jwks(&self.client_email)?;
self.add_jwks_public_keys(&JWKSet::new(&jwks)?);
let jwks = download_google_jwks("securetoken@system.gserviceaccount.com")?;
self.add_jwks_public_keys(&JWKSet::new(&jwks)?);
Ok(())
}
/// Compute the Rsa keypair by using the private_key of the credentials file.
/// You must call this if you have manually created a credentials object.
///
/// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`].
pub fn compute_secret(&mut self) -> Result<(), Error> {
use biscuit::jws::Secret;
use ring::signature;
let vec = pem_to_der(&self.private_key)?;
let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?;
self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair))));
Ok(())
}
}
#[doc(hidden)]
#[allow(dead_code)]
pub fn doctest_credentials() -> Credentials {
let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap();
Credentials::new(include_str!("../tests/service-account-test.json"))
.expect("Failed to deserialize credentials")
.with_jwkset(&jwk_list)
.expect("JWK public keys verification failed")
}
#[test]
fn | deserialize_credentials | identifier_name | |
credentials.rs | #[derive(Serialize, Deserialize, Default, Clone)]
pub struct Credentials {
pub project_id: String,
pub private_key_id: String,
pub private_key: String,
pub client_email: String,
pub client_id: String,
pub api_key: String,
#[serde(default, skip)]
pub(crate) keys: Keys,
}
/// Converts a PEM (ascii base64) encoded private key into the binary der representation
pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> {
use base64::decode;
let pem_file_contents = pem_file_contents
.find("-----BEGIN")
// Cut off the first BEGIN part
.and_then(|i| Some(&pem_file_contents[i + 10..]))
// Find the trailing ---- after BEGIN and cut that off
.and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..])))
// Cut off -----END
.and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i])));
if pem_file_contents.is_none() {
return Err(FirebaseError::Generic(
"Invalid private key in credentials file. Must be valid PEM.",
));
}
let base64_body = pem_file_contents.unwrap().replace("\n", "");
Ok(decode(&base64_body)
.map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?)
}
#[test]
fn pem_to_der_test() {
const INPUT: &str = r#"-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE
FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo
Amtz4dJQ1YlGi0/BGhK2lg==
-----END PRIVATE KEY-----
"#;
const EXPECTED: [u8; 112] = [
48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4,
162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123,
231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60,
145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137,
70, 139, 79, 193, 26, 18, 182, 150,
];
assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]);
}
impl Credentials {
/// Create a [`Credentials`] object by parsing a google-service-account json string
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json" and
/// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds
/// the file content during compile time. This avoids and http or io calls.
///
/// ```
/// use firestore_db_and_auth::{Credentials};
/// use firestore_db_and_auth::jwt::JWKSet;
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
///
/// You need two JWKS files for this crate to work:
/// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com
/// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email}
pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> {
let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Create a [`Credentials`] object by reading and parsing a google-service-account json file.
///
/// This is a convenience method, that reads in the given credentials file and acts otherwise the same as
/// the [`Credentials::new`] method.
pub fn from_file(credential_file: &str) -> Result<Self, Error> {
let f = BufReader::new(File::open(credential_file)?);
let mut credentials: Credentials = serde_json::from_reader(f)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Adds public-key JWKs to a credentials instance and returns it.
///
/// This method will also verify that the given JWKs files allow verification of Google access tokens.
/// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`].
pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> {
self.add_jwks_public_keys(jwks);
self.verify()?;
Ok(self)
}
/// The public keys to verify generated tokens will be downloaded, for the given service account as well as
/// for "securetoken@system.gserviceaccount.com".
/// Do not use this option if additional downloads are not desired,
/// for example in cloud functions that require fast cold boot start times.
///
/// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on.
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json".
///
/// ```no_run
/// use firestore_db_and_auth::{Credentials};
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .download_jwkset()?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
pub fn download_jwkset(mut self) -> Result<Credentials, Error> {
self.download_google_jwks()?;
self.verify()?;
Ok(self)
}
/// Verifies that creating access tokens is possible with the given credentials and public keys.
/// Returns an empty result type on success.
pub fn verify(&self) -> Result<(), Error> {
let access_token = create_jwt_encoded(
&self,
Some(["admin"].iter()),
Duration::hours(1),
Some(self.client_id.clone()),
None,
JWT_AUDIENCE_IDENTITY,
)?;
verify_access_token(&self, &access_token)?;
Ok(())
}
/// Find the secret in the jwt set that matches the given key id, if any.
/// Used for jws validation
pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> {
self.keys.pub_key.get(kid).and_then(|f| Some(f.clone()))
}
/// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens.
///
/// Example:
///
/// ```
/// use firestore_db_and_auth::credentials::Credentials;
/// use firestore_db_and_auth::JWKSet;
///
/// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?; | /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// | /// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?);
/// c.compute_secret()?;
/// c.verify()?; | random_line_split |
credentials.rs | 132, 131, 123,
231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60,
145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137,
70, 139, 79, 193, 26, 18, 182, 150,
];
assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]);
}
impl Credentials {
/// Create a [`Credentials`] object by parsing a google-service-account json string
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json" and
/// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds
/// the file content during compile time. This avoids and http or io calls.
///
/// ```
/// use firestore_db_and_auth::{Credentials};
/// use firestore_db_and_auth::jwt::JWKSet;
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
///
/// You need two JWKS files for this crate to work:
/// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com
/// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email}
pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> {
let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Create a [`Credentials`] object by reading and parsing a google-service-account json file.
///
/// This is a convenience method, that reads in the given credentials file and acts otherwise the same as
/// the [`Credentials::new`] method.
pub fn from_file(credential_file: &str) -> Result<Self, Error> {
let f = BufReader::new(File::open(credential_file)?);
let mut credentials: Credentials = serde_json::from_reader(f)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Adds public-key JWKs to a credentials instance and returns it.
///
/// This method will also verify that the given JWKs files allow verification of Google access tokens.
/// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`].
pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> {
self.add_jwks_public_keys(jwks);
self.verify()?;
Ok(self)
}
/// The public keys to verify generated tokens will be downloaded, for the given service account as well as
/// for "securetoken@system.gserviceaccount.com".
/// Do not use this option if additional downloads are not desired,
/// for example in cloud functions that require fast cold boot start times.
///
/// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on.
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json".
///
/// ```no_run
/// use firestore_db_and_auth::{Credentials};
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .download_jwkset()?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
pub fn download_jwkset(mut self) -> Result<Credentials, Error> {
self.download_google_jwks()?;
self.verify()?;
Ok(self)
}
/// Verifies that creating access tokens is possible with the given credentials and public keys.
/// Returns an empty result type on success.
pub fn verify(&self) -> Result<(), Error> {
let access_token = create_jwt_encoded(
&self,
Some(["admin"].iter()),
Duration::hours(1),
Some(self.client_id.clone()),
None,
JWT_AUDIENCE_IDENTITY,
)?;
verify_access_token(&self, &access_token)?;
Ok(())
}
/// Find the secret in the jwt set that matches the given key id, if any.
/// Used for jws validation
pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> {
self.keys.pub_key.get(kid).and_then(|f| Some(f.clone()))
}
/// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens.
///
/// Example:
///
/// ```
/// use firestore_db_and_auth::credentials::Credentials;
/// use firestore_db_and_auth::JWKSet;
///
/// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?;
/// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?);
/// c.compute_secret()?;
/// c.verify()?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) {
for entry in jwkset.keys.iter() {
if !entry.headers.key_id.is_some() {
continue;
}
let key_id = entry.headers.key_id.as_ref().unwrap().to_owned();
self.keys
.pub_key
.insert(key_id, Arc::new(entry.ne.jws_public_key_secret()));
}
}
/// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys,
/// this method will download one for your google service account and one for the oauth related
/// securetoken@system.gserviceaccount.com service account.
pub fn download_google_jwks(&mut self) -> Result<(), Error> {
let jwks = download_google_jwks(&self.client_email)?;
self.add_jwks_public_keys(&JWKSet::new(&jwks)?);
let jwks = download_google_jwks("securetoken@system.gserviceaccount.com")?;
self.add_jwks_public_keys(&JWKSet::new(&jwks)?);
Ok(())
}
/// Compute the Rsa keypair by using the private_key of the credentials file.
/// You must call this if you have manually created a credentials object.
///
/// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`].
pub fn compute_secret(&mut self) -> Result<(), Error> {
use biscuit::jws::Secret;
use ring::signature;
let vec = pem_to_der(&self.private_key)?;
let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?;
self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair))));
Ok(())
}
}
#[doc(hidden)]
#[allow(dead_code)]
pub fn doctest_credentials() -> Credentials {
let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap();
Credentials::new(include_str!("../tests/service-account-test.json"))
.expect("Failed to deserialize credentials")
.with_jwkset(&jwk_list)
.expect("JWK public keys verification failed")
}
#[test]
fn deserialize_credentials() | {
let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap();
let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))
.expect("Failed to deserialize credentials")
.with_jwkset(&jwk_list)
.expect("JWK public keys verification failed");
assert_eq!(c.api_key, "api_key");
use std::path::PathBuf;
let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
credential_file.push("tests/service-account-test.json");
let c = Credentials::from_file(credential_file.to_str().unwrap())
.expect("Failed to open credentials file")
.with_jwkset(&jwk_list)
.expect("JWK public keys verification failed");
assert_eq!(c.api_key, "api_key");
} | identifier_body | |
credentials.rs | ,
pub api_key: String,
#[serde(default, skip)]
pub(crate) keys: Keys,
}
/// Converts a PEM (ascii base64) encoded private key into the binary der representation
pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> {
use base64::decode;
let pem_file_contents = pem_file_contents
.find("-----BEGIN")
// Cut off the first BEGIN part
.and_then(|i| Some(&pem_file_contents[i + 10..]))
// Find the trailing ---- after BEGIN and cut that off
.and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..])))
// Cut off -----END
.and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i])));
if pem_file_contents.is_none() {
return Err(FirebaseError::Generic(
"Invalid private key in credentials file. Must be valid PEM.",
));
}
let base64_body = pem_file_contents.unwrap().replace("\n", "");
Ok(decode(&base64_body)
.map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?)
}
#[test]
fn pem_to_der_test() {
const INPUT: &str = r#"-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE
FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo
Amtz4dJQ1YlGi0/BGhK2lg==
-----END PRIVATE KEY-----
"#;
const EXPECTED: [u8; 112] = [
48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4,
162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123,
231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60,
145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137,
70, 139, 79, 193, 26, 18, 182, 150,
];
assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]);
}
impl Credentials {
/// Create a [`Credentials`] object by parsing a google-service-account json string
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json" and
/// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds
/// the file content during compile time. This avoids and http or io calls.
///
/// ```
/// use firestore_db_and_auth::{Credentials};
/// use firestore_db_and_auth::jwt::JWKSet;
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
///
/// You need two JWKS files for this crate to work:
/// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com
/// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email}
pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> {
let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Create a [`Credentials`] object by reading and parsing a google-service-account json file.
///
/// This is a convenience method, that reads in the given credentials file and acts otherwise the same as
/// the [`Credentials::new`] method.
pub fn from_file(credential_file: &str) -> Result<Self, Error> {
let f = BufReader::new(File::open(credential_file)?);
let mut credentials: Credentials = serde_json::from_reader(f)?;
credentials.compute_secret()?;
Ok(credentials)
}
/// Adds public-key JWKs to a credentials instance and returns it.
///
/// This method will also verify that the given JWKs files allow verification of Google access tokens.
/// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`].
pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> {
self.add_jwks_public_keys(jwks);
self.verify()?;
Ok(self)
}
/// The public keys to verify generated tokens will be downloaded, for the given service account as well as
/// for "securetoken@system.gserviceaccount.com".
/// Do not use this option if additional downloads are not desired,
/// for example in cloud functions that require fast cold boot start times.
///
/// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on.
///
/// Example:
///
/// Assuming that your firebase service account credentials file is called "service-account-test.json".
///
/// ```no_run
/// use firestore_db_and_auth::{Credentials};
///
/// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))?
/// .download_jwkset()?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
pub fn download_jwkset(mut self) -> Result<Credentials, Error> {
self.download_google_jwks()?;
self.verify()?;
Ok(self)
}
/// Verifies that creating access tokens is possible with the given credentials and public keys.
/// Returns an empty result type on success.
pub fn verify(&self) -> Result<(), Error> {
let access_token = create_jwt_encoded(
&self,
Some(["admin"].iter()),
Duration::hours(1),
Some(self.client_id.clone()),
None,
JWT_AUDIENCE_IDENTITY,
)?;
verify_access_token(&self, &access_token)?;
Ok(())
}
/// Find the secret in the jwt set that matches the given key id, if any.
/// Used for jws validation
pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> {
self.keys.pub_key.get(kid).and_then(|f| Some(f.clone()))
}
/// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens.
///
/// Example:
///
/// ```
/// use firestore_db_and_auth::credentials::Credentials;
/// use firestore_db_and_auth::JWKSet;
///
/// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?;
/// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?);
/// c.compute_secret()?;
/// c.verify()?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())
/// ```
pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) {
for entry in jwkset.keys.iter() {
if !entry.headers.key_id.is_some() | {
continue;
} | conditional_block | |
zcooldl.py | =resp.json().get('data', {}).get('content'),
offset=page_size * (page - 1))
# 根据用户 ID 或用户名下载
else:
self.user_id = user_id or self.search_id_by_username(username)
self.base_url = urljoin(HOST_PAGE, USER_SUFFIX.format(id=self.user_id))
try:
response = session_request(self.base_url)
except requests.exceptions.ProxyError:
cprint('Cannot connect to proxy.', 'red')
sys.exit(1)
except Exception as e:
cprint(f'Failed to connect to {self.base_url}, {e}', 'red')
sys.exit(1)
soup = BeautifulSoup(markup=response.text, features='html.parser')
try:
author = soup.find(name='div', id='body').get('data-name')
if username and username != author:
cprint(f'Invalid user id:「{user_id}」or username:「{username}」!', 'red')
sys.exit(1)
self.username = author
except Exception:
self.username = username or 'anonymous'
self.directory = dest / safe_filename(self.username)
try:
max_pages_ = int(soup.find(id='laypage_0').find_all(name='a')[-2].text)
except Exception:
max_pages_ = 1
self.max_pages = min(max_pages or 9999, max_pages_)
if self.spec_topics:
topics = ', '.join(self.spec_topics)
elif self.max_topics == 'all':
topics = 'all'
else:
topics = self.max_pages * self.max_topics
print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n'
f'{"User ID".rjust(17)}: {self.user_id}\n'
f'{"Maximum pages".rjust(17)}: {max_pages_}\n'
f'{"Pages to scrapy".rjust(17)}: {self.max_pages}\n'
f'{"Topics to scrapy".rjust(17)}: {topics}\n'
f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n')
self.END_PARSING_TOPICS = False
self.fetch_all(initialized=True if self.collection else False)
def search_id_by_username(self, username):
"""通过用户昵称查找用户 ID。
:param str username: 用户昵称
:return int: 用户 ID
"""
if not username:
cprint('Must give an <user id> or <username>!', 'yellow')
sys.exit(1)
search_url = urljoin(HOST_PAGE, SEARCH_DESIGNER_SUFFIX.format(word=username))
try:
response = session_request(search_url)
except requests.exceptions.ProxyError:
cprint('Cannot connect to proxy.', 'red')
sys.exit(1)
except Exception as e:
cprint(f'Failed to connect to {search_url}, {e}', 'red')
sys.exit(1)
author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info')
if (not author_1st) or (author_1st.get('data-name') != username):
cprint(f'Username「{username}」does not exist!', 'yellow')
sys.exit(1)
return author_1st.get('data-id')
def reload_records(self, file):
"""从本地下载记录里读取下载失败的内容。
:param str file: 下载记录文件的路径。
:return str: 用户名
"""
with open(file, 'r', encoding='utf-8') as f:
for fail in json.loads(f.read()).get('fail'):
scrapy = Scrapy._make(fail.values())
if scrapy.type == 'page':
self.pages.put(scrapy)
elif scrapy.type == 'topic':
self.topics.put(scrapy)
elif scrapy.type == 'image':
self.images.put(scrapy)
return scrapy.author
def generate_pages(self):
"""根据最大下载页数,生成需要爬取主页的任务。"""
for page in range(1, self.max_pages + 1):
suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX
url = urljoin(self.base_url, suffix.format(page=page))
scrapy = Scrapy(type='page', author=self.username, title=page,
objid=None, index=page - 1, url=url)
if scrapy not in self.stat["pages_pass"]:
self.pages.put(scrapy)
def parse_collection_topics(self, topics: List[dict], offset: int = 0):
for idx, topic in enumerate(topics):
new_scrapy = Scrapy(type='topic',
author=topic.get('creatorObj', {}).get('username'),
title=topic.get('title'),
objid=topic.get('id'),
index=offset + idx,
url=topic.get('pageUrl'))
if new_scrapy not in self.stat["topics_pass"]:
self.topics.put(new_scrapy)
self.stat["ntopics"] += 1
def parse_topics(self, scrapy):
"""爬取主页,解析所有 topic,并将爬取主题的任务添加到任务队列。
:param scrapy: 记录任务信息的数据体
:return Scrapy: 记录任务信息的数据体
"""
resp = session_request(scrapy.url)
cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover')
for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]):
title = card.get('title')
if self.spec_topics and (title not in self.spec_topics):
continue
new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title,
objid=None, index=idx, url=card.get('href'))
if new_scrapy not in self.stat["topics_pass"]:
self.topics.put(new_scrapy)
self.stat["ntopics"] += 1
return scrapy
def fetch_topics(self):
"""从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。"""
page_futures = {}
while True:
try:
scrapy = self.pages.get(timeout=Q_TIMEOUT)
page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy
except Empty:
break
except Exception:
continue
for future in as_completed(page_futures):
scrapy = page_futures.get(future)
try:
future.result()
self.stat["pages_pass"].add(scrapy)
except Exception:
self.stat["pages_fail"].add(scrapy)
cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red')
self.END_PARSING_TOPICS = True
def parse_objid(self, url: str, is_collection: bool = False) -> str:
"""根据 topic 页面解析 objid
:param url: topic 或 collection 的 URL
:return: objid
"""
soup = BeautifulSoup(session_request(url).text, 'html.parser')
objid = soup.find('input', id='dataInput').attrs.get('data-objid')
if is_collection:
self._collection_name = soup.find('h2', class_='title-h2').text
user = soup.find(name='span', class_='details-user-avatar')
self.user_id = user.find('div').attrs.get('data-id')
self.username = user.find('a').attrs.get('title')
return objid
def parse_images(self, scrapy):
"""爬取 topic,获得 objid 后直接调用 API,从返回数据里获得图片地址等信息,
并将下载图片的任务添加到任务队列。
:param scrapy: 记录任务信息的数据体
:return Scrapy: 记录任务信息的数据体
"""
objid = scrapy.objid or self.parse_objid(scrapy.url)
resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid)))
data = resp.json().get('data', {})
author = data.get('product', {}).get('creatorObj', {}).get('username')
title = data.get('product', {}).get('title')
objid = data.get('product', {}).get('id')
for img in data.get('allImageList', []):
new_scrapy = Scrapy(type='image', author=author, title=title,
objid=objid, index=img.get('orderNo') or 0, url=img.get('url'))
if new_scrapy not in self.stat["images_pass"]:
self.im | ages.put(new_scrapy)
self.stat["nimages"] += 1
return scrapy
def fetch_images(self):
"""从任务队列中获取要爬取的主题,使用多线程处理得到需要下载的图片。"""
image_futures = {}
while True:
try:
scrapy = self.topics.get(timeout=Q_TIMEOUT)
image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy
except Empty:
if self.END_PARSING_TOPICS:
break
except Exception:
continue
for future in as_completed(image_futures):
scrapy = image_futures.get(future)
try: | identifier_body | |
zcooldl.py | connect to proxy.', 'red')
sys.exit(1)
except Exception as e:
cprint(f'Failed to connect to {search_url}, {e}', 'red')
sys.exit(1)
author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info')
if (not author_1st) or (author_1st.get('data-name') != username):
cprint(f'Username「{username}」does not exist!', 'yellow')
sys.exit(1)
return author_1st.get('data-id')
def reload_records(self, file):
"""从本地下载记录里读取下载失败的内容。
:param str file: 下载记录文件的路径。
:return str: 用户名
"""
with open(file, 'r', encoding='utf-8') as f:
for fail in json.loads(f.read()).get('fail'):
scrapy = Scrapy._make(fail.values())
if scrapy.type == 'page':
self.pages.put(scrapy)
elif scrapy.type == 'topic':
self.topics.put(scrapy)
elif scrapy.type == 'image':
self.images.put(scrapy)
return scrapy.author
def generate_pages(self):
"""根据最大下载页数,生成需要爬取主页的任务。"""
for page in range(1, self.max_pages + 1):
suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX
url = urljoin(self.base_url, suffix.format(page=page))
scrapy = Scrapy(type='page', author=self.username, title=page,
objid=None, index=page - 1, url=url)
if scrapy not in self.stat["pages_pass"]:
self.pages.put(scrapy)
def parse_collection_topics(self, topics: List[dict], offset: int = 0):
for idx, topic in enumerate(topics):
new_scrapy = Scrapy(type='topic',
author=topic.get('creatorObj', {}).get('username'),
title=topic.get('title'),
objid=topic.get('id'),
index=offset + idx,
url=topic.get('pageUrl'))
if new_scrapy not in self.stat["topics_pass"]:
self.topics.put(new_scrapy)
self.stat["ntopics"] += 1
def parse_topics(self, scrapy):
"""爬取主页,解析所有 topic,并将爬取主题的任务添加到任务队列。
:param scrapy: 记录任务信息的数据体
:return Scrapy: 记录任务信息的数据体
"""
resp = session_request(scrapy.url)
cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover')
for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]):
title = card.get('title')
if self.spec_topics and (title not in self.spec_topics):
continue
new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title,
objid=None, index=idx, url=card.get('href'))
if new_scrapy not in self.stat["topics_pass"]:
self.topics.put(new_scrapy)
self.stat["ntopics"] += 1
return scrapy
def fetch_topics(self):
"""从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。"""
page_futures = {}
while True:
try:
scrapy = self.pages.get(timeout=Q_TIMEOUT)
page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy
except Empty:
break
except Exception:
continue
for future in as_completed(page_futures):
scrapy = page_futures.get(future)
try:
future.result()
self.stat["pages_pass"].add(scrapy)
except Exception:
self.stat["pages_fail"].add(scrapy)
cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red')
self.END_PARSING_TOPICS = True
def parse_objid(self, url: str, is_collection: bool = False) -> str:
"""根据 topic 页面解析 objid
:param url: topic 或 collection 的 URL
:return: objid
"""
soup = BeautifulSoup(session_request(url).text, 'html.parser')
objid = soup.find('input', id='dataInput').attrs.get('data-objid')
if is_collection:
self._collection_name = soup.find('h2', class_='title-h2').text
user = soup.find(name='span', class_='details-user-avatar')
self.user_id = user.find('div').attrs.get('data-id')
self.username = user.find('a').attrs.get('title')
return objid
def parse_images(self, scrapy):
"""爬取 topic,获得 objid 后直接调用 API,从返回数据里获得图片地址等信息,
并将下载图片的任务添加到任务队列。
:param scrapy: 记录任务信息的数据体
:return Scrapy: 记录任务信息的数据体
"""
objid = scrapy.objid or self.parse_objid(scrapy.url)
resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid)))
data = resp.json().get('data', {})
author = data.get('product', {}).get('creatorObj', {}).get('username')
title = data.get('product', {}).get('title')
objid = data.get('product', {}).get('id')
for img in data.get('allImageList', []):
new_scrapy = Scrapy(type='image', author=author, title=title,
objid=objid, index=img.get('orderNo') or 0, url=img.get('url'))
if new_scrapy not in self.stat["images_pass"]:
self.images.put(new_scrapy)
self.stat["nimages"] += 1
return scrapy
def fetch_images(self):
"""从任务队列中获取要爬取的主题,使用多线程处理得到需要下载的图片。"""
image_futures = {}
while True:
try:
scrapy = self.topics.get(timeout=Q_TIMEOUT)
image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy
except Empty:
if self.END_PARSING_TOPICS:
break
except Exception:
continue
for future in as_completed(image_futures):
scrapy = image_futures.get(future)
try:
future.result()
self.stat["topics_pass"].add(scrapy)
except Exception:
self.stat["topics_fail"].add(scrapy)
cprint(f'GET topic: {scrapy.title} ({scrapy.url}) failed.', 'red')
def fetch_all(self, initialized: bool = False):
"""同时爬取主页、主题,并更新状态。"""
if not initialized:
self.generate_pages()
fetch_futures = [self.pool.submit(self.fetch_topics),
self.pool.submit(self.fetch_images)]
end_show_fetch = False
t = threading.Thread(target=self.show_fetch_status, kwargs={'end': lambda: end_show_fetch})
t.start()
try:
wait(fetch_futures)
except KeyboardInterrupt:
raise
finally:
end_show_fetch = True
t.join()
def show_fetch_status(self, interval=0.5, end=None):
"""用于后台线程,实现边爬取边显示状态。
:param int interval: 状态更新间隔,秒
:param function end: 用于控制退出线程
"""
while True:
status = 'Fetched Pages: {pages}\tTopics: {topics}\tImages: {images}'.format(
pages=colored(str(self.max_pages).rjust(3), 'blue'),
topics=colored(str(self.stat["ntopics"]).rjust(3), 'blue'),
images=colored(str(self.stat["nimages"]).rjust(5), 'blue'))
print(status, end='\r', flush=True)
if (interval == 0) or (end and end()):
print('\n')
break
time.sleep(interval)
def show_download_status(self, interval=0.5, end=None):
"""用于后台线程,实现边下载边显示状态。
:param int interval: 状态更新间隔,秒
:param function end: 用于控制退出线程
"""
while True:
completed = len(self.stat["images_pass"]) + len(self.stat["images_fail"])
if self.stat["nimages"] > 0:
status = 'Time used: {time_used}\tFailed: {failed}\tCompleted: {completed}'.format(
time_used=colored(str(datetime.now() - self.start_time)[:-7], 'yellow'),
failed=colored(str(len(self.stat["images_fail"])).rjust(3), 'red'),
completed=colored(str(int(completed / self.stat["nimages"] * 100))
+ f'% ({completed}/{self.stat["nimages"]})', 'green'))
print(status, end='\r', flush=True)
if (interval == 0) or (end and end()):
if self.stat["nimages"] > 0:
print('\n')
break
time.sleep(interval)
def download_image(self, | scrapy):
"""下载图片保存到本地。
| conditional_block | |
zcooldl.py | .overwrite = overwrite
self.thumbnail = thumbnail
self.pages = Queue()
self.topics = Queue()
self.images = Queue()
self.stat = {
'npages': 0,
'ntopics': 0,
'nimages': 0,
'pages_pass': set(),
'pages_fail': set(),
'topics_pass': set(),
'topics_fail': set(),
'images_pass': set(),
'images_fail': set()
}
if retries:
# 重置全局变量 RETRIES
global RETRIES
RETRIES = retries
dest = Path(destination or '', urlparse(HOST_PAGE).netloc)
# 从记录文件中的失败项开始下载
if redownload:
self.username = self.reload_records(redownload)
self.user_id = self.search_id_by_username(self.username)
self.max_pages = self.pages.qsize()
self.max_topics = self.topics.qsize()
self.directory = dest / safe_filename(self.username)
self.stat.update({
'npages': self.max_pages,
'ntopics': self.max_topics,
'nimages': self.images.qsize()
})
print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n'
f'{"User ID".rjust(17)}: {self.user_id}\n'
f'{"Pages to scrapy".rjust(17)}: {self.max_pages:2d}\n'
f'{"Topics to scrapy".rjust(17)}: {self.max_topics:3d}\n'
f'{"Images to scrapy".rjust(17)}: {self.images.qsize():4d}\n'
f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n')
self.fetch_all(initialized=True)
return
# 从收藏集下载
if collection:
objid = self.parse_objid(collection, is_collection=True)
resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=1)))
data = resp.json().get('data', {})
total = data.get('total', 0)
page_size = data.get('pageable', {}).get('pageSize')
max_pages_ = math.ceil(total / page_size)
self.max_pages = min(max_pages or 9999, max_pages_)
self.directory = dest / safe_filename(f'{self.username}-{self._collection_name}')
self.parse_collection_topics(data.get('content'))
# 解析第 2 页 至 最大页的 topic 到下载任务
for page in range(2, self.max_pages + 1):
resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=page)))
self.parse_collection_topics(topics=resp.json().get('data', {}).get('content'),
offset=page_size * (page - 1))
# 根据用户 ID 或用户名下载
else:
self.user_id = user_id or self.search_id_by_username(username)
self.base_url = urljoin(HOST_PAGE, USER_SUFFIX.format(id=self.user_id))
try:
response = session_request(self.base_url)
except requests.exceptions.ProxyError:
cprint('Cannot connect to proxy.', 'red')
sys.exit(1)
except Exception as e:
cprint(f'Failed to connect to {self.base_url}, {e}', 'red')
sys.exit(1)
soup = BeautifulSoup(markup=response.text, features='html.parser')
try:
author = soup.find(name='div', id='body').get('data-name')
if username and username != author:
cprint(f'Invalid user id:「{user_id}」or username:「{username}」!', 'red')
sys.exit(1)
self.username = author
except Exception:
self.username = username or 'anonymous'
self.directory = dest / safe_filename(self.username)
try:
max_pages_ = int(soup.find(id='laypage_0').find_all(name='a')[-2].text)
except Exception:
max_pages_ = 1
self.max_pages = min(max_pages or 9999, max_pages_)
if self.spec_topics:
topics = ', '.join(self.spec_topics)
elif self.max_topics == 'all':
topics = 'all'
else:
topics = self.max_pages * self.max_topics
print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n'
f'{"User ID".rjust(17)}: {self.user_id}\n'
f'{"Maximum pages".rjust(17)}: {max_pages_}\n'
f'{"Pages to scrapy".rjust(17)}: {self.max_pages}\n'
f'{"Topics to scrapy".rjust(17)}: {topics}\n'
f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n')
self.END_PARSING_TOPICS = False
self.fetch_all(initialized=True if self.collection else False)
def search_id_by_username(self, username):
"""通过用户昵称查找用户 ID。
:param str username: 用户昵称
:return int: 用户 ID
"""
if not username:
cprint('Must give an <user id> or <username>!', 'yellow')
sys.exit(1)
search_url = urljoin(HOST_PAGE, SEARCH_DESIGNER_SUFFIX.format(word=username))
try:
response = session_request(search_url)
except requests.exceptions.ProxyError:
cprint('Cannot connect to proxy.', 'red')
sys.exit(1)
except Exception as e:
cprint(f'Failed to connect to {search_url}, {e}', 'red')
sys.exit(1)
author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info')
if (not author_1st) or (author_1st.get('data-name') != username):
cprint(f'Username「{username}」does not exist!', 'yellow')
sys.exit(1)
return author_1st.get('data-id')
def reload_records(self, file):
"""从本地下载记录里读取下载失败的内容。
:param str file: 下载记录文件的路径。
:return str: 用户名
"""
with open(file, 'r', encoding='utf-8') as f:
for fail in json.loads(f.read()).get('fail'):
scrapy = Scrapy._make(fail.values())
if scrapy.type == 'page':
self.pages.put(scrapy)
elif scrapy.type == 'topic':
self.topics.put(scrapy)
elif scrapy.type == 'image':
self.images.put(scrapy)
return scrapy.author
def generate_pages(self):
"""根据最大下载页数,生成需要爬取主页的任务。"""
for page in range(1, self.max_pages + 1):
suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX
url = urljoin(self.base_url, suffix.format(page=page))
scrapy = Scrapy(type='page', author=self.username, title=page,
objid=None, index=page - 1, url=url)
if scrapy not in self.stat["pages_pass"]:
self.pages.put(scrapy)
def parse_collection_topics(self, topics: List[dict], offset: int = 0):
for idx, topic in enumerate(topics):
new_scrapy = Scrapy(type='topic',
author=topic.get('creatorObj', {}).get('username'),
title=topic.get('title'),
objid=topic.get('id'),
index=offset + idx,
url=topic.get('pageUrl'))
if new_scrapy not in self.stat["topics_pass"]:
self.topics.put(new_scrapy)
self.stat["ntopics"] += 1 | :return Scrapy: 记录任务信息的数据体
"""
resp = session_request(scrapy.url)
cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover')
for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]):
title = card.get('title')
if self.spec_topics and (title not in self.spec_topics):
continue
new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title,
objid=None, index=idx, url=card.get('href'))
if new_scrapy not in self.stat["topics_pass"]:
self.topics.put(new_scrapy)
self.stat["ntopics"] += 1
return scrapy
def fetch_topics(self):
"""从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。"""
page_futures = {}
while True:
try:
scrapy = self.pages.get(timeout=Q_TIMEOUT)
page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy
except Empty:
break
except Exception:
continue
for future in as_completed(page |
def parse_topics(self, scrapy):
"""爬取主页,解析所有 topic,并将爬取主题的任务添加到任务队列。
:param scrapy: 记录任务信息的数据体 | random_line_split |
zcooldl.py | """
resp = session_request(scrapy.url)
cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover')
for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]):
title = card.get('title')
if self.spec_topics and (title not in self.spec_topics):
continue
new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title,
objid=None, index=idx, url=card.get('href'))
if new_scrapy not in self.stat["topics_pass"]:
self.topics.put(new_scrapy)
self.stat["ntopics"] += 1
return scrapy
def fetch_topics(self):
"""从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。"""
page_futures = {}
while True:
try:
scrapy = self.pages.get(timeout=Q_TIMEOUT)
page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy
except Empty:
break
except Exception:
continue
for future in as_completed(page_futures):
scrapy = page_futures.get(future)
try:
future.result()
self.stat["pages_pass"].add(scrapy)
except Exception:
self.stat["pages_fail"].add(scrapy)
cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red')
self.END_PARSING_TOPICS = True
def parse_objid(self, url: str, is_collection: bool = False) -> str:
"""根据 topic 页面解析 objid
:param url: topic 或 collection 的 URL
:return: objid
"""
soup = BeautifulSoup(session_request(url).text, 'html.parser')
objid = soup.find('input', id='dataInput').attrs.get('data-objid')
if is_collection:
self._collection_name = soup.find('h2', class_='title-h2').text
user = soup.find(name='span', class_='details-user-avatar')
self.user_id = user.find('div').attrs.get('data-id')
self.username = user.find('a').attrs.get('title')
return objid
def parse_images(self, scrapy):
"""爬取 topic,获得 objid 后直接调用 API,从返回数据里获得图片地址等信息,
并将下载图片的任务添加到任务队列。
:param scrapy: 记录任务信息的数据体
:return Scrapy: 记录任务信息的数据体
"""
objid = scrapy.objid or self.parse_objid(scrapy.url)
resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid)))
data = resp.json().get('data', {})
author = data.get('product', {}).get('creatorObj', {}).get('username')
title = data.get('product', {}).get('title')
objid = data.get('product', {}).get('id')
for img in data.get('allImageList', []):
new_scrapy = Scrapy(type='image', author=author, title=title,
objid=objid, index=img.get('orderNo') or 0, url=img.get('url'))
if new_scrapy not in self.stat["images_pass"]:
self.images.put(new_scrapy)
self.stat["nimages"] += 1
return scrapy
def fetch_images(self):
"""从任务队列中获取要爬取的主题,使用多线程处理得到需要下载的图片。"""
image_futures = {}
while True:
try:
scrapy = self.topics.get(timeout=Q_TIMEOUT)
image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy
except Empty:
if self.END_PARSING_TOPICS:
break
except Exception:
continue
for future in as_completed(image_futures):
scrapy = image_futures.get(future)
try:
future.result()
self.stat["topics_pass"].add(scrapy)
except Exception:
self.stat["topics_fail"].add(scrapy)
cprint(f'GET topic: {scrapy.title} ({scrapy.url}) failed.', 'red')
def fetch_all(self, initialized: bool = False):
"""同时爬取主页、主题,并更新状态。"""
if not initialized:
self.generate_pages()
fetch_futures = [self.pool.submit(self.fetch_topics),
self.pool.submit(self.fetch_images)]
end_show_fetch = False
t = threading.Thread(target=self.show_fetch_status, kwargs={'end': lambda: end_show_fetch})
t.start()
try:
wait(fetch_futures)
except KeyboardInterrupt:
raise
finally:
end_show_fetch = True
t.join()
def show_fetch_status(self, interval=0.5, end=None):
"""用于后台线程,实现边爬取边显示状态。
:param int interval: 状态更新间隔,秒
:param function end: 用于控制退出线程
"""
while True:
status = 'Fetched Pages: {pages}\tTopics: {topics}\tImages: {images}'.format(
pages=colored(str(self.max_pages).rjust(3), 'blue'),
topics=colored(str(self.stat["ntopics"]).rjust(3), 'blue'),
images=colored(str(self.stat["nimages"]).rjust(5), 'blue'))
print(status, end='\r', flush=True)
if (interval == 0) or (end and end()):
print('\n')
break
time.sleep(interval)
def show_download_status(self, interval=0.5, end=None):
"""用于后台线程,实现边下载边显示状态。
:param int interval: 状态更新间隔,秒
:param function end: 用于控制退出线程
"""
while True:
completed = len(self.stat["images_pass"]) + len(self.stat["images_fail"])
if self.stat["nimages"] > 0:
status = 'Time used: {time_used}\tFailed: {failed}\tCompleted: {completed}'.format(
time_used=colored(str(datetime.now() - self.start_time)[:-7], 'yellow'),
failed=colored(str(len(self.stat["images_fail"])).rjust(3), 'red'),
completed=colored(str(int(completed / self.stat["nimages"] * 100))
+ f'% ({completed}/{self.stat["nimages"]})', 'green'))
print(status, end='\r', flush=True)
if (interval == 0) or (end and end()):
if self.stat["nimages"] > 0:
print('\n')
break
time.sleep(interval)
def download_image(self, scrapy):
"""下载图片保存到本地。
:param scrapy: 记录任务信息的数据体
:return Scrapy: 记录任务信息的数据体
"""
try:
name = re.findall(r'(?<=/)\w*?\.(?:jpg|gif|png|bmp)', scrapy.url, re.IGNORECASE)[0]
except IndexError:
name = uuid4().hex + '.jpg'
path = self.directory / safe_filename(scrapy.title)
filename = path / f'[{scrapy.index + 1 or 0:02d}]{name}'
if (not self.overwrite) and op.isfile(filename):
return scrapy
url = scrapy.url
if self.thumbnail:
if url.lower().endswith(('jpg', 'png', 'bmp')):
url = f'{scrapy.url}@1280w_1l_2o_100sh.{url[-3:]}'
resp = session_request(url)
mkdirs_if_not_exist(path)
with open(filename, 'wb') as f:
for chunk in resp.iter_content(8192):
f.write(chunk)
return scrapy
def save_records(self):
"""将成功及失败的下载记录保存到本地文件。
:return str: 记录文件的路径
"""
filename = f'{safe_filename(self.start_time.isoformat()[:-7])}.json'
abspath = op.abspath(self.directory / filename)
with open(abspath, 'w', encoding='utf-8') as f:
success = (self.stat["pages_pass"] | self.stat["topics_pass"] | self.stat["images_pass"])
fail = (self.stat["pages_fail"] | self.stat["topics_fail"] | self.stat["images_fail"])
type_order = {'page': 1, 'topic': 2, 'image': 3}
s_ordered = sort_records(success, order=type_order)
f_ordered = sort_records(fail, order=type_order)
records = {
'time': self.start_time.isoformat(),
'success': [scrapy._asdict() for scrapy in s_ordered],
'fail': [scrapy._asdict() for scrapy in f_ordered]
}
f.write(json.dumps(records, ensure_ascii=False, indent=2))
return abspath
def run_scraper(self):
"""使用多线程下载所有图片,完成后保存记录并退出程序。"""
end_show_download = False
t = threading.Thread(target=self.show_download_status, kwargs={'end': lambda: end_show_download})
t.start()
image_futuress = {}
| whil | identifier_name | |
script.js | Canada",
paragraphs : [
"Озеро Морейн підживлюється льодовиком і досягає свого повного наповнення лише у другій половині червня. Головною принадою озера Морейн є синій колір води. Коли воно наповнене, в ньому відбиваються різні відтінки синього кольору через заломлення світла на кам'янистому дні озера.",
"Поверхня озера Морейн відображає, як в дзеркалі, десять величезних канадських піків, і це робить його одним з найкрасивіших місць в Канаді. Це одне з найбільш часто фотографованих місць в Канаді, що дуже славиться красою мальовничих місць і своїми пішохідними екскурсіями."
]
},
{
title : "Франція",
subtitle : "Етрета, Нормандія",
imgSrc : "images/etretat_normandy_france.jpg",
imgAlt : "etretat normandy france",
paragraphs : [
"Étretat (Етрета) - невеликий курорт з прекрасними гальковими пляжами на узбережжі Алебастрового Ла Манша, одна з головних визначних пам'яток Нормандії, на півночі Франції. Він розташований біля самого пляжу, де води Англійського каналу сформували дивовижну красу прямовисні скелі з природними арками — один із найкрасивіших краєвидів Франції.",
""
]
},
{
title : "Гренландія",
subtitle : "Льодяний каньйон",
imgSrc : "images/Grenland_canyon.jpg",
imgAlt : "Greenland canyon",
paragraphs : [
"Цей воістину дивовижний острів притягує мандрівників з усього світу, як магніт. Аналогія банальна, але краще вигадати складно. Значна частина Гренландії справді схована під надійним щитом із льоду, який останнім часом починає активно танути. Це результат глобального потепління, на яке вже багато років намагаються звернути увагу багато громадських організацій.",
"У льодовиках утворюються численні каньйони - їх краса просто приголомшує. Таке враження, що природа, як найталановитіший художник на цій планеті, малює своїм пензлем хитромудрі, часом навіть сюрреалістичні картини."
]
},
{
title : "Норвегія",
subtitle : "Місто - Олесунн",
imgSrc : "images/Alesund_norway.jpg",
imgAlt : "Alesund norway",
paragraphs : [
"О́лесунн (норв. Ålesund) — місто і порт на заході Норвегії. Розташоване на північ від гирла фіорду Стур. Місто розкинулося на декількох островах, а саме — Норвьойя, Аспьойя, Гейсса (Гесса) та Окснойя, які є з'єднані мостами. За легендою, поселення бере свій початок у 9 столітті коли Ролло (Рольф) заснував поблизу маєток, де мешкав війт. Статус міста Олесунн отримав в 1848 р. Після того як місто зазнало пожежі в 1904 р., воно було відбудоване у оригінальному стилі арт-нуво.",
"Олесунн — регіональний торговий центр і туристична база для маршрутів в регіонах Суннмьоре, долині Норанґ, льодовиках Ойє та островах Рунде та Ґіске. В місті знаходиться найбільша риболовна гавань Норвегії, пристановище для риболовних трейлерів що виловлюють тріску та палтус з виду камбалових. Разом з Тромсьо, Олесунн є осередком ловлі арктичних тюленів."
]
}
];
const mainContentBlock = document.querySelectorAll('.main__content-block');
const navigationButton = document.querySelectorAll('.main__navigation-button');
const humbergerBtn = document.querySelector('.header__hamburger'),
adaptiveTabsMenu = document.querySelector('.adaptive'),
adaptiveNavTabsBtn = document.querySelectorAll('.adaptive__navigation-button'),
body = document.querySelector('body');
document.addEventListener('DOMContentLoaded', startEventsForPage());
function startEventsForPage() {
createElements();
hamburgerMenu();
currentTabButton('main__navigation-button_active', '.main__navigation-list');
mainContentBlock[0].classList.add('main__content-active');
function currentTabButton(tabSelector, parrentSelector) {
const parrentMenuWrap = document.querySelector(parrentSelector);
navigationButton[0].classList.add('main__navigation-button_active');
parrentMenuWrap.addEventListener('click', e => {
let index = +e.target.getAttribute("data-number");
navigationButton.forEach(item => {
item.classList.remove('main__navigation-button_active');
});
mainContentBlock.forEach(item => {
item.classList.remove('main__content-active');
});
e.target.classList.add(tabSelector);
deleteInnerHtml();
createElements(index);
mainContentBlock[index].classList.add('main__content-active');
});
};
function createElements (index = 0) {
mainContentBlock[index].innerHTML = `
<h2 class="main__content-title">${content[index].title}</h2>
<p class="main__content-subtitle">${content[index].subtitle}</p>
<img class="main__content-image" src="${content[index].imgSrc}" alt="${content[index].imgAlt}">
<p class="main__content-descr">${content[index].paragraphs[0]}</p>
<p class="main__content-descr">${content[index].paragraphs[1]}</p>
`;
};
function deleteInnerHtml() {
mainContentBlock.forEach(element => {
element.innerHTML = "";
});
};
function hamburgerMenu() {
adaptiveNavTabsBtn[0].classList.add('adaptive__navigation-button__active');
openContentWithTab('adaptive__navigation-button__active','adaptive__navigation-list');
humbergerBtn.addEventListener('click', e => {
e.currentTarget.classList.toggle('header__hamburger-active');
body.classList.toggle('body__active');
adaptiveTabsMenu.classList.toggle('adaptive__active');
});
};
function openContentWithTab(activeTabSelector, parrentSelector) {
const parrentMenuWrap = document.querySelector('.' + parrentSelector);
parrentMenuWrap.addEventListener('click', e => {
let index = +e.target.getAttribute("data-number");
mainContentBlock.forEach(element => {
element.classList.remove('main__content-active');
});
adaptiveNavTabsBtn.forEach(element => {
element.classList.remove(activeTabSelector);
});
deleteInnerHtml();
createElements(index);
adaptiveTabsMenu.classList.toggle('adaptive__active');
body.classList.toggle('body__active');
humbergerBtn.classList.remove('header__hamburger-active');
mainContentBlock[index].classList.add('main__content-active');
adaptiveNavTabsBtn[index].classList.add(activeTabSelector);
});
};
}
| identifier_body | ||
script.js | Canada",
paragraphs : [
"Озеро Морейн підживлюється льодовиком і досягає свого повного наповнення лише у другій половині червня. Головною принадою озера Морейн є синій колір води. Коли воно наповнене, в ньому відбиваються різні відтінки синього кольору через заломлення світла на кам'янистому дні озера.",
"Поверхня озера Морейн відображає, як в дзеркалі, десять величезних канадських піків, і це робить його одним з найкрасивіших місць в Канаді. Це одне з найбільш часто фотографованих місць в Канаді, що дуже славиться красою мальовничих місць і своїми пішохідними екскурсіями."
]
},
{
title : "Франція",
subtitle : "Етрета, Нормандія",
imgSrc : "images/etretat_normandy_france.jpg",
imgAlt : "etretat normandy france",
paragraphs : [
"Étretat (Етрета) - невеликий курорт з прекрасними гальковими пляжами на узбережжі Алебастрового Ла Манша, одна з головних визначних пам'яток Нормандії, на півночі Франції. Він розташований біля самого пляжу, де води Англійського каналу сформували дивовижну красу прямовисні скелі з природними арками — один із найкрасивіших краєвидів Франції.",
""
]
},
{
title : "Гренландія",
subtitle : "Льодяний каньйон",
imgSrc : "images/Grenland_canyon.jpg",
imgAlt : "Greenland canyon",
paragraphs : [
"Цей воістину дивовижний острів притягує мандрівників з усього світу, як магніт. Аналогія банальна, але краще вигадати складно. Значна частина Гренландії справді схована під надійним щитом із льоду, який останнім часом починає активно танути. Це результат глобального потепління, на яке вже багато років намагаються звернути увагу багато громадських організацій.",
"У льодовиках утворюються численні каньйони - їх краса просто приголомшує. Таке враження, що природа, як найталановитіший художник на цій планеті, малює своїм пензлем хитромудрі, часом навіть сюрреалістичні картини."
]
},
{
title : "Норвегія",
subtitle : "Місто - Олесунн",
imgSrc : "images/Alesund_norway.jpg",
imgAlt : "Alesund norway",
paragraphs : [
"О́лесунн (норв. Ålesund) — місто і порт на заході Норвегії. Розташоване на північ від гирла фіорду Стур. Місто розкинулося на декількох островах, а саме — Норвьойя, Аспьойя, Гейсса (Гесса) та Окснойя, які є з'єднані мостами. За легендою, поселення бере свій початок у 9 столітті коли Ролло (Рольф) заснував поблизу маєток, де мешкав війт. Статус міста Олесунн отримав в 1848 р. Після того як місто зазнало пожежі в 1904 р., воно було відбудоване у оригінальному стилі арт-нуво.",
"Олесунн — регіональний торговий центр і туристична база для маршрутів в регіонах Суннмьоре, долині Норанґ, льодовиках Ойє та островах Рунде та Ґіске. В місті знаходиться найбільша риболовна гавань Норвегії, пристановище для риболовних трейлерів що виловлюють тріску та палтус з виду камбалових. Разом з Тромсьо, Олесунн є осередком ловлі арктичних тюленів."
]
}
];
const mainContentBlock = document.querySelectorAll('.main__content-block');
const navigationButton = document.querySelectorAll('.main__navigation-button');
const humbergerBtn = document.querySelector('.header__hamburger'),
adaptiveTabsMenu = document.querySelector('.adaptive'),
adaptiveNavTabsBtn = document.querySelectorAll('.adaptive__navigation-button'),
body = document.querySelector('body');
document.addEventListener('DOMContentLoaded', startEventsForPage());
function startEventsForPage() {
createElements();
hamburgerMenu();
currentTabButton('main__navigation-button_active', '.main__navigation-list');
mainContentBlock[0].classList.add('main__content-active');
function currentTabButton(tabSelector, parrentSelector) {
const parrentMenuWrap = document.querySelector(parrentSelector);
navigationButton[0].classList.add('main__navigation-button_active');
parrentMenuWrap.addEventListener('click', e => {
let index = +e.target.getAttribute("data-number");
navigationButton.forEach(item => {
item.classList.remove('main__navigation-button_active');
});
mainContentBlock.forEach(item => {
item.classList.remove('main__content-active');
});
e.target.classList.add(tabSelector);
deleteInnerHtml();
createElements(index);
mainContentBlock[index].classList.add('main__content-active');
});
};
function createElements (index = 0) {
mainContentBlock[index].innerHTML = `
<h2 class="main__content-title">${content[index].title}</h2>
<p class="main__content-subtitle">${content[index].subtitle}</p>
<img class="main__content-image" src="${content[index].imgSrc}" alt="${content[index].imgAlt}">
<p class="main__content-descr">${content[index].paragraphs[0]}</p>
<p class="main__content-descr">${content[index].paragraphs[1]}</p>
`;
};
function deleteInnerHtml() {
mainContentBlock.forEach(element => {
element.innerHTML = "";
});
};
function hamburgerMenu() {
adaptiveNavTabsBtn[0].classList.add('adaptive__navigation-button__active');
openContentWithTab('adaptive__navigation-button__active','adaptive__navigation-list');
humbergerBtn.addEventListener('click', e => {
e.currentTarget.classList.toggle('header__hamburger-active');
body.classList.toggle('body__active');
adaptiveTabsMenu.classList.toggle('adaptive__active');
});
};
function openContentWithTab(activeTabSelector, parrentSelector) {
const parrentMenuWrap = document.querySelector('.' + parrentSelector);
parrentMenuWrap.addEventListener('click', e => {
let index = +e.target.getAttribute("data-number");
mainContentBlock.forEach(element => {
element.classList.remove('main__content-active');
});
adaptiveNavTabsBtn.forEach(element => {
element.classList.remove(activeTabSelector);
});
deleteInnerHtml();
createElements(index);
adaptiveTabsMenu.classList.toggle('adaptive__active');
body.classList.toggle('body__active');
humbergerBtn.classList.remove('header__hamburger-active');
mainContentBlock[index].classList.add('main__content-active');
adaptiveNavTabsBtn[index].classList.add(activeTabSelector);
});
};
}
| identifier_name | ||
script.js | ться лише у спеціальному спелеологічному спорядженні. Мавпа печера – має найбільшу протяжність серед лавових печер США – майже 400 метрів."
]
},
{
title : "Канада",
subtitle : "Озеро Морейн",
imgSrc : "images/Moraine_Lake_Canada.jpg",
imgAlt : "Moraine Lake Canada",
paragraphs : [
"Озеро Морейн підживлюється льодовиком і досягає свого повного наповнення лише у другій половині червня. Головною принадою озера Морейн є синій колір води. Коли воно наповнене, в ньому відбиваються різні відтінки синього кольору через заломлення світла на кам'янистому дні озера.",
"Поверхня озера Морейн відображає, як в дзеркалі, десять величезних канадських піків, і це робить його одним з найкрасивіших місць в Канаді. Це одне з найбільш часто фотографованих місць в Канаді, що дуже славиться красою мальовничих місць і своїми пішохідними екскурсіями."
]
},
{
title : "Франція",
subtitle : "Етрета, Нормандія",
imgSrc : "images/etretat_normandy_france.jpg",
imgAlt : "etretat normandy france",
paragraphs : [
"Étretat (Етрета) - невеликий курорт з прекрасними гальковими пляжами на узбережжі Алебастрового Ла Манша, одна з головних визначних пам'яток Нормандії, на півночі Франції. Він розташований біля самого пляжу, де води Англійського каналу сформували дивовижну красу прямовисні скелі з природними арками — один із найкрасивіших краєвидів Франції.",
""
]
},
{
title : "Гренландія",
subtitle : "Льодяний каньйон",
imgSrc : "images/Grenland_canyon.jpg",
imgAlt : "Greenland canyon",
paragraphs : [
"Цей воістину дивовижний острів притягує мандрівників з усього світу, як магніт. Аналогія банальна, але краще вигадати складно. Значна частина Гренландії справді схована під надійним щитом із льоду, який останнім часом починає активно танути. Це результат глобального потепління, на яке вже багато років намагаються звернути увагу багато громадських організацій.",
"У льодовиках утворюються численні каньйони - їх краса просто приголомшує. Таке враження, що природа, як найталановитіший художник на цій планеті, малює своїм пензлем хитромудрі, часом навіть сюрреалістичні картини."
]
},
{
title : "Норвегія",
subtitle : "Місто - Олесунн",
imgSrc : "images/Alesund_norway.jpg",
imgAlt : "Alesund norway",
paragraphs : [
"О́лесунн (норв. Ålesund) — місто і порт на заході Норвегії. Розташоване на північ від гирла фіорду Стур. Місто розкинулося на декількох островах, а саме — Норвьойя, Аспьойя, Гейсса (Гесса) та Окснойя, які є з'єднані мостами. За легендою, поселення бере свій початок у 9 столітті коли Ролло (Рольф) заснував поблизу маєток, де мешкав війт. Статус міста Олесунн отримав в 1848 р. Після того як місто зазнало пожежі в 1904 р., воно було відбудоване у оригінальному стилі арт-нуво.",
"Олесунн — регіональний торговий центр і туристична база для маршрутів в регіонах Суннмьоре, долині Норанґ, льодовиках Ойє та островах Рунде та Ґіске. В місті знаходиться найбільша риболовна гавань Норвегії, пристановище для риболовних трейлерів що виловлюють тріску та палтус з виду камбалових. Разом з Тромсьо, Олесунн є осередком ловлі арктичних тюленів."
]
}
];
const mainContentBlock = document.querySelectorAll('.main__content-block');
const navigationButton = document.querySelectorAll('.main__navigation-button');
const humbergerBtn = document.querySelector('.header__hamburger'),
adaptiveTabsMenu = document.querySelector('.adaptive'),
adaptiveNavTabsBtn = document.querySelectorAll('.adaptive__navigation-button'),
body = document.querySelector('body');
document.addEventListener('DOMContentLoaded', startEventsForPage());
function startEventsForPage() {
createElements();
hamburgerMenu();
currentTabButton('main__navigation-button_active', '.main__navigation-list');
mainContentBlock[0].classList.add('main__content-active');
function currentTabButton(tabSelector, parrentSelector) {
const parrentMenuWrap = document.querySelector(parrentSelector);
navigationButton[0].classList.add('main__navigation-button_active');
parrentMenuWrap.addEventListener('click', e => {
let index = +e.target.getAttribute("data-number");
navigationButton.forEach(item => {
item.classList.remove('main__navigation-button_active');
});
mainContentBlock.forEach(item => {
item.classList.remove('main__content-active');
});
e.target.classList.add(tabSelector);
deleteInnerHtml();
createElements(index);
mainContentBlock[index].classList.add('main__content-active');
});
};
function createElements (index = 0) {
mainContentBlock[index].innerHTML = `
<h2 class="main__content-title">${content[index].title}</h2>
<p class="main__content-subtitle">${content[index].subtitle}</p>
<img class="main__content-image" src="${content[index].imgSrc}" alt="${content[index].imgAlt}">
<p class="main__content-descr">${content[index].paragraphs[0]}</p>
<p class="main__content-descr">${content[index].paragraphs[1]}</p>
`;
};
function deleteInnerHtml() {
mainContentBlock.forEach(element => {
element.innerHTML = "";
});
};
function hamburgerMenu() {
adaptiveNavTabsBtn[0].classList.add('adaptive__navigation-button__active');
openContentWithTab('adaptive__navigation-button__active','adaptive__navigation-list');
humbergerBtn.addEventListener('click', e => {
e.currentTarget.classList.toggle('header__hamburger-active');
body.classList.toggle('body__active');
adaptiveTabsMenu.classList.toggle('adaptive__active');
});
};
function openContentWithTab(activeTabSelector, parrentSelector) {
const parrentMenuWrap = document.querySelector('.' + parrentSelector);
| parrentMenuWrap.addEventListener('click', e => {
let index = +e.target.getAttribute("data-number");
| random_line_split | |
reco_tracks.py | _ID
from modules.reco import config, plot
from modules.analysis import config as CONFIGURATION
import os
import itertools
import bokeh
import numpy as np
############################################# INPUT ARGUMENTS
import argparse
parser = argparse.ArgumentParser(description='Track reconstruction from input hits.')
parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+')
parser.add_argument('-f', '--format', help='Input hits format', default='time_wire')
parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False)
parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15)
parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html')
parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False)
parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+')
args = parser.parse_args()
# Checking validity of the input format
if args.format not in OUT_CONFIG:
raise ValueError('Wrong input format (-f) specified')
# Checking existence of input files
for file_path in args.inputs:
if not os.path.exists(os.path.expandvars(file_path)):
print('--- ERROR ---')
print(' \''+file_path+'\' file not found')
print(' please provide the correct path to the file containing raw hits' )
print()
exit()
def | (input_files):
"""Reconstruct tracks from hits in all events from the provided input files"""
n_words_event = len(OUT_CONFIG['event']['fields'])
n_words_hit = len(OUT_CONFIG[args.format]['fields'])
# Initialising event
event = -1
G = Geometry(CONFIGURATION)
H = HitManager()
SLs = {}
for iSL in config.SL_SHIFT.keys():
SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL])
# Defining which SLs should be plotted in which global view
GLOBAL_VIEW_SLs = {
'xz': [SLs[0], SLs[2]],
'yz': [SLs[1], SLs[3]]
}
# Analyzing the hits in each event
for file_path in input_files:
# Reading input file line by line
with open(file_path, 'r') as file_in:
file_line_nr = 0
for line in file_in:
file_line_nr += 1
if file_line_nr <= 1:
continue
hits_lst = []
H.reset()
words = line.strip().split()
event = int(words[0])
# Skipping event if it was not specified in command line
if args.events is not None and event not in args.events:
continue
nhits = int(words[1])
print('Event {0:<5d} # hits: {1:d}'.format(event, nhits))
if args.glance:
continue
# Skipping event with too many hits (most likely a spark event that will take forever to process)
if nhits > args.max_hits:
continue
# Extracting hit information
for iHit in range(nhits):
start = n_words_event + iHit*n_words_hit
ww = words[start:start+n_words_hit]
hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])])
H.add_hits(hits_lst)
# Removing hits with time outside the timebox region
H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True)
# Calculating local+global hit positions
H.calc_pos(SLs)
# Creating figures of the chambers
figs = {}
figs['sl'] = plot.book_chambers_figure(G)
figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs)
# Analyzing hits in each SL
sl_fit_results = {}
for iSL, sl in SLs.items():
# print('- SL', iSL)
hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer')
if args.plot:
# Drawing the left and right hits in local frame
figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5,
fill_color='red', fill_alpha=0.7, line_width=0)
figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5,
fill_color='blue', fill_alpha=0.7, line_width=0)
# Performing track reconstruction in the local frame
sl_fit_results[iSL] = []
layer_groups = hits_sl.groupby('layer').groups
n_layers = len(layer_groups)
# Stopping if lass than 3 layers of hits
if n_layers < config.NHITS_MIN_LOCAL:
continue
hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()]
# Building the list of all possible hit combinations with 1 hit from each layer
hits_layered = list(itertools.product(*hitid_layers))
# Building more combinations using only either left or right position of each hit
for hit_ids in hits_layered:
# print('- -', hit_ids)
posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values
posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values
posx_combs = list(itertools.product(*posx))
# Fitting each combination
fit_results_lr = []
fit_range = (min(posz), max(posz))
for iC, posx_comb in enumerate(posx_combs):
pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range)
chi2 = stats[0][0] / n_layers
if chi2 < config.FIT_CHI2_MAX:
a0, a1 = pfit
fit_results_lr.append((chi2, hit_ids, pfit))
# Keeping only the best fit result from the given set of physical hits
fit_results_lr.sort(key=itemgetter(0))
if fit_results_lr:
sl_fit_results[iSL].append(fit_results_lr[0])
# Sorting the fit results of a SL by Chi2
sl_fit_results[iSL].sort(key=itemgetter(0))
if sl_fit_results[iSL]:
# Drawing fitted tracks
posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32)
for iR, res in enumerate(sl_fit_results[iSL][:5]):
col = config.TRACK_COLORS[iR]
posx = res[2](posz)
figs['sl'][iSL].line(x=posx, y=posz,
line_color=col, line_alpha=0.7, line_width=3)
if args.plot:
# Drawing the left and right hits in global frame
for view, sls in GLOBAL_VIEW_SLs.items():
sl_ids = [sl.id for sl in sls]
hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)]
figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]],
fill_color='red', fill_alpha=0.7, line_width=0)
figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]],
fill_color='blue', fill_alpha=0.7, line_width=0)
# Building 3D segments from the fit results in each SL
posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32)
for sl in sls:
for iR, res in enumerate(sl_fit_results[sl.id][:5]):
posx = res[2](posz)
start = (posx[0], 0, posz[0])
end = (posx[1], 0, posz[1])
segL = Segment(start, end)
segG = segL.fromSL(sl)
segG.calc_vector()
# Extending the global segment to the full height of the view
start = segG.pointAtZ(plot.PLOT_RANGE['y'][0])
end = segG.pointAtZ(plot.PLOT_RANGE['y'][1])
# Getting XY coordinates of the global segment for the current view
iX = COOR_ID[view[0]]
posx = [start[iX], end[iX]]
posy = [start[2], end[2]]
# Drawing the segment | process | identifier_name |
reco_tracks.py | _ID
from modules.reco import config, plot
from modules.analysis import config as CONFIGURATION
import os
import itertools
import bokeh
import numpy as np
############################################# INPUT ARGUMENTS
import argparse
parser = argparse.ArgumentParser(description='Track reconstruction from input hits.')
parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+')
parser.add_argument('-f', '--format', help='Input hits format', default='time_wire')
parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False)
parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15)
parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html')
parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False)
parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+')
args = parser.parse_args()
# Checking validity of the input format
if args.format not in OUT_CONFIG:
raise ValueError('Wrong input format (-f) specified')
# Checking existence of input files
for file_path in args.inputs:
if not os.path.exists(os.path.expandvars(file_path)):
print('--- ERROR ---')
print(' \''+file_path+'\' file not found')
print(' please provide the correct path to the file containing raw hits' )
print()
exit()
def process(input_files):
| for line in file_in:
file_line_nr += 1
if file_line_nr <= 1:
continue
hits_lst = []
H.reset()
words = line.strip().split()
event = int(words[0])
# Skipping event if it was not specified in command line
if args.events is not None and event not in args.events:
continue
nhits = int(words[1])
print('Event {0:<5d} # hits: {1:d}'.format(event, nhits))
if args.glance:
continue
# Skipping event with too many hits (most likely a spark event that will take forever to process)
if nhits > args.max_hits:
continue
# Extracting hit information
for iHit in range(nhits):
start = n_words_event + iHit*n_words_hit
ww = words[start:start+n_words_hit]
hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])])
H.add_hits(hits_lst)
# Removing hits with time outside the timebox region
H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True)
# Calculating local+global hit positions
H.calc_pos(SLs)
# Creating figures of the chambers
figs = {}
figs['sl'] = plot.book_chambers_figure(G)
figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs)
# Analyzing hits in each SL
sl_fit_results = {}
for iSL, sl in SLs.items():
# print('- SL', iSL)
hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer')
if args.plot:
# Drawing the left and right hits in local frame
figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5,
fill_color='red', fill_alpha=0.7, line_width=0)
figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5,
fill_color='blue', fill_alpha=0.7, line_width=0)
# Performing track reconstruction in the local frame
sl_fit_results[iSL] = []
layer_groups = hits_sl.groupby('layer').groups
n_layers = len(layer_groups)
# Stopping if lass than 3 layers of hits
if n_layers < config.NHITS_MIN_LOCAL:
continue
hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()]
# Building the list of all possible hit combinations with 1 hit from each layer
hits_layered = list(itertools.product(*hitid_layers))
# Building more combinations using only either left or right position of each hit
for hit_ids in hits_layered:
# print('- -', hit_ids)
posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values
posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values
posx_combs = list(itertools.product(*posx))
# Fitting each combination
fit_results_lr = []
fit_range = (min(posz), max(posz))
for iC, posx_comb in enumerate(posx_combs):
pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range)
chi2 = stats[0][0] / n_layers
if chi2 < config.FIT_CHI2_MAX:
a0, a1 = pfit
fit_results_lr.append((chi2, hit_ids, pfit))
# Keeping only the best fit result from the given set of physical hits
fit_results_lr.sort(key=itemgetter(0))
if fit_results_lr:
sl_fit_results[iSL].append(fit_results_lr[0])
# Sorting the fit results of a SL by Chi2
sl_fit_results[iSL].sort(key=itemgetter(0))
if sl_fit_results[iSL]:
# Drawing fitted tracks
posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32)
for iR, res in enumerate(sl_fit_results[iSL][:5]):
col = config.TRACK_COLORS[iR]
posx = res[2](posz)
figs['sl'][iSL].line(x=posx, y=posz,
line_color=col, line_alpha=0.7, line_width=3)
if args.plot:
# Drawing the left and right hits in global frame
for view, sls in GLOBAL_VIEW_SLs.items():
sl_ids = [sl.id for sl in sls]
hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)]
figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]],
fill_color='red', fill_alpha=0.7, line_width=0)
figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]],
fill_color='blue', fill_alpha=0.7, line_width=0)
# Building 3D segments from the fit results in each SL
posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32)
for sl in sls:
for iR, res in enumerate(sl_fit_results[sl.id][:5]):
posx = res[2](posz)
start = (posx[0], 0, posz[0])
end = (posx[1], 0, posz[1])
segL = Segment(start, end)
segG = segL.fromSL(sl)
segG.calc_vector()
# Extending the global segment to the full height of the view
start = segG.pointAtZ(plot.PLOT_RANGE['y'][0])
end = segG.pointAtZ(plot.PLOT_RANGE['y'][1])
# Getting XY coordinates of the global segment for the current view
iX = COOR_ID[view[0]]
posx = [start[iX], end[iX]]
posy = [start[2], end[2]]
# Drawing the segment
| """Reconstruct tracks from hits in all events from the provided input files"""
n_words_event = len(OUT_CONFIG['event']['fields'])
n_words_hit = len(OUT_CONFIG[args.format]['fields'])
# Initialising event
event = -1
G = Geometry(CONFIGURATION)
H = HitManager()
SLs = {}
for iSL in config.SL_SHIFT.keys():
SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL])
# Defining which SLs should be plotted in which global view
GLOBAL_VIEW_SLs = {
'xz': [SLs[0], SLs[2]],
'yz': [SLs[1], SLs[3]]
}
# Analyzing the hits in each event
for file_path in input_files:
# Reading input file line by line
with open(file_path, 'r') as file_in:
file_line_nr = 0 | identifier_body |
reco_tracks.py | _ID
from modules.reco import config, plot
from modules.analysis import config as CONFIGURATION
import os
import itertools
import bokeh
import numpy as np
############################################# INPUT ARGUMENTS
import argparse
parser = argparse.ArgumentParser(description='Track reconstruction from input hits.')
parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+')
parser.add_argument('-f', '--format', help='Input hits format', default='time_wire')
parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False)
parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15)
parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html')
parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False)
parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+')
args = parser.parse_args()
# Checking validity of the input format
if args.format not in OUT_CONFIG:
raise ValueError('Wrong input format (-f) specified')
# Checking existence of input files
for file_path in args.inputs:
if not os.path.exists(os.path.expandvars(file_path)):
print('--- ERROR ---')
print(' \''+file_path+'\' file not found')
print(' please provide the correct path to the file containing raw hits' )
print()
exit()
def process(input_files):
"""Reconstruct tracks from hits in all events from the provided input files"""
n_words_event = len(OUT_CONFIG['event']['fields'])
n_words_hit = len(OUT_CONFIG[args.format]['fields'])
# Initialising event
event = -1
G = Geometry(CONFIGURATION)
H = HitManager()
SLs = {}
for iSL in config.SL_SHIFT.keys():
SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL])
# Defining which SLs should be plotted in which global view
GLOBAL_VIEW_SLs = {
'xz': [SLs[0], SLs[2]],
'yz': [SLs[1], SLs[3]]
}
# Analyzing the hits in each event
for file_path in input_files:
# Reading input file line by line | file_line_nr += 1
if file_line_nr <= 1:
continue
hits_lst = []
H.reset()
words = line.strip().split()
event = int(words[0])
# Skipping event if it was not specified in command line
if args.events is not None and event not in args.events:
continue
nhits = int(words[1])
print('Event {0:<5d} # hits: {1:d}'.format(event, nhits))
if args.glance:
continue
# Skipping event with too many hits (most likely a spark event that will take forever to process)
if nhits > args.max_hits:
continue
# Extracting hit information
for iHit in range(nhits):
start = n_words_event + iHit*n_words_hit
ww = words[start:start+n_words_hit]
hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])])
H.add_hits(hits_lst)
# Removing hits with time outside the timebox region
H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True)
# Calculating local+global hit positions
H.calc_pos(SLs)
# Creating figures of the chambers
figs = {}
figs['sl'] = plot.book_chambers_figure(G)
figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs)
# Analyzing hits in each SL
sl_fit_results = {}
for iSL, sl in SLs.items():
# print('- SL', iSL)
hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer')
if args.plot:
# Drawing the left and right hits in local frame
figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5,
fill_color='red', fill_alpha=0.7, line_width=0)
figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5,
fill_color='blue', fill_alpha=0.7, line_width=0)
# Performing track reconstruction in the local frame
sl_fit_results[iSL] = []
layer_groups = hits_sl.groupby('layer').groups
n_layers = len(layer_groups)
# Stopping if lass than 3 layers of hits
if n_layers < config.NHITS_MIN_LOCAL:
continue
hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()]
# Building the list of all possible hit combinations with 1 hit from each layer
hits_layered = list(itertools.product(*hitid_layers))
# Building more combinations using only either left or right position of each hit
for hit_ids in hits_layered:
# print('- -', hit_ids)
posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values
posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values
posx_combs = list(itertools.product(*posx))
# Fitting each combination
fit_results_lr = []
fit_range = (min(posz), max(posz))
for iC, posx_comb in enumerate(posx_combs):
pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range)
chi2 = stats[0][0] / n_layers
if chi2 < config.FIT_CHI2_MAX:
a0, a1 = pfit
fit_results_lr.append((chi2, hit_ids, pfit))
# Keeping only the best fit result from the given set of physical hits
fit_results_lr.sort(key=itemgetter(0))
if fit_results_lr:
sl_fit_results[iSL].append(fit_results_lr[0])
# Sorting the fit results of a SL by Chi2
sl_fit_results[iSL].sort(key=itemgetter(0))
if sl_fit_results[iSL]:
# Drawing fitted tracks
posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32)
for iR, res in enumerate(sl_fit_results[iSL][:5]):
col = config.TRACK_COLORS[iR]
posx = res[2](posz)
figs['sl'][iSL].line(x=posx, y=posz,
line_color=col, line_alpha=0.7, line_width=3)
if args.plot:
# Drawing the left and right hits in global frame
for view, sls in GLOBAL_VIEW_SLs.items():
sl_ids = [sl.id for sl in sls]
hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)]
figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]],
fill_color='red', fill_alpha=0.7, line_width=0)
figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]],
fill_color='blue', fill_alpha=0.7, line_width=0)
# Building 3D segments from the fit results in each SL
posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32)
for sl in sls:
for iR, res in enumerate(sl_fit_results[sl.id][:5]):
posx = res[2](posz)
start = (posx[0], 0, posz[0])
end = (posx[1], 0, posz[1])
segL = Segment(start, end)
segG = segL.fromSL(sl)
segG.calc_vector()
# Extending the global segment to the full height of the view
start = segG.pointAtZ(plot.PLOT_RANGE['y'][0])
end = segG.pointAtZ(plot.PLOT_RANGE['y'][1])
# Getting XY coordinates of the global segment for the current view
iX = COOR_ID[view[0]]
posx = [start[iX], end[iX]]
posy = [start[2], end[2]]
# Drawing the segment
| with open(file_path, 'r') as file_in:
file_line_nr = 0
for line in file_in: | random_line_split |
reco_tracks.py | _ID
from modules.reco import config, plot
from modules.analysis import config as CONFIGURATION
import os
import itertools
import bokeh
import numpy as np
############################################# INPUT ARGUMENTS
import argparse
parser = argparse.ArgumentParser(description='Track reconstruction from input hits.')
parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+')
parser.add_argument('-f', '--format', help='Input hits format', default='time_wire')
parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False)
parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15)
parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html')
parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False)
parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+')
args = parser.parse_args()
# Checking validity of the input format
if args.format not in OUT_CONFIG:
raise ValueError('Wrong input format (-f) specified')
# Checking existence of input files
for file_path in args.inputs:
if not os.path.exists(os.path.expandvars(file_path)):
print('--- ERROR ---')
print(' \''+file_path+'\' file not found')
print(' please provide the correct path to the file containing raw hits' )
print()
exit()
def process(input_files):
"""Reconstruct tracks from hits in all events from the provided input files"""
n_words_event = len(OUT_CONFIG['event']['fields'])
n_words_hit = len(OUT_CONFIG[args.format]['fields'])
# Initialising event
event = -1
G = Geometry(CONFIGURATION)
H = HitManager()
SLs = {}
for iSL in config.SL_SHIFT.keys():
SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL])
# Defining which SLs should be plotted in which global view
GLOBAL_VIEW_SLs = {
'xz': [SLs[0], SLs[2]],
'yz': [SLs[1], SLs[3]]
}
# Analyzing the hits in each event
for file_path in input_files:
# Reading input file line by line
with open(file_path, 'r') as file_in:
file_line_nr = 0
for line in file_in:
file_line_nr += 1
if file_line_nr <= 1:
continue
hits_lst = []
H.reset()
words = line.strip().split()
event = int(words[0])
# Skipping event if it was not specified in command line
if args.events is not None and event not in args.events:
continue
nhits = int(words[1])
print('Event {0:<5d} # hits: {1:d}'.format(event, nhits))
if args.glance:
continue
# Skipping event with too many hits (most likely a spark event that will take forever to process)
if nhits > args.max_hits:
continue
# Extracting hit information
for iHit in range(nhits):
start = n_words_event + iHit*n_words_hit
ww = words[start:start+n_words_hit]
hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])])
H.add_hits(hits_lst)
# Removing hits with time outside the timebox region
H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True)
# Calculating local+global hit positions
H.calc_pos(SLs)
# Creating figures of the chambers
figs = {}
figs['sl'] = plot.book_chambers_figure(G)
figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs)
# Analyzing hits in each SL
sl_fit_results = {}
for iSL, sl in SLs.items():
# print('- SL', iSL)
hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer')
if args.plot:
# Drawing the left and right hits in local frame
figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5,
fill_color='red', fill_alpha=0.7, line_width=0)
figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5,
fill_color='blue', fill_alpha=0.7, line_width=0)
# Performing track reconstruction in the local frame
sl_fit_results[iSL] = []
layer_groups = hits_sl.groupby('layer').groups
n_layers = len(layer_groups)
# Stopping if lass than 3 layers of hits
if n_layers < config.NHITS_MIN_LOCAL:
continue
hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()]
# Building the list of all possible hit combinations with 1 hit from each layer
hits_layered = list(itertools.product(*hitid_layers))
# Building more combinations using only either left or right position of each hit
for hit_ids in hits_layered:
# print('- -', hit_ids)
posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values
posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values
posx_combs = list(itertools.product(*posx))
# Fitting each combination
fit_results_lr = []
fit_range = (min(posz), max(posz))
for iC, posx_comb in enumerate(posx_combs):
pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range)
chi2 = stats[0][0] / n_layers
if chi2 < config.FIT_CHI2_MAX:
a0, a1 = pfit
fit_results_lr.append((chi2, hit_ids, pfit))
# Keeping only the best fit result from the given set of physical hits
fit_results_lr.sort(key=itemgetter(0))
if fit_results_lr:
sl_fit_results[iSL].append(fit_results_lr[0])
# Sorting the fit results of a SL by Chi2
sl_fit_results[iSL].sort(key=itemgetter(0))
if sl_fit_results[iSL]:
# Drawing fitted tracks
posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32)
for iR, res in enumerate(sl_fit_results[iSL][:5]):
col = config.TRACK_COLORS[iR]
posx = res[2](posz)
figs['sl'][iSL].line(x=posx, y=posz,
line_color=col, line_alpha=0.7, line_width=3)
if args.plot:
# Drawing the left and right hits in global frame
| # Getting XY coordinates of the global segment for the current view
iX = COOR_ID[view[0]]
posx = [start[iX], end[iX]]
posy = [start[2], end[2]]
# Drawing the segment
| for view, sls in GLOBAL_VIEW_SLs.items():
sl_ids = [sl.id for sl in sls]
hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)]
figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]],
fill_color='red', fill_alpha=0.7, line_width=0)
figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]],
fill_color='blue', fill_alpha=0.7, line_width=0)
# Building 3D segments from the fit results in each SL
posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32)
for sl in sls:
for iR, res in enumerate(sl_fit_results[sl.id][:5]):
posx = res[2](posz)
start = (posx[0], 0, posz[0])
end = (posx[1], 0, posz[1])
segL = Segment(start, end)
segG = segL.fromSL(sl)
segG.calc_vector()
# Extending the global segment to the full height of the view
start = segG.pointAtZ(plot.PLOT_RANGE['y'][0])
end = segG.pointAtZ(plot.PLOT_RANGE['y'][1]) | conditional_block |
params.pb.go | else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ConnParameters) XXX_Merge(src proto.Message) {
xxx_messageInfo_ConnParameters.Merge(m, src)
}
func (m *ConnParameters) XXX_Size() int {
return m.Size()
}
func (m *ConnParameters) XXX_DiscardUnknown() {
xxx_messageInfo_ConnParameters.DiscardUnknown(m)
}
var xxx_messageInfo_ConnParameters proto.InternalMessageInfo
func (m *ConnParameters) GetPingInterval() int64 {
if m != nil {
return m.PingInterval
}
return 0
}
func (m *ConnParameters) GetPingTimeout() int64 {
if m != nil {
return m.PingTimeout
}
return 0
}
func (m *ConnParameters) GetSID() string {
if m != nil {
return m.SID
}
return ""
}
func (m *ConnParameters) GetUpgrades() []string {
if m != nil {
return m.Upgrades
}
return nil
}
func init() {
proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters")
}
func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) }
var fileDescriptor_8679b07c520418a1 = []byte{
// 182 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a,
0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b,
0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4,
0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95,
0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83,
0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02,
0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14,
0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67,
0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9,
0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xcc,
0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43,
0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00,
}
func (m *ConnParameters) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ConnParameters) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PingInterval != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingTimeout))
}
if len(m.SID) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintParams(dAtA, i, uint64(len(m.SID)))
i += copy(dAtA[i:], m.SID)
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func encodeVarintParams(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *ConnParameters) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.PingInterval != 0 {
n += 1 + sovParams(uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
n += 1 + sovParams(uint64(m.PingTimeout))
}
l = len(m.SID)
if l > 0 {
n += 1 + l + sovParams(uint64(l))
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
l = len(s)
n += 1 + l + sovParams(uint64(l))
}
}
return n
}
func sovParams(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozParams(x uint64 | {
return xxx_messageInfo_ConnParameters.Marshal(b, m, deterministic)
} | conditional_block | |
params.pb.go | []string {
if m != nil {
return m.Upgrades
}
return nil
}
func init() {
proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters")
}
func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) }
var fileDescriptor_8679b07c520418a1 = []byte{
// 182 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a,
0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b,
0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4,
0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95,
0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83,
0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02,
0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14,
0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67,
0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9,
0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xcc,
0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43,
0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00,
}
func (m *ConnParameters) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ConnParameters) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PingInterval != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingTimeout))
}
if len(m.SID) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintParams(dAtA, i, uint64(len(m.SID)))
i += copy(dAtA[i:], m.SID)
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func encodeVarintParams(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *ConnParameters) | () (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.PingInterval != 0 {
n += 1 + sovParams(uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
n += 1 + sovParams(uint64(m.PingTimeout))
}
l = len(m.SID)
if l > 0 {
n += 1 + l + sovParams(uint64(l))
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
l = len(s)
n += 1 + l + sovParams(uint64(l))
}
}
return n
}
func sovParams(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozParams(x uint64) (n int) {
return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *ConnParameters) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowParams
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ConnParameters: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ConnParameters: illegal tag %d (wire type | Size | identifier_name |
params.pb.go | return 0
}
func (m *ConnParameters) GetSID() string {
if m != nil {
return m.SID
}
return ""
}
func (m *ConnParameters) GetUpgrades() []string {
if m != nil {
return m.Upgrades
}
return nil
}
func init() {
proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters")
}
func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) }
var fileDescriptor_8679b07c520418a1 = []byte{
// 182 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a,
0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b,
0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4,
0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95,
0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83,
0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02,
0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14,
0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67,
0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9,
0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xcc,
0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43,
0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00,
}
func (m *ConnParameters) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ConnParameters) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PingInterval != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingTimeout))
}
if len(m.SID) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintParams(dAtA, i, uint64(len(m.SID)))
i += copy(dAtA[i:], m.SID)
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func encodeVarintParams(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *ConnParameters) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.PingInterval != 0 {
n += 1 + sovParams(uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
n += 1 + sovParams(uint64(m.PingTimeout))
}
l = len(m.SID)
if l > 0 {
n += 1 + l + sovParams(uint64(l))
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
l = len(s)
n += 1 + l + sovParams(uint64(l))
}
}
return n
}
func sovParams(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozParams(x uint64) (n int) {
return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *ConnParameters) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowParams
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32 |
func (m *ConnParameters) GetPingTimeout() int64 {
if m != nil {
return m.PingTimeout
} | random_line_split | |
params.pb.go |
var xxx_messageInfo_ConnParameters proto.InternalMessageInfo
func (m *ConnParameters) GetPingInterval() int64 {
if m != nil {
return m.PingInterval
}
return 0
}
func (m *ConnParameters) GetPingTimeout() int64 {
if m != nil {
return m.PingTimeout
}
return 0
}
func (m *ConnParameters) GetSID() string {
if m != nil {
return m.SID
}
return ""
}
func (m *ConnParameters) GetUpgrades() []string {
if m != nil {
return m.Upgrades
}
return nil
}
func init() {
proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters")
}
func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) }
var fileDescriptor_8679b07c520418a1 = []byte{
// 182 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a,
0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b,
0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4,
0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95,
0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83,
0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02,
0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14,
0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67,
0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9,
0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xcc,
0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43,
0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00,
}
func (m *ConnParameters) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ConnParameters) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PingInterval != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintParams(dAtA, i, uint64(m.PingTimeout))
}
if len(m.SID) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintParams(dAtA, i, uint64(len(m.SID)))
i += copy(dAtA[i:], m.SID)
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func encodeVarintParams(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *ConnParameters) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.PingInterval != 0 {
n += 1 + sovParams(uint64(m.PingInterval))
}
if m.PingTimeout != 0 {
n += 1 + sovParams(uint64(m.PingTimeout))
}
l = len(m.SID)
if l > 0 {
n += 1 + l + sovParams(uint64(l))
}
if len(m.Upgrades) > 0 {
for _, s := range m.Upgrades {
l = len(s)
n += 1 + l + sovParams(uint64(l))
}
}
return n
}
func sovParams(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozParams(x uint64) (n int) {
return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *ConnParameters) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowParams
}
if iNdEx >= l {
return io.Err | {
xxx_messageInfo_ConnParameters.DiscardUnknown(m)
} | identifier_body | |
main.rs | .zs.push(s.p.2);
me.rsqrds.push(s.rsqrd);
me.mats.push(s.m);
}
// pad everything out to the simd width
me.xs.resize(len, 0.0);
me.ys.resize(len, 0.0);
me.zs.resize(len, 0.0);
me.rsqrds.resize(len, 0.0);
let default_mat = Material {
emit_color: V3(0.0, 0.0, 0.0),
reflect_color: V3(0.0, 0.0, 0.0),
t: MaterialType::Specular,
};
me.mats.resize(len, default_mat);
me
}
fn len(&self) -> usize {
self.xs.len()
}
}
// https://entropymine.com/imageworsener/srgbformula/
fn linear_to_srgb(x: f32) -> f32 {
if x < 0.0 {
0.0
} else if x > 1.0 {
1.0
} else if x > 0.0031308 {
1.055 * x.powf(1.0 / 2.4) - 0.055
} else {
x * 12.92
}
}
thread_local! {
static THREAD_RNG: Cell<u64> = {
let mut buf = [0u8; 8];
getrandom::getrandom(&mut buf).unwrap();
Cell::new(u64::from_le_bytes(buf))
};
}
fn rand_seed() -> u32 {
let mut buf = [0u8; 4];
getrandom::getrandom(&mut buf).unwrap();
u32::from_le_bytes(buf)
}
#[allow(dead_code)]
fn thread_rand() -> u32 {
// TODO(eli): thread local perf is terrible; causes function call and branching
THREAD_RNG.with(|rng_cell| {
let mut state = rng_cell.get();
let randu = pcg(&mut state);
rng_cell.set(state);
randu
})
}
// Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs"
// xorshift isn't great, but is good enough for our purposes and has two
// nice properties:
// 1. it only needs a u32 of state to generate a u32
// 2. it's easy to SIMD
fn xorshift(state: &mut u32) -> u32 {
debug_assert!(*state != 0, "xorshift cannot be seeded with 0");
let mut x = *state;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
*state = x;
x
}
// pcg xsh rs 64/32 (mcg)
#[allow(dead_code)]
fn pcg(state: &mut u64) -> u32 {
let s = *state;
*state = s.wrapping_mul(6364136223846793005);
(((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32
}
fn randf(state: &mut u32) -> f32 {
let randu = (xorshift(state) >> 9) | 0x3f800000;
let randf = f32::from_bits(randu) - 1.0;
randf
}
fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 {
min + (max - min) * randf(state)
}
#[inline(always)]
fn cast(
rng_state: &mut u32,
bg: &Material,
spheres: &Spheres,
mut origin: V3,
mut dir: V3,
mut bounces: u32,
) -> (V3, u32) {
let mut color = V3(0.0, 0.0, 0.0);
let mut reflectance = V3(1.0, 1.0, 1.0);
let orig_bounces = bounces;
loop {
debug_assert!(dir.is_unit_vector());
let origin_xs = WideF32::splat(origin.0);
let origin_ys = WideF32::splat(origin.1);
let origin_zs = WideF32::splat(origin.2);
let dir_x = WideF32::splat(dir.0);
let dir_y = WideF32::splat(dir.1);
let dir_z = WideF32::splat(dir.2);
let mut hit_ids = WideI32::splat(-1);
let mut hit_dists = WideF32::splat(f32::MAX);
let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0);
// TODO(eli): egregious bounds checking here
for i in (0..spheres.len()).step_by(SIMD_WIDTH) {
let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]);
let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]);
let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]);
let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]);
// this is sphere_relative_origin = origin - sphere_origin
// but the math is flipped backwards because it saves us having to negate the b term
let relative_xs = sphere_xs - origin_xs;
let relative_ys = sphere_ys - origin_ys;
let relative_zs = sphere_zs - origin_zs;
let neg_b = dir_x * relative_xs;
let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b);
let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b);
let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds);
let c = WideF32::mul_add(relative_ys, relative_ys, c);
let c = WideF32::mul_add(relative_zs, relative_zs, c);
let discr = WideF32::mul_sub(neg_b, neg_b, c);
let discrmask = discr.gt(WideF32::splat(0.0));
if discrmask.any() {
let root_term = discr.sqrt();
let t0 = neg_b - root_term;
let t1 = neg_b + root_term;
// t0 if hit, else t1
let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE)));
let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists);
hit_ids = WideI32::select(hit_ids, iteration_ids, mask);
hit_dists = WideF32::select(hit_dists, t, mask);
}
iteration_ids += WideI32::splat(SIMD_WIDTH as i32);
}
let hmin = hit_dists.hmin();
if hmin < f32::MAX {
let minmask = hit_dists.eq(WideF32::splat(hmin)).mask();
let min_idx = minmask.trailing_zeros() as usize;
let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) };
let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) };
let id = hit_ids_arr[min_idx] as usize;
let hit_dist = hit_dists_arr[min_idx];
let mat = &spheres.mats[id];
if bounces == 0 {
color += reflectance * mat.emit_color;
break;
} else {
bounces -= 1;
color += reflectance * mat.emit_color;
reflectance *= mat.reflect_color;
let hit_point = origin + dir * hit_dist;
origin = hit_point;
dir = match mat.t {
MaterialType::Specular => {
let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]);
let hit_normal = (hit_point - sp).normalize();
dir.reflect(hit_normal)
}
MaterialType::Diffuse => | {
let a = randf_range(rng_state, 0.0, 2.0 * PI);
let z = randf_range(rng_state, -1.0, 1.0);
let r = (1.0 - z * z).sqrt();
V3(r * a.cos(), r * a.sin(), z)
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.