file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.rs | // main struggle problems in this section were 11 and 18, and to some extent, 12 and 14. 17 was annoying to debug, but not hard.
extern crate timings_proc_macro;
use timings_proc_macro::timings;
#[timings]
fn e11() {
let s: Vec<usize> = std::fs::read_to_string("src/e11.txt")
.unwrap()
.split_whitespace()
.map(|n| n.parse::<usize>().unwrap())
.collect();
//println!("{:?}", s);
// could just run with s, but let's build our 2d array.
let mut v = [[0; 20]; 20];
(0..400).for_each(|i| v[i / 20][i % 20] = s[i]);
//println!("{:?}", v);
let mut big = 0;
use itertools::Itertools;
(0..20).cartesian_product(0..20).for_each(|(i, j)| {
if i < 17 {
// h_
let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j];
if temp > big {
// println!(
// "h_ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if j < 17 {
// v|
let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3];
if temp > big {
// println!(
// "v| new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if i < 17 && j < 17 {
// d\
let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3];
if temp > big {
// println!(
// "d\\ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j],
// );
big = temp
}
}
if i < 17 && j > 2 {
// d/
let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3];
if temp > big {
// println!(
// "d/ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
});
println!("biggest: {}", big);
}
// v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs
// 1. include_str!("grid.txt") I could be using this macro instead.
// 2. .filter_map(|n| n.parse().ok()), well isn't that sweet.
// 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though.
// What is the value of the first triangle number to have over five hundred divisors?
#[timings]
fn e12() {
// entire problem is "count divisors". Naive soln sucks. Derive a soln.
// Proposition. given X = p_1^a * p_2^b * ...,
// N_factors(X) = (a+1)(b+1)....
// now we only need to find the algebraic multiplicity of each prime divisor.
let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> {
let mut h = std::collections::HashMap::new();
let mut n = input;
while n % 2 == 0 {
let counter = h.entry(2).or_insert(0);
*counter += 1;
n /= 2;
}
let mut i = 3;
while n > 1 {
while n % i == 0 {
let counter = h.entry(i).or_insert(0);
*counter += 1;
n /= i;
}
i += 2;
}
h
};
let mut i = 1;
let mut sum = 0;
loop {
sum += i;
i += 1;
let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d));
//dbg!(sum, divisors);
if divisors > 500 {
println!("value: {}, the {}th triangle number", sum, i);
break;
}
}
}
#[timings]
fn e13() {
let s: Vec<String> = std::fs::read_to_string("src/e13.txt")
.unwrap()
.split_whitespace()
.map(|s| s.parse::<String>().unwrap())
.collect();
let s13: Vec<usize> = s
.iter()
.map(|l| l[..13].parse::<usize>().unwrap())
.collect();
let n = s13.iter().sum::<usize>().to_string();
println!("e13: {}", &n[..10]);
}
#[allow(dead_code)]
fn collatz(n: usize) -> usize {
match n % 2 {
0 => n / 2,
1 => 3 * n + 1,
_ => unreachable!(),
}
}
#[timings]
fn e14() {
use std::collections::HashMap;
let mut h = HashMap::new();
h.insert(1, 0);
let mut it_counter = 0;
let mut biggest = (0, 0);
for it in 2..1_000_000 {
if h.contains_key(&it) {
continue;
}
// Build a cache of values til we find a value we have seen
let mut next = collatz(it);
it_counter += 1;
let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1
while h.get(&next).is_none() {
it_counter += 1;
cache.push((next, it_counter));
next = collatz(next);
}
// the next value is now in the hashmap
let count_last = *h.get(&next).unwrap();
let count_for_it = count_last + it_counter;
//println!("it:{},count: {}", it, count_for_it);
for (n, c) in cache {
let count = count_for_it + 1 - c;
//println!("n:{},c: {}, count: {}", n, c, count);
h.insert(n, count);
}
it_counter = 0;
if count_for_it > biggest.0 {
biggest = (count_for_it, it);
}
}
println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1);
}
#[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs
fn e14_zach_denton() {
let mut collatz: Vec<usize> = vec![0; 1_000_000];
collatz[1] = 1;
let max = (2..collatz.len())
.max_by_key(|&i| {
let f = |n: usize| match n % 2 {
0 => n / 2,
_ => n * 3 + 1,
};
// og:
let (mut j, mut len) = (i, 0);
loop {
// exit if:
if j < collatz.len() && collatz[j] != 0 {
break;
}
len += 1;
j = f(j);
}
len += collatz[j];
collatz[i] = len;
len
})
.unwrap();
println!("{}", max);
}
// How many such (only move left or down) routes are there through a 20×20 grid?
#[timings]
fn e15() {
// basic combinatorics. of 40 positions, choose 20. Equally, the 20th Catalan.
let a: u128 = (21..=40).product();
let b: u128 = (2..=20).product();
println!("{}", a / b);
}
#[timings]
fn e16() {
// mostly, futzing with bigint.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let b = a.pow(1000);
//println!("{:?}", b);
// TFAE:
//let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap());
let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum();
println!("{:?}", res);
//let digits: num::BigInt = 2.pow(1000);
}
// If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings]
fn e17() {
let map = vec![
(0, 0),
(1, 3),
(2, 3),
(3, 5),
(4, 4),
(5, 4),
(6, 3),
(7, 5),
(8, 5),
(9, 4),
(10, 3),
(11, 6),
(12, 6),
(13, 8),
(14, 8),
(15, 7),
(16, 7),
(17, 9),
(18, 8),
(19, 8),
(20, 6),
(30, 6),
(40, 5),
(50, 5),
(60, 5),
(70, 7),
(80, 6),
(90, 6),
];
let h = std::collections::HashMap::from_iter(map.into_iter());
let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h));
println!("{}", res);
}
fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize {
let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10);
let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() };
let bb = if b == 1 {
*h.get(&(b * 10 + a)).unwrap()
} else {
*h.get(&(b * 10)).unwrap()
};
let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently
if c > 0 && aa == 0 && bb == 0 {
cc -= 3 // 100 doesn't have an "and"
};
let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 };
//println!("{}:{},{},{},{}", d, ee, cc, bb, aa);
aa + bb + cc + ee
}
// first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one.
// A couple possible approaches occur:
// naive: at each step, pick the greatest next value
// brute: calculate the value of all 2^14 paths, not hard
// pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means)
// This problem begs to be solved recursively somehow.
#[timings]
fn e18() { | /// traverse the triangle picking the greatest value at the next binary choice
#[allow(dead_code)]
fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
if t.is_empty() {
running_sum
} else {
let (rs, li) = if t[0][last_index] > t[0][last_index + 1] {
(t[0][last_index], last_index)
} else {
(t[0][last_index + 1], last_index + 1)
};
println!("append:{},{}", rs, li);
e18_naive_r(&t[1..], running_sum + rs, li)
}
}
// 18 minutes to try naively. Now let's try a little harder.
// let's try something with look ahead.
const PEEK_DIST: usize = 5;
/// traverse the triangle picking the greatest single step-PEEK_DIST-chain at each next binary choice
fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
if t.is_empty() {
running_sum
} else {
// need to peek here
let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]);
let (val, ind) = match dir {
Dir::Left => (t[0][last_index], last_index),
Dir::Right => (t[0][last_index + 1], last_index + 1),
};
//println!("append val:{}, ind:{}, path:{:?}", val, ind, _path);
e18_less_naive_r(&t[1..], running_sum + val, ind)
}
}
// if looking ahead 1 step, terminate, returning (running_sum, LEFT|RIGHT)
#[derive(Clone, Debug)]
enum Dir {
Left,
Right,
}
fn peek_ahead_r(
t: &[Vec<usize>],
running_sum: usize,
last_index: usize,
mut peek_dist: usize,
first_step: Option<Dir>,
/* debugging */ mut path: Vec<(usize, usize)>,
) -> (usize /* value */, Dir, Vec<(usize, usize)>) {
if peek_dist > t.len() {
peek_dist = t.len()
}
assert!(peek_dist > 0);
if peek_dist == 1 {
// if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG
if t[0][last_index] > t[0][last_index + 1] {
path.push((t[0][last_index], last_index));
(
t[0][last_index] + running_sum,
first_step.unwrap_or(Dir::Left),
path,
)
} else {
path.push((t[0][last_index + 1], last_index + 1));
(
t[0][last_index + 1] + running_sum,
first_step.unwrap_or(Dir::Right),
path,
)
}
} else {
let mut p_left = path.clone();
p_left.push((t[0][last_index], last_index));
let left = peek_ahead_r(
&t[1..],
running_sum + t[0][last_index],
last_index,
peek_dist - 1,
first_step.clone().unwrap_or(Dir::Left).into(),
p_left,
);
let mut p_right = path.clone();
p_right.push((t[0][last_index + 1], last_index + 1));
let right = peek_ahead_r(
&t[1..],
running_sum + t[0][last_index + 1],
last_index + 1,
peek_dist - 1,
first_step.unwrap_or(Dir::Right).into(),
p_right,
);
if left.0 > right.0 {
left
} else {
right
}
}
}
// How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
#[timings]
fn e19() {
// Sundays are uniformly distributed, with P(first is Sunday) = 1/7.
// How many first of the months were there? 12*100
println!("{}", 12.0 * 100.0 / 7.0);
}
// Can't win em all. But when ya do~
#[timings]
fn e20() {
// Find the sum of the digits in the number 100!
// would expect the number of digits to be roughly equiv to 50^100, which has about 150 digits, though there will of course be many zeroes, about 24. Still, it seems probably best to just shove it in a bigint. Anything more creative? 97 multiplications (2..99). Some theorem may exist about the sum of digits of the product of two numbers, could search for it. Meh, thought for 5 minutes, do the bigint thing.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let a = (3..=99).fold(a, |acc, i| acc * (i as u32));
let res = a
.to_string()
.chars()
.fold(0, |acc, i| acc + i.to_digit(10).unwrap());
println!("{:?}", res);
}
fn main() {
e11();
e12();
e13();
//e14();
e14_zach_denton();
e15();
e16();
e17();
e18();
e19();
e20();
}
|
let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt")
.unwrap()
.lines()
.map(|l| {
l.split_whitespace()
.into_iter()
.map(|n| n.parse::<usize>().unwrap())
.collect::<Vec<usize>>()
})
.collect();
let res = e18_less_naive_r(&triangle[1..], 75, 0);
println!("{}", res);
}
| identifier_body |
main.rs | // main struggle problems in this section were 11 and 18, and to some extent, 12 and 14. 17 was annoying to debug, but not hard.
extern crate timings_proc_macro;
use timings_proc_macro::timings;
#[timings]
fn e11() {
let s: Vec<usize> = std::fs::read_to_string("src/e11.txt")
.unwrap()
.split_whitespace()
.map(|n| n.parse::<usize>().unwrap())
.collect();
//println!("{:?}", s);
// could just run with s, but let's build our 2d array.
let mut v = [[0; 20]; 20];
(0..400).for_each(|i| v[i / 20][i % 20] = s[i]);
//println!("{:?}", v);
let mut big = 0;
use itertools::Itertools;
(0..20).cartesian_product(0..20).for_each(|(i, j)| {
if i < 17 {
// h_
let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j];
if temp > big {
// println!(
// "h_ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if j < 17 {
// v|
let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3];
if temp > big {
// println!(
// "v| new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if i < 17 && j < 17 {
// d\
let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3];
if temp > big {
// println!(
// "d\\ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j],
// );
big = temp
}
}
if i < 17 && j > 2 {
// d/
let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3];
if temp > big {
// println!(
// "d/ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
});
println!("biggest: {}", big);
}
// v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs
// 1. include_str!("grid.txt") I could be using this macro instead.
// 2. .filter_map(|n| n.parse().ok()), well isn't that sweet.
// 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though.
// What is the value of the first triangle number to have over five hundred divisors?
#[timings]
fn e12() {
// entire problem is "count divisors". Naive soln sucks. Derive a soln.
// Proposition. given X = p_1^a * p_2^b * ...,
// N_factors(X) = (a+1)(b+1)....
// now we only need to find the algebraic multiplicity of each prime divisor.
let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> {
let mut h = std::collections::HashMap::new();
let mut n = input;
while n % 2 == 0 {
let counter = h.entry(2).or_insert(0);
*counter += 1;
n /= 2;
}
let mut i = 3;
while n > 1 {
while n % i == 0 {
let counter = h.entry(i).or_insert(0);
*counter += 1;
n /= i;
}
i += 2;
}
h
};
let mut i = 1;
let mut sum = 0;
loop {
sum += i;
i += 1;
let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d));
//dbg!(sum, divisors);
if divisors > 500 {
println!("value: {}, the {}th triangle number", sum, i);
break;
}
}
}
#[timings]
fn e13() {
let s: Vec<String> = std::fs::read_to_string("src/e13.txt")
.unwrap()
.split_whitespace()
.map(|s| s.parse::<String>().unwrap())
.collect();
let s13: Vec<usize> = s
.iter()
.map(|l| l[..13].parse::<usize>().unwrap())
.collect();
let n = s13.iter().sum::<usize>().to_string();
println!("e13: {}", &n[..10]);
}
#[allow(dead_code)]
fn collatz(n: usize) -> usize {
match n % 2 {
0 => n / 2,
1 => 3 * n + 1,
_ => unreachable!(),
}
}
#[timings]
fn e14() {
use std::collections::HashMap;
let mut h = HashMap::new();
h.insert(1, 0);
let mut it_counter = 0;
let mut biggest = (0, 0);
for it in 2..1_000_000 {
if h.contains_key(&it) {
continue;
}
// Build a cache of values til we find a value we have seen
let mut next = collatz(it);
it_counter += 1;
let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1
while h.get(&next).is_none() {
it_counter += 1;
cache.push((next, it_counter));
next = collatz(next);
}
// the next value is now in the hashmap
let count_last = *h.get(&next).unwrap();
let count_for_it = count_last + it_counter;
//println!("it:{},count: {}", it, count_for_it);
for (n, c) in cache {
let count = count_for_it + 1 - c;
//println!("n:{},c: {}, count: {}", n, c, count);
h.insert(n, count);
}
it_counter = 0;
if count_for_it > biggest.0 {
biggest = (count_for_it, it);
}
}
println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1);
}
#[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs
fn e14_zach_denton() {
let mut collatz: Vec<usize> = vec![0; 1_000_000];
collatz[1] = 1;
let max = (2..collatz.len())
.max_by_key(|&i| {
let f = |n: usize| match n % 2 {
0 => n / 2,
_ => n * 3 + 1,
};
// og:
let (mut j, mut len) = (i, 0);
loop {
// exit if:
if j < collatz.len() && collatz[j] != 0 {
break;
}
len += 1;
j = f(j);
}
len += collatz[j];
collatz[i] = len;
len
})
.unwrap();
println!("{}", max);
}
// How many such (only move left or down) routes are there through a 20×20 grid?
#[timings]
fn e15() {
// basic combinatorics. of 40 positions, choose 20. Equally, the 20th Catalan.
let a: u128 = (21..=40).product();
let b: u128 = (2..=20).product();
println!("{}", a / b);
}
#[timings]
fn e16() {
// mostly, futzing with bigint.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let b = a.pow(1000);
//println!("{:?}", b);
// TFAE:
//let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap());
let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum();
println!("{:?}", res);
//let digits: num::BigInt = 2.pow(1000);
}
// If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings]
fn e17() {
let map = vec![
(0, 0),
(1, 3),
(2, 3),
(3, 5),
(4, 4),
(5, 4),
(6, 3),
(7, 5),
(8, 5),
(9, 4),
(10, 3),
(11, 6),
(12, 6),
(13, 8),
(14, 8),
(15, 7),
(16, 7),
(17, 9),
(18, 8),
(19, 8),
(20, 6),
(30, 6),
(40, 5),
(50, 5),
(60, 5),
(70, 7),
(80, 6),
(90, 6),
];
let h = std::collections::HashMap::from_iter(map.into_iter());
let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h));
println!("{}", res);
}
fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize {
let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10);
let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() };
let bb = if b == 1 {
*h.get(&(b * 10 + a)).unwrap()
} else {
*h.get(&(b * 10)).unwrap()
};
let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently
if c > 0 && aa == 0 && bb == 0 {
cc -= 3 // 100 doesn't have an "and"
};
let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 };
//println!("{}:{},{},{},{}", d, ee, cc, bb, aa);
aa + bb + cc + ee
}
// first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one.
// A couple possible approaches occur:
// naive: at each step, pick the greatest next value
// brute: calculate the value of all 2^14 paths, not hard
// pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means)
// This problem begs to be solved recursively somehow.
#[timings]
fn e18() {
let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt")
.unwrap()
.lines()
.map(|l| {
l.split_whitespace()
.into_iter()
.map(|n| n.parse::<usize>().unwrap())
.collect::<Vec<usize>>()
})
.collect();
let res = e18_less_naive_r(&triangle[1..], 75, 0);
println!("{}", res);
}
/// traverse the triangle picking the greatest value at the next binary choice
#[allow(dead_code)]
fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
if t.is_empty() {
running_sum
} else {
let (rs, li) = if t[0][last_index] > t[0][last_index + 1] {
(t[0][last_index], last_index)
} else {
(t[0][last_index + 1], last_index + 1)
};
println!("append:{},{}", rs, li);
e18_naive_r(&t[1..], running_sum + rs, li)
}
}
// 18 minutes to try naively. Now let's try a little harder.
// let's try something with look ahead.
const PEEK_DIST: usize = 5;
/// traverse the triangle picking the greatest single step-PEEK_DIST-chain at each next binary choice
fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
if t.is_empty() {
running_sum
} else {
// need to peek here
let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]);
let (val, ind) = match dir {
Dir::Left => (t[0][last_index], last_index),
Dir::Right => (t[0][last_index + 1], last_index + 1),
};
//println!("append val:{}, ind:{}, path:{:?}", val, ind, _path);
e18_less_naive_r(&t[1..], running_sum + val, ind)
}
}
// if looking ahead 1 step, terminate, returning (running_sum, LEFT|RIGHT)
#[derive(Clone, Debug)]
enum Dir {
Left,
Right,
}
fn peek_ahead_r(
t: &[Vec<usize>],
running_sum: usize,
last_index: usize,
mut peek_dist: usize,
first_step: Option<Dir>,
/* debugging */ mut path: Vec<(usize, usize)>,
) -> (usize /* value */, Dir, Vec<(usize, usize)>) {
if peek_dist > t.len() {
peek_dist = t.len()
}
assert!(peek_dist > 0);
if peek_dist == 1 { | else {
let mut p_left = path.clone();
p_left.push((t[0][last_index], last_index));
let left = peek_ahead_r(
&t[1..],
running_sum + t[0][last_index],
last_index,
peek_dist - 1,
first_step.clone().unwrap_or(Dir::Left).into(),
p_left,
);
let mut p_right = path.clone();
p_right.push((t[0][last_index + 1], last_index + 1));
let right = peek_ahead_r(
&t[1..],
running_sum + t[0][last_index + 1],
last_index + 1,
peek_dist - 1,
first_step.unwrap_or(Dir::Right).into(),
p_right,
);
if left.0 > right.0 {
left
} else {
right
}
}
}
// How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
#[timings]
fn e19() {
// Sundays are uniformly distributed, with P(first is Sunday) = 1/7.
// How many first of the months were there? 12*100
println!("{}", 12.0 * 100.0 / 7.0);
}
// Can't win em all. But when ya do~
#[timings]
fn e20() {
// Find the sum of the digits in the number 100!
// would expect the number of digits to be roughly equiv to 50^100, which has about 150 digits, though there will of course be many zeroes, about 24. Still, it seems probably best to just shove it in a bigint. Anything more creative? 97 multiplications (2..99). Some theorem may exist about the sum of digits of the product of two numbers, could search for it. Meh, thought for 5 minutes, do the bigint thing.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let a = (3..=99).fold(a, |acc, i| acc * (i as u32));
let res = a
.to_string()
.chars()
.fold(0, |acc, i| acc + i.to_digit(10).unwrap());
println!("{:?}", res);
}
fn main() {
e11();
e12();
e13();
//e14();
e14_zach_denton();
e15();
e16();
e17();
e18();
e19();
e20();
}
|
// if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG
if t[0][last_index] > t[0][last_index + 1] {
path.push((t[0][last_index], last_index));
(
t[0][last_index] + running_sum,
first_step.unwrap_or(Dir::Left),
path,
)
} else {
path.push((t[0][last_index + 1], last_index + 1));
(
t[0][last_index + 1] + running_sum,
first_step.unwrap_or(Dir::Right),
path,
)
}
} | conditional_block |
main.rs | // main struggle problems in this section were 11 and 18, and to some extent, 12 and 14. 17 was annoying to debug, but not hard.
extern crate timings_proc_macro;
use timings_proc_macro::timings;
#[timings]
fn e11() {
let s: Vec<usize> = std::fs::read_to_string("src/e11.txt")
.unwrap()
.split_whitespace()
.map(|n| n.parse::<usize>().unwrap())
.collect();
//println!("{:?}", s);
// could just run with s, but let's build our 2d array.
let mut v = [[0; 20]; 20];
(0..400).for_each(|i| v[i / 20][i % 20] = s[i]);
//println!("{:?}", v);
let mut big = 0;
use itertools::Itertools;
(0..20).cartesian_product(0..20).for_each(|(i, j)| {
if i < 17 {
// h_
let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j];
if temp > big {
// println!(
// "h_ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if j < 17 {
// v|
let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3];
if temp > big {
// println!(
// "v| new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if i < 17 && j < 17 {
// d\
let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3];
if temp > big {
// println!(
// "d\\ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j],
// );
big = temp
}
}
if i < 17 && j > 2 {
// d/
let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3];
if temp > big {
// println!(
// "d/ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
});
println!("biggest: {}", big);
}
// v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs
// 1. include_str!("grid.txt") I could be using this macro instead.
// 2. .filter_map(|n| n.parse().ok()), well isn't that sweet.
// 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though.
// What is the value of the first triangle number to have over five hundred divisors?
#[timings]
fn e12() {
// entire problem is "count divisors". Naive soln sucks. Derive a soln.
// Proposition. given X = p_1^a * p_2^b * ...,
// N_factors(X) = (a+1)(b+1)....
// now we only need to find the algebraic multiplicity of each prime divisor.
let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> {
let mut h = std::collections::HashMap::new();
let mut n = input;
while n % 2 == 0 {
let counter = h.entry(2).or_insert(0);
*counter += 1;
n /= 2;
}
let mut i = 3;
while n > 1 {
while n % i == 0 {
let counter = h.entry(i).or_insert(0);
*counter += 1;
n /= i;
}
i += 2;
}
h
};
let mut i = 1;
let mut sum = 0;
loop {
sum += i;
i += 1;
let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d));
//dbg!(sum, divisors);
if divisors > 500 {
println!("value: {}, the {}th triangle number", sum, i);
break; | }
}
}
#[timings]
fn e13() {
let s: Vec<String> = std::fs::read_to_string("src/e13.txt")
.unwrap()
.split_whitespace()
.map(|s| s.parse::<String>().unwrap())
.collect();
let s13: Vec<usize> = s
.iter()
.map(|l| l[..13].parse::<usize>().unwrap())
.collect();
let n = s13.iter().sum::<usize>().to_string();
println!("e13: {}", &n[..10]);
}
#[allow(dead_code)]
fn collatz(n: usize) -> usize {
match n % 2 {
0 => n / 2,
1 => 3 * n + 1,
_ => unreachable!(),
}
}
#[timings]
fn e14() {
use std::collections::HashMap;
let mut h = HashMap::new();
h.insert(1, 0);
let mut it_counter = 0;
let mut biggest = (0, 0);
for it in 2..1_000_000 {
if h.contains_key(&it) {
continue;
}
// Build a cache of values til we find a value we have seen
let mut next = collatz(it);
it_counter += 1;
let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1
while h.get(&next).is_none() {
it_counter += 1;
cache.push((next, it_counter));
next = collatz(next);
}
// the next value is now in the hashmap
let count_last = *h.get(&next).unwrap();
let count_for_it = count_last + it_counter;
//println!("it:{},count: {}", it, count_for_it);
for (n, c) in cache {
let count = count_for_it + 1 - c;
//println!("n:{},c: {}, count: {}", n, c, count);
h.insert(n, count);
}
it_counter = 0;
if count_for_it > biggest.0 {
biggest = (count_for_it, it);
}
}
println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1);
}
#[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs
fn e14_zach_denton() {
let mut collatz: Vec<usize> = vec![0; 1_000_000];
collatz[1] = 1;
let max = (2..collatz.len())
.max_by_key(|&i| {
let f = |n: usize| match n % 2 {
0 => n / 2,
_ => n * 3 + 1,
};
// og:
let (mut j, mut len) = (i, 0);
loop {
// exit if:
if j < collatz.len() && collatz[j] != 0 {
break;
}
len += 1;
j = f(j);
}
len += collatz[j];
collatz[i] = len;
len
})
.unwrap();
println!("{}", max);
}
// How many such (only move left or down) routes are there through a 20×20 grid?
#[timings]
fn e15() {
// basic combinatorics. of 40 positions, choose 20. Equally, the 20th Catalan.
let a: u128 = (21..=40).product();
let b: u128 = (2..=20).product();
println!("{}", a / b);
}
#[timings]
fn e16() {
// mostly, futzing with bigint.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let b = a.pow(1000);
//println!("{:?}", b);
// TFAE:
//let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap());
let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum();
println!("{:?}", res);
//let digits: num::BigInt = 2.pow(1000);
}
// If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings]
fn e17() {
let map = vec![
(0, 0),
(1, 3),
(2, 3),
(3, 5),
(4, 4),
(5, 4),
(6, 3),
(7, 5),
(8, 5),
(9, 4),
(10, 3),
(11, 6),
(12, 6),
(13, 8),
(14, 8),
(15, 7),
(16, 7),
(17, 9),
(18, 8),
(19, 8),
(20, 6),
(30, 6),
(40, 5),
(50, 5),
(60, 5),
(70, 7),
(80, 6),
(90, 6),
];
let h = std::collections::HashMap::from_iter(map.into_iter());
let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h));
println!("{}", res);
}
fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize {
let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10);
let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() };
let bb = if b == 1 {
*h.get(&(b * 10 + a)).unwrap()
} else {
*h.get(&(b * 10)).unwrap()
};
let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently
if c > 0 && aa == 0 && bb == 0 {
cc -= 3 // 100 doesn't have an "and"
};
let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 };
//println!("{}:{},{},{},{}", d, ee, cc, bb, aa);
aa + bb + cc + ee
}
// first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one.
// A couple possible approaches occur:
// naive: at each step, pick the greatest next value
// brute: calculate the value of all 2^14 paths, not hard
// pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means)
// This problem begs to be solved recursively somehow.
#[timings]
fn e18() {
let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt")
.unwrap()
.lines()
.map(|l| {
l.split_whitespace()
.into_iter()
.map(|n| n.parse::<usize>().unwrap())
.collect::<Vec<usize>>()
})
.collect();
let res = e18_less_naive_r(&triangle[1..], 75, 0);
println!("{}", res);
}
/// traverse the triangle picking the greatest value at the next binary choice
#[allow(dead_code)]
fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
if t.is_empty() {
running_sum
} else {
let (rs, li) = if t[0][last_index] > t[0][last_index + 1] {
(t[0][last_index], last_index)
} else {
(t[0][last_index + 1], last_index + 1)
};
println!("append:{},{}", rs, li);
e18_naive_r(&t[1..], running_sum + rs, li)
}
}
// 18 minutes to try naively. Now let's try a little harder.
// let's try something with look ahead.
const PEEK_DIST: usize = 5;
/// traverse the triangle picking the greatest single step-PEEK_DIST-chain at each next binary choice
fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
if t.is_empty() {
running_sum
} else {
// need to peek here
let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]);
let (val, ind) = match dir {
Dir::Left => (t[0][last_index], last_index),
Dir::Right => (t[0][last_index + 1], last_index + 1),
};
//println!("append val:{}, ind:{}, path:{:?}", val, ind, _path);
e18_less_naive_r(&t[1..], running_sum + val, ind)
}
}
// if looking ahead 1 step, terminate, returning (running_sum, LEFT|RIGHT)
#[derive(Clone, Debug)]
enum Dir {
Left,
Right,
}
fn peek_ahead_r(
t: &[Vec<usize>],
running_sum: usize,
last_index: usize,
mut peek_dist: usize,
first_step: Option<Dir>,
/* debugging */ mut path: Vec<(usize, usize)>,
) -> (usize /* value */, Dir, Vec<(usize, usize)>) {
if peek_dist > t.len() {
peek_dist = t.len()
}
assert!(peek_dist > 0);
if peek_dist == 1 {
// if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG
if t[0][last_index] > t[0][last_index + 1] {
path.push((t[0][last_index], last_index));
(
t[0][last_index] + running_sum,
first_step.unwrap_or(Dir::Left),
path,
)
} else {
path.push((t[0][last_index + 1], last_index + 1));
(
t[0][last_index + 1] + running_sum,
first_step.unwrap_or(Dir::Right),
path,
)
}
} else {
let mut p_left = path.clone();
p_left.push((t[0][last_index], last_index));
let left = peek_ahead_r(
&t[1..],
running_sum + t[0][last_index],
last_index,
peek_dist - 1,
first_step.clone().unwrap_or(Dir::Left).into(),
p_left,
);
let mut p_right = path.clone();
p_right.push((t[0][last_index + 1], last_index + 1));
let right = peek_ahead_r(
&t[1..],
running_sum + t[0][last_index + 1],
last_index + 1,
peek_dist - 1,
first_step.unwrap_or(Dir::Right).into(),
p_right,
);
if left.0 > right.0 {
left
} else {
right
}
}
}
// How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
#[timings]
fn e19() {
// Sundays are uniformly distributed, with P(first is Sunday) = 1/7.
// How many first of the months were there? 12*100
println!("{}", 12.0 * 100.0 / 7.0);
}
// Can't win em all. But when ya do~
#[timings]
fn e20() {
// Find the sum of the digits in the number 100!
// would expect the number of digits to be roughly equiv to 50^100, which has about 150 digits, though there will of course be many zeroes, about 24. Still, it seems probably best to just shove it in a bigint. Anything more creative? 97 multiplications (2..99). Some theorem may exist about the sum of digits of the product of two numbers, could search for it. Meh, thought for 5 minutes, do the bigint thing.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let a = (3..=99).fold(a, |acc, i| acc * (i as u32));
let res = a
.to_string()
.chars()
.fold(0, |acc, i| acc + i.to_digit(10).unwrap());
println!("{:?}", res);
}
fn main() {
e11();
e12();
e13();
//e14();
e14_zach_denton();
e15();
e16();
e17();
e18();
e19();
e20();
} | random_line_split | |
common.js | /*
* Module with common functions from all modules
* */
voiceBase = (function(VB, $) {
"use strict";
if (!Object.keys) {
Object.keys = (function() {
var hasOwnProperty = Object.prototype.hasOwnProperty,
hasDontEnumBug = !({toString: null}).propertyIsEnumerable('toString'),
dontEnums = [
'toString',
'toLocaleString',
'valueOf',
'hasOwnProperty',
'isPrototypeOf',
'propertyIsEnumerable',
'constructor'
],
dontEnumsLength = dontEnums.length;
return function(obj) {
if (typeof obj !== 'object' && (typeof obj !== 'function' || obj === null)) {
throw new TypeError('Object.keys called on non-object');
}
var result = [], prop, i;
for (prop in obj) {
if (hasOwnProperty.call(obj, prop)) {
result.push(prop);
}
}
if (hasDontEnumBug) {
for (i = 0; i < dontEnumsLength; i++) {
if (hasOwnProperty.call(obj, dontEnums[i])) {
result.push(dontEnums[i]);
}
}
}
return result;
};
}());
}
String.prototype.padLeft = function(total) {
return new Array(total - this.length + 1).join('0') + this;
};
// Extends
jQuery.extend(jQuery.expr[':'], {
"wordtime": function(element, i, match, elements) {
var value = parseFloat($(element).attr('t'));
var minMaxValues = match[3].split(/\s?,\s?/);
var minValue = parseFloat(minMaxValues[0]);
var maxValue = parseFloat(minMaxValues[1]);
return !isNaN(value) && !isNaN(minValue) && !isNaN(maxValue) && value <= maxValue && value >= minValue;
}
});
VB.common = {
getStringFromObject: function (obj) {
return Object.keys(obj).map(function (key) {
return encodeURIComponent(key) + '=' + encodeURIComponent(obj[key]);
}).join('&');
},
inArrayV: function(sarray, needle) {
for (var iss in sarray) {
if (sarray[iss] == needle)
return true;
}
return false;
},
findTermInArray: function (words, term) {
var isFind = false;
for (var i = 0; i < words.length; i++) {
var word = words[i];
word = VB.helper.replaceTrimAndLower(word);
if(word === term) {
isFind = true;
break;
}
}
return isFind;
},
keysToLowerCase: function(obj) {
var keys = Object.keys(obj);
var n = keys.length;
while (n--) {
var key = keys[n]; // "cache" it, for less lookups to the array
if (key !== key.toLowerCase()) { // might already be in its lower case version
obj[key.toLowerCase()] = obj[key]; // swap the value to a new lower case key
delete obj[key]; // delete the old key
}
}
return (obj);
},
vbmenus: function(event, type, elem) {
var copy = typeof $.fn.zclip !== 'undefined';
var share = typeof addthis !== 'undefined';
if (copy === false && share === false && VB.settings.editKeywords === false || VB.settings.contextMenu === false && type !== 'keyword') {
return false;
}
event.preventDefault();
var newparam = {};
var kwGroup = $(elem).parents('ul').hasClass('group');
if (type === 'timeline') {
var played;
if (event.target.localName == 'ins') {
played = $(event.target).parent().attr('stime');
} else {
var x = (event.offsetX || event.clientX - $(event.target).offset().left);
played = Math.round(VB.data.duration * (x + event.target.offsetLeft) / VB.helper.find(".vbs-record-timeline-wrap").width());
}
newparam['vbt'] = played;
var $voice_search_txt = $('#vbs-voice_search_txt');
if ($voice_search_txt.val().length) {
newparam['vbs'] = encodeURI($voice_search_txt.val());
}
} else if (type == 'keyword') {
var keyword = $(elem).data("keywordInternalName");
if (keyword.match(/\s+/g)) {
keyword = '"' + keyword + '"';
}
newparam['vbs'] = encodeURI(keyword);
} else if (type == 'transcript') {
var transcript = $(elem).text();
transcript = encodeURI(transcript);
newparam['vbs'] = transcript;
}
$("ul.vbs-vbcmenu").remove();
var url = VB.helper.getNewUrl(newparam);
var menu = $("<ul class='vbs-vbcmenu'></ul>");
if (copy && VB.settings.contextMenu) {
menu.append('<li id="vbc_url"><a href="#">Copy URL</a></li>');
}
if (share && VB.settings.contextMenu) {
menu.append('<li id="vbc_share"><a class="addthis_button_expanded addthis_default_style" addthis:url="' + url + '" addthis:title="Check out">Share</a></li>');
}
if (type == 'keyword' && VB.settings.editKeywords && !kwGroup) {
var $elem = $(elem);
var editmenu = '<span class="vbs-keyword_controls">';
if ($elem.parent().prev().length) {
editmenu += '<span class="vbs-vbe vbs-voicebase_first" title="Move to Top">Move to Top</span>' +
'<span class="vbs-vbe vbs-voicebase_up" title="Move up">Move up</span>';
}
if ($elem.parent().next().length) {
editmenu += '<span class="vbs-vbe vbs-voicebase_down" title="Move down">Move down</span>';
}
editmenu += '<span class="vbs-vbe vbs-voicebase_remove" title="Remove">Remove</span>' +
'</span>';
var $editmenu = $(editmenu);
$editmenu.data('keywordInternalName', $elem.data("keywordInternalName"));
var $li = $('<li id="vbc_move"></li>');
$li.append($editmenu);
menu.append($li);
}
menu.appendTo("body");
var $menu = $('.vbs-vbcmenu');
var pos = VB.view.getPositionElementForTooltip($(elem));
if($menu.height() + event.pageY < document.body.clientHeight){
$menu.css({
top: pos.top + $(elem).height() + "px",
left: pos.left + pos.width / 2 + "px"
});
}
else{
$menu.css({
top: (pos.top - $(elem).height() - $menu.height()) + "px",
left: pos.left + pos.width / 2 + "px"
});
}
if (copy) |
if (share) {
addthis.toolbox("#vbc_share");
}
},
vbEditMenu: function(event, elem) {
var $this = $(elem);
var $editWrapper = $('.vbs-edit-mode-wrapper');
$("ul.vbs-vbcmenu").remove();
$editWrapper.find('.vbs-menu-target').removeClass('vbs-menu-target');
$this.addClass('vbs-menu-target');
var stime = $this.attr('t') / 1000;
var stimep = stime > 1 ? stime - 1 : stime;
var menu = '';
menu += '<li><a href="#" class="vbsc-edit-play" data-time="'+ stimep +'">Play</a></li>';
if(!$this.hasClass('vbs-edit-speaker') && !$this.prev().hasClass('vbs-edit-speaker')){
menu += '<li><a href="#" class="vbsc-edit-speaker" data-time="'+ stime * 1000 +'">Insert Speaker</a></li>';
}
if($this.hasClass('vbs-edit-speaker')){
var speakerKey = $this.attr('m') || '';
menu += '<li><a href="#" class="vbsc-rename-speaker" data-speaker-key="'+ speakerKey +'">Rename Speaker</a></li>';
}
$editWrapper.append("<ul class='vbs-vbcmenu'>" + menu + "</ul>");
var $menu = $('.vbs-vbcmenu');
var coordY = event.clientY + $editWrapper.scrollTop();
if($menu.height() + event.clientY < document.body.clientHeight){
$menu.css({
top: coordY + "px",
left: event.pageX + "px"
});
}
else{
if($(elem).find('br').length > 0) {
coordY += 15 * $(elem).find('br').length;
}
$menu.css({
top: (coordY - $menu.height() - $this.height()) + "px",
left: event.pageX + "px"
});
}
},
uniqueArray: function(array){
array = array ? array : [];
var unique_array = {};
for (var i = 0; i < array.length; i++) {
unique_array[array[i]] = true;
}
array = Object.keys(unique_array);
return array;
},
hidePopup: function($popup){
$popup.fadeOut('fast', function(){
$(this).remove();
});
},
unEscapeHtml: function(phrase) {
return phrase
.replace(/>/g,'>')
.replace(/</g,'<')
.replace(/"/g,'"')
.replace(/&lt;/g, "<")
.replace(/&gt;/g, ">");
}
};
return VB;
})(voiceBase, jQuery); | {
$("#vbc_url").find('a').zclip({
path: VB.settings.zeroclipboard,
copy: url
});
} | conditional_block |
common.js | /*
* Module with common functions from all modules
* */
voiceBase = (function(VB, $) {
"use strict";
if (!Object.keys) {
Object.keys = (function() {
var hasOwnProperty = Object.prototype.hasOwnProperty,
hasDontEnumBug = !({toString: null}).propertyIsEnumerable('toString'),
dontEnums = [
'toString',
'toLocaleString',
'valueOf',
'hasOwnProperty',
'isPrototypeOf',
'propertyIsEnumerable',
'constructor'
],
dontEnumsLength = dontEnums.length;
return function(obj) {
if (typeof obj !== 'object' && (typeof obj !== 'function' || obj === null)) {
throw new TypeError('Object.keys called on non-object');
}
var result = [], prop, i;
for (prop in obj) {
if (hasOwnProperty.call(obj, prop)) {
result.push(prop);
}
}
if (hasDontEnumBug) {
for (i = 0; i < dontEnumsLength; i++) {
if (hasOwnProperty.call(obj, dontEnums[i])) {
result.push(dontEnums[i]);
}
}
}
return result;
};
}());
}
String.prototype.padLeft = function(total) {
return new Array(total - this.length + 1).join('0') + this;
};
// Extends
jQuery.extend(jQuery.expr[':'], {
"wordtime": function(element, i, match, elements) {
var value = parseFloat($(element).attr('t'));
var minMaxValues = match[3].split(/\s?,\s?/);
var minValue = parseFloat(minMaxValues[0]);
var maxValue = parseFloat(minMaxValues[1]);
return !isNaN(value) && !isNaN(minValue) && !isNaN(maxValue) && value <= maxValue && value >= minValue;
}
});
VB.common = {
getStringFromObject: function (obj) {
return Object.keys(obj).map(function (key) {
return encodeURIComponent(key) + '=' + encodeURIComponent(obj[key]);
}).join('&');
},
inArrayV: function(sarray, needle) {
for (var iss in sarray) {
if (sarray[iss] == needle)
return true;
}
return false;
},
findTermInArray: function (words, term) {
var isFind = false;
for (var i = 0; i < words.length; i++) {
var word = words[i];
word = VB.helper.replaceTrimAndLower(word);
if(word === term) {
isFind = true;
break;
}
}
return isFind;
},
keysToLowerCase: function(obj) {
var keys = Object.keys(obj);
var n = keys.length;
while (n--) {
var key = keys[n]; // "cache" it, for less lookups to the array
if (key !== key.toLowerCase()) { // might already be in its lower case version
obj[key.toLowerCase()] = obj[key]; // swap the value to a new lower case key
delete obj[key]; // delete the old key
}
}
return (obj);
},
vbmenus: function(event, type, elem) {
var copy = typeof $.fn.zclip !== 'undefined';
var share = typeof addthis !== 'undefined';
if (copy === false && share === false && VB.settings.editKeywords === false || VB.settings.contextMenu === false && type !== 'keyword') {
return false;
}
event.preventDefault();
var newparam = {};
var kwGroup = $(elem).parents('ul').hasClass('group');
if (type === 'timeline') {
var played;
if (event.target.localName == 'ins') {
played = $(event.target).parent().attr('stime');
} else {
var x = (event.offsetX || event.clientX - $(event.target).offset().left);
played = Math.round(VB.data.duration * (x + event.target.offsetLeft) / VB.helper.find(".vbs-record-timeline-wrap").width());
}
newparam['vbt'] = played;
var $voice_search_txt = $('#vbs-voice_search_txt');
if ($voice_search_txt.val().length) {
newparam['vbs'] = encodeURI($voice_search_txt.val());
}
} else if (type == 'keyword') {
var keyword = $(elem).data("keywordInternalName");
if (keyword.match(/\s+/g)) {
keyword = '"' + keyword + '"';
}
newparam['vbs'] = encodeURI(keyword);
} else if (type == 'transcript') {
var transcript = $(elem).text();
transcript = encodeURI(transcript);
newparam['vbs'] = transcript;
}
$("ul.vbs-vbcmenu").remove();
var url = VB.helper.getNewUrl(newparam);
var menu = $("<ul class='vbs-vbcmenu'></ul>");
if (copy && VB.settings.contextMenu) {
menu.append('<li id="vbc_url"><a href="#">Copy URL</a></li>');
}
if (share && VB.settings.contextMenu) {
menu.append('<li id="vbc_share"><a class="addthis_button_expanded addthis_default_style" addthis:url="' + url + '" addthis:title="Check out">Share</a></li>');
}
if (type == 'keyword' && VB.settings.editKeywords && !kwGroup) {
var $elem = $(elem);
var editmenu = '<span class="vbs-keyword_controls">';
if ($elem.parent().prev().length) {
editmenu += '<span class="vbs-vbe vbs-voicebase_first" title="Move to Top">Move to Top</span>' +
'<span class="vbs-vbe vbs-voicebase_up" title="Move up">Move up</span>';
}
if ($elem.parent().next().length) {
editmenu += '<span class="vbs-vbe vbs-voicebase_down" title="Move down">Move down</span>';
}
editmenu += '<span class="vbs-vbe vbs-voicebase_remove" title="Remove">Remove</span>' +
'</span>';
var $editmenu = $(editmenu);
$editmenu.data('keywordInternalName', $elem.data("keywordInternalName"));
var $li = $('<li id="vbc_move"></li>');
$li.append($editmenu);
menu.append($li);
}
menu.appendTo("body");
var $menu = $('.vbs-vbcmenu');
var pos = VB.view.getPositionElementForTooltip($(elem));
if($menu.height() + event.pageY < document.body.clientHeight){
$menu.css({
top: pos.top + $(elem).height() + "px",
left: pos.left + pos.width / 2 + "px"
});
}
else{
$menu.css({
top: (pos.top - $(elem).height() - $menu.height()) + "px",
left: pos.left + pos.width / 2 + "px"
});
}
if (copy) {
$("#vbc_url").find('a').zclip({
path: VB.settings.zeroclipboard,
copy: url
});
}
if (share) {
addthis.toolbox("#vbc_share");
}
},
vbEditMenu: function(event, elem) {
var $this = $(elem);
var $editWrapper = $('.vbs-edit-mode-wrapper');
$("ul.vbs-vbcmenu").remove();
$editWrapper.find('.vbs-menu-target').removeClass('vbs-menu-target');
$this.addClass('vbs-menu-target');
var stime = $this.attr('t') / 1000;
var stimep = stime > 1 ? stime - 1 : stime;
var menu = '';
menu += '<li><a href="#" class="vbsc-edit-play" data-time="'+ stimep +'">Play</a></li>';
if(!$this.hasClass('vbs-edit-speaker') && !$this.prev().hasClass('vbs-edit-speaker')){
menu += '<li><a href="#" class="vbsc-edit-speaker" data-time="'+ stime * 1000 +'">Insert Speaker</a></li>';
}
if($this.hasClass('vbs-edit-speaker')){
var speakerKey = $this.attr('m') || '';
menu += '<li><a href="#" class="vbsc-rename-speaker" data-speaker-key="'+ speakerKey +'">Rename Speaker</a></li>';
}
$editWrapper.append("<ul class='vbs-vbcmenu'>" + menu + "</ul>");
var $menu = $('.vbs-vbcmenu');
var coordY = event.clientY + $editWrapper.scrollTop();
if($menu.height() + event.clientY < document.body.clientHeight){
$menu.css({
top: coordY + "px",
left: event.pageX + "px"
});
}
else{
if($(elem).find('br').length > 0) {
coordY += 15 * $(elem).find('br').length;
}
$menu.css({
top: (coordY - $menu.height() - $this.height()) + "px",
left: event.pageX + "px"
});
}
},
uniqueArray: function(array){
array = array ? array : []; | array = Object.keys(unique_array);
return array;
},
hidePopup: function($popup){
$popup.fadeOut('fast', function(){
$(this).remove();
});
},
unEscapeHtml: function(phrase) {
return phrase
.replace(/>/g,'>')
.replace(/</g,'<')
.replace(/"/g,'"')
.replace(/&lt;/g, "<")
.replace(/&gt;/g, ">");
}
};
return VB;
})(voiceBase, jQuery); | var unique_array = {};
for (var i = 0; i < array.length; i++) {
unique_array[array[i]] = true;
} | random_line_split |
japari-bun-catch.js | import {
APP_WIDTH, APP_HEIGHT, TILE_SIZE, DIRECTIONS,
TIME_BETWEEN_BUNS, ROWS_FOR_BUNS, COLUMNS_FOR_BUNS,
STARTING_LIVES, MINIMUM_PAUSE_DURATION,
FONT_FAMILY,
} from './constants'
import { fillTextWithShadow } from './utility'
import ImageAsset from './image-asset'
import LuckyBeast from './entities/lucky-beast'
import Friend from './entities/friend'
import Bun from './entities/bun'
const searchParams = new URLSearchParams(window.location.search)
const DEBUG = searchParams.get('debug') || false
class JapariBunCatch {
constructor () {
this.html = {
main: document.getElementById('main'),
canvas: document.getElementById('canvas'),
menu: document.getElementById('menu'),
buttonHome: document.getElementById('button-home'),
buttonReload: document.getElementById('button-reload'),
buttonLeft: document.getElementById('button-left'),
buttonRight: document.getElementById('button-right'),
}
this.canvas2d = this.html.canvas.getContext('2d')
this.canvasWidth = APP_WIDTH
this.canvasHeight = APP_HEIGHT
this.menu = false
this.setMenu(false)
this.setupUI()
this.initialised = false
this.assets = {
background: new ImageAsset('assets/background.jpg'),
basket: new ImageAsset('assets/basket.png'),
bun: new ImageAsset('assets/bun.png'),
friends: new ImageAsset('assets/friends.png'),
luckyBeast: new ImageAsset('assets/lucky-beast.png'),
}
this.luckyBeast = null
this.friend = null
this.entities = []
this.lives = 0
this.score = 0
this.difficulty = 0
this.timeToNextBun = 0
this.paused = false // Game is paused when a bun drops to the floor. Pausing due to the menu being open is dictated by this.menu
this.pauseTimer = 0 // When the game is paused, it stays paused for a short amount of time.
this.prevTime = null
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
initialisationCheck () {
// Assets check
let allAssetsLoaded = true
let numLoadedAssets = 0
let numTotalAssets = 0
Object.keys(this.assets).forEach((id) => {
const asset = this.assets[id]
allAssetsLoaded = allAssetsLoaded && asset.loaded
if (asset.loaded) numLoadedAssets++
numTotalAssets++
})
// Paint status
this.canvas2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
this.canvas2d.textAlign = 'start'
this.canvas2d.textBaseline = 'top'
this.canvas2d.fillStyle = '#ccc'
this.canvas2d.font = `1em ${FONT_FAMILY}`
this.canvas2d.fillText(`Loading ${numLoadedAssets} / ${numTotalAssets} `, TILE_SIZE, TILE_SIZE)
if (allAssetsLoaded) {
this.initialised = true
this.showUI()
this.startGame()
}
}
/*
Section: General Logic
----------------------------------------------------------------------------
*/
main (time) {
const timeStep = (this.prevTime) ? time - this.prevTime : time
this.prevTime = time
if (this.initialised) {
this.play(timeStep)
this.paint()
} else {
this.initialisationCheck()
}
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
play (timeStep) {
// If the menu is open, pause all action gameplay
if (this.menu) return
// If game is paused (as a result of losing a life), pause all action gameplay, of course
if (this.paused) {
this.pauseTimer = Math.max(0, this.pauseTimer - timeStep)
return
}
// Run entity logic
this.entities.forEach(entity => entity.play(timeStep))
// Spawn a new bun
this.timeToNextBun -= timeStep
if (this.timeToNextBun <= 0) |
// Cleanup
this.entities = this.entities.filter(entity => !entity._expired)
}
paint () {
const c2d = this.canvas2d
c2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
// ----------------
// Draw background
// ----------------
if (this.assets.background) {
const BACKGROUND_SIZE_X = 800
const BACKGROUND_SIZE_Y = 500
c2d.drawImage(this.assets.background.img, 0, 0, BACKGROUND_SIZE_X, BACKGROUND_SIZE_Y, 0, 0, APP_WIDTH, APP_HEIGHT)
}
// ----------------
// Draw grid
// ----------------
/*
c2d.strokeStyle = 'rgba(128, 128, 128, 0.05)'
c2d.lineWidth = 2
const offsetX = 0
const offsetY = 0
for (let y = offsetY ; y < APP_HEIGHT ; y += TILE_SIZE) {
for (let x = offsetX ; x < APP_WIDTH ; x += TILE_SIZE) {
c2d.beginPath()
c2d.rect(x, y, TILE_SIZE, TILE_SIZE)
c2d.closePath()
c2d.stroke()
}
}
*/
// ----------------
// Draw entities
// ----------------
const MAX_LAYER = 2
for (let layer = 0 ; layer < MAX_LAYER ; layer++) {
this.entities.forEach(entity => entity.paint(layer))
}
// ----------------
// Draw pause overlay (indicating a bun just dropped to the floor)
// ----------------
if (this.paused) {
const PAUSE_OFFSET = 20
c2d.fillStyle = 'rgba(255, 255, 255, 0.5)'
c2d.beginPath()
c2d.rect(0, 0, APP_WIDTH, APP_HEIGHT)
c2d.closePath()
c2d.fill()
c2d.textAlign = 'center'
c2d.textBaseline = 'middle'
c2d.fillStyle = '#000'
if (this.lives > 0) {
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('No problem, let\'s try again!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText('大丈夫、もう一度やってみよう!', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
} else {
c2d.font = `1.5em ${FONT_FAMILY}`
c2d.fillText('Good job! おめでとう!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText(this.score + ' すごい', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
}
}
// ----------------
const SCREEN_EDGE_OFFSET = 20
const SHADOW_X = 2
const SHADOW_Y = 1
// Draw UI data: score
// ----------------
c2d.textAlign = 'right'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, this.score + ' すごい', APP_WIDTH - SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET)
// ----------------
// Draw UI data: lives
// ----------------
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, 'LUCKY BEAST ' + '❤'.repeat(this.lives), SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET, '#ee4444')
// ----------------
// Draw UI data: difficulty
// ----------------
const DIFFICULTY_OFFSET = SCREEN_EDGE_OFFSET + 40
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.fillStyle = '#444'
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('⭐'.repeat(this.difficulty), SCREEN_EDGE_OFFSET, DIFFICULTY_OFFSET)
// ----------------
}
/*
Section: UI and Event Handling
----------------------------------------------------------------------------
*/
setupUI () {
this.html.canvas.width = this.canvasWidth
this.html.canvas.height = this.canvasHeight
// Prevent "touch and hold to open context menu" menu on touchscreens.
this.html.canvas.addEventListener('touchstart', stopEvent)
this.html.canvas.addEventListener('touchmove', stopEvent)
this.html.canvas.addEventListener('touchend', stopEvent)
this.html.canvas.addEventListener('touchcancel', stopEvent)
this.html.buttonHome.addEventListener('click', this.buttonHome_onClick.bind(this))
this.html.buttonReload.addEventListener('click', this.buttonReload_onClick.bind(this))
this.html.buttonLeft.addEventListener('click', this.buttonLeft_onClick.bind(this))
this.html.buttonRight.addEventListener('click', this.buttonRight_onClick.bind(this))
this.html.main.addEventListener('keydown', this.onKeyDown.bind(this))
window.addEventListener('resize', this.updateUI.bind(this))
this.updateUI()
this.hideUI() // Hide until all assets are loaded
this.html.main.focus()
}
hideUI () {
this.html.buttonHome.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
}
showUI () {
this.html.buttonHome.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
}
updateUI () {
// Fit the Interaction layer to the canvas
const mainDivBounds = this.html.main.getBoundingClientRect()
const canvasBounds = this.html.canvas.getBoundingClientRect()
this.html.menu.style.width = `${canvasBounds.width}px`
this.html.menu.style.height = `${canvasBounds.height}px`
this.html.menu.style.top = `${canvasBounds.top - mainDivBounds.top}px`
this.html.menu.style.left = `${canvasBounds.left}px`
}
setMenu (menu) {
this.menu = menu
if (menu) {
this.html.menu.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
} else {
this.html.menu.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
this.html.main.focus()
}
}
onKeyDown (e) {
if (this.luckyBeast) {
switch (e.key) {
case 'Escape':
this.setMenu(!this.menu)
break
case 'R':
case 'r':
this.startGame()
break
case 'ArrowRight':
this.moveLuckyBeast(DIRECTIONS.EAST)
return stopEvent(e)
break
case 'ArrowLeft':
this.moveLuckyBeast(DIRECTIONS.WEST)
return stopEvent(e)
break
}
}
}
buttonHome_onClick () {
this.setMenu(!this.menu)
}
buttonReload_onClick () {
this.startGame()
this.html.main.focus()
}
buttonLeft_onClick () {
this.moveLuckyBeast(DIRECTIONS.WEST)
}
buttonRight_onClick () {
this.moveLuckyBeast(DIRECTIONS.EAST)
}
/*
Section: Gameplay
----------------------------------------------------------------------------
*/
/*
Start the game. Triggers when game loads, or reloads.
*/
startGame (resetScore = true) {
if (resetScore) {
this.lives = STARTING_LIVES
this.score = 0
}
this.difficulty = 0
this.entities = []
this.luckyBeast = new LuckyBeast(this)
this.entities.push(this.luckyBeast)
this.friend = new Friend(this)
this.entities.push(this.friend)
this.timeToNextBun = TIME_BETWEEN_BUNS
this.paused = false
}
/*
Stop the game after dropping a bun (losing a life).
*/
stopGame () {
if (this.paused) return // Don't trigger this more than once
this.lives = Math.max(0, this.lives - 1)
this.paused = true
this.pauseTimer = MINIMUM_PAUSE_DURATION
}
/*
Continue the game after game is paused.
*/
continueGame () {
if (this.pauseTimer > 0) return
if (this.lives > 0) {
this.startGame(false)
}
}
increaseScore (score) {
this.score += score
}
/*
Difficulty increases every time Lucky Beast delivers buns
*/
increaseDifficulty () {
this.difficulty++
}
moveLuckyBeast (direction) {
if (this.menu) return
if (this.paused) {
this.continueGame()
return
}
this.luckyBeast.move(direction)
}
}
function stopEvent (e) {
if (!e) return false
e.preventDefault && e.preventDefault()
e.stopPropagation && e.stopPropagation()
e.returnValue = false
e.cancelBubble = true
return false
}
export default JapariBunCatch
| {
const DIFFICULTY_MODIFIER = 0.2
const timeToBun = TIME_BETWEEN_BUNS / (1 + this.difficulty * DIFFICULTY_MODIFIER)
this.timeToNextBun += timeToBun
const newCol = Math.floor(Math.random() * COLUMNS_FOR_BUNS)
const newBun = new Bun(this, newCol, this.difficulty)
this.entities.push(newBun)
} | conditional_block |
japari-bun-catch.js | import {
APP_WIDTH, APP_HEIGHT, TILE_SIZE, DIRECTIONS,
TIME_BETWEEN_BUNS, ROWS_FOR_BUNS, COLUMNS_FOR_BUNS,
STARTING_LIVES, MINIMUM_PAUSE_DURATION,
FONT_FAMILY,
} from './constants'
import { fillTextWithShadow } from './utility'
import ImageAsset from './image-asset'
import LuckyBeast from './entities/lucky-beast'
import Friend from './entities/friend'
import Bun from './entities/bun'
const searchParams = new URLSearchParams(window.location.search)
const DEBUG = searchParams.get('debug') || false
class JapariBunCatch {
constructor () {
this.html = {
main: document.getElementById('main'),
canvas: document.getElementById('canvas'),
menu: document.getElementById('menu'),
buttonHome: document.getElementById('button-home'),
buttonReload: document.getElementById('button-reload'),
buttonLeft: document.getElementById('button-left'),
buttonRight: document.getElementById('button-right'),
}
this.canvas2d = this.html.canvas.getContext('2d')
this.canvasWidth = APP_WIDTH
this.canvasHeight = APP_HEIGHT
this.menu = false
this.setMenu(false)
this.setupUI()
this.initialised = false
this.assets = {
background: new ImageAsset('assets/background.jpg'),
basket: new ImageAsset('assets/basket.png'),
bun: new ImageAsset('assets/bun.png'),
friends: new ImageAsset('assets/friends.png'),
luckyBeast: new ImageAsset('assets/lucky-beast.png'),
}
this.luckyBeast = null
this.friend = null
this.entities = []
this.lives = 0
this.score = 0
this.difficulty = 0
this.timeToNextBun = 0
this.paused = false // Game is paused when a bun drops to the floor. Pausing due to the menu being open is dictated by this.menu
this.pauseTimer = 0 // When the game is paused, it stays paused for a short amount of time.
this.prevTime = null
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
initialisationCheck () {
// Assets check
let allAssetsLoaded = true
let numLoadedAssets = 0
let numTotalAssets = 0
Object.keys(this.assets).forEach((id) => {
const asset = this.assets[id]
allAssetsLoaded = allAssetsLoaded && asset.loaded
if (asset.loaded) numLoadedAssets++
numTotalAssets++
})
// Paint status
this.canvas2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
this.canvas2d.textAlign = 'start'
this.canvas2d.textBaseline = 'top'
this.canvas2d.fillStyle = '#ccc'
this.canvas2d.font = `1em ${FONT_FAMILY}`
this.canvas2d.fillText(`Loading ${numLoadedAssets} / ${numTotalAssets} `, TILE_SIZE, TILE_SIZE)
if (allAssetsLoaded) {
this.initialised = true
this.showUI()
this.startGame()
}
}
/*
Section: General Logic
----------------------------------------------------------------------------
*/
main (time) {
const timeStep = (this.prevTime) ? time - this.prevTime : time
this.prevTime = time
if (this.initialised) {
this.play(timeStep)
this.paint()
} else {
this.initialisationCheck()
}
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
play (timeStep) {
// If the menu is open, pause all action gameplay
if (this.menu) return
// If game is paused (as a result of losing a life), pause all action gameplay, of course
if (this.paused) {
this.pauseTimer = Math.max(0, this.pauseTimer - timeStep)
return
}
// Run entity logic
this.entities.forEach(entity => entity.play(timeStep))
// Spawn a new bun
this.timeToNextBun -= timeStep
if (this.timeToNextBun <= 0) {
const DIFFICULTY_MODIFIER = 0.2
const timeToBun = TIME_BETWEEN_BUNS / (1 + this.difficulty * DIFFICULTY_MODIFIER)
this.timeToNextBun += timeToBun
const newCol = Math.floor(Math.random() * COLUMNS_FOR_BUNS)
const newBun = new Bun(this, newCol, this.difficulty)
this.entities.push(newBun)
}
// Cleanup
this.entities = this.entities.filter(entity => !entity._expired)
}
paint () | --------------------------------------------------------------
*/
setupUI () {
this.html.canvas.width = this.canvasWidth
this.html.canvas.height = this.canvasHeight
// Prevent "touch and hold to open context menu" menu on touchscreens.
this.html.canvas.addEventListener('touchstart', stopEvent)
this.html.canvas.addEventListener('touchmove', stopEvent)
this.html.canvas.addEventListener('touchend', stopEvent)
this.html.canvas.addEventListener('touchcancel', stopEvent)
this.html.buttonHome.addEventListener('click', this.buttonHome_onClick.bind(this))
this.html.buttonReload.addEventListener('click', this.buttonReload_onClick.bind(this))
this.html.buttonLeft.addEventListener('click', this.buttonLeft_onClick.bind(this))
this.html.buttonRight.addEventListener('click', this.buttonRight_onClick.bind(this))
this.html.main.addEventListener('keydown', this.onKeyDown.bind(this))
window.addEventListener('resize', this.updateUI.bind(this))
this.updateUI()
this.hideUI() // Hide until all assets are loaded
this.html.main.focus()
}
hideUI () {
this.html.buttonHome.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
}
showUI () {
this.html.buttonHome.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
}
updateUI () {
// Fit the Interaction layer to the canvas
const mainDivBounds = this.html.main.getBoundingClientRect()
const canvasBounds = this.html.canvas.getBoundingClientRect()
this.html.menu.style.width = `${canvasBounds.width}px`
this.html.menu.style.height = `${canvasBounds.height}px`
this.html.menu.style.top = `${canvasBounds.top - mainDivBounds.top}px`
this.html.menu.style.left = `${canvasBounds.left}px`
}
setMenu (menu) {
this.menu = menu
if (menu) {
this.html.menu.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
} else {
this.html.menu.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
this.html.main.focus()
}
}
onKeyDown (e) {
if (this.luckyBeast) {
switch (e.key) {
case 'Escape':
this.setMenu(!this.menu)
break
case 'R':
case 'r':
this.startGame()
break
case 'ArrowRight':
this.moveLuckyBeast(DIRECTIONS.EAST)
return stopEvent(e)
break
case 'ArrowLeft':
this.moveLuckyBeast(DIRECTIONS.WEST)
return stopEvent(e)
break
}
}
}
buttonHome_onClick () {
this.setMenu(!this.menu)
}
buttonReload_onClick () {
this.startGame()
this.html.main.focus()
}
buttonLeft_onClick () {
this.moveLuckyBeast(DIRECTIONS.WEST)
}
buttonRight_onClick () {
this.moveLuckyBeast(DIRECTIONS.EAST)
}
/*
Section: Gameplay
----------------------------------------------------------------------------
*/
/*
Start the game. Triggers when game loads, or reloads.
*/
startGame (resetScore = true) {
if (resetScore) {
this.lives = STARTING_LIVES
this.score = 0
}
this.difficulty = 0
this.entities = []
this.luckyBeast = new LuckyBeast(this)
this.entities.push(this.luckyBeast)
this.friend = new Friend(this)
this.entities.push(this.friend)
this.timeToNextBun = TIME_BETWEEN_BUNS
this.paused = false
}
/*
Stop the game after dropping a bun (losing a life).
*/
stopGame () {
if (this.paused) return // Don't trigger this more than once
this.lives = Math.max(0, this.lives - 1)
this.paused = true
this.pauseTimer = MINIMUM_PAUSE_DURATION
}
/*
Continue the game after game is paused.
*/
continueGame () {
if (this.pauseTimer > 0) return
if (this.lives > 0) {
this.startGame(false)
}
}
increaseScore (score) {
this.score += score
}
/*
Difficulty increases every time Lucky Beast delivers buns
*/
increaseDifficulty () {
this.difficulty++
}
moveLuckyBeast (direction) {
if (this.menu) return
if (this.paused) {
this.continueGame()
return
}
this.luckyBeast.move(direction)
}
}
function stopEvent (e) {
if (!e) return false
e.preventDefault && e.preventDefault()
e.stopPropagation && e.stopPropagation()
e.returnValue = false
e.cancelBubble = true
return false
}
export default JapariBunCatch
| {
const c2d = this.canvas2d
c2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
// ----------------
// Draw background
// ----------------
if (this.assets.background) {
const BACKGROUND_SIZE_X = 800
const BACKGROUND_SIZE_Y = 500
c2d.drawImage(this.assets.background.img, 0, 0, BACKGROUND_SIZE_X, BACKGROUND_SIZE_Y, 0, 0, APP_WIDTH, APP_HEIGHT)
}
// ----------------
// Draw grid
// ----------------
/*
c2d.strokeStyle = 'rgba(128, 128, 128, 0.05)'
c2d.lineWidth = 2
const offsetX = 0
const offsetY = 0
for (let y = offsetY ; y < APP_HEIGHT ; y += TILE_SIZE) {
for (let x = offsetX ; x < APP_WIDTH ; x += TILE_SIZE) {
c2d.beginPath()
c2d.rect(x, y, TILE_SIZE, TILE_SIZE)
c2d.closePath()
c2d.stroke()
}
}
*/
// ----------------
// Draw entities
// ----------------
const MAX_LAYER = 2
for (let layer = 0 ; layer < MAX_LAYER ; layer++) {
this.entities.forEach(entity => entity.paint(layer))
}
// ----------------
// Draw pause overlay (indicating a bun just dropped to the floor)
// ----------------
if (this.paused) {
const PAUSE_OFFSET = 20
c2d.fillStyle = 'rgba(255, 255, 255, 0.5)'
c2d.beginPath()
c2d.rect(0, 0, APP_WIDTH, APP_HEIGHT)
c2d.closePath()
c2d.fill()
c2d.textAlign = 'center'
c2d.textBaseline = 'middle'
c2d.fillStyle = '#000'
if (this.lives > 0) {
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('No problem, let\'s try again!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText('大丈夫、もう一度やってみよう!', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
} else {
c2d.font = `1.5em ${FONT_FAMILY}`
c2d.fillText('Good job! おめでとう!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText(this.score + ' すごい', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
}
}
// ----------------
const SCREEN_EDGE_OFFSET = 20
const SHADOW_X = 2
const SHADOW_Y = 1
// Draw UI data: score
// ----------------
c2d.textAlign = 'right'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, this.score + ' すごい', APP_WIDTH - SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET)
// ----------------
// Draw UI data: lives
// ----------------
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, 'LUCKY BEAST ' + '❤'.repeat(this.lives), SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET, '#ee4444')
// ----------------
// Draw UI data: difficulty
// ----------------
const DIFFICULTY_OFFSET = SCREEN_EDGE_OFFSET + 40
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.fillStyle = '#444'
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('⭐'.repeat(this.difficulty), SCREEN_EDGE_OFFSET, DIFFICULTY_OFFSET)
// ----------------
}
/*
Section: UI and Event Handling
-------------- | identifier_body |
japari-bun-catch.js | import {
APP_WIDTH, APP_HEIGHT, TILE_SIZE, DIRECTIONS,
TIME_BETWEEN_BUNS, ROWS_FOR_BUNS, COLUMNS_FOR_BUNS,
STARTING_LIVES, MINIMUM_PAUSE_DURATION,
FONT_FAMILY,
} from './constants'
import { fillTextWithShadow } from './utility'
import ImageAsset from './image-asset'
import LuckyBeast from './entities/lucky-beast'
import Friend from './entities/friend'
import Bun from './entities/bun'
const searchParams = new URLSearchParams(window.location.search)
const DEBUG = searchParams.get('debug') || false
class JapariBunCatch {
constructor () {
this.html = {
main: document.getElementById('main'),
canvas: document.getElementById('canvas'),
menu: document.getElementById('menu'),
buttonHome: document.getElementById('button-home'),
buttonReload: document.getElementById('button-reload'),
buttonLeft: document.getElementById('button-left'),
buttonRight: document.getElementById('button-right'),
}
this.canvas2d = this.html.canvas.getContext('2d')
this.canvasWidth = APP_WIDTH
this.canvasHeight = APP_HEIGHT
this.menu = false
this.setMenu(false)
this.setupUI()
this.initialised = false
this.assets = {
background: new ImageAsset('assets/background.jpg'),
basket: new ImageAsset('assets/basket.png'),
bun: new ImageAsset('assets/bun.png'),
friends: new ImageAsset('assets/friends.png'),
luckyBeast: new ImageAsset('assets/lucky-beast.png'),
}
this.luckyBeast = null
this.friend = null
this.entities = []
this.lives = 0
this.score = 0
this.difficulty = 0
this.timeToNextBun = 0
this.paused = false // Game is paused when a bun drops to the floor. Pausing due to the menu being open is dictated by this.menu
this.pauseTimer = 0 // When the game is paused, it stays paused for a short amount of time.
this.prevTime = null
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
initialisationCheck () {
// Assets check
let allAssetsLoaded = true
let numLoadedAssets = 0
let numTotalAssets = 0
Object.keys(this.assets).forEach((id) => {
const asset = this.assets[id]
allAssetsLoaded = allAssetsLoaded && asset.loaded
if (asset.loaded) numLoadedAssets++
numTotalAssets++
})
// Paint status
this.canvas2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
this.canvas2d.textAlign = 'start'
this.canvas2d.textBaseline = 'top'
this.canvas2d.fillStyle = '#ccc'
this.canvas2d.font = `1em ${FONT_FAMILY}`
this.canvas2d.fillText(`Loading ${numLoadedAssets} / ${numTotalAssets} `, TILE_SIZE, TILE_SIZE)
| }
}
/*
Section: General Logic
----------------------------------------------------------------------------
*/
main (time) {
const timeStep = (this.prevTime) ? time - this.prevTime : time
this.prevTime = time
if (this.initialised) {
this.play(timeStep)
this.paint()
} else {
this.initialisationCheck()
}
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
play (timeStep) {
// If the menu is open, pause all action gameplay
if (this.menu) return
// If game is paused (as a result of losing a life), pause all action gameplay, of course
if (this.paused) {
this.pauseTimer = Math.max(0, this.pauseTimer - timeStep)
return
}
// Run entity logic
this.entities.forEach(entity => entity.play(timeStep))
// Spawn a new bun
this.timeToNextBun -= timeStep
if (this.timeToNextBun <= 0) {
const DIFFICULTY_MODIFIER = 0.2
const timeToBun = TIME_BETWEEN_BUNS / (1 + this.difficulty * DIFFICULTY_MODIFIER)
this.timeToNextBun += timeToBun
const newCol = Math.floor(Math.random() * COLUMNS_FOR_BUNS)
const newBun = new Bun(this, newCol, this.difficulty)
this.entities.push(newBun)
}
// Cleanup
this.entities = this.entities.filter(entity => !entity._expired)
}
paint () {
const c2d = this.canvas2d
c2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
// ----------------
// Draw background
// ----------------
if (this.assets.background) {
const BACKGROUND_SIZE_X = 800
const BACKGROUND_SIZE_Y = 500
c2d.drawImage(this.assets.background.img, 0, 0, BACKGROUND_SIZE_X, BACKGROUND_SIZE_Y, 0, 0, APP_WIDTH, APP_HEIGHT)
}
// ----------------
// Draw grid
// ----------------
/*
c2d.strokeStyle = 'rgba(128, 128, 128, 0.05)'
c2d.lineWidth = 2
const offsetX = 0
const offsetY = 0
for (let y = offsetY ; y < APP_HEIGHT ; y += TILE_SIZE) {
for (let x = offsetX ; x < APP_WIDTH ; x += TILE_SIZE) {
c2d.beginPath()
c2d.rect(x, y, TILE_SIZE, TILE_SIZE)
c2d.closePath()
c2d.stroke()
}
}
*/
// ----------------
// Draw entities
// ----------------
const MAX_LAYER = 2
for (let layer = 0 ; layer < MAX_LAYER ; layer++) {
this.entities.forEach(entity => entity.paint(layer))
}
// ----------------
// Draw pause overlay (indicating a bun just dropped to the floor)
// ----------------
if (this.paused) {
const PAUSE_OFFSET = 20
c2d.fillStyle = 'rgba(255, 255, 255, 0.5)'
c2d.beginPath()
c2d.rect(0, 0, APP_WIDTH, APP_HEIGHT)
c2d.closePath()
c2d.fill()
c2d.textAlign = 'center'
c2d.textBaseline = 'middle'
c2d.fillStyle = '#000'
if (this.lives > 0) {
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('No problem, let\'s try again!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText('大丈夫、もう一度やってみよう!', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
} else {
c2d.font = `1.5em ${FONT_FAMILY}`
c2d.fillText('Good job! おめでとう!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText(this.score + ' すごい', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
}
}
// ----------------
const SCREEN_EDGE_OFFSET = 20
const SHADOW_X = 2
const SHADOW_Y = 1
// Draw UI data: score
// ----------------
c2d.textAlign = 'right'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, this.score + ' すごい', APP_WIDTH - SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET)
// ----------------
// Draw UI data: lives
// ----------------
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, 'LUCKY BEAST ' + '❤'.repeat(this.lives), SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET, '#ee4444')
// ----------------
// Draw UI data: difficulty
// ----------------
const DIFFICULTY_OFFSET = SCREEN_EDGE_OFFSET + 40
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.fillStyle = '#444'
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('⭐'.repeat(this.difficulty), SCREEN_EDGE_OFFSET, DIFFICULTY_OFFSET)
// ----------------
}
/*
Section: UI and Event Handling
----------------------------------------------------------------------------
*/
setupUI () {
this.html.canvas.width = this.canvasWidth
this.html.canvas.height = this.canvasHeight
// Prevent "touch and hold to open context menu" menu on touchscreens.
this.html.canvas.addEventListener('touchstart', stopEvent)
this.html.canvas.addEventListener('touchmove', stopEvent)
this.html.canvas.addEventListener('touchend', stopEvent)
this.html.canvas.addEventListener('touchcancel', stopEvent)
this.html.buttonHome.addEventListener('click', this.buttonHome_onClick.bind(this))
this.html.buttonReload.addEventListener('click', this.buttonReload_onClick.bind(this))
this.html.buttonLeft.addEventListener('click', this.buttonLeft_onClick.bind(this))
this.html.buttonRight.addEventListener('click', this.buttonRight_onClick.bind(this))
this.html.main.addEventListener('keydown', this.onKeyDown.bind(this))
window.addEventListener('resize', this.updateUI.bind(this))
this.updateUI()
this.hideUI() // Hide until all assets are loaded
this.html.main.focus()
}
hideUI () {
this.html.buttonHome.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
}
showUI () {
this.html.buttonHome.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
}
updateUI () {
// Fit the Interaction layer to the canvas
const mainDivBounds = this.html.main.getBoundingClientRect()
const canvasBounds = this.html.canvas.getBoundingClientRect()
this.html.menu.style.width = `${canvasBounds.width}px`
this.html.menu.style.height = `${canvasBounds.height}px`
this.html.menu.style.top = `${canvasBounds.top - mainDivBounds.top}px`
this.html.menu.style.left = `${canvasBounds.left}px`
}
setMenu (menu) {
this.menu = menu
if (menu) {
this.html.menu.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
} else {
this.html.menu.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
this.html.main.focus()
}
}
onKeyDown (e) {
if (this.luckyBeast) {
switch (e.key) {
case 'Escape':
this.setMenu(!this.menu)
break
case 'R':
case 'r':
this.startGame()
break
case 'ArrowRight':
this.moveLuckyBeast(DIRECTIONS.EAST)
return stopEvent(e)
break
case 'ArrowLeft':
this.moveLuckyBeast(DIRECTIONS.WEST)
return stopEvent(e)
break
}
}
}
buttonHome_onClick () {
this.setMenu(!this.menu)
}
buttonReload_onClick () {
this.startGame()
this.html.main.focus()
}
buttonLeft_onClick () {
this.moveLuckyBeast(DIRECTIONS.WEST)
}
buttonRight_onClick () {
this.moveLuckyBeast(DIRECTIONS.EAST)
}
/*
Section: Gameplay
----------------------------------------------------------------------------
*/
/*
Start the game. Triggers when game loads, or reloads.
*/
startGame (resetScore = true) {
if (resetScore) {
this.lives = STARTING_LIVES
this.score = 0
}
this.difficulty = 0
this.entities = []
this.luckyBeast = new LuckyBeast(this)
this.entities.push(this.luckyBeast)
this.friend = new Friend(this)
this.entities.push(this.friend)
this.timeToNextBun = TIME_BETWEEN_BUNS
this.paused = false
}
/*
Stop the game after dropping a bun (losing a life).
*/
stopGame () {
if (this.paused) return // Don't trigger this more than once
this.lives = Math.max(0, this.lives - 1)
this.paused = true
this.pauseTimer = MINIMUM_PAUSE_DURATION
}
/*
Continue the game after game is paused.
*/
continueGame () {
if (this.pauseTimer > 0) return
if (this.lives > 0) {
this.startGame(false)
}
}
increaseScore (score) {
this.score += score
}
/*
Difficulty increases every time Lucky Beast delivers buns
*/
increaseDifficulty () {
this.difficulty++
}
moveLuckyBeast (direction) {
if (this.menu) return
if (this.paused) {
this.continueGame()
return
}
this.luckyBeast.move(direction)
}
}
function stopEvent (e) {
if (!e) return false
e.preventDefault && e.preventDefault()
e.stopPropagation && e.stopPropagation()
e.returnValue = false
e.cancelBubble = true
return false
}
export default JapariBunCatch | if (allAssetsLoaded) {
this.initialised = true
this.showUI()
this.startGame() | random_line_split |
japari-bun-catch.js | import {
APP_WIDTH, APP_HEIGHT, TILE_SIZE, DIRECTIONS,
TIME_BETWEEN_BUNS, ROWS_FOR_BUNS, COLUMNS_FOR_BUNS,
STARTING_LIVES, MINIMUM_PAUSE_DURATION,
FONT_FAMILY,
} from './constants'
import { fillTextWithShadow } from './utility'
import ImageAsset from './image-asset'
import LuckyBeast from './entities/lucky-beast'
import Friend from './entities/friend'
import Bun from './entities/bun'
const searchParams = new URLSearchParams(window.location.search)
const DEBUG = searchParams.get('debug') || false
class JapariBunCatch {
constructor () {
this.html = {
main: document.getElementById('main'),
canvas: document.getElementById('canvas'),
menu: document.getElementById('menu'),
buttonHome: document.getElementById('button-home'),
buttonReload: document.getElementById('button-reload'),
buttonLeft: document.getElementById('button-left'),
buttonRight: document.getElementById('button-right'),
}
this.canvas2d = this.html.canvas.getContext('2d')
this.canvasWidth = APP_WIDTH
this.canvasHeight = APP_HEIGHT
this.menu = false
this.setMenu(false)
this.setupUI()
this.initialised = false
this.assets = {
background: new ImageAsset('assets/background.jpg'),
basket: new ImageAsset('assets/basket.png'),
bun: new ImageAsset('assets/bun.png'),
friends: new ImageAsset('assets/friends.png'),
luckyBeast: new ImageAsset('assets/lucky-beast.png'),
}
this.luckyBeast = null
this.friend = null
this.entities = []
this.lives = 0
this.score = 0
this.difficulty = 0
this.timeToNextBun = 0
this.paused = false // Game is paused when a bun drops to the floor. Pausing due to the menu being open is dictated by this.menu
this.pauseTimer = 0 // When the game is paused, it stays paused for a short amount of time.
this.prevTime = null
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
initialisationCheck () {
// Assets check
let allAssetsLoaded = true
let numLoadedAssets = 0
let numTotalAssets = 0
Object.keys(this.assets).forEach((id) => {
const asset = this.assets[id]
allAssetsLoaded = allAssetsLoaded && asset.loaded
if (asset.loaded) numLoadedAssets++
numTotalAssets++
})
// Paint status
this.canvas2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
this.canvas2d.textAlign = 'start'
this.canvas2d.textBaseline = 'top'
this.canvas2d.fillStyle = '#ccc'
this.canvas2d.font = `1em ${FONT_FAMILY}`
this.canvas2d.fillText(`Loading ${numLoadedAssets} / ${numTotalAssets} `, TILE_SIZE, TILE_SIZE)
if (allAssetsLoaded) {
this.initialised = true
this.showUI()
this.startGame()
}
}
/*
Section: General Logic
----------------------------------------------------------------------------
*/
main (time) {
const timeStep = (this.prevTime) ? time - this.prevTime : time
this.prevTime = time
if (this.initialised) {
this.play(timeStep)
this.paint()
} else {
this.initialisationCheck()
}
this.nextFrame = window.requestAnimationFrame(this.main.bind(this))
}
play (timeStep) {
// If the menu is open, pause all action gameplay
if (this.menu) return
// If game is paused (as a result of losing a life), pause all action gameplay, of course
if (this.paused) {
this.pauseTimer = Math.max(0, this.pauseTimer - timeStep)
return
}
// Run entity logic
this.entities.forEach(entity => entity.play(timeStep))
// Spawn a new bun
this.timeToNextBun -= timeStep
if (this.timeToNextBun <= 0) {
const DIFFICULTY_MODIFIER = 0.2
const timeToBun = TIME_BETWEEN_BUNS / (1 + this.difficulty * DIFFICULTY_MODIFIER)
this.timeToNextBun += timeToBun
const newCol = Math.floor(Math.random() * COLUMNS_FOR_BUNS)
const newBun = new Bun(this, newCol, this.difficulty)
this.entities.push(newBun)
}
// Cleanup
this.entities = this.entities.filter(entity => !entity._expired)
}
paint () {
const c2d = this.canvas2d
c2d.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
// ----------------
// Draw background
// ----------------
if (this.assets.background) {
const BACKGROUND_SIZE_X = 800
const BACKGROUND_SIZE_Y = 500
c2d.drawImage(this.assets.background.img, 0, 0, BACKGROUND_SIZE_X, BACKGROUND_SIZE_Y, 0, 0, APP_WIDTH, APP_HEIGHT)
}
// ----------------
// Draw grid
// ----------------
/*
c2d.strokeStyle = 'rgba(128, 128, 128, 0.05)'
c2d.lineWidth = 2
const offsetX = 0
const offsetY = 0
for (let y = offsetY ; y < APP_HEIGHT ; y += TILE_SIZE) {
for (let x = offsetX ; x < APP_WIDTH ; x += TILE_SIZE) {
c2d.beginPath()
c2d.rect(x, y, TILE_SIZE, TILE_SIZE)
c2d.closePath()
c2d.stroke()
}
}
*/
// ----------------
// Draw entities
// ----------------
const MAX_LAYER = 2
for (let layer = 0 ; layer < MAX_LAYER ; layer++) {
this.entities.forEach(entity => entity.paint(layer))
}
// ----------------
// Draw pause overlay (indicating a bun just dropped to the floor)
// ----------------
if (this.paused) {
const PAUSE_OFFSET = 20
c2d.fillStyle = 'rgba(255, 255, 255, 0.5)'
c2d.beginPath()
c2d.rect(0, 0, APP_WIDTH, APP_HEIGHT)
c2d.closePath()
c2d.fill()
c2d.textAlign = 'center'
c2d.textBaseline = 'middle'
c2d.fillStyle = '#000'
if (this.lives > 0) {
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('No problem, let\'s try again!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText('大丈夫、もう一度やってみよう!', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
} else {
c2d.font = `1.5em ${FONT_FAMILY}`
c2d.fillText('Good job! おめでとう!', APP_WIDTH / 2, APP_HEIGHT / 2 - PAUSE_OFFSET)
c2d.fillText(this.score + ' すごい', APP_WIDTH / 2, APP_HEIGHT / 2 + PAUSE_OFFSET)
}
}
// ----------------
const SCREEN_EDGE_OFFSET = 20
const SHADOW_X = 2
const SHADOW_Y = 1
// Draw UI data: score
// ----------------
c2d.textAlign = 'right'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, this.score + ' すごい', APP_WIDTH - SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET)
// ----------------
// Draw UI data: lives
// ----------------
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.font = `1.5em ${FONT_FAMILY}`
fillTextWithShadow(c2d, 'LUCKY BEAST ' + '❤'.repeat(this.lives), SCREEN_EDGE_OFFSET, SCREEN_EDGE_OFFSET, '#ee4444')
// ----------------
// Draw UI data: difficulty
// ----------------
const DIFFICULTY_OFFSET = SCREEN_EDGE_OFFSET + 40
c2d.textAlign = 'left'
c2d.textBaseline = 'top'
c2d.fillStyle = '#444'
c2d.font = `1em ${FONT_FAMILY}`
c2d.fillText('⭐'.repeat(this.difficulty), SCREEN_EDGE_OFFSET, DIFFICULTY_OFFSET)
// ----------------
}
/*
Section: UI and Event Handling
----------------------------------------------------------------------------
*/
setupUI () {
this.html.canvas.width = this.canvasWidth
this.html.canvas.height = this.canvasHeight
// Prevent "touch and hold to open context menu" menu on touchscreens.
this.html.canvas.addEventListener('touchstart', stopEvent)
this.html.canvas.addEventListener('touchmove', stopEvent)
this.html.canvas.addEventListener('touchend', stopEvent)
this.html.canvas.addEventListener('touchcancel', stopEvent)
this.html.buttonHome.addEventListener('click', this.buttonHome_onClick.bind(this))
this.html.buttonReload.addEventListener('click', this.buttonReload_onClick.bind(this))
this.html.buttonLeft.addEventListener('click', this.buttonLeft_onClick.bind(this))
this.html.buttonRight.addEventListener('click', this.buttonRight_onClick.bind(this))
this.html.main.addEventListener('keydown', this.onKeyDown.bind(this))
window.addEventListener('resize', this.updateUI.bind(this))
this.updateUI()
this.hideUI() // Hide until all assets are loaded
this.html.main.focus()
}
hideUI () {
this.html.buttonHome.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
}
showUI () {
this.html.buttonHome.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
}
updateUI () {
// Fit the Interaction layer to the canvas
const mainDivBounds = this.html.main.getBoundingClientRect()
const canvasBounds = this.html.canvas.getBoundingClientRect()
this.html.menu.style.width = `${canvasBounds.width}px`
this.html.menu.style.height = `${canvasBounds.height}px`
this.html.menu.style.top = `${canvasBounds.top - mainDivBounds.top}px`
this.html.menu.style.left = `${canvasBounds.left}px`
}
setMenu (menu) {
this.menu = menu
if (menu) {
this.html.menu.style.visibility = 'visible'
this.html.buttonReload.style.visibility = 'hidden'
this.html.buttonLeft.style.visibility = 'hidden'
this.html.buttonRight.style.visibility = 'hidden'
} else {
this.html.menu.style.visibility = 'hidden'
this.html.buttonReload.style.visibility = 'visible'
this.html.buttonLeft.style.visibility = 'visible'
this.html.buttonRight.style.visibility = 'visible'
this.html.main.focus()
}
}
onKeyDown (e) {
if (this.luckyBeast) {
switch (e.key) {
case 'Escape':
this.setMenu(!this.menu)
break
case 'R':
case 'r':
this.startGame()
break
case 'ArrowRight':
this.moveLuckyBeast(DIRECTIONS.EAST)
return stopEvent(e)
break
case 'ArrowLeft':
this.moveLuckyBeast(DIRECTIONS.WEST)
return stopEvent(e)
break
}
}
}
buttonHome_onClick () {
this.setMenu(!this.menu)
}
buttonReload_onClick () {
this.startGame()
this.html.main.focus()
}
buttonLeft_onClick () {
this.moveLuckyBeast(DIRECTIONS.WEST)
}
buttonRight_onClick () {
this.moveLuckyBeast(DIRECTION |
Section: Gameplay
----------------------------------------------------------------------------
*/
/*
Start the game. Triggers when game loads, or reloads.
*/
startGame (resetScore = true) {
if (resetScore) {
this.lives = STARTING_LIVES
this.score = 0
}
this.difficulty = 0
this.entities = []
this.luckyBeast = new LuckyBeast(this)
this.entities.push(this.luckyBeast)
this.friend = new Friend(this)
this.entities.push(this.friend)
this.timeToNextBun = TIME_BETWEEN_BUNS
this.paused = false
}
/*
Stop the game after dropping a bun (losing a life).
*/
stopGame () {
if (this.paused) return // Don't trigger this more than once
this.lives = Math.max(0, this.lives - 1)
this.paused = true
this.pauseTimer = MINIMUM_PAUSE_DURATION
}
/*
Continue the game after game is paused.
*/
continueGame () {
if (this.pauseTimer > 0) return
if (this.lives > 0) {
this.startGame(false)
}
}
increaseScore (score) {
this.score += score
}
/*
Difficulty increases every time Lucky Beast delivers buns
*/
increaseDifficulty () {
this.difficulty++
}
moveLuckyBeast (direction) {
if (this.menu) return
if (this.paused) {
this.continueGame()
return
}
this.luckyBeast.move(direction)
}
}
function stopEvent (e) {
if (!e) return false
e.preventDefault && e.preventDefault()
e.stopPropagation && e.stopPropagation()
e.returnValue = false
e.cancelBubble = true
return false
}
export default JapariBunCatch
| S.EAST)
}
/* | identifier_name |
anime-face-detector.py | import sys
import time
import numpy as np
import cv2
from PIL import Image
import ailia
# import original modules
sys.path.append('../../util')
from arg_utils import get_base_parser, update_parser, get_savepath # noqa: E402
from model_utils import check_and_download_models # noqa: E402
from detector_utils import load_image # noqa: E402C
from image_utils import normalize_image # noqa: E402C
from webcamera_utils import get_capture, get_writer # noqa: E402
# logger
from logging import getLogger # noqa: E402
from post_transforms_utils import flip_back, get_affine_transform
from top_down_utils import keypoints_from_heatmaps
from visual_utils import visualize
logger = getLogger(__name__)
# ======================
# Parameters
# ======================
WEIGHT_YOLOV3_PATH = 'anime-face_yolov3.onnx'
MODEL_YOLOV3_PATH = 'anime-face_yolov3.onnx.prototxt'
WEIGHT_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx'
MODEL_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx.prototxt'
WEIGHT_LANDMARK_PATH = 'anime-face_hrnetv2.onnx'
MODEL_LANDMARK_PATH = 'anime-face_hrnetv2.onnx.prototxt'
REMOTE_PATH = \
'https://storage.googleapis.com/ailia-models/anime-face-detector/'
IMAGE_PATH = 'input.jpg'
SAVE_IMAGE_PATH = 'output.png'
IMAGE_YOLOV3_HEIGHT = IMAGE_YOLOV3_WIDTH = 608
IMAGE_FASTERRCNN_HEIGHT = 800
IMAGE_FASTERRCNN_WIDTH = 1333
LANDMARK_SCORE_THRESHOLD = 0.3
SHOW_BOX_SCORE = True
DRAW_CONTOUR = True
SKIP_CONTOUR_WITH_LOW_SCORE = True
# ======================
# Arguemnt Parser Config
# ======================
parser = get_base_parser(
'Anime Face Detector', IMAGE_PATH, SAVE_IMAGE_PATH
)
parser.add_argument(
'-d', '--detector', default='yolov3', choices=('yolov3', 'faster-rcnn'),
help='face detector model.'
)
args = update_parser(parser)
# ======================
# Secondaty Functions
# ======================
def update_pred_box(pred_boxes):
box_scale_factor = 1.1
boxes = []
for pred_box in pred_boxes:
box = pred_box[:4]
size = box[2:] - box[:2] + 1
new_size = size * box_scale_factor
center = (box[:2] + box[2:]) / 2
tl = center - new_size / 2
br = tl + new_size
pred_box[:4] = np.concatenate([tl, br])
boxes.append(pred_box)
return boxes
def xyxy2xywh(bbox_xyxy):
"""Transform the bbox format from x1y1x2y2 to xywh.
Args:
bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
"""
bbox_xywh = bbox_xyxy.copy()
bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + 1
bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + 1
return bbox_xywh
# ======================
# Main functions
# ======================
def preprocess(img, resize_shape):
h, w = resize_shape
im_h, im_w, _ = img.shape
# adaptive_resize
scale = min(h / im_h, w / im_w)
ow, oh = int(im_w * scale), int(im_h * scale)
if ow != im_w or oh != im_h:
_img = cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
img = np.zeros((h, w, 3), dtype=np.uint8)
ph, pw = (h - oh) // 2, (w - ow) // 2
img[ph: ph + oh, pw: pw + ow] = _img
else:
ph = pw = 0
img = img[:, :, ::-1] # GBR -> RGB
img = img / 255
img = img.transpose(2, 0, 1) # HWC -> CHW
img = np.expand_dims(img, axis=0)
img = img.astype(np.float32)
# return img
return img, (ph, pw), (oh, ow)
def box2cs(box):
"""This encodes bbox(x,y,w,h) into (center, scale)
Args:
x, y, w, h
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32](2,): Center of the bbox (x, y).
- np.ndarray[float32](2,): Scale of the bbox w & h.
"""
input_size = (256, 256)
x, y, w, h = box[:4]
aspect_ratio = input_size[0] / input_size[1]
center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
# pixel std is 200.0
scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
scale = scale * 1.25
return center, scale
def detect_faces(img, face_detector):
shape = (IMAGE_YOLOV3_HEIGHT, IMAGE_YOLOV3_WIDTH) \
if args.detector == "yolov3" \
else (IMAGE_FASTERRCNN_HEIGHT, IMAGE_FASTERRCNN_WIDTH)
im_h, im_w = img.shape[:2]
img, pad_hw, resized_hw = preprocess(img, shape)
# feedforward
output = face_detector.predict([img])
boxes, _ = output
boxes = boxes[0]
boxes = boxes[boxes[:, 4] > 0]
pad_x = pad_hw[1]
pad_y = pad_hw[0]
resized_x = resized_hw[1]
resized_y = resized_hw[0]
boxes[:, [0, 2]] = boxes[:, [0, 2]] - pad_x
boxes[:, [1, 3]] = boxes[:, [1, 3]] - pad_y
boxes[:, [0, 2]] = boxes[:, [0, 2]] * im_w / resized_x
boxes[:, [1, 3]] = boxes[:, [1, 3]] * im_h / resized_y
# scale boxes
boxes = update_pred_box(boxes)
return boxes
def keypoint_decode(output, img_metas):
"""Decode keypoints from heatmaps.
Args:
img_metas (list(dict)): Information about data augmentation
By default this includes:
- "image_file: path to the image file
- "center": center of the bbox
- "scale": scale of the bbox
- "rotation": rotation of the bbox
- "bbox_score": score of bbox
output (np.ndarray[N, K, H, W]): model predicted heatmaps.
"""
batch_size = len(output)
c = np.zeros((batch_size, 2), dtype=np.float32)
s = np.zeros((batch_size, 2), dtype=np.float32)
for i in range(batch_size):
c[i, :] = img_metas[i]['center']
s[i, :] = img_metas[i]['scale']
preds, maxvals = keypoints_from_heatmaps(
output, c, s,
kernel=11)
all_preds = np.zeros((batch_size, preds.shape[1], 3), dtype=np.float32)
all_preds[:, :, 0:2] = preds[:, :, 0:2]
all_preds[:, :, 2:3] = maxvals
return all_preds
def predict(landmark_detector, face_detector, img):
if face_detector is not None:
bboxes = detect_faces(img, face_detector)
else:
h, w = img.shape[:2]
bboxes = [np.array([0, 0, w - 1, h - 1, 1])]
bboxes = np.array(bboxes)
pose_results = []
if len(bboxes) == 0:
return pose_results, bboxes
bboxes_xywh = xyxy2xywh(bboxes)
img_size = (256, 256)
batch_data = []
img_metas = []
for bbox in bboxes_xywh:
c, s = box2cs(bbox)
r = 0
img_metas.append({
"center": c,
"scale": s,
})
trans = get_affine_transform(c, s, r, img_size)
_img = cv2.warpAffine(
img,
trans, (img_size[0], img_size[1]),
flags=cv2.INTER_LINEAR)
_img = normalize_image(_img[:, :, ::-1], 'ImageNet')
batch_data.append(_img)
batch_data = np.asarray(batch_data)
batch_data = batch_data.transpose((0, 3, 1, 2))
output = landmark_detector.predict([batch_data])
heatmap = output[0]
if 1: # do flip
batch_data = batch_data[:, :, :, ::-1] # horizontal flip
output = landmark_detector.predict([batch_data])
flipped_heatmap = output[0]
flip_pairs = [
[0, 4], [1, 3], [5, 10], [6, 9],
[7, 8], [11, 19], [12, 18], [13, 17],
[14, 22], [15, 21], [16, 20], [24, 26]]
flipped_heatmap = flip_back(
flipped_heatmap,
flip_pairs)
# feature is not aligned, shift flipped heatmap for higher accuracy
flipped_heatmap[:, :, :, 1:] = flipped_heatmap[:, :, :, :-1]
heatmap = (heatmap + flipped_heatmap) * 0.5
keypoint_result = keypoint_decode(heatmap, img_metas)
return keypoint_result, bboxes
def recognize_from_image(landmark_detector, face_detector):
# input image loop
|
def recognize_from_video(landmark_detector, face_detector):
video_file = args.video if args.video else args.input[0]
capture = get_capture(video_file)
assert capture.isOpened(), 'Cannot capture source'
# create video writer if savepath is specified as video format
f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
if args.savepath != SAVE_IMAGE_PATH:
logger.warning(
'currently, video results cannot be output correctly...'
)
writer = get_writer(args.savepath, f_h, f_w)
else:
writer = None
frame_shown = False
while True:
ret, frame = capture.read()
if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
break
if frame_shown and cv2.getWindowProperty('frame', cv2.WND_PROP_VISIBLE) == 0:
break
# inference
keypoints, bboxes = predict(landmark_detector, face_detector, frame)
# plot result
res_img = visualize(
frame, keypoints, bboxes,
LANDMARK_SCORE_THRESHOLD,
SHOW_BOX_SCORE, DRAW_CONTOUR,
SKIP_CONTOUR_WITH_LOW_SCORE)
# show
cv2.imshow('frame', res_img)
frame_shown = True
# save results
if writer is not None:
writer.write(res_img.astype(np.uint8))
capture.release()
cv2.destroyAllWindows()
if writer is not None:
writer.release()
logger.info('Script finished successfully.')
def main():
# model files check and download
logger.info('Checking detect_landmarks model...')
check_and_download_models(WEIGHT_LANDMARK_PATH, MODEL_LANDMARK_PATH, REMOTE_PATH)
dic_model = {
'yolov3': (WEIGHT_YOLOV3_PATH, MODEL_YOLOV3_PATH),
'faster-rcnn': (WEIGHT_FASTERRCNN_PATH, MODEL_FASTERRCNN_PATH),
}
weight_path, model_path = dic_model[args.detector]
logger.info('Check face_detector model...')
check_and_download_models(
weight_path, model_path, REMOTE_PATH
)
env_id = args.env_id
# initialize
face_detector = ailia.Net(model_path, weight_path, env_id=env_id)
landmark_detector = ailia.Net(
MODEL_LANDMARK_PATH, WEIGHT_LANDMARK_PATH, env_id=env_id)
if args.video is not None:
recognize_from_video(landmark_detector, face_detector)
else:
recognize_from_image(landmark_detector, face_detector)
if __name__ == '__main__':
main()
| for image_path in args.input:
logger.info(image_path)
# prepare input data
img = load_image(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
# inference
logger.info('Start inference...')
if args.benchmark:
logger.info('BENCHMARK mode')
total_time_estimation = 0
for i in range(args.benchmark_count):
start = int(round(time.time() * 1000))
preds = predict(landmark_detector, face_detector, img)
end = int(round(time.time() * 1000))
estimation_time = (end - start)
# Loggin
logger.info(f'\tailia processing estimation time {estimation_time} ms')
if i != 0:
total_time_estimation = total_time_estimation + estimation_time
logger.info(f'\taverage time estimation {total_time_estimation / (args.benchmark_count - 1)} ms')
else:
keypoints, bboxes = predict(landmark_detector, face_detector, img)
res_img = visualize(
img, keypoints, bboxes,
LANDMARK_SCORE_THRESHOLD,
SHOW_BOX_SCORE, DRAW_CONTOUR,
SKIP_CONTOUR_WITH_LOW_SCORE)
# plot result
savepath = get_savepath(args.savepath, image_path, ext='.png')
logger.info(f'saved at : {savepath}')
cv2.imwrite(savepath, res_img)
logger.info('Script finished successfully.') | identifier_body |
anime-face-detector.py | import sys
import time
import numpy as np
import cv2
from PIL import Image
import ailia
# import original modules
sys.path.append('../../util')
from arg_utils import get_base_parser, update_parser, get_savepath # noqa: E402
from model_utils import check_and_download_models # noqa: E402
from detector_utils import load_image # noqa: E402C
from image_utils import normalize_image # noqa: E402C
from webcamera_utils import get_capture, get_writer # noqa: E402
# logger
from logging import getLogger # noqa: E402
from post_transforms_utils import flip_back, get_affine_transform
from top_down_utils import keypoints_from_heatmaps
from visual_utils import visualize
logger = getLogger(__name__)
# ======================
# Parameters
# ======================
WEIGHT_YOLOV3_PATH = 'anime-face_yolov3.onnx'
MODEL_YOLOV3_PATH = 'anime-face_yolov3.onnx.prototxt'
WEIGHT_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx'
MODEL_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx.prototxt'
WEIGHT_LANDMARK_PATH = 'anime-face_hrnetv2.onnx'
MODEL_LANDMARK_PATH = 'anime-face_hrnetv2.onnx.prototxt'
REMOTE_PATH = \
'https://storage.googleapis.com/ailia-models/anime-face-detector/'
IMAGE_PATH = 'input.jpg'
SAVE_IMAGE_PATH = 'output.png'
IMAGE_YOLOV3_HEIGHT = IMAGE_YOLOV3_WIDTH = 608
IMAGE_FASTERRCNN_HEIGHT = 800
IMAGE_FASTERRCNN_WIDTH = 1333
LANDMARK_SCORE_THRESHOLD = 0.3
SHOW_BOX_SCORE = True
DRAW_CONTOUR = True
SKIP_CONTOUR_WITH_LOW_SCORE = True
# ======================
# Argument Parser Config
# ======================
parser = get_base_parser(
'Anime Face Detector', IMAGE_PATH, SAVE_IMAGE_PATH
)
parser.add_argument(
'-d', '--detector', default='yolov3', choices=('yolov3', 'faster-rcnn'),
help='face detector model.'
)
args = update_parser(parser)
# ======================
# Secondary Functions
# ======================
def update_pred_box(pred_boxes):
    """Enlarge every predicted box by 10% around its own center.

    Each detection's first four (x1, y1, x2, y2) values are overwritten
    in place; the detections are collected into a plain Python list.

    Args:
        pred_boxes: iterable of np.ndarray rows shaped (5,) as
            (x1, y1, x2, y2, score).

    Returns:
        list: the same rows with enlarged coordinates.
    """
    scale = 1.1
    enlarged = []
    for det in pred_boxes:
        x1, y1, x2, y2 = det[:4]
        # Inclusive pixel extents of the original box.
        new_w = (x2 - x1 + 1) * scale
        new_h = (y2 - y1 + 1) * scale
        cx = (x1 + x2) / 2
        cy = (y1 + y2) / 2
        tlx = cx - new_w / 2
        tly = cy - new_h / 2
        det[:4] = np.array([tlx, tly, tlx + new_w, tly + new_h])
        enlarged.append(det)
    return enlarged
def xyxy2xywh(bbox_xyxy):
    """Convert boxes from (x1, y1, x2, y2[, score]) to (x, y, w, h[, score]).

    Args:
        bbox_xyxy (np.ndarray): bounding boxes (with optional scores),
            shaped (n, 4) or (n, 5). (left, top, right, bottom, [score])

    Returns:
        np.ndarray: a converted copy of the same shape; width and height
        are inclusive pixel counts (x2 - x1 + 1, y2 - y1 + 1).
    """
    bbox_xywh = bbox_xyxy.copy()
    # Replace (right, bottom) with (width, height) in one vectorized step.
    bbox_xywh[:, 2:4] = bbox_xywh[:, 2:4] - bbox_xywh[:, 0:2] + 1
    return bbox_xywh
# ======================
# Main functions
# ======================
def preprocess(img, resize_shape):
    """Letterbox `img` into `resize_shape` and convert to an NCHW tensor.

    The image is scaled (keeping aspect ratio) to fit inside the target,
    centered on a black canvas, flipped BGR -> RGB and normalized to [0, 1].

    Args:
        img (np.ndarray): HWC uint8 BGR image.
        resize_shape (tuple): target (height, width).

    Returns:
        tuple: (float32 tensor of shape (1, 3, H, W),
                (pad_top, pad_left),
                (resized_h, resized_w))
    """
    target_h, target_w = resize_shape
    src_h, src_w, _ = img.shape

    # adaptive resize: one scale factor so the image fits both dimensions
    ratio = min(target_h / src_h, target_w / src_w)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)
    if (new_w, new_h) == (src_w, src_h):
        pad_top = pad_left = 0
    else:
        resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
        canvas = np.zeros((target_h, target_w, 3), dtype=np.uint8)
        pad_top = (target_h - new_h) // 2
        pad_left = (target_w - new_w) // 2
        canvas[pad_top: pad_top + new_h, pad_left: pad_left + new_w] = resized
        img = canvas

    rgb = img[:, :, ::-1] / 255  # BGR -> RGB, scale to [0, 1]
    tensor = np.expand_dims(rgb.transpose(2, 0, 1), axis=0).astype(np.float32)
    return tensor, (pad_top, pad_left), (new_h, new_w)
def box2cs(box):
    """Encode an (x, y, w, h) box as the (center, scale) pair the pose model uses.

    The box is first padded on its shorter side to match the model input
    aspect ratio, then converted to a scale in units of 200 px and
    enlarged by 25% for context.

    Args:
        box: sequence whose first four entries are x, y, w, h.

    Returns:
        tuple: A tuple containing center and scale.
            - np.ndarray[float32](2,): Center of the bbox (x, y).
            - np.ndarray[float32](2,): Scale of the bbox w & h.
    """
    model_w, model_h = 256, 256
    x, y, w, h = box[:4]
    ratio = model_w / model_h

    center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)

    # Pad whichever side is too short for the model aspect ratio.
    if w > ratio * h:
        h = w * 1.0 / ratio
    elif w < ratio * h:
        w = h * ratio

    # 200.0 is the conventional "pixel std"; 1.25 adds context margin.
    scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
    return center, scale * 1.25
def detect_faces(img, face_detector):
    """Run the face detector and map its boxes back to image coordinates.

    Detections with non-positive scores are dropped; the survivors are
    shifted to undo the letterbox padding, rescaled from network to
    source resolution, and finally enlarged via update_pred_box.

    Args:
        img (np.ndarray): HWC uint8 BGR image.
        face_detector: ailia.Net detector selected by --detector.

    Returns:
        list: enlarged (x1, y1, x2, y2, score) boxes.
    """
    if args.detector == "yolov3":
        net_shape = (IMAGE_YOLOV3_HEIGHT, IMAGE_YOLOV3_WIDTH)
    else:
        net_shape = (IMAGE_FASTERRCNN_HEIGHT, IMAGE_FASTERRCNN_WIDTH)

    src_h, src_w = img.shape[:2]
    blob, (pad_y, pad_x), (net_h, net_w) = preprocess(img, net_shape)

    # feedforward
    boxes, _ = face_detector.predict([blob])
    boxes = boxes[0]
    boxes = boxes[boxes[:, 4] > 0]  # keep positive-score detections only

    # undo letterbox padding, then rescale to the source resolution
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_x) * src_w / net_w
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_y) * src_h / net_h

    # scale boxes
    return update_pred_box(boxes)
def keypoint_decode(output, img_metas):
    """Project heatmap maxima back to original-image keypoint coordinates.

    Args:
        output (np.ndarray[N, K, H, W]): model predicted heatmaps.
        img_metas (list[dict]): per-box metadata holding at least the
            "center" and "scale" values used to crop that box.

    Returns:
        np.ndarray[N, K, 3]: per-keypoint (x, y, confidence).
    """
    n = len(output)
    centers = np.zeros((n, 2), dtype=np.float32)
    scales = np.zeros((n, 2), dtype=np.float32)
    for i in range(n):
        centers[i, :] = img_metas[i]['center']
        scales[i, :] = img_metas[i]['scale']

    coords, scores = keypoints_from_heatmaps(output, centers, scales, kernel=11)

    decoded = np.zeros((n, coords.shape[1], 3), dtype=np.float32)
    decoded[:, :, 0:2] = coords[:, :, 0:2]
    decoded[:, :, 2:3] = scores
    return decoded
def predict(landmark_detector, face_detector, img):
    """Detect faces in `img` and predict facial landmarks for each one.

    Returns:
        tuple: (keypoints np.ndarray[N, K, 3] of (x, y, score) — or an
        empty list when nothing is detected, and the detected bboxes).
    """
    # Stage 1: face boxes. Without a detector, treat the whole frame as
    # one pseudo-detection with score 1.
    if face_detector is not None:
        bboxes = detect_faces(img, face_detector)
    else:
        h, w = img.shape[:2]
        bboxes = [np.array([0, 0, w - 1, h - 1, 1])]
    bboxes = np.array(bboxes)
    pose_results = []
    if len(bboxes) == 0:
        # No faces: empty keypoint list plus the empty box array.
        return pose_results, bboxes
    bboxes_xywh = xyxy2xywh(bboxes)
    # Stage 2: warp each box into a 256x256 crop for the landmark model.
    img_size = (256, 256)
    batch_data = []
    img_metas = []
    for bbox in bboxes_xywh:
        c, s = box2cs(bbox)
        r = 0  # no rotation augmentation at inference time
        img_metas.append({
            "center": c,
            "scale": s,
        })
        trans = get_affine_transform(c, s, r, img_size)
        _img = cv2.warpAffine(
            img,
            trans, (img_size[0], img_size[1]),
            flags=cv2.INTER_LINEAR)
        # BGR -> RGB, then ImageNet mean/std normalization.
        _img = normalize_image(_img[:, :, ::-1], 'ImageNet')
        batch_data.append(_img)
    batch_data = np.asarray(batch_data)
    batch_data = batch_data.transpose((0, 3, 1, 2))  # NHWC -> NCHW
    # Stage 3: heatmap prediction, averaged with a horizontally-flipped
    # pass (test-time augmentation).
    output = landmark_detector.predict([batch_data])
    heatmap = output[0]
    if 1:  # do flip
        batch_data = batch_data[:, :, :, ::-1]  # horizontal flip
        output = landmark_detector.predict([batch_data])
        flipped_heatmap = output[0]
        # Landmark index pairs that swap sides under a mirror flip.
        flip_pairs = [
            [0, 4], [1, 3], [5, 10], [6, 9],
            [7, 8], [11, 19], [12, 18], [13, 17],
            [14, 22], [15, 21], [16, 20], [24, 26]]
        flipped_heatmap = flip_back(
            flipped_heatmap,
            flip_pairs)
        # feature is not aligned, shift flipped heatmap for higher accuracy
        flipped_heatmap[:, :, :, 1:] = flipped_heatmap[:, :, :, :-1]
        heatmap = (heatmap + flipped_heatmap) * 0.5
    keypoint_result = keypoint_decode(heatmap, img_metas)
    return keypoint_result, bboxes
def recognize_from_image(landmark_detector, face_detector):
    """Run inference on every input image and save the visualized result.

    In benchmark mode (--benchmark) the prediction is timed over
    args.benchmark_count runs; the first run is excluded from the average
    since it typically includes warm-up cost.
    """
    # input image loop
    for image_path in args.input:
        logger.info(image_path)

        # prepare input data
        img = load_image(image_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            total_time_estimation = 0
            for i in range(args.benchmark_count):
                start = int(round(time.time() * 1000))
                # BUGFIX: unpack the result so `keypoints`/`bboxes` are
                # defined for the visualize() call below. Previously this
                # assigned a single `preds` tuple, which raised NameError
                # after the loop in benchmark mode.
                keypoints, bboxes = predict(landmark_detector, face_detector, img)
                end = int(round(time.time() * 1000))
                estimation_time = (end - start)

                # logging
                logger.info(f'\tailia processing estimation time {estimation_time} ms')
                if i != 0:
                    total_time_estimation = total_time_estimation + estimation_time

            logger.info(f'\taverage time estimation {total_time_estimation / (args.benchmark_count - 1)} ms')
        else:
            keypoints, bboxes = predict(landmark_detector, face_detector, img)

        res_img = visualize(
            img, keypoints, bboxes,
            LANDMARK_SCORE_THRESHOLD,
            SHOW_BOX_SCORE, DRAW_CONTOUR,
            SKIP_CONTOUR_WITH_LOW_SCORE)

        # plot result
        savepath = get_savepath(args.savepath, image_path, ext='.png')
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, res_img)

    logger.info('Script finished successfully.')
def recognize_from_video(landmark_detector, face_detector):
    """Run inference frame-by-frame on a video (or webcam) source,
    showing a live preview and optionally writing an output video."""
    # Source is the explicit --video argument, else the first input path.
    video_file = args.video if args.video else args.input[0]
    capture = get_capture(video_file)
    assert capture.isOpened(), 'Cannot capture source'

    # create video writer if savepath is specified as video format
    f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    if args.savepath != SAVE_IMAGE_PATH:
        logger.warning(
            'currently, video results cannot be output correctly...'
        )
        writer = get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    frame_shown = False
    while True:
        ret, frame = capture.read()
        # Stop on 'q' keypress or when the stream ends.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break
        # Stop once the user closes the preview window.
        if frame_shown and cv2.getWindowProperty('frame', cv2.WND_PROP_VISIBLE) == 0:
            break

        # inference
        keypoints, bboxes = predict(landmark_detector, face_detector, frame)

        # plot result
        res_img = visualize(
            frame, keypoints, bboxes,
            LANDMARK_SCORE_THRESHOLD,
            SHOW_BOX_SCORE, DRAW_CONTOUR,
            SKIP_CONTOUR_WITH_LOW_SCORE)

        # show
        cv2.imshow('frame', res_img)
        frame_shown = True

        # save results
        if writer is not None:
            writer.write(res_img.astype(np.uint8))

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def main():
    """Download models, build the networks, and dispatch to image or
    video inference depending on the command-line arguments."""
    # model files check and download
    logger.info('Checking detect_landmarks model...')
    check_and_download_models(WEIGHT_LANDMARK_PATH, MODEL_LANDMARK_PATH, REMOTE_PATH)

    # Pick the detector weights matching the --detector choice.
    dic_model = {
        'yolov3': (WEIGHT_YOLOV3_PATH, MODEL_YOLOV3_PATH),
        'faster-rcnn': (WEIGHT_FASTERRCNN_PATH, MODEL_FASTERRCNN_PATH),
    }
    weight_path, model_path = dic_model[args.detector]
    logger.info('Check face_detector model...')
    check_and_download_models(
        weight_path, model_path, REMOTE_PATH
    )

    env_id = args.env_id

    # initialize
    face_detector = ailia.Net(model_path, weight_path, env_id=env_id)
    landmark_detector = ailia.Net(
        MODEL_LANDMARK_PATH, WEIGHT_LANDMARK_PATH, env_id=env_id)

    # Video mode when --video was given; otherwise still-image mode.
    if args.video is not None:
        recognize_from_video(landmark_detector, face_detector)
    else:
        recognize_from_image(landmark_detector, face_detector)
if __name__ == '__main__':
| main() | conditional_block | |
anime-face-detector.py | import sys
import time
import numpy as np
import cv2
from PIL import Image
import ailia
# import original modules
sys.path.append('../../util')
from arg_utils import get_base_parser, update_parser, get_savepath # noqa: E402
from model_utils import check_and_download_models # noqa: E402
from detector_utils import load_image # noqa: E402C
from image_utils import normalize_image # noqa: E402C
from webcamera_utils import get_capture, get_writer # noqa: E402
# logger
from logging import getLogger # noqa: E402
from post_transforms_utils import flip_back, get_affine_transform
from top_down_utils import keypoints_from_heatmaps
from visual_utils import visualize
logger = getLogger(__name__)
# ======================
# Parameters
# ======================
WEIGHT_YOLOV3_PATH = 'anime-face_yolov3.onnx'
MODEL_YOLOV3_PATH = 'anime-face_yolov3.onnx.prototxt'
WEIGHT_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx'
MODEL_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx.prototxt'
WEIGHT_LANDMARK_PATH = 'anime-face_hrnetv2.onnx'
MODEL_LANDMARK_PATH = 'anime-face_hrnetv2.onnx.prototxt'
REMOTE_PATH = \
'https://storage.googleapis.com/ailia-models/anime-face-detector/'
IMAGE_PATH = 'input.jpg'
SAVE_IMAGE_PATH = 'output.png'
IMAGE_YOLOV3_HEIGHT = IMAGE_YOLOV3_WIDTH = 608
IMAGE_FASTERRCNN_HEIGHT = 800
IMAGE_FASTERRCNN_WIDTH = 1333
LANDMARK_SCORE_THRESHOLD = 0.3
SHOW_BOX_SCORE = True
DRAW_CONTOUR = True
SKIP_CONTOUR_WITH_LOW_SCORE = True
# ======================
# Arguemnt Parser Config
# ======================
parser = get_base_parser(
'Anime Face Detector', IMAGE_PATH, SAVE_IMAGE_PATH
)
parser.add_argument(
'-d', '--detector', default='yolov3', choices=('yolov3', 'faster-rcnn'),
help='face detector model.'
)
args = update_parser(parser)
# ======================
# Secondaty Functions
# ======================
def update_pred_box(pred_boxes):
box_scale_factor = 1.1
boxes = []
for pred_box in pred_boxes:
box = pred_box[:4]
size = box[2:] - box[:2] + 1
new_size = size * box_scale_factor
center = (box[:2] + box[2:]) / 2
tl = center - new_size / 2
br = tl + new_size
pred_box[:4] = np.concatenate([tl, br])
boxes.append(pred_box)
return boxes
def xyxy2xywh(bbox_xyxy):
"""Transform the bbox format from x1y1x2y2 to xywh.
Args:
bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
"""
bbox_xywh = bbox_xyxy.copy()
bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + 1
bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + 1
return bbox_xywh
# ======================
# Main functions
# ======================
def preprocess(img, resize_shape):
h, w = resize_shape
im_h, im_w, _ = img.shape
# adaptive_resize
scale = min(h / im_h, w / im_w)
ow, oh = int(im_w * scale), int(im_h * scale)
if ow != im_w or oh != im_h:
_img = cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
img = np.zeros((h, w, 3), dtype=np.uint8)
ph, pw = (h - oh) // 2, (w - ow) // 2
img[ph: ph + oh, pw: pw + ow] = _img
else:
ph = pw = 0
img = img[:, :, ::-1] # GBR -> RGB
img = img / 255
img = img.transpose(2, 0, 1) # HWC -> CHW
img = np.expand_dims(img, axis=0)
img = img.astype(np.float32)
# return img
return img, (ph, pw), (oh, ow)
def | (box):
"""This encodes bbox(x,y,w,h) into (center, scale)
Args:
x, y, w, h
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32](2,): Center of the bbox (x, y).
- np.ndarray[float32](2,): Scale of the bbox w & h.
"""
input_size = (256, 256)
x, y, w, h = box[:4]
aspect_ratio = input_size[0] / input_size[1]
center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
# pixel std is 200.0
scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
scale = scale * 1.25
return center, scale
def detect_faces(img, face_detector):
shape = (IMAGE_YOLOV3_HEIGHT, IMAGE_YOLOV3_WIDTH) \
if args.detector == "yolov3" \
else (IMAGE_FASTERRCNN_HEIGHT, IMAGE_FASTERRCNN_WIDTH)
im_h, im_w = img.shape[:2]
img, pad_hw, resized_hw = preprocess(img, shape)
# feedforward
output = face_detector.predict([img])
boxes, _ = output
boxes = boxes[0]
boxes = boxes[boxes[:, 4] > 0]
pad_x = pad_hw[1]
pad_y = pad_hw[0]
resized_x = resized_hw[1]
resized_y = resized_hw[0]
boxes[:, [0, 2]] = boxes[:, [0, 2]] - pad_x
boxes[:, [1, 3]] = boxes[:, [1, 3]] - pad_y
boxes[:, [0, 2]] = boxes[:, [0, 2]] * im_w / resized_x
boxes[:, [1, 3]] = boxes[:, [1, 3]] * im_h / resized_y
# scale boxes
boxes = update_pred_box(boxes)
return boxes
def keypoint_decode(output, img_metas):
"""Decode keypoints from heatmaps.
Args:
img_metas (list(dict)): Information about data augmentation
By default this includes:
- "image_file: path to the image file
- "center": center of the bbox
- "scale": scale of the bbox
- "rotation": rotation of the bbox
- "bbox_score": score of bbox
output (np.ndarray[N, K, H, W]): model predicted heatmaps.
"""
batch_size = len(output)
c = np.zeros((batch_size, 2), dtype=np.float32)
s = np.zeros((batch_size, 2), dtype=np.float32)
for i in range(batch_size):
c[i, :] = img_metas[i]['center']
s[i, :] = img_metas[i]['scale']
preds, maxvals = keypoints_from_heatmaps(
output, c, s,
kernel=11)
all_preds = np.zeros((batch_size, preds.shape[1], 3), dtype=np.float32)
all_preds[:, :, 0:2] = preds[:, :, 0:2]
all_preds[:, :, 2:3] = maxvals
return all_preds
def predict(landmark_detector, face_detector, img):
if face_detector is not None:
bboxes = detect_faces(img, face_detector)
else:
h, w = img.shape[:2]
bboxes = [np.array([0, 0, w - 1, h - 1, 1])]
bboxes = np.array(bboxes)
pose_results = []
if len(bboxes) == 0:
return pose_results, bboxes
bboxes_xywh = xyxy2xywh(bboxes)
img_size = (256, 256)
batch_data = []
img_metas = []
for bbox in bboxes_xywh:
c, s = box2cs(bbox)
r = 0
img_metas.append({
"center": c,
"scale": s,
})
trans = get_affine_transform(c, s, r, img_size)
_img = cv2.warpAffine(
img,
trans, (img_size[0], img_size[1]),
flags=cv2.INTER_LINEAR)
_img = normalize_image(_img[:, :, ::-1], 'ImageNet')
batch_data.append(_img)
batch_data = np.asarray(batch_data)
batch_data = batch_data.transpose((0, 3, 1, 2))
output = landmark_detector.predict([batch_data])
heatmap = output[0]
if 1: # do flip
batch_data = batch_data[:, :, :, ::-1] # horizontal flip
output = landmark_detector.predict([batch_data])
flipped_heatmap = output[0]
flip_pairs = [
[0, 4], [1, 3], [5, 10], [6, 9],
[7, 8], [11, 19], [12, 18], [13, 17],
[14, 22], [15, 21], [16, 20], [24, 26]]
flipped_heatmap = flip_back(
flipped_heatmap,
flip_pairs)
# feature is not aligned, shift flipped heatmap for higher accuracy
flipped_heatmap[:, :, :, 1:] = flipped_heatmap[:, :, :, :-1]
heatmap = (heatmap + flipped_heatmap) * 0.5
keypoint_result = keypoint_decode(heatmap, img_metas)
return keypoint_result, bboxes
def recognize_from_image(landmark_detector, face_detector):
# input image loop
for image_path in args.input:
logger.info(image_path)
# prepare input data
img = load_image(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
# inference
logger.info('Start inference...')
if args.benchmark:
logger.info('BENCHMARK mode')
total_time_estimation = 0
for i in range(args.benchmark_count):
start = int(round(time.time() * 1000))
preds = predict(landmark_detector, face_detector, img)
end = int(round(time.time() * 1000))
estimation_time = (end - start)
# Loggin
logger.info(f'\tailia processing estimation time {estimation_time} ms')
if i != 0:
total_time_estimation = total_time_estimation + estimation_time
logger.info(f'\taverage time estimation {total_time_estimation / (args.benchmark_count - 1)} ms')
else:
keypoints, bboxes = predict(landmark_detector, face_detector, img)
res_img = visualize(
img, keypoints, bboxes,
LANDMARK_SCORE_THRESHOLD,
SHOW_BOX_SCORE, DRAW_CONTOUR,
SKIP_CONTOUR_WITH_LOW_SCORE)
# plot result
savepath = get_savepath(args.savepath, image_path, ext='.png')
logger.info(f'saved at : {savepath}')
cv2.imwrite(savepath, res_img)
logger.info('Script finished successfully.')
def recognize_from_video(landmark_detector, face_detector):
video_file = args.video if args.video else args.input[0]
capture = get_capture(video_file)
assert capture.isOpened(), 'Cannot capture source'
# create video writer if savepath is specified as video format
f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
if args.savepath != SAVE_IMAGE_PATH:
logger.warning(
'currently, video results cannot be output correctly...'
)
writer = get_writer(args.savepath, f_h, f_w)
else:
writer = None
frame_shown = False
while True:
ret, frame = capture.read()
if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
break
if frame_shown and cv2.getWindowProperty('frame', cv2.WND_PROP_VISIBLE) == 0:
break
# inference
keypoints, bboxes = predict(landmark_detector, face_detector, frame)
# plot result
res_img = visualize(
frame, keypoints, bboxes,
LANDMARK_SCORE_THRESHOLD,
SHOW_BOX_SCORE, DRAW_CONTOUR,
SKIP_CONTOUR_WITH_LOW_SCORE)
# show
cv2.imshow('frame', res_img)
frame_shown = True
# save results
if writer is not None:
writer.write(res_img.astype(np.uint8))
capture.release()
cv2.destroyAllWindows()
if writer is not None:
writer.release()
logger.info('Script finished successfully.')
def main():
# model files check and download
logger.info('Checking detect_landmarks model...')
check_and_download_models(WEIGHT_LANDMARK_PATH, MODEL_LANDMARK_PATH, REMOTE_PATH)
dic_model = {
'yolov3': (WEIGHT_YOLOV3_PATH, MODEL_YOLOV3_PATH),
'faster-rcnn': (WEIGHT_FASTERRCNN_PATH, MODEL_FASTERRCNN_PATH),
}
weight_path, model_path = dic_model[args.detector]
logger.info('Check face_detector model...')
check_and_download_models(
weight_path, model_path, REMOTE_PATH
)
env_id = args.env_id
# initialize
face_detector = ailia.Net(model_path, weight_path, env_id=env_id)
landmark_detector = ailia.Net(
MODEL_LANDMARK_PATH, WEIGHT_LANDMARK_PATH, env_id=env_id)
if args.video is not None:
recognize_from_video(landmark_detector, face_detector)
else:
recognize_from_image(landmark_detector, face_detector)
if __name__ == '__main__':
main()
| box2cs | identifier_name |
anime-face-detector.py | import sys
import time
import numpy as np
import cv2
from PIL import Image
import ailia
# import original modules
sys.path.append('../../util')
from arg_utils import get_base_parser, update_parser, get_savepath # noqa: E402
from model_utils import check_and_download_models # noqa: E402
from detector_utils import load_image # noqa: E402C
from image_utils import normalize_image # noqa: E402C
from webcamera_utils import get_capture, get_writer # noqa: E402
# logger
from logging import getLogger # noqa: E402
from post_transforms_utils import flip_back, get_affine_transform
from top_down_utils import keypoints_from_heatmaps
from visual_utils import visualize
logger = getLogger(__name__)
# ======================
# Parameters
# ======================
WEIGHT_YOLOV3_PATH = 'anime-face_yolov3.onnx'
MODEL_YOLOV3_PATH = 'anime-face_yolov3.onnx.prototxt'
WEIGHT_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx'
MODEL_FASTERRCNN_PATH = 'anime-face_faster-rcnn.onnx.prototxt'
WEIGHT_LANDMARK_PATH = 'anime-face_hrnetv2.onnx'
MODEL_LANDMARK_PATH = 'anime-face_hrnetv2.onnx.prototxt'
REMOTE_PATH = \
'https://storage.googleapis.com/ailia-models/anime-face-detector/'
IMAGE_PATH = 'input.jpg'
SAVE_IMAGE_PATH = 'output.png'
IMAGE_YOLOV3_HEIGHT = IMAGE_YOLOV3_WIDTH = 608
IMAGE_FASTERRCNN_HEIGHT = 800
IMAGE_FASTERRCNN_WIDTH = 1333
LANDMARK_SCORE_THRESHOLD = 0.3
SHOW_BOX_SCORE = True
DRAW_CONTOUR = True
SKIP_CONTOUR_WITH_LOW_SCORE = True
# ======================
# Arguemnt Parser Config
# ======================
parser = get_base_parser(
'Anime Face Detector', IMAGE_PATH, SAVE_IMAGE_PATH
)
parser.add_argument(
'-d', '--detector', default='yolov3', choices=('yolov3', 'faster-rcnn'),
help='face detector model.'
)
args = update_parser(parser)
# ======================
# Secondaty Functions
# ======================
def update_pred_box(pred_boxes):
box_scale_factor = 1.1
boxes = []
for pred_box in pred_boxes:
box = pred_box[:4]
size = box[2:] - box[:2] + 1
new_size = size * box_scale_factor
center = (box[:2] + box[2:]) / 2
tl = center - new_size / 2
br = tl + new_size
pred_box[:4] = np.concatenate([tl, br])
boxes.append(pred_box)
return boxes
|
def xyxy2xywh(bbox_xyxy):
"""Transform the bbox format from x1y1x2y2 to xywh.
Args:
bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
"""
bbox_xywh = bbox_xyxy.copy()
bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + 1
bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + 1
return bbox_xywh
# ======================
# Main functions
# ======================
def preprocess(img, resize_shape):
h, w = resize_shape
im_h, im_w, _ = img.shape
# adaptive_resize
scale = min(h / im_h, w / im_w)
ow, oh = int(im_w * scale), int(im_h * scale)
if ow != im_w or oh != im_h:
_img = cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
img = np.zeros((h, w, 3), dtype=np.uint8)
ph, pw = (h - oh) // 2, (w - ow) // 2
img[ph: ph + oh, pw: pw + ow] = _img
else:
ph = pw = 0
img = img[:, :, ::-1] # GBR -> RGB
img = img / 255
img = img.transpose(2, 0, 1) # HWC -> CHW
img = np.expand_dims(img, axis=0)
img = img.astype(np.float32)
# return img
return img, (ph, pw), (oh, ow)
def box2cs(box):
"""This encodes bbox(x,y,w,h) into (center, scale)
Args:
x, y, w, h
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32](2,): Center of the bbox (x, y).
- np.ndarray[float32](2,): Scale of the bbox w & h.
"""
input_size = (256, 256)
x, y, w, h = box[:4]
aspect_ratio = input_size[0] / input_size[1]
center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
# pixel std is 200.0
scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
scale = scale * 1.25
return center, scale
def detect_faces(img, face_detector):
shape = (IMAGE_YOLOV3_HEIGHT, IMAGE_YOLOV3_WIDTH) \
if args.detector == "yolov3" \
else (IMAGE_FASTERRCNN_HEIGHT, IMAGE_FASTERRCNN_WIDTH)
im_h, im_w = img.shape[:2]
img, pad_hw, resized_hw = preprocess(img, shape)
# feedforward
output = face_detector.predict([img])
boxes, _ = output
boxes = boxes[0]
boxes = boxes[boxes[:, 4] > 0]
pad_x = pad_hw[1]
pad_y = pad_hw[0]
resized_x = resized_hw[1]
resized_y = resized_hw[0]
boxes[:, [0, 2]] = boxes[:, [0, 2]] - pad_x
boxes[:, [1, 3]] = boxes[:, [1, 3]] - pad_y
boxes[:, [0, 2]] = boxes[:, [0, 2]] * im_w / resized_x
boxes[:, [1, 3]] = boxes[:, [1, 3]] * im_h / resized_y
# scale boxes
boxes = update_pred_box(boxes)
return boxes
def keypoint_decode(output, img_metas):
"""Decode keypoints from heatmaps.
Args:
img_metas (list(dict)): Information about data augmentation
By default this includes:
- "image_file: path to the image file
- "center": center of the bbox
- "scale": scale of the bbox
- "rotation": rotation of the bbox
- "bbox_score": score of bbox
output (np.ndarray[N, K, H, W]): model predicted heatmaps.
"""
batch_size = len(output)
c = np.zeros((batch_size, 2), dtype=np.float32)
s = np.zeros((batch_size, 2), dtype=np.float32)
for i in range(batch_size):
c[i, :] = img_metas[i]['center']
s[i, :] = img_metas[i]['scale']
preds, maxvals = keypoints_from_heatmaps(
output, c, s,
kernel=11)
all_preds = np.zeros((batch_size, preds.shape[1], 3), dtype=np.float32)
all_preds[:, :, 0:2] = preds[:, :, 0:2]
all_preds[:, :, 2:3] = maxvals
return all_preds
def predict(landmark_detector, face_detector, img):
if face_detector is not None:
bboxes = detect_faces(img, face_detector)
else:
h, w = img.shape[:2]
bboxes = [np.array([0, 0, w - 1, h - 1, 1])]
bboxes = np.array(bboxes)
pose_results = []
if len(bboxes) == 0:
return pose_results, bboxes
bboxes_xywh = xyxy2xywh(bboxes)
img_size = (256, 256)
batch_data = []
img_metas = []
for bbox in bboxes_xywh:
c, s = box2cs(bbox)
r = 0
img_metas.append({
"center": c,
"scale": s,
})
trans = get_affine_transform(c, s, r, img_size)
_img = cv2.warpAffine(
img,
trans, (img_size[0], img_size[1]),
flags=cv2.INTER_LINEAR)
_img = normalize_image(_img[:, :, ::-1], 'ImageNet')
batch_data.append(_img)
batch_data = np.asarray(batch_data)
batch_data = batch_data.transpose((0, 3, 1, 2))
output = landmark_detector.predict([batch_data])
heatmap = output[0]
if 1: # do flip
batch_data = batch_data[:, :, :, ::-1] # horizontal flip
output = landmark_detector.predict([batch_data])
flipped_heatmap = output[0]
flip_pairs = [
[0, 4], [1, 3], [5, 10], [6, 9],
[7, 8], [11, 19], [12, 18], [13, 17],
[14, 22], [15, 21], [16, 20], [24, 26]]
flipped_heatmap = flip_back(
flipped_heatmap,
flip_pairs)
# feature is not aligned, shift flipped heatmap for higher accuracy
flipped_heatmap[:, :, :, 1:] = flipped_heatmap[:, :, :, :-1]
heatmap = (heatmap + flipped_heatmap) * 0.5
keypoint_result = keypoint_decode(heatmap, img_metas)
return keypoint_result, bboxes
def recognize_from_image(landmark_detector, face_detector):
# input image loop
for image_path in args.input:
logger.info(image_path)
# prepare input data
img = load_image(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
# inference
logger.info('Start inference...')
if args.benchmark:
logger.info('BENCHMARK mode')
total_time_estimation = 0
for i in range(args.benchmark_count):
start = int(round(time.time() * 1000))
preds = predict(landmark_detector, face_detector, img)
end = int(round(time.time() * 1000))
estimation_time = (end - start)
# Loggin
logger.info(f'\tailia processing estimation time {estimation_time} ms')
if i != 0:
total_time_estimation = total_time_estimation + estimation_time
logger.info(f'\taverage time estimation {total_time_estimation / (args.benchmark_count - 1)} ms')
else:
keypoints, bboxes = predict(landmark_detector, face_detector, img)
res_img = visualize(
img, keypoints, bboxes,
LANDMARK_SCORE_THRESHOLD,
SHOW_BOX_SCORE, DRAW_CONTOUR,
SKIP_CONTOUR_WITH_LOW_SCORE)
# plot result
savepath = get_savepath(args.savepath, image_path, ext='.png')
logger.info(f'saved at : {savepath}')
cv2.imwrite(savepath, res_img)
logger.info('Script finished successfully.')
def recognize_from_video(landmark_detector, face_detector):
video_file = args.video if args.video else args.input[0]
capture = get_capture(video_file)
assert capture.isOpened(), 'Cannot capture source'
# create video writer if savepath is specified as video format
f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
if args.savepath != SAVE_IMAGE_PATH:
logger.warning(
'currently, video results cannot be output correctly...'
)
writer = get_writer(args.savepath, f_h, f_w)
else:
writer = None
frame_shown = False
while True:
ret, frame = capture.read()
if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
break
if frame_shown and cv2.getWindowProperty('frame', cv2.WND_PROP_VISIBLE) == 0:
break
# inference
keypoints, bboxes = predict(landmark_detector, face_detector, frame)
# plot result
res_img = visualize(
frame, keypoints, bboxes,
LANDMARK_SCORE_THRESHOLD,
SHOW_BOX_SCORE, DRAW_CONTOUR,
SKIP_CONTOUR_WITH_LOW_SCORE)
# show
cv2.imshow('frame', res_img)
frame_shown = True
# save results
if writer is not None:
writer.write(res_img.astype(np.uint8))
capture.release()
cv2.destroyAllWindows()
if writer is not None:
writer.release()
logger.info('Script finished successfully.')
def main():
# model files check and download
logger.info('Checking detect_landmarks model...')
check_and_download_models(WEIGHT_LANDMARK_PATH, MODEL_LANDMARK_PATH, REMOTE_PATH)
dic_model = {
'yolov3': (WEIGHT_YOLOV3_PATH, MODEL_YOLOV3_PATH),
'faster-rcnn': (WEIGHT_FASTERRCNN_PATH, MODEL_FASTERRCNN_PATH),
}
weight_path, model_path = dic_model[args.detector]
logger.info('Check face_detector model...')
check_and_download_models(
weight_path, model_path, REMOTE_PATH
)
env_id = args.env_id
# initialize
face_detector = ailia.Net(model_path, weight_path, env_id=env_id)
landmark_detector = ailia.Net(
MODEL_LANDMARK_PATH, WEIGHT_LANDMARK_PATH, env_id=env_id)
if args.video is not None:
recognize_from_video(landmark_detector, face_detector)
else:
recognize_from_image(landmark_detector, face_detector)
if __name__ == '__main__':
main() | random_line_split | |
main.go | package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/allan-simon/go-singleinstance"
"github.com/dlasky/gotk3-layershell/layershell"
"github.com/gotk3/gotk3/gdk"
"github.com/gotk3/gotk3/glib"
"github.com/gotk3/gotk3/gtk"
)
const version = "0.1.10"
var (
appDirs []string
configDirectory string
pinnedFile string
pinned []string
src glib.SourceHandle
id2entry map[string]desktopEntry
preferredApps map[string]interface{}
)
var categoryNames = [...]string{
"utility",
"development",
"game",
"graphics",
"internet-and-network",
"office",
"audio-video",
"system-tools",
"other",
}
type category struct {
Name string
DisplayName string
Icon string
}
var categories []category
type desktopEntry struct {
DesktopID string
Name string
NameLoc string
Comment string
CommentLoc string
Icon string
Exec string
Category string
Terminal bool
NoDisplay bool
}
// slices below will hold DesktopID strings
var (
listUtility []string
listDevelopment []string
listGame []string
listGraphics []string
listInternetAndNetwork []string
listOffice []string
listAudioVideo []string
listSystemTools []string
listOther []string
)
var desktopEntries []desktopEntry
// UI elements
var (
resultWindow *gtk.ScrolledWindow
fileSearchResults []string
searchEntry *gtk.SearchEntry
phrase string
fileSearchResultFlowBox *gtk.FlowBox
userDirsMap map[string]string
appFlowBox *gtk.FlowBox
appSearchResultWrapper *gtk.Box
fileSearchResultWrapper *gtk.Box
pinnedFlowBox *gtk.FlowBox
pinnedFlowBoxWrapper *gtk.Box
categoriesWrapper *gtk.Box
catButtons []*gtk.Button
statusLabel *gtk.Label
status string
ignore string
)
func defaultStringIfBlank(s, fallback string) string {
s = strings.TrimSpace(s)
if s == "" {
return fallback
}
return s
}
// Flags
var cssFileName = flag.String("s", "drawer.css", "Styling: css file name")
var targetOutput = flag.String("o", "", "name of the Output to display the drawer on (sway only)")
var displayVersion = flag.Bool("v", false, "display Version information")
var overlay = flag.Bool("ovl", false, "use OVerLay layer")
var iconSize = flag.Int("is", 64, "Icon Size")
var fsColumns = flag.Uint("fscol", 2, "File Search result COLumns")
var columnsNumber = flag.Uint("c", 6, "number of Columns")
var itemSpacing = flag.Uint("spacing", 20, "icon spacing")
var lang = flag.String("lang", "", "force lang, e.g. \"en\", \"pl\"")
var fileManager = flag.String("fm", "thunar", "File Manager")
var term = flag.String("term", defaultStringIfBlank(os.Getenv("TERM"), "alacritty"), "Terminal emulator")
var nameLimit = flag.Int("fslen", 80, "File Search name length Limit")
var noCats = flag.Bool("nocats", false, "Disable filtering by category")
var noFS = flag.Bool("nofs", false, "Disable file search")
func | () {
timeStart := time.Now()
flag.Parse()
if *displayVersion {
fmt.Printf("nwg-drawer version %s\n", version)
os.Exit(0)
}
// Gentle SIGTERM handler thanks to reiki4040 https://gist.github.com/reiki4040/be3705f307d3cd136e85
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
go func() {
for {
s := <-signalChan
if s == syscall.SIGTERM {
println("SIGTERM received, bye bye!")
gtk.MainQuit()
}
}
}()
// We want the same key/mouse binding to turn the dock off: kill the running instance and exit.
lockFilePath := fmt.Sprintf("%s/nwg-drawer.lock", tempDir())
lockFile, err := singleinstance.CreateLockFile(lockFilePath)
if err != nil {
pid, err := readTextFile(lockFilePath)
if err == nil {
i, err := strconv.Atoi(pid)
if err == nil {
println("Running instance found, sending SIGTERM and exiting...")
syscall.Kill(i, syscall.SIGTERM)
}
}
os.Exit(0)
}
defer lockFile.Close()
// LANGUAGE
if *lang == "" && os.Getenv("LANG") != "" {
*lang = strings.Split(os.Getenv("LANG"), ".")[0]
}
println(fmt.Sprintf("lang: %s", *lang))
// ENVIRONMENT
configDirectory = configDir()
if !pathExists(filepath.Join(configDirectory, "drawer.css")) {
copyFile(filepath.Join(getDataHome(), "nwg-drawer/drawer.css"), filepath.Join(configDirectory, "drawer.css"))
}
cacheDirectory := cacheDir()
if cacheDirectory == "" {
log.Panic("Couldn't determine cache directory location")
}
// DATA
pinnedFile = filepath.Join(cacheDirectory, "nwg-pin-cache")
pinned, err = loadTextFile(pinnedFile)
if err != nil {
pinned = nil
}
println(fmt.Sprintf("Found %v pinned items", len(pinned)))
cssFile := filepath.Join(configDirectory, *cssFileName)
appDirs = getAppDirs()
setUpCategories()
desktopFiles := listDesktopFiles()
println(fmt.Sprintf("Found %v desktop files", len(desktopFiles)))
status = parseDesktopFiles(desktopFiles)
// For opening files we use xdg-open. As its configuration is PITA, we may override some associations
// in the ~/.config/nwg-panel/preferred-apps.json file.
paFile := filepath.Join(configDirectory, "preferred-apps.json")
preferredApps, err = loadPreferredApps(paFile)
if err != nil {
println(fmt.Sprintf("Custom associations file %s not found or invalid", paFile))
} else {
println(fmt.Sprintf("Found %v associations in %s", len(preferredApps), paFile))
}
// USER INTERFACE
gtk.Init(nil)
cssProvider, _ := gtk.CssProviderNew()
err = cssProvider.LoadFromPath(cssFile)
if err != nil {
println(fmt.Sprintf("ERROR: %s css file not found or erroneous. Using GTK styling.", cssFile))
println(fmt.Sprintf("%s", err))
} else {
println(fmt.Sprintf("Using style from %s", cssFile))
screen, _ := gdk.ScreenGetDefault()
gtk.AddProviderForScreen(screen, cssProvider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
}
win, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
if err != nil {
log.Fatal("Unable to create window:", err)
}
if wayland() {
layershell.InitForWindow(win)
var output2mon map[string]*gdk.Monitor
if *targetOutput != "" {
// We want to assign layershell to a monitor, but we only know the output name!
output2mon, err = mapOutputs()
if err == nil {
monitor := output2mon[*targetOutput]
layershell.SetMonitor(win, monitor)
} else {
println(fmt.Sprintf("%s", err))
}
}
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_BOTTOM, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_TOP, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_LEFT, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_RIGHT, true)
if *overlay {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_OVERLAY)
layershell.SetExclusiveZone(win, -1)
} else {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_TOP)
}
layershell.SetKeyboardMode(win, layershell.LAYER_SHELL_KEYBOARD_MODE_EXCLUSIVE)
}
win.Connect("destroy", func() {
gtk.MainQuit()
})
win.Connect("key-press-event", func(window *gtk.Window, event *gdk.Event) bool {
key := &gdk.EventKey{Event: event}
switch key.KeyVal() {
case gdk.KEY_Escape:
s, _ := searchEntry.GetText()
if s != "" {
searchEntry.GrabFocus()
searchEntry.SetText("")
} else {
gtk.MainQuit()
}
return false
case gdk.KEY_downarrow, gdk.KEY_Up, gdk.KEY_Down, gdk.KEY_Left, gdk.KEY_Right, gdk.KEY_Tab,
gdk.KEY_Return, gdk.KEY_Page_Up, gdk.KEY_Page_Down, gdk.KEY_Home, gdk.KEY_End:
return false
default:
if !searchEntry.IsFocus() {
searchEntry.GrabFocusWithoutSelecting()
}
return false
}
})
// Close the window on leave, but not immediately, to avoid accidental closes
win.Connect("leave-notify-event", func() {
src = glib.TimeoutAdd(uint(500), func() bool {
gtk.MainQuit()
return false
})
})
win.Connect("enter-notify-event", func() {
cancelClose()
})
/*
In case someone REALLY needed to use X11 - for some stupid Zoom meeting or something, this allows
the drawer to behave properly on Openbox, and possibly somewhere else. For sure not on i3.
This feature is not really supported and will stay undocumented.
*/
if !wayland() {
println("Not Wayland, oh really?")
win.SetDecorated(false)
win.Maximize()
}
// Set up UI
outerVBox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
win.Add(outerVBox)
searchBoxWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(searchBoxWrapper, false, false, 10)
searchEntry = setUpSearchEntry()
searchEntry.SetMaxWidthChars(30)
searchBoxWrapper.PackStart(searchEntry, true, false, 0)
if !*noCats {
categoriesWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
categoriesButtonBox := setUpCategoriesButtonBox()
categoriesWrapper.PackStart(categoriesButtonBox, true, false, 0)
outerVBox.PackStart(categoriesWrapper, false, false, 0)
}
pinnedWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedWrapper, false, false, 0)
pinnedFlowBoxWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedFlowBoxWrapper, false, false, 0)
pinnedFlowBox = setUpPinnedFlowBox()
resultWindow, _ = gtk.ScrolledWindowNew(nil, nil)
resultWindow.SetEvents(int(gdk.ALL_EVENTS_MASK))
resultWindow.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
resultWindow.Connect("enter-notify-event", func() {
cancelClose()
})
resultWindow.Connect("button-release-event", func(sw *gtk.ScrolledWindow, e *gdk.Event) bool {
btnEvent := gdk.EventButtonNewFromEvent(e)
if btnEvent.Button() == 1 || btnEvent.Button() == 3 {
gtk.MainQuit()
return true
}
return false
})
outerVBox.PackStart(resultWindow, true, true, 10)
resultsWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultWindow.Add(resultsWrapper)
appSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(appSearchResultWrapper, false, false, 0)
appFlowBox = setUpAppsFlowBox(nil, "")
// Focus 1st pinned item if any, otherwise focus 1st found app icon
var button gtk.IWidget
if pinnedFlowBox.GetChildren().Length() > 0 {
button, err = pinnedFlowBox.GetChildAtIndex(0).GetChild()
} else {
button, err = appFlowBox.GetChildAtIndex(0).GetChild()
}
if err == nil {
button.ToWidget().GrabFocus()
}
userDirsMap = mapXdgUserDirs()
placeholder, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(placeholder, true, true, 0)
placeholder.SetSizeRequest(20, 20)
if !*noFS {
wrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper.SetProperty("name", "files-box")
wrapper.PackStart(fileSearchResultWrapper, true, false, 0)
resultsWrapper.PackEnd(wrapper, false, false, 10)
}
statusLineWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(statusLineWrapper, false, false, 10)
statusLabel, _ = gtk.LabelNew(status)
statusLineWrapper.PackStart(statusLabel, true, false, 0)
win.ShowAll()
if !*noFS {
fileSearchResultWrapper.SetSizeRequest(appFlowBox.GetAllocatedWidth(), 1)
fileSearchResultWrapper.Hide()
}
if !*noCats {
categoriesWrapper.SetSizeRequest(1, categoriesWrapper.GetAllocatedHeight()*2)
}
t := time.Now()
println(fmt.Sprintf("UI created in %v ms. Thank you for your patience.", t.Sub(timeStart).Milliseconds()))
gtk.Main()
}
| main | identifier_name |
main.go | package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/allan-simon/go-singleinstance"
"github.com/dlasky/gotk3-layershell/layershell"
"github.com/gotk3/gotk3/gdk"
"github.com/gotk3/gotk3/glib"
"github.com/gotk3/gotk3/gtk"
)
const version = "0.1.10"
var (
appDirs []string
configDirectory string
pinnedFile string
pinned []string
src glib.SourceHandle
id2entry map[string]desktopEntry
preferredApps map[string]interface{}
)
var categoryNames = [...]string{
"utility",
"development",
"game",
"graphics",
"internet-and-network",
"office",
"audio-video",
"system-tools",
"other",
}
type category struct {
Name string
DisplayName string
Icon string
}
var categories []category
type desktopEntry struct {
DesktopID string
Name string
NameLoc string
Comment string
CommentLoc string
Icon string
Exec string
Category string
Terminal bool
NoDisplay bool
}
// slices below will hold DesktopID strings
var (
listUtility []string
listDevelopment []string
listGame []string
listGraphics []string
listInternetAndNetwork []string
listOffice []string
listAudioVideo []string
listSystemTools []string
listOther []string
)
var desktopEntries []desktopEntry
// UI elements
var (
resultWindow *gtk.ScrolledWindow
fileSearchResults []string
searchEntry *gtk.SearchEntry
phrase string
fileSearchResultFlowBox *gtk.FlowBox
userDirsMap map[string]string
appFlowBox *gtk.FlowBox
appSearchResultWrapper *gtk.Box
fileSearchResultWrapper *gtk.Box
pinnedFlowBox *gtk.FlowBox
pinnedFlowBoxWrapper *gtk.Box
categoriesWrapper *gtk.Box
catButtons []*gtk.Button
statusLabel *gtk.Label
status string
ignore string
)
func defaultStringIfBlank(s, fallback string) string |
// Flags
var cssFileName = flag.String("s", "drawer.css", "Styling: css file name")
var targetOutput = flag.String("o", "", "name of the Output to display the drawer on (sway only)")
var displayVersion = flag.Bool("v", false, "display Version information")
var overlay = flag.Bool("ovl", false, "use OVerLay layer")
var iconSize = flag.Int("is", 64, "Icon Size")
var fsColumns = flag.Uint("fscol", 2, "File Search result COLumns")
var columnsNumber = flag.Uint("c", 6, "number of Columns")
var itemSpacing = flag.Uint("spacing", 20, "icon spacing")
var lang = flag.String("lang", "", "force lang, e.g. \"en\", \"pl\"")
var fileManager = flag.String("fm", "thunar", "File Manager")
var term = flag.String("term", defaultStringIfBlank(os.Getenv("TERM"), "alacritty"), "Terminal emulator")
var nameLimit = flag.Int("fslen", 80, "File Search name length Limit")
var noCats = flag.Bool("nocats", false, "Disable filtering by category")
var noFS = flag.Bool("nofs", false, "Disable file search")
func main() {
timeStart := time.Now()
flag.Parse()
if *displayVersion {
fmt.Printf("nwg-drawer version %s\n", version)
os.Exit(0)
}
// Gentle SIGTERM handler thanks to reiki4040 https://gist.github.com/reiki4040/be3705f307d3cd136e85
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
go func() {
for {
s := <-signalChan
if s == syscall.SIGTERM {
println("SIGTERM received, bye bye!")
gtk.MainQuit()
}
}
}()
// We want the same key/mouse binding to turn the dock off: kill the running instance and exit.
lockFilePath := fmt.Sprintf("%s/nwg-drawer.lock", tempDir())
lockFile, err := singleinstance.CreateLockFile(lockFilePath)
if err != nil {
pid, err := readTextFile(lockFilePath)
if err == nil {
i, err := strconv.Atoi(pid)
if err == nil {
println("Running instance found, sending SIGTERM and exiting...")
syscall.Kill(i, syscall.SIGTERM)
}
}
os.Exit(0)
}
defer lockFile.Close()
// LANGUAGE
if *lang == "" && os.Getenv("LANG") != "" {
*lang = strings.Split(os.Getenv("LANG"), ".")[0]
}
println(fmt.Sprintf("lang: %s", *lang))
// ENVIRONMENT
configDirectory = configDir()
if !pathExists(filepath.Join(configDirectory, "drawer.css")) {
copyFile(filepath.Join(getDataHome(), "nwg-drawer/drawer.css"), filepath.Join(configDirectory, "drawer.css"))
}
cacheDirectory := cacheDir()
if cacheDirectory == "" {
log.Panic("Couldn't determine cache directory location")
}
// DATA
pinnedFile = filepath.Join(cacheDirectory, "nwg-pin-cache")
pinned, err = loadTextFile(pinnedFile)
if err != nil {
pinned = nil
}
println(fmt.Sprintf("Found %v pinned items", len(pinned)))
cssFile := filepath.Join(configDirectory, *cssFileName)
appDirs = getAppDirs()
setUpCategories()
desktopFiles := listDesktopFiles()
println(fmt.Sprintf("Found %v desktop files", len(desktopFiles)))
status = parseDesktopFiles(desktopFiles)
// For opening files we use xdg-open. As its configuration is PITA, we may override some associations
// in the ~/.config/nwg-panel/preferred-apps.json file.
paFile := filepath.Join(configDirectory, "preferred-apps.json")
preferredApps, err = loadPreferredApps(paFile)
if err != nil {
println(fmt.Sprintf("Custom associations file %s not found or invalid", paFile))
} else {
println(fmt.Sprintf("Found %v associations in %s", len(preferredApps), paFile))
}
// USER INTERFACE
gtk.Init(nil)
cssProvider, _ := gtk.CssProviderNew()
err = cssProvider.LoadFromPath(cssFile)
if err != nil {
println(fmt.Sprintf("ERROR: %s css file not found or erroneous. Using GTK styling.", cssFile))
println(fmt.Sprintf("%s", err))
} else {
println(fmt.Sprintf("Using style from %s", cssFile))
screen, _ := gdk.ScreenGetDefault()
gtk.AddProviderForScreen(screen, cssProvider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
}
win, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
if err != nil {
log.Fatal("Unable to create window:", err)
}
if wayland() {
layershell.InitForWindow(win)
var output2mon map[string]*gdk.Monitor
if *targetOutput != "" {
// We want to assign layershell to a monitor, but we only know the output name!
output2mon, err = mapOutputs()
if err == nil {
monitor := output2mon[*targetOutput]
layershell.SetMonitor(win, monitor)
} else {
println(fmt.Sprintf("%s", err))
}
}
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_BOTTOM, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_TOP, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_LEFT, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_RIGHT, true)
if *overlay {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_OVERLAY)
layershell.SetExclusiveZone(win, -1)
} else {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_TOP)
}
layershell.SetKeyboardMode(win, layershell.LAYER_SHELL_KEYBOARD_MODE_EXCLUSIVE)
}
win.Connect("destroy", func() {
gtk.MainQuit()
})
win.Connect("key-press-event", func(window *gtk.Window, event *gdk.Event) bool {
key := &gdk.EventKey{Event: event}
switch key.KeyVal() {
case gdk.KEY_Escape:
s, _ := searchEntry.GetText()
if s != "" {
searchEntry.GrabFocus()
searchEntry.SetText("")
} else {
gtk.MainQuit()
}
return false
case gdk.KEY_downarrow, gdk.KEY_Up, gdk.KEY_Down, gdk.KEY_Left, gdk.KEY_Right, gdk.KEY_Tab,
gdk.KEY_Return, gdk.KEY_Page_Up, gdk.KEY_Page_Down, gdk.KEY_Home, gdk.KEY_End:
return false
default:
if !searchEntry.IsFocus() {
searchEntry.GrabFocusWithoutSelecting()
}
return false
}
})
// Close the window on leave, but not immediately, to avoid accidental closes
win.Connect("leave-notify-event", func() {
src = glib.TimeoutAdd(uint(500), func() bool {
gtk.MainQuit()
return false
})
})
win.Connect("enter-notify-event", func() {
cancelClose()
})
/*
In case someone REALLY needed to use X11 - for some stupid Zoom meeting or something, this allows
the drawer to behave properly on Openbox, and possibly somewhere else. For sure not on i3.
This feature is not really supported and will stay undocumented.
*/
if !wayland() {
println("Not Wayland, oh really?")
win.SetDecorated(false)
win.Maximize()
}
// Set up UI
outerVBox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
win.Add(outerVBox)
searchBoxWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(searchBoxWrapper, false, false, 10)
searchEntry = setUpSearchEntry()
searchEntry.SetMaxWidthChars(30)
searchBoxWrapper.PackStart(searchEntry, true, false, 0)
if !*noCats {
categoriesWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
categoriesButtonBox := setUpCategoriesButtonBox()
categoriesWrapper.PackStart(categoriesButtonBox, true, false, 0)
outerVBox.PackStart(categoriesWrapper, false, false, 0)
}
pinnedWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedWrapper, false, false, 0)
pinnedFlowBoxWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedFlowBoxWrapper, false, false, 0)
pinnedFlowBox = setUpPinnedFlowBox()
resultWindow, _ = gtk.ScrolledWindowNew(nil, nil)
resultWindow.SetEvents(int(gdk.ALL_EVENTS_MASK))
resultWindow.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
resultWindow.Connect("enter-notify-event", func() {
cancelClose()
})
resultWindow.Connect("button-release-event", func(sw *gtk.ScrolledWindow, e *gdk.Event) bool {
btnEvent := gdk.EventButtonNewFromEvent(e)
if btnEvent.Button() == 1 || btnEvent.Button() == 3 {
gtk.MainQuit()
return true
}
return false
})
outerVBox.PackStart(resultWindow, true, true, 10)
resultsWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultWindow.Add(resultsWrapper)
appSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(appSearchResultWrapper, false, false, 0)
appFlowBox = setUpAppsFlowBox(nil, "")
// Focus 1st pinned item if any, otherwise focus 1st found app icon
var button gtk.IWidget
if pinnedFlowBox.GetChildren().Length() > 0 {
button, err = pinnedFlowBox.GetChildAtIndex(0).GetChild()
} else {
button, err = appFlowBox.GetChildAtIndex(0).GetChild()
}
if err == nil {
button.ToWidget().GrabFocus()
}
userDirsMap = mapXdgUserDirs()
placeholder, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(placeholder, true, true, 0)
placeholder.SetSizeRequest(20, 20)
if !*noFS {
wrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper.SetProperty("name", "files-box")
wrapper.PackStart(fileSearchResultWrapper, true, false, 0)
resultsWrapper.PackEnd(wrapper, false, false, 10)
}
statusLineWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(statusLineWrapper, false, false, 10)
statusLabel, _ = gtk.LabelNew(status)
statusLineWrapper.PackStart(statusLabel, true, false, 0)
win.ShowAll()
if !*noFS {
fileSearchResultWrapper.SetSizeRequest(appFlowBox.GetAllocatedWidth(), 1)
fileSearchResultWrapper.Hide()
}
if !*noCats {
categoriesWrapper.SetSizeRequest(1, categoriesWrapper.GetAllocatedHeight()*2)
}
t := time.Now()
println(fmt.Sprintf("UI created in %v ms. Thank you for your patience.", t.Sub(timeStart).Milliseconds()))
gtk.Main()
}
| {
s = strings.TrimSpace(s)
if s == "" {
return fallback
}
return s
} | identifier_body |
main.go | package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/allan-simon/go-singleinstance"
"github.com/dlasky/gotk3-layershell/layershell"
"github.com/gotk3/gotk3/gdk"
"github.com/gotk3/gotk3/glib"
"github.com/gotk3/gotk3/gtk"
)
const version = "0.1.10"
var (
appDirs []string
configDirectory string
pinnedFile string
pinned []string
src glib.SourceHandle
id2entry map[string]desktopEntry
preferredApps map[string]interface{}
)
var categoryNames = [...]string{
"utility",
"development",
"game",
"graphics",
"internet-and-network",
"office",
"audio-video",
"system-tools",
"other",
}
type category struct {
Name string
DisplayName string
Icon string
}
var categories []category
type desktopEntry struct {
DesktopID string
Name string
NameLoc string
Comment string
CommentLoc string
Icon string
Exec string
Category string
Terminal bool
NoDisplay bool
}
// slices below will hold DesktopID strings
var (
listUtility []string
listDevelopment []string
listGame []string
listGraphics []string
listInternetAndNetwork []string
listOffice []string
listAudioVideo []string
listSystemTools []string
listOther []string
)
var desktopEntries []desktopEntry
// UI elements
var (
resultWindow *gtk.ScrolledWindow
fileSearchResults []string
searchEntry *gtk.SearchEntry
phrase string
fileSearchResultFlowBox *gtk.FlowBox
userDirsMap map[string]string
appFlowBox *gtk.FlowBox
appSearchResultWrapper *gtk.Box
fileSearchResultWrapper *gtk.Box
pinnedFlowBox *gtk.FlowBox
pinnedFlowBoxWrapper *gtk.Box
categoriesWrapper *gtk.Box
catButtons []*gtk.Button
statusLabel *gtk.Label
status string
ignore string
)
func defaultStringIfBlank(s, fallback string) string {
s = strings.TrimSpace(s)
if s == "" {
return fallback
}
return s
}
// Flags
var cssFileName = flag.String("s", "drawer.css", "Styling: css file name")
var targetOutput = flag.String("o", "", "name of the Output to display the drawer on (sway only)")
var displayVersion = flag.Bool("v", false, "display Version information")
var overlay = flag.Bool("ovl", false, "use OVerLay layer")
var iconSize = flag.Int("is", 64, "Icon Size")
var fsColumns = flag.Uint("fscol", 2, "File Search result COLumns")
var columnsNumber = flag.Uint("c", 6, "number of Columns")
var itemSpacing = flag.Uint("spacing", 20, "icon spacing")
var lang = flag.String("lang", "", "force lang, e.g. \"en\", \"pl\"")
var fileManager = flag.String("fm", "thunar", "File Manager")
var term = flag.String("term", defaultStringIfBlank(os.Getenv("TERM"), "alacritty"), "Terminal emulator")
var nameLimit = flag.Int("fslen", 80, "File Search name length Limit")
var noCats = flag.Bool("nocats", false, "Disable filtering by category")
var noFS = flag.Bool("nofs", false, "Disable file search")
func main() {
timeStart := time.Now()
flag.Parse()
if *displayVersion {
fmt.Printf("nwg-drawer version %s\n", version)
os.Exit(0)
}
// Gentle SIGTERM handler thanks to reiki4040 https://gist.github.com/reiki4040/be3705f307d3cd136e85
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
go func() {
for {
s := <-signalChan
if s == syscall.SIGTERM {
println("SIGTERM received, bye bye!")
gtk.MainQuit()
}
}
}()
// We want the same key/mouse binding to turn the dock off: kill the running instance and exit.
lockFilePath := fmt.Sprintf("%s/nwg-drawer.lock", tempDir())
lockFile, err := singleinstance.CreateLockFile(lockFilePath)
if err != nil {
pid, err := readTextFile(lockFilePath)
if err == nil {
i, err := strconv.Atoi(pid)
if err == nil {
println("Running instance found, sending SIGTERM and exiting...")
syscall.Kill(i, syscall.SIGTERM)
}
}
os.Exit(0)
}
defer lockFile.Close()
// LANGUAGE
if *lang == "" && os.Getenv("LANG") != "" {
*lang = strings.Split(os.Getenv("LANG"), ".")[0]
}
println(fmt.Sprintf("lang: %s", *lang))
// ENVIRONMENT
configDirectory = configDir()
if !pathExists(filepath.Join(configDirectory, "drawer.css")) {
copyFile(filepath.Join(getDataHome(), "nwg-drawer/drawer.css"), filepath.Join(configDirectory, "drawer.css"))
}
cacheDirectory := cacheDir()
if cacheDirectory == "" {
log.Panic("Couldn't determine cache directory location")
}
// DATA
pinnedFile = filepath.Join(cacheDirectory, "nwg-pin-cache")
pinned, err = loadTextFile(pinnedFile)
if err != nil {
pinned = nil
}
println(fmt.Sprintf("Found %v pinned items", len(pinned)))
cssFile := filepath.Join(configDirectory, *cssFileName)
appDirs = getAppDirs()
setUpCategories()
desktopFiles := listDesktopFiles()
println(fmt.Sprintf("Found %v desktop files", len(desktopFiles)))
status = parseDesktopFiles(desktopFiles)
// For opening files we use xdg-open. As its configuration is PITA, we may override some associations
// in the ~/.config/nwg-panel/preferred-apps.json file.
paFile := filepath.Join(configDirectory, "preferred-apps.json")
preferredApps, err = loadPreferredApps(paFile)
if err != nil {
println(fmt.Sprintf("Custom associations file %s not found or invalid", paFile))
} else {
println(fmt.Sprintf("Found %v associations in %s", len(preferredApps), paFile))
}
// USER INTERFACE
gtk.Init(nil)
cssProvider, _ := gtk.CssProviderNew()
err = cssProvider.LoadFromPath(cssFile)
if err != nil {
println(fmt.Sprintf("ERROR: %s css file not found or erroneous. Using GTK styling.", cssFile))
println(fmt.Sprintf("%s", err))
} else {
println(fmt.Sprintf("Using style from %s", cssFile))
screen, _ := gdk.ScreenGetDefault()
gtk.AddProviderForScreen(screen, cssProvider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
}
win, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
if err != nil {
log.Fatal("Unable to create window:", err)
}
if wayland() {
layershell.InitForWindow(win)
var output2mon map[string]*gdk.Monitor
if *targetOutput != "" {
// We want to assign layershell to a monitor, but we only know the output name!
output2mon, err = mapOutputs()
if err == nil {
monitor := output2mon[*targetOutput]
layershell.SetMonitor(win, monitor)
} else {
println(fmt.Sprintf("%s", err))
}
}
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_BOTTOM, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_TOP, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_LEFT, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_RIGHT, true)
if *overlay {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_OVERLAY)
layershell.SetExclusiveZone(win, -1)
} else {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_TOP)
}
layershell.SetKeyboardMode(win, layershell.LAYER_SHELL_KEYBOARD_MODE_EXCLUSIVE)
}
win.Connect("destroy", func() {
gtk.MainQuit()
})
win.Connect("key-press-event", func(window *gtk.Window, event *gdk.Event) bool {
key := &gdk.EventKey{Event: event}
switch key.KeyVal() {
case gdk.KEY_Escape:
s, _ := searchEntry.GetText()
if s != "" {
searchEntry.GrabFocus()
searchEntry.SetText("")
} else {
gtk.MainQuit()
}
return false
case gdk.KEY_downarrow, gdk.KEY_Up, gdk.KEY_Down, gdk.KEY_Left, gdk.KEY_Right, gdk.KEY_Tab,
gdk.KEY_Return, gdk.KEY_Page_Up, gdk.KEY_Page_Down, gdk.KEY_Home, gdk.KEY_End:
return false
default:
if !searchEntry.IsFocus() {
searchEntry.GrabFocusWithoutSelecting()
}
return false
}
})
// Close the window on leave, but not immediately, to avoid accidental closes
win.Connect("leave-notify-event", func() {
src = glib.TimeoutAdd(uint(500), func() bool {
gtk.MainQuit()
return false
})
})
win.Connect("enter-notify-event", func() {
cancelClose()
})
/*
In case someone REALLY needed to use X11 - for some stupid Zoom meeting or something, this allows
the drawer to behave properly on Openbox, and possibly somewhere else. For sure not on i3.
This feature is not really supported and will stay undocumented.
*/
if !wayland() {
println("Not Wayland, oh really?")
win.SetDecorated(false)
win.Maximize()
}
// Set up UI
outerVBox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
win.Add(outerVBox)
searchBoxWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(searchBoxWrapper, false, false, 10)
searchEntry = setUpSearchEntry()
searchEntry.SetMaxWidthChars(30)
searchBoxWrapper.PackStart(searchEntry, true, false, 0)
if !*noCats {
categoriesWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
categoriesButtonBox := setUpCategoriesButtonBox()
categoriesWrapper.PackStart(categoriesButtonBox, true, false, 0)
outerVBox.PackStart(categoriesWrapper, false, false, 0)
}
pinnedWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedWrapper, false, false, 0)
pinnedFlowBoxWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedFlowBoxWrapper, false, false, 0)
pinnedFlowBox = setUpPinnedFlowBox()
resultWindow, _ = gtk.ScrolledWindowNew(nil, nil)
resultWindow.SetEvents(int(gdk.ALL_EVENTS_MASK))
resultWindow.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
resultWindow.Connect("enter-notify-event", func() {
cancelClose()
})
resultWindow.Connect("button-release-event", func(sw *gtk.ScrolledWindow, e *gdk.Event) bool {
btnEvent := gdk.EventButtonNewFromEvent(e)
if btnEvent.Button() == 1 || btnEvent.Button() == 3 {
gtk.MainQuit()
return true
}
return false
})
outerVBox.PackStart(resultWindow, true, true, 10)
resultsWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultWindow.Add(resultsWrapper)
appSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(appSearchResultWrapper, false, false, 0)
appFlowBox = setUpAppsFlowBox(nil, "")
// Focus 1st pinned item if any, otherwise focus 1st found app icon
var button gtk.IWidget
if pinnedFlowBox.GetChildren().Length() > 0 {
button, err = pinnedFlowBox.GetChildAtIndex(0).GetChild()
} else {
button, err = appFlowBox.GetChildAtIndex(0).GetChild()
}
if err == nil {
button.ToWidget().GrabFocus()
}
userDirsMap = mapXdgUserDirs()
placeholder, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(placeholder, true, true, 0)
placeholder.SetSizeRequest(20, 20)
if !*noFS {
wrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper.SetProperty("name", "files-box")
wrapper.PackStart(fileSearchResultWrapper, true, false, 0)
resultsWrapper.PackEnd(wrapper, false, false, 10)
}
statusLineWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(statusLineWrapper, false, false, 10)
statusLabel, _ = gtk.LabelNew(status)
statusLineWrapper.PackStart(statusLabel, true, false, 0)
win.ShowAll()
if !*noFS {
fileSearchResultWrapper.SetSizeRequest(appFlowBox.GetAllocatedWidth(), 1)
fileSearchResultWrapper.Hide()
}
if !*noCats {
categoriesWrapper.SetSizeRequest(1, categoriesWrapper.GetAllocatedHeight()*2)
}
t := time.Now() | println(fmt.Sprintf("UI created in %v ms. Thank you for your patience.", t.Sub(timeStart).Milliseconds()))
gtk.Main()
} | random_line_split | |
main.go | package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/allan-simon/go-singleinstance"
"github.com/dlasky/gotk3-layershell/layershell"
"github.com/gotk3/gotk3/gdk"
"github.com/gotk3/gotk3/glib"
"github.com/gotk3/gotk3/gtk"
)
const version = "0.1.10"
var (
appDirs []string
configDirectory string
pinnedFile string
pinned []string
src glib.SourceHandle
id2entry map[string]desktopEntry
preferredApps map[string]interface{}
)
var categoryNames = [...]string{
"utility",
"development",
"game",
"graphics",
"internet-and-network",
"office",
"audio-video",
"system-tools",
"other",
}
type category struct {
Name string
DisplayName string
Icon string
}
var categories []category
type desktopEntry struct {
DesktopID string
Name string
NameLoc string
Comment string
CommentLoc string
Icon string
Exec string
Category string
Terminal bool
NoDisplay bool
}
// slices below will hold DesktopID strings
var (
listUtility []string
listDevelopment []string
listGame []string
listGraphics []string
listInternetAndNetwork []string
listOffice []string
listAudioVideo []string
listSystemTools []string
listOther []string
)
var desktopEntries []desktopEntry
// UI elements
var (
resultWindow *gtk.ScrolledWindow
fileSearchResults []string
searchEntry *gtk.SearchEntry
phrase string
fileSearchResultFlowBox *gtk.FlowBox
userDirsMap map[string]string
appFlowBox *gtk.FlowBox
appSearchResultWrapper *gtk.Box
fileSearchResultWrapper *gtk.Box
pinnedFlowBox *gtk.FlowBox
pinnedFlowBoxWrapper *gtk.Box
categoriesWrapper *gtk.Box
catButtons []*gtk.Button
statusLabel *gtk.Label
status string
ignore string
)
func defaultStringIfBlank(s, fallback string) string {
s = strings.TrimSpace(s)
if s == "" {
return fallback
}
return s
}
// Flags
var cssFileName = flag.String("s", "drawer.css", "Styling: css file name")
var targetOutput = flag.String("o", "", "name of the Output to display the drawer on (sway only)")
var displayVersion = flag.Bool("v", false, "display Version information")
var overlay = flag.Bool("ovl", false, "use OVerLay layer")
var iconSize = flag.Int("is", 64, "Icon Size")
var fsColumns = flag.Uint("fscol", 2, "File Search result COLumns")
var columnsNumber = flag.Uint("c", 6, "number of Columns")
var itemSpacing = flag.Uint("spacing", 20, "icon spacing")
var lang = flag.String("lang", "", "force lang, e.g. \"en\", \"pl\"")
var fileManager = flag.String("fm", "thunar", "File Manager")
var term = flag.String("term", defaultStringIfBlank(os.Getenv("TERM"), "alacritty"), "Terminal emulator")
var nameLimit = flag.Int("fslen", 80, "File Search name length Limit")
var noCats = flag.Bool("nocats", false, "Disable filtering by category")
var noFS = flag.Bool("nofs", false, "Disable file search")
func main() {
timeStart := time.Now()
flag.Parse()
if *displayVersion |
// Gentle SIGTERM handler thanks to reiki4040 https://gist.github.com/reiki4040/be3705f307d3cd136e85
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
go func() {
for {
s := <-signalChan
if s == syscall.SIGTERM {
println("SIGTERM received, bye bye!")
gtk.MainQuit()
}
}
}()
// We want the same key/mouse binding to turn the dock off: kill the running instance and exit.
lockFilePath := fmt.Sprintf("%s/nwg-drawer.lock", tempDir())
lockFile, err := singleinstance.CreateLockFile(lockFilePath)
if err != nil {
pid, err := readTextFile(lockFilePath)
if err == nil {
i, err := strconv.Atoi(pid)
if err == nil {
println("Running instance found, sending SIGTERM and exiting...")
syscall.Kill(i, syscall.SIGTERM)
}
}
os.Exit(0)
}
defer lockFile.Close()
// LANGUAGE
if *lang == "" && os.Getenv("LANG") != "" {
*lang = strings.Split(os.Getenv("LANG"), ".")[0]
}
println(fmt.Sprintf("lang: %s", *lang))
// ENVIRONMENT
configDirectory = configDir()
if !pathExists(filepath.Join(configDirectory, "drawer.css")) {
copyFile(filepath.Join(getDataHome(), "nwg-drawer/drawer.css"), filepath.Join(configDirectory, "drawer.css"))
}
cacheDirectory := cacheDir()
if cacheDirectory == "" {
log.Panic("Couldn't determine cache directory location")
}
// DATA
pinnedFile = filepath.Join(cacheDirectory, "nwg-pin-cache")
pinned, err = loadTextFile(pinnedFile)
if err != nil {
pinned = nil
}
println(fmt.Sprintf("Found %v pinned items", len(pinned)))
cssFile := filepath.Join(configDirectory, *cssFileName)
appDirs = getAppDirs()
setUpCategories()
desktopFiles := listDesktopFiles()
println(fmt.Sprintf("Found %v desktop files", len(desktopFiles)))
status = parseDesktopFiles(desktopFiles)
// For opening files we use xdg-open. As its configuration is PITA, we may override some associations
// in the ~/.config/nwg-panel/preferred-apps.json file.
paFile := filepath.Join(configDirectory, "preferred-apps.json")
preferredApps, err = loadPreferredApps(paFile)
if err != nil {
println(fmt.Sprintf("Custom associations file %s not found or invalid", paFile))
} else {
println(fmt.Sprintf("Found %v associations in %s", len(preferredApps), paFile))
}
// USER INTERFACE
gtk.Init(nil)
cssProvider, _ := gtk.CssProviderNew()
err = cssProvider.LoadFromPath(cssFile)
if err != nil {
println(fmt.Sprintf("ERROR: %s css file not found or erroneous. Using GTK styling.", cssFile))
println(fmt.Sprintf("%s", err))
} else {
println(fmt.Sprintf("Using style from %s", cssFile))
screen, _ := gdk.ScreenGetDefault()
gtk.AddProviderForScreen(screen, cssProvider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
}
win, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
if err != nil {
log.Fatal("Unable to create window:", err)
}
if wayland() {
layershell.InitForWindow(win)
var output2mon map[string]*gdk.Monitor
if *targetOutput != "" {
// We want to assign layershell to a monitor, but we only know the output name!
output2mon, err = mapOutputs()
if err == nil {
monitor := output2mon[*targetOutput]
layershell.SetMonitor(win, monitor)
} else {
println(fmt.Sprintf("%s", err))
}
}
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_BOTTOM, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_TOP, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_LEFT, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_RIGHT, true)
if *overlay {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_OVERLAY)
layershell.SetExclusiveZone(win, -1)
} else {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_TOP)
}
layershell.SetKeyboardMode(win, layershell.LAYER_SHELL_KEYBOARD_MODE_EXCLUSIVE)
}
win.Connect("destroy", func() {
gtk.MainQuit()
})
win.Connect("key-press-event", func(window *gtk.Window, event *gdk.Event) bool {
key := &gdk.EventKey{Event: event}
switch key.KeyVal() {
case gdk.KEY_Escape:
s, _ := searchEntry.GetText()
if s != "" {
searchEntry.GrabFocus()
searchEntry.SetText("")
} else {
gtk.MainQuit()
}
return false
case gdk.KEY_downarrow, gdk.KEY_Up, gdk.KEY_Down, gdk.KEY_Left, gdk.KEY_Right, gdk.KEY_Tab,
gdk.KEY_Return, gdk.KEY_Page_Up, gdk.KEY_Page_Down, gdk.KEY_Home, gdk.KEY_End:
return false
default:
if !searchEntry.IsFocus() {
searchEntry.GrabFocusWithoutSelecting()
}
return false
}
})
// Close the window on leave, but not immediately, to avoid accidental closes
win.Connect("leave-notify-event", func() {
src = glib.TimeoutAdd(uint(500), func() bool {
gtk.MainQuit()
return false
})
})
win.Connect("enter-notify-event", func() {
cancelClose()
})
/*
In case someone REALLY needed to use X11 - for some stupid Zoom meeting or something, this allows
the drawer to behave properly on Openbox, and possibly somewhere else. For sure not on i3.
This feature is not really supported and will stay undocumented.
*/
if !wayland() {
println("Not Wayland, oh really?")
win.SetDecorated(false)
win.Maximize()
}
// Set up UI
outerVBox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
win.Add(outerVBox)
searchBoxWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(searchBoxWrapper, false, false, 10)
searchEntry = setUpSearchEntry()
searchEntry.SetMaxWidthChars(30)
searchBoxWrapper.PackStart(searchEntry, true, false, 0)
if !*noCats {
categoriesWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
categoriesButtonBox := setUpCategoriesButtonBox()
categoriesWrapper.PackStart(categoriesButtonBox, true, false, 0)
outerVBox.PackStart(categoriesWrapper, false, false, 0)
}
pinnedWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedWrapper, false, false, 0)
pinnedFlowBoxWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedFlowBoxWrapper, false, false, 0)
pinnedFlowBox = setUpPinnedFlowBox()
resultWindow, _ = gtk.ScrolledWindowNew(nil, nil)
resultWindow.SetEvents(int(gdk.ALL_EVENTS_MASK))
resultWindow.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
resultWindow.Connect("enter-notify-event", func() {
cancelClose()
})
resultWindow.Connect("button-release-event", func(sw *gtk.ScrolledWindow, e *gdk.Event) bool {
btnEvent := gdk.EventButtonNewFromEvent(e)
if btnEvent.Button() == 1 || btnEvent.Button() == 3 {
gtk.MainQuit()
return true
}
return false
})
outerVBox.PackStart(resultWindow, true, true, 10)
resultsWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultWindow.Add(resultsWrapper)
appSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(appSearchResultWrapper, false, false, 0)
appFlowBox = setUpAppsFlowBox(nil, "")
// Focus 1st pinned item if any, otherwise focus 1st found app icon
var button gtk.IWidget
if pinnedFlowBox.GetChildren().Length() > 0 {
button, err = pinnedFlowBox.GetChildAtIndex(0).GetChild()
} else {
button, err = appFlowBox.GetChildAtIndex(0).GetChild()
}
if err == nil {
button.ToWidget().GrabFocus()
}
userDirsMap = mapXdgUserDirs()
placeholder, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(placeholder, true, true, 0)
placeholder.SetSizeRequest(20, 20)
if !*noFS {
wrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper.SetProperty("name", "files-box")
wrapper.PackStart(fileSearchResultWrapper, true, false, 0)
resultsWrapper.PackEnd(wrapper, false, false, 10)
}
statusLineWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(statusLineWrapper, false, false, 10)
statusLabel, _ = gtk.LabelNew(status)
statusLineWrapper.PackStart(statusLabel, true, false, 0)
win.ShowAll()
if !*noFS {
fileSearchResultWrapper.SetSizeRequest(appFlowBox.GetAllocatedWidth(), 1)
fileSearchResultWrapper.Hide()
}
if !*noCats {
categoriesWrapper.SetSizeRequest(1, categoriesWrapper.GetAllocatedHeight()*2)
}
t := time.Now()
println(fmt.Sprintf("UI created in %v ms. Thank you for your patience.", t.Sub(timeStart).Milliseconds()))
gtk.Main()
}
| {
fmt.Printf("nwg-drawer version %s\n", version)
os.Exit(0)
} | conditional_block |
tabs.rs | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A container for all the tabs being edited. Also functions as main dispatch for RPC.
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::{PathBuf, Path};
use std::fs::File;
use std::sync::{Arc, Mutex};
use serde_json::value::Value;
use xi_rope::rope::Rope;
use editor::Editor;
use rpc::{CoreCommand, EditCommand};
use styles::{Style, StyleMap};
use MainPeer;
/// ViewIdentifiers are the primary means of routing messages between xi-core and a client view.
pub type ViewIdentifier = String;
/// BufferIdentifiers uniquely identify open buffers.
type BufferIdentifier = String;
// TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?"
pub struct Tabs<W: Write> {
/// maps file names to buffer identifiers. If a client asks to open a file that is already
/// open, we treat it as a request for a new view.
open_files: BTreeMap<PathBuf, BufferIdentifier>,
/// maps buffer identifiers (filenames) to editor instances
buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>,
/// maps view identifiers to editor instances. All actions originate in a view; this lets us
/// route messages correctly when multiple views share a buffer.
views: BTreeMap<ViewIdentifier, BufferIdentifier>,
id_counter: usize,
kill_ring: Arc<Mutex<Rope>>,
style_map: Arc<Mutex<StyleMap>>,
}
#[derive(Clone)]
pub struct TabCtx<W: Write> {
kill_ring: Arc<Mutex<Rope>>,
rpc_peer: MainPeer<W>,
style_map: Arc<Mutex<StyleMap>>,
}
impl<W: Write + Send + 'static> Tabs<W> {
pub fn new() -> Tabs<W> {
Tabs {
open_files: BTreeMap::new(),
buffers: BTreeMap::new(),
views: BTreeMap::new(),
id_counter: 0,
kill_ring: Arc::new(Mutex::new(Rope::from(""))),
style_map: Arc::new(Mutex::new(StyleMap::new())),
}
}
fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> {
TabCtx {
kill_ring: self.kill_ring.clone(),
rpc_peer: peer.clone(),
style_map: self.style_map.clone(),
}
}
fn next_view_id(&mut self) -> ViewIdentifier {
self.id_counter += 1;
format!("view-id-{}", self.id_counter)
}
fn next_buffer_id(&mut self) -> BufferIdentifier {
self.id_counter += 1;
format!("buffer-id-{}", self.id_counter)
}
pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> {
use rpc::CoreCommand::*;
match cmd {
CloseView { view_id } => {
self.do_close_view(view_id);
None
},
NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))),
Save { view_id, file_path } => self.do_save(view_id, file_path),
Edit { view_id, edit_command } => self.do_edit(view_id, edit_command),
}
}
/// Creates a new view and associates it with a buffer.
///
/// This function always creates a new view and associates it with a buffer (which we access
///through an `Editor` instance). This buffer may be existing, or it may be created.
///
///A `new_view` request is handled differently depending on the `file_path` argument, and on
///application state. If `file_path` is given and a buffer associated with that file is already
///open, we create a new view into the existing buffer. If `file_path` is given and that file
///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a
///new empty buffer.
fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier {
// three code paths: new buffer, open file, and new view into existing buffer
let view_id = self.next_view_id();
if let Some(file_path) = file_path.map(PathBuf::from) {
// TODO: here, we should eventually be adding views to the existing editor.
// for the time being, we just create a new empty view.
if self.open_files.contains_key(&file_path) {
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
// let buffer_id = self.open_files.get(&file_path).unwrap().to_owned();
//self.add_view(&view_id, buffer_id);
} else {
// not open: create new buffer_id and open file
let buffer_id = self.next_buffer_id();
self.open_files.insert(file_path.to_owned(), buffer_id.clone());
self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path);
// above fn has two branches: set path after
self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path);
}
} else {
// file_path was nil: create a new empty buffer.
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
}
view_id
}
fn do_close_view(&mut self, view_id: &str) {
self.close_view(view_id);
}
fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>,
view_id: &str, buffer_id: BufferIdentifier) {
let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id);
self.finalize_new_view(view_id, buffer_id, editor);
}
fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) {
match self.read_file(&path) {
Ok(contents) => {
let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents);
self.finalize_new_view(view_id, buffer_id, editor)
}
Err(err) => |
}
}
/// Adds a new view to an existing editor instance.
#[allow(unreachable_code, unused_variables, dead_code)]
fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) {
panic!("add_view should not currently be accessible");
let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id");
self.views.insert(view_id.to_owned(), buffer_id);
editor.lock().unwrap().add_view(view_id);
}
fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) {
self.views.insert(view_id.to_owned(), buffer_id.clone());
self.buffers.insert(buffer_id, editor.clone());
}
fn read_file<P: AsRef<Path>>(&self, path: P) -> io::Result<String> {
let mut f = File::open(path)?;
let mut s = String::new();
f.read_to_string(&mut s)?;
Ok(s)
}
fn close_view(&mut self, view_id: &str) {
let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view");
let (has_views, path) = {
let editor = self.buffers.get(&buf_id).expect("missing editor when closing view");
let mut editor = editor.lock().unwrap();
editor.remove_view(view_id);
(editor.has_views(), editor.get_path().map(PathBuf::from))
};
if !has_views {
self.buffers.remove(&buf_id);
if let Some(path) = path {
self.open_files.remove(&path);
}
}
}
fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
let editor = self.buffers.get(buffer_id)
.expect(&format!("missing editor for buffer {}", buffer_id));
let file_path = PathBuf::from(file_path);
// if this is a new path for an existing file, we have a bit of housekeeping to do:
if let Some(prev_path) = editor.lock().unwrap().get_path() {
if prev_path != file_path {
self.open_files.remove(prev_path);
}
}
editor.lock().unwrap().do_save(&file_path);
self.open_files.insert(file_path, buffer_id.to_owned());
None
}
fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
if let Some(editor) = self.buffers.get(buffer_id) {
Editor::do_rpc(editor, view_id, cmd)
} else {
print_err!("buffer not found: {}, for view {}", buffer_id, view_id);
None
}
}
pub fn handle_idle(&self) {
for editor in self.buffers.values() {
editor.lock().unwrap().render();
}
}
}
impl<W: Write> TabCtx<W> {
pub fn update_view(&self, view_id: &str, update: &Value) {
self.rpc_peer.send_rpc_notification("update",
&json!({
"view_id": view_id,
"update": update,
}));
}
pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) {
self.rpc_peer.send_rpc_notification("scroll_to",
&json!({
"view_id": view_id,
"line": line,
"col": col,
}));
}
pub fn get_kill_ring(&self) -> Rope {
self.kill_ring.lock().unwrap().clone()
}
pub fn set_kill_ring(&self, val: Rope) {
let mut kill_ring = self.kill_ring.lock().unwrap();
*kill_ring = val;
}
pub fn alert(&self, msg: &str) {
self.rpc_peer.send_rpc_notification("alert",
&json!({
"msg": msg,
}));
}
// Get the index for a given style. If the style is not in the existing
// style map, then issues a def_style request to the front end. Intended
// to be reasonably efficient, but ideally callers would do their own
// indexing.
pub fn get_style_id(&self, style: &Style) -> usize {
let mut style_map = self.style_map.lock().unwrap();
if let Some(ix) = style_map.lookup(style) {
return ix;
}
let ix = style_map.add(style);
self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix));
ix
}
}
| {
// TODO: we should be reporting errors to the client
// (if this is even an error? we treat opening a non-existent file as a new buffer,
// but set the editor's path)
print_err!("unable to read file: {}, error: {:?}", buffer_id, err);
self.new_empty_view(rpc_peer, view_id, buffer_id);
} | conditional_block |
tabs.rs | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A container for all the tabs being edited. Also functions as main dispatch for RPC.
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::{PathBuf, Path};
use std::fs::File;
use std::sync::{Arc, Mutex};
use serde_json::value::Value;
use xi_rope::rope::Rope;
use editor::Editor;
use rpc::{CoreCommand, EditCommand};
use styles::{Style, StyleMap};
use MainPeer;
/// ViewIdentifiers are the primary means of routing messages between xi-core and a client view.
pub type ViewIdentifier = String;
/// BufferIdentifiers uniquely identify open buffers.
type BufferIdentifier = String;
// TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?"
pub struct Tabs<W: Write> {
/// maps file names to buffer identifiers. If a client asks to open a file that is already
/// open, we treat it as a request for a new view.
open_files: BTreeMap<PathBuf, BufferIdentifier>,
/// maps buffer identifiers (filenames) to editor instances
buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>,
/// maps view identifiers to editor instances. All actions originate in a view; this lets us
/// route messages correctly when multiple views share a buffer.
views: BTreeMap<ViewIdentifier, BufferIdentifier>,
id_counter: usize,
kill_ring: Arc<Mutex<Rope>>,
style_map: Arc<Mutex<StyleMap>>,
}
#[derive(Clone)]
pub struct TabCtx<W: Write> {
kill_ring: Arc<Mutex<Rope>>,
rpc_peer: MainPeer<W>,
style_map: Arc<Mutex<StyleMap>>,
}
impl<W: Write + Send + 'static> Tabs<W> {
pub fn new() -> Tabs<W> {
Tabs {
open_files: BTreeMap::new(),
buffers: BTreeMap::new(),
views: BTreeMap::new(),
id_counter: 0,
kill_ring: Arc::new(Mutex::new(Rope::from(""))),
style_map: Arc::new(Mutex::new(StyleMap::new())),
}
}
fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> {
TabCtx {
kill_ring: self.kill_ring.clone(),
rpc_peer: peer.clone(),
style_map: self.style_map.clone(),
}
}
fn next_view_id(&mut self) -> ViewIdentifier {
self.id_counter += 1;
format!("view-id-{}", self.id_counter)
}
fn next_buffer_id(&mut self) -> BufferIdentifier {
self.id_counter += 1;
format!("buffer-id-{}", self.id_counter)
}
pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> {
use rpc::CoreCommand::*;
match cmd {
CloseView { view_id } => {
self.do_close_view(view_id);
None
},
NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))),
Save { view_id, file_path } => self.do_save(view_id, file_path),
Edit { view_id, edit_command } => self.do_edit(view_id, edit_command),
}
}
/// Creates a new view and associates it with a buffer.
///
/// This function always creates a new view and associates it with a buffer (which we access
///through an `Editor` instance). This buffer may be existing, or it may be created.
///
///A `new_view` request is handled differently depending on the `file_path` argument, and on
///application state. If `file_path` is given and a buffer associated with that file is already
///open, we create a new view into the existing buffer. If `file_path` is given and that file
///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a
///new empty buffer.
fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier {
// three code paths: new buffer, open file, and new view into existing buffer
let view_id = self.next_view_id();
if let Some(file_path) = file_path.map(PathBuf::from) {
// TODO: here, we should eventually be adding views to the existing editor.
// for the time being, we just create a new empty view.
if self.open_files.contains_key(&file_path) {
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
// let buffer_id = self.open_files.get(&file_path).unwrap().to_owned();
//self.add_view(&view_id, buffer_id);
} else {
// not open: create new buffer_id and open file
let buffer_id = self.next_buffer_id();
self.open_files.insert(file_path.to_owned(), buffer_id.clone());
self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path);
// above fn has two branches: set path after
self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path);
}
} else {
// file_path was nil: create a new empty buffer.
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
}
view_id
}
fn do_close_view(&mut self, view_id: &str) {
self.close_view(view_id);
}
fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>,
view_id: &str, buffer_id: BufferIdentifier) {
let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id);
self.finalize_new_view(view_id, buffer_id, editor);
}
fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) {
match self.read_file(&path) {
Ok(contents) => {
let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents);
self.finalize_new_view(view_id, buffer_id, editor)
}
Err(err) => {
// TODO: we should be reporting errors to the client
// (if this is even an error? we treat opening a non-existent file as a new buffer,
// but set the editor's path)
print_err!("unable to read file: {}, error: {:?}", buffer_id, err);
self.new_empty_view(rpc_peer, view_id, buffer_id);
}
}
}
/// Adds a new view to an existing editor instance.
#[allow(unreachable_code, unused_variables, dead_code)]
fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) {
panic!("add_view should not currently be accessible");
let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id");
self.views.insert(view_id.to_owned(), buffer_id);
editor.lock().unwrap().add_view(view_id);
}
fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) {
self.views.insert(view_id.to_owned(), buffer_id.clone());
self.buffers.insert(buffer_id, editor.clone());
}
fn read_file<P: AsRef<Path>>(&self, path: P) -> io::Result<String> {
let mut f = File::open(path)?;
let mut s = String::new();
f.read_to_string(&mut s)?;
Ok(s)
}
fn close_view(&mut self, view_id: &str) {
let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view");
let (has_views, path) = {
let editor = self.buffers.get(&buf_id).expect("missing editor when closing view");
let mut editor = editor.lock().unwrap();
editor.remove_view(view_id); | self.buffers.remove(&buf_id);
if let Some(path) = path {
self.open_files.remove(&path);
}
}
}
fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
let editor = self.buffers.get(buffer_id)
.expect(&format!("missing editor for buffer {}", buffer_id));
let file_path = PathBuf::from(file_path);
// if this is a new path for an existing file, we have a bit of housekeeping to do:
if let Some(prev_path) = editor.lock().unwrap().get_path() {
if prev_path != file_path {
self.open_files.remove(prev_path);
}
}
editor.lock().unwrap().do_save(&file_path);
self.open_files.insert(file_path, buffer_id.to_owned());
None
}
fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
if let Some(editor) = self.buffers.get(buffer_id) {
Editor::do_rpc(editor, view_id, cmd)
} else {
print_err!("buffer not found: {}, for view {}", buffer_id, view_id);
None
}
}
pub fn handle_idle(&self) {
for editor in self.buffers.values() {
editor.lock().unwrap().render();
}
}
}
impl<W: Write> TabCtx<W> {
pub fn update_view(&self, view_id: &str, update: &Value) {
self.rpc_peer.send_rpc_notification("update",
&json!({
"view_id": view_id,
"update": update,
}));
}
pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) {
self.rpc_peer.send_rpc_notification("scroll_to",
&json!({
"view_id": view_id,
"line": line,
"col": col,
}));
}
pub fn get_kill_ring(&self) -> Rope {
self.kill_ring.lock().unwrap().clone()
}
pub fn set_kill_ring(&self, val: Rope) {
let mut kill_ring = self.kill_ring.lock().unwrap();
*kill_ring = val;
}
pub fn alert(&self, msg: &str) {
self.rpc_peer.send_rpc_notification("alert",
&json!({
"msg": msg,
}));
}
// Get the index for a given style. If the style is not in the existing
// style map, then issues a def_style request to the front end. Intended
// to be reasonably efficient, but ideally callers would do their own
// indexing.
pub fn get_style_id(&self, style: &Style) -> usize {
let mut style_map = self.style_map.lock().unwrap();
if let Some(ix) = style_map.lookup(style) {
return ix;
}
let ix = style_map.add(style);
self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix));
ix
}
} | (editor.has_views(), editor.get_path().map(PathBuf::from))
};
if !has_views { | random_line_split |
tabs.rs | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A container for all the tabs being edited. Also functions as main dispatch for RPC.
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::{PathBuf, Path};
use std::fs::File;
use std::sync::{Arc, Mutex};
use serde_json::value::Value;
use xi_rope::rope::Rope;
use editor::Editor;
use rpc::{CoreCommand, EditCommand};
use styles::{Style, StyleMap};
use MainPeer;
/// ViewIdentifiers are the primary means of routing messages between xi-core and a client view.
pub type ViewIdentifier = String;
/// BufferIdentifiers uniquely identify open buffers.
type BufferIdentifier = String;
// TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?"
pub struct Tabs<W: Write> {
/// maps file names to buffer identifiers. If a client asks to open a file that is already
/// open, we treat it as a request for a new view.
open_files: BTreeMap<PathBuf, BufferIdentifier>,
/// maps buffer identifiers (filenames) to editor instances
buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>,
/// maps view identifiers to editor instances. All actions originate in a view; this lets us
/// route messages correctly when multiple views share a buffer.
views: BTreeMap<ViewIdentifier, BufferIdentifier>,
id_counter: usize,
kill_ring: Arc<Mutex<Rope>>,
style_map: Arc<Mutex<StyleMap>>,
}
#[derive(Clone)]
pub struct TabCtx<W: Write> {
kill_ring: Arc<Mutex<Rope>>,
rpc_peer: MainPeer<W>,
style_map: Arc<Mutex<StyleMap>>,
}
impl<W: Write + Send + 'static> Tabs<W> {
pub fn new() -> Tabs<W> {
Tabs {
open_files: BTreeMap::new(),
buffers: BTreeMap::new(),
views: BTreeMap::new(),
id_counter: 0,
kill_ring: Arc::new(Mutex::new(Rope::from(""))),
style_map: Arc::new(Mutex::new(StyleMap::new())),
}
}
fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> {
TabCtx {
kill_ring: self.kill_ring.clone(),
rpc_peer: peer.clone(),
style_map: self.style_map.clone(),
}
}
fn next_view_id(&mut self) -> ViewIdentifier {
self.id_counter += 1;
format!("view-id-{}", self.id_counter)
}
fn next_buffer_id(&mut self) -> BufferIdentifier {
self.id_counter += 1;
format!("buffer-id-{}", self.id_counter)
}
pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> {
use rpc::CoreCommand::*;
match cmd {
CloseView { view_id } => {
self.do_close_view(view_id);
None
},
NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))),
Save { view_id, file_path } => self.do_save(view_id, file_path),
Edit { view_id, edit_command } => self.do_edit(view_id, edit_command),
}
}
/// Creates a new view and associates it with a buffer.
///
/// This function always creates a new view and associates it with a buffer (which we access
///through an `Editor` instance). This buffer may be existing, or it may be created.
///
///A `new_view` request is handled differently depending on the `file_path` argument, and on
///application state. If `file_path` is given and a buffer associated with that file is already
///open, we create a new view into the existing buffer. If `file_path` is given and that file
///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a
///new empty buffer.
fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier {
// three code paths: new buffer, open file, and new view into existing buffer
let view_id = self.next_view_id();
if let Some(file_path) = file_path.map(PathBuf::from) {
// TODO: here, we should eventually be adding views to the existing editor.
// for the time being, we just create a new empty view.
if self.open_files.contains_key(&file_path) {
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
// let buffer_id = self.open_files.get(&file_path).unwrap().to_owned();
//self.add_view(&view_id, buffer_id);
} else {
// not open: create new buffer_id and open file
let buffer_id = self.next_buffer_id();
self.open_files.insert(file_path.to_owned(), buffer_id.clone());
self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path);
// above fn has two branches: set path after
self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path);
}
} else {
// file_path was nil: create a new empty buffer.
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
}
view_id
}
fn do_close_view(&mut self, view_id: &str) {
self.close_view(view_id);
}
fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>,
view_id: &str, buffer_id: BufferIdentifier) {
let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id);
self.finalize_new_view(view_id, buffer_id, editor);
}
fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) {
match self.read_file(&path) {
Ok(contents) => {
let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents);
self.finalize_new_view(view_id, buffer_id, editor)
}
Err(err) => {
// TODO: we should be reporting errors to the client
// (if this is even an error? we treat opening a non-existent file as a new buffer,
// but set the editor's path)
print_err!("unable to read file: {}, error: {:?}", buffer_id, err);
self.new_empty_view(rpc_peer, view_id, buffer_id);
}
}
}
/// Adds a new view to an existing editor instance.
#[allow(unreachable_code, unused_variables, dead_code)]
fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) {
panic!("add_view should not currently be accessible");
let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id");
self.views.insert(view_id.to_owned(), buffer_id);
editor.lock().unwrap().add_view(view_id);
}
fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) {
self.views.insert(view_id.to_owned(), buffer_id.clone());
self.buffers.insert(buffer_id, editor.clone());
}
fn | <P: AsRef<Path>>(&self, path: P) -> io::Result<String> {
let mut f = File::open(path)?;
let mut s = String::new();
f.read_to_string(&mut s)?;
Ok(s)
}
fn close_view(&mut self, view_id: &str) {
let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view");
let (has_views, path) = {
let editor = self.buffers.get(&buf_id).expect("missing editor when closing view");
let mut editor = editor.lock().unwrap();
editor.remove_view(view_id);
(editor.has_views(), editor.get_path().map(PathBuf::from))
};
if !has_views {
self.buffers.remove(&buf_id);
if let Some(path) = path {
self.open_files.remove(&path);
}
}
}
fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
let editor = self.buffers.get(buffer_id)
.expect(&format!("missing editor for buffer {}", buffer_id));
let file_path = PathBuf::from(file_path);
// if this is a new path for an existing file, we have a bit of housekeeping to do:
if let Some(prev_path) = editor.lock().unwrap().get_path() {
if prev_path != file_path {
self.open_files.remove(prev_path);
}
}
editor.lock().unwrap().do_save(&file_path);
self.open_files.insert(file_path, buffer_id.to_owned());
None
}
fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
if let Some(editor) = self.buffers.get(buffer_id) {
Editor::do_rpc(editor, view_id, cmd)
} else {
print_err!("buffer not found: {}, for view {}", buffer_id, view_id);
None
}
}
pub fn handle_idle(&self) {
for editor in self.buffers.values() {
editor.lock().unwrap().render();
}
}
}
impl<W: Write> TabCtx<W> {
pub fn update_view(&self, view_id: &str, update: &Value) {
self.rpc_peer.send_rpc_notification("update",
&json!({
"view_id": view_id,
"update": update,
}));
}
pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) {
self.rpc_peer.send_rpc_notification("scroll_to",
&json!({
"view_id": view_id,
"line": line,
"col": col,
}));
}
pub fn get_kill_ring(&self) -> Rope {
self.kill_ring.lock().unwrap().clone()
}
pub fn set_kill_ring(&self, val: Rope) {
let mut kill_ring = self.kill_ring.lock().unwrap();
*kill_ring = val;
}
pub fn alert(&self, msg: &str) {
self.rpc_peer.send_rpc_notification("alert",
&json!({
"msg": msg,
}));
}
// Get the index for a given style. If the style is not in the existing
// style map, then issues a def_style request to the front end. Intended
// to be reasonably efficient, but ideally callers would do their own
// indexing.
pub fn get_style_id(&self, style: &Style) -> usize {
let mut style_map = self.style_map.lock().unwrap();
if let Some(ix) = style_map.lookup(style) {
return ix;
}
let ix = style_map.add(style);
self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix));
ix
}
}
| read_file | identifier_name |
tabs.rs | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A container for all the tabs being edited. Also functions as main dispatch for RPC.
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::{PathBuf, Path};
use std::fs::File;
use std::sync::{Arc, Mutex};
use serde_json::value::Value;
use xi_rope::rope::Rope;
use editor::Editor;
use rpc::{CoreCommand, EditCommand};
use styles::{Style, StyleMap};
use MainPeer;
/// ViewIdentifiers are the primary means of routing messages between xi-core and a client view.
pub type ViewIdentifier = String;
/// BufferIdentifiers uniquely identify open buffers.
type BufferIdentifier = String;
// TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?"
pub struct Tabs<W: Write> {
/// maps file names to buffer identifiers. If a client asks to open a file that is already
/// open, we treat it as a request for a new view.
open_files: BTreeMap<PathBuf, BufferIdentifier>,
/// maps buffer identifiers (filenames) to editor instances
buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>,
/// maps view identifiers to editor instances. All actions originate in a view; this lets us
/// route messages correctly when multiple views share a buffer.
views: BTreeMap<ViewIdentifier, BufferIdentifier>,
id_counter: usize,
kill_ring: Arc<Mutex<Rope>>,
style_map: Arc<Mutex<StyleMap>>,
}
#[derive(Clone)]
pub struct TabCtx<W: Write> {
kill_ring: Arc<Mutex<Rope>>,
rpc_peer: MainPeer<W>,
style_map: Arc<Mutex<StyleMap>>,
}
impl<W: Write + Send + 'static> Tabs<W> {
pub fn new() -> Tabs<W> {
Tabs {
open_files: BTreeMap::new(),
buffers: BTreeMap::new(),
views: BTreeMap::new(),
id_counter: 0,
kill_ring: Arc::new(Mutex::new(Rope::from(""))),
style_map: Arc::new(Mutex::new(StyleMap::new())),
}
}
fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> {
TabCtx {
kill_ring: self.kill_ring.clone(),
rpc_peer: peer.clone(),
style_map: self.style_map.clone(),
}
}
fn next_view_id(&mut self) -> ViewIdentifier {
self.id_counter += 1;
format!("view-id-{}", self.id_counter)
}
fn next_buffer_id(&mut self) -> BufferIdentifier {
self.id_counter += 1;
format!("buffer-id-{}", self.id_counter)
}
pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> {
use rpc::CoreCommand::*;
match cmd {
CloseView { view_id } => {
self.do_close_view(view_id);
None
},
NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))),
Save { view_id, file_path } => self.do_save(view_id, file_path),
Edit { view_id, edit_command } => self.do_edit(view_id, edit_command),
}
}
/// Creates a new view and associates it with a buffer.
///
/// This function always creates a new view and associates it with a buffer (which we access
///through an `Editor` instance). This buffer may be existing, or it may be created.
///
///A `new_view` request is handled differently depending on the `file_path` argument, and on
///application state. If `file_path` is given and a buffer associated with that file is already
///open, we create a new view into the existing buffer. If `file_path` is given and that file
///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a
///new empty buffer.
fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier {
// three code paths: new buffer, open file, and new view into existing buffer
let view_id = self.next_view_id();
if let Some(file_path) = file_path.map(PathBuf::from) {
// TODO: here, we should eventually be adding views to the existing editor.
// for the time being, we just create a new empty view.
if self.open_files.contains_key(&file_path) {
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
// let buffer_id = self.open_files.get(&file_path).unwrap().to_owned();
//self.add_view(&view_id, buffer_id);
} else {
// not open: create new buffer_id and open file
let buffer_id = self.next_buffer_id();
self.open_files.insert(file_path.to_owned(), buffer_id.clone());
self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path);
// above fn has two branches: set path after
self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path);
}
} else {
// file_path was nil: create a new empty buffer.
let buffer_id = self.next_buffer_id();
self.new_empty_view(rpc_peer, &view_id, buffer_id);
}
view_id
}
fn do_close_view(&mut self, view_id: &str) {
self.close_view(view_id);
}
fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>,
view_id: &str, buffer_id: BufferIdentifier) {
let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id);
self.finalize_new_view(view_id, buffer_id, editor);
}
fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) {
match self.read_file(&path) {
Ok(contents) => {
let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents);
self.finalize_new_view(view_id, buffer_id, editor)
}
Err(err) => {
// TODO: we should be reporting errors to the client
// (if this is even an error? we treat opening a non-existent file as a new buffer,
// but set the editor's path)
print_err!("unable to read file: {}, error: {:?}", buffer_id, err);
self.new_empty_view(rpc_peer, view_id, buffer_id);
}
}
}
/// Adds a new view to an existing editor instance.
#[allow(unreachable_code, unused_variables, dead_code)]
fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) {
panic!("add_view should not currently be accessible");
let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id");
self.views.insert(view_id.to_owned(), buffer_id);
editor.lock().unwrap().add_view(view_id);
}
fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) {
self.views.insert(view_id.to_owned(), buffer_id.clone());
self.buffers.insert(buffer_id, editor.clone());
}
fn read_file<P: AsRef<Path>>(&self, path: P) -> io::Result<String> |
fn close_view(&mut self, view_id: &str) {
let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view");
let (has_views, path) = {
let editor = self.buffers.get(&buf_id).expect("missing editor when closing view");
let mut editor = editor.lock().unwrap();
editor.remove_view(view_id);
(editor.has_views(), editor.get_path().map(PathBuf::from))
};
if !has_views {
self.buffers.remove(&buf_id);
if let Some(path) = path {
self.open_files.remove(&path);
}
}
}
fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
let editor = self.buffers.get(buffer_id)
.expect(&format!("missing editor for buffer {}", buffer_id));
let file_path = PathBuf::from(file_path);
// if this is a new path for an existing file, we have a bit of housekeeping to do:
if let Some(prev_path) = editor.lock().unwrap().get_path() {
if prev_path != file_path {
self.open_files.remove(prev_path);
}
}
editor.lock().unwrap().do_save(&file_path);
self.open_files.insert(file_path, buffer_id.to_owned());
None
}
fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> {
let buffer_id = self.views.get(view_id)
.expect(&format!("missing buffer id for view {}", view_id));
if let Some(editor) = self.buffers.get(buffer_id) {
Editor::do_rpc(editor, view_id, cmd)
} else {
print_err!("buffer not found: {}, for view {}", buffer_id, view_id);
None
}
}
pub fn handle_idle(&self) {
for editor in self.buffers.values() {
editor.lock().unwrap().render();
}
}
}
impl<W: Write> TabCtx<W> {
pub fn update_view(&self, view_id: &str, update: &Value) {
self.rpc_peer.send_rpc_notification("update",
&json!({
"view_id": view_id,
"update": update,
}));
}
pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) {
self.rpc_peer.send_rpc_notification("scroll_to",
&json!({
"view_id": view_id,
"line": line,
"col": col,
}));
}
pub fn get_kill_ring(&self) -> Rope {
self.kill_ring.lock().unwrap().clone()
}
pub fn set_kill_ring(&self, val: Rope) {
let mut kill_ring = self.kill_ring.lock().unwrap();
*kill_ring = val;
}
pub fn alert(&self, msg: &str) {
self.rpc_peer.send_rpc_notification("alert",
&json!({
"msg": msg,
}));
}
// Get the index for a given style. If the style is not in the existing
// style map, then issues a def_style request to the front end. Intended
// to be reasonably efficient, but ideally callers would do their own
// indexing.
pub fn get_style_id(&self, style: &Style) -> usize {
let mut style_map = self.style_map.lock().unwrap();
if let Some(ix) = style_map.lookup(style) {
return ix;
}
let ix = style_map.add(style);
self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix));
ix
}
}
| {
let mut f = File::open(path)?;
let mut s = String::new();
f.read_to_string(&mut s)?;
Ok(s)
} | identifier_body |
vehicle.js | /* eslint-disable linebreak-style */
/* eslint-disable no-console */
const fordConnect = require('./fordConnect/fordConnect');
const { geo } = require('./geo');
let activeVehicle;
/**
* Updates the access token and sets the active vehicle to the vehicle with the
* vehicleAuthorizationIndicator set to 1.
*
* We have this routine, since we only have a single user (one Alexa developer account for
* the hack with the API access going away, so not publishing the Alexa skill publically).
* To support multiple users we would simply add a listener on port 3000 and use the state to
* do a user regisration lookup, with a quick expiry. We would store the results refresh
* tokens in a NoSQL database.
*/
async function init() {
// Try to use the FORD_CODE environment variable to refresh our access token and refresh token.
await fordConnect.updateTokenFromCode();
// Try to use the FORD_REFRESH environment variable to refresh our access token and refresh token.
await fordConnect.refreshToken(60);
// Get the list of vehicles (hopefully one of the above APIs set our access token.)
const vehicles = await fordConnect.getVehicles();
if (vehicles.statusCode === 200) {
// Grab the first vehicle that we have authorized (FordPass UI only lets you select 1 vehicle).
// eslint-disable-next-line prefer-destructuring
activeVehicle = vehicles.body.vehicles.filter((v) => v.vehicleAuthorizationIndicator === 1)[0];
if (activeVehicle && activeVehicle.vehicleId) {
console.log('\nAlexa commands will use the following vehicle:');
console.log(activeVehicle);
} else {
console.error(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('Did not get a vehicle back from getVehicles.');
console.error('Please provide a new FORD_CODE or MYFORD_REFRESH.');
process.exit(1);
}
} else if (vehicles.statusCode === 500) {
// We got HTTP 500 during the hack and the request from Ford was to get a new token.
// Refreshing the access token with the old refresh token would succeed OAuth calls,
// but all calls to the FordConnect API still failed with HTTP 500.
console.error(`500FORDCONNECT ${JSON.stringify(vehicles)}`);
console.error('GOT 500 (INTERNAL SERVER ERROR) from FordConnect API calling getVehicles!');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
process.exit(1);
} else if (vehicles.stautsCode === 401) {
console.error('Access deined.');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
} else {
console.log(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('*** Unexpected error calling getVehicles.');
process.exit(1);
}
}
/**
* This API should convert a userId into a vehicleId. For now this always just returns the single
* active vehicle. To support multiple users, we could use a NoSQL database to do the persistent
* mapping.
*
* @param {*} userId The user passed in the request.
* @returns The vehicleId to use for the request.
*/
function toVehicleId(userId) {
// TODO: Add mapping if we need to support multiple users.
const { vehicleId } = activeVehicle;
console.log(`User ${userId} is using vehicle ${vehicleId}.`);
return vehicleId;
}
/**
* Updates the cloud data by geting a doStatus followed by a getStatus, to know when it is complete.
* The timeout is set fairly tight, since we only have 8-10 seconds to return data to Alexa.
*
* @param {*} vehicleId The vehicle to push to the cloud.
* @returns The response object from the getStatus (or undefined if the doStatus call failed).
* For success the .statusCode should be 202 and the body.commandStatus should be COMPLETED.
* Because of agressive timeouts it may still be PENDINGRESPONSE.
*/
async function cloudPush(vehicleId) {
const response = await fordConnect.doStatus(vehicleId);
if (response.statusCode === 202
&& response.body
&& response.body.status === 'SUCCESS'
&& response.body.commandStatus === 'COMPLETED'
&& response.body.commandId) {
const { commandId } = response.body;
// NOTE: We get an HTTP 202 from the GET call not a 200.
const status = await fordConnect.getStatus(vehicleId, commandId);
return status;
}
return undefined;
}
/**
* Returns a message about the fuel and battery levels.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of charging.
*/
function checkFuel(vehicleInfo) {
const energy = {
fuelLevel: null,
fuelDTE: null,
batteryLevel: null,
batteryDTE: null,
};
// BUGBUG: I'm really unclear on what the values here should look like. I need
// to see real data from ICE, PHEV, HEV and BEV to understand all of the use
// cases. For now, I'm going with the API sends null if it isn't supported and
// it sends a float if it is & I'm not relying on engineType.
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.fuelLevel) {
energy.fuelLevel = vehicleInfo.vehicleDetails.fuelLevel.value;
energy.fuelDTE = vehicleInfo.vehicleDetails.fuelLevel.distanceToEmpty;
}
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.batteryChargeLevel) {
energy.batteryLevel = vehicleInfo.vehicleDetails.batteryChargeLevel.value;
energy.batteryDTE = vehicleInfo.vehicleDetails.batteryChargeLevel.distanceToEmpty;
}
let message;
if (energy.fuelLevel !== null && energy.fuelLevel <= 0.0) {
message = 'Fuel is empty. ';
} else if (energy.fuelLevel !== null && energy.fuelLevel) {
message = `Fuel is ${energy.fuelLevel} percent. `;
} else {
message = '';
}
if (energy.fuelDTE !== null && energy.fuelDTE >= 0) {
message += `You can travel ${geo.distance(energy.fuelDTE)} on fuel. `;
}
if (energy.batteryLevel !== null && energy.batteryLevel <= 0.0) {
message += 'Battery is empty. ';
} else if (energy.batteryLevel !== null && energy.batteryLevel) {
message += `Battery is ${energy.batteryLevel} percent. `;
}
if (energy.batteryDTE !== null && energy.batteryDTE >= 0) {
message += `You can travel ${geo.distance(energy.batteryDTE)} on battery. `;
}
return message;
}
/**
* Returns a message about the EV plug and the charging status.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of the plug.
*/
function checkPlug(vehicleInfo) {
let message;
if (vehicleInfo.vehicleStatus.plugStatus) {
message = `The EV plug is ${vehicleInfo.vehicleStatus.plugStatus.value === true ? 'connected' : 'disconnected'}. `;
} else {
message = 'Failed to get EV plug status. ';
}
if (vehicleInfo.vehicleStatus.chargingStatus) {
message += `The current charging status is ${vehicleInfo.vehicleStatus.chargingStatus.value}.`;
}
return message;
}
/**
* Charges an electric vehicle.
*
* @param {*} vehicleId The vehicle to charge.
* @returns String. A message to speak about the status of charging.
*/
async function chargeVehicle(vehicleId) {
let message;
// Start charging.
const response = await fordConnect.doStartCharge(vehicleId);
if (response.statusCode === 406) {
if (response.body && response.body.error && response.body.error.details) {
message = `Failed charging vehicle. ${response.body.error.details}.`;
} else {
console.error(`SPECBUG ${JSON.stringify(response)}`);
message = 'Failed charging vehicle. Only EV cars are supported.';
}
} else if (response.statusCode < 300) {
// Try to update the cloud with the latest and get the details. Due to aggressive timeouts
// it is possible that vehicleStatus.chargingStatus didn't get a chance to update, but likely
// plugStatus was already correct (e.g. the vehicle was typically plugged in a long time ago.)
await cloudPush(vehicleId);
const details = await fordConnect.getDetails(vehicleId);
if (details.statusCode === 200 && details.body.vehicle.vehicleStatus.plugStatus) {
if (details.body.vehicle.vehicleStatus.plugStatus.value === true) {
message = 'Request for charging sent.';
} else if (details.body.vehicle.vehicleStatus.plugStatus.value === false) {
message = 'Request for charging sent, but the plug is not connected.';
} else {
console.log(JSON.stringify(details));
message = "Request for charging sent, but I'm unable to determine if the vehicle is plugged in.";
}
} else {
console.log(JSON.stringify(details));
message = 'Request for charging sent, but getting vehicle status failed.';
}
} else {
message = `Failed charging vehicle. Got status code ${response.statusCode}.`;
}
return message;
}
/**
* Returns a message with the name (if known) and address where the vehicle is located.
*
* @param {*} vehicleId The vehicle to find.
* @returns String. A message to speak containing the name and address where the vehicle is located.
*/
async function locateVehicle(vehicleId) {
let message;
await fordConnect.doLocation(vehicleId);
// REVIEW: The GET /location API doesn't require a commandId, so how do we know it is updated.
const response = await fordConnect.getLocation(vehicleId);
if (response.statusCode === 200 && response.body && response.body.status === 'SUCCESS' && response.body.vehicleLocation) {
message = `The vehicle is at ${await geo.getLocation(response.body.vehicleLocation.latitude, response.body.vehicleLocation.longitude)}`;
} else if (response.body) {
message = `Failed to get location information with status code ${response.statusCode} and body status of ${response.body.status}. `;
} else {
message = `Failed to get location information with status ${response.statusCode}. `;
}
return message;
}
/**
* Returns a message with the status of door locks (LOCKED, UNLOCKED) and the
* alarm (SET, NOT SET, ACTIVE, ERROR).
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the status of the door locks and alarm.
*/
async function checkLocksAndAlarm(vehicleId) {
let message;
const cloud = await cloudPush(vehicleId);
if (cloud && cloud.statusCode === 202 && cloud.body && cloud.body.commandStatus === 'COMPLETED' && cloud.body.vehiclestatus) {
const status = cloud.body.vehiclestatus;
message = `The locks are ${status.lockStatus.value}. The alarm is ${status.alarm.value.replace('NOTSET', 'NOT SET')}. `;
} else {
console.error(JSON.stringify(cloud));
console.error('Failed to get lock and alarm status.');
message = 'Unable to check locks and alarm. ';
}
return message;
}
/**
* Returns a message with the status of fuel & battery level and EV plug connection.
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the fuel and EV plug status.
*/
async function checkFuelAndPlug(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about fuel level.
if (details.statusCode === 200 && details.body.vehicle) {
message = checkFuel(details.body.vehicle);
// Message about EV plug.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.engineType
&& details.body.vehicle.engineType.indexOf('EV') >= 0) {
message += checkPlug(details.body.vehicle);
}
} else {
console.error(JSON.stringify(details));
message = 'Unable to check fuel level. ';
}
return message;
}
/**
* Returns a message about any open doors (or confirms all doors are closed.)
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about any open doors.
*/
async function checkDoors(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about any open doors.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.vehicleStatus
&& details.body.vehicle.vehicleStatus.doorStatus) {
const doors = details.body.vehicle.vehicleStatus.doorStatus;
message = doors.map((d) => (d.value !== 'CLOSED' | //
// TODO: We could do a more user friendly mapping. Right now we have voice responses like
// "DRIVER FRONT", "PASSENGER FRONT", "PASSENGER REAR LEFT", "HOOD DOOR",
// "PASSENGER INNER TAILGATE", etc.
? `${d.vehicleOccupantRole} ${d.vehicleDoor} is ${d.value}. `.replace(/UNSPECIFIED_|NOT_APPLICABLE/g, '').replace('_', ' ')
: '')).join('');
if (doors.filter((d) => d.value !== 'CLOSED').length === 0) {
message = 'All doors are closed. ';
}
}
return message;
}
/**
* Returns a message about the weekday and weekend charge schedule.
*
* @param {*} vehicleId The vehicle to get the schedule of.
* @returns String. A message to speak about the chaging schedule.
*/
async function chargeSchedule(vehicleId) {
let message;
const response = await fordConnect.getChargeSchedule(vehicleId);
if (response.statusCode === 200 && response.body.chargeSchedules) {
if (response.body.chargeSchedules.length === 0) {
message = 'No charging schedule is set. ';
} else {
message = 'The charge schedule is ';
message += response.body.chargeSchedules.map(
// The Ford dash UI allows each schedule to have multiple charge windows.
// REVIEW: What does "00:00" to "00:00" mean? For now we just say it.
(sch) => sch.chargeWindows.map(
(cw) => `${sch.days}S from ${cw.startTime} to ${cw.endTime} at ${sch.desiredChargeLevel} percent. `,
).join(' '),
).join(' ');
}
} else {
message = `Failed getting charge schedule with status code ${response.statusCode}`;
}
return message;
}
exports.vehicle = {
init,
toVehicleId,
cloudPush,
chargeVehicle,
checkFuel,
checkPlug,
checkFuelAndPlug,
checkLocksAndAlarm,
checkDoors,
locateVehicle,
chargeSchedule,
}; | // Delete the words "UNSPECIFIED_" AND "NOT_APPLICABLE". Replace underscore with spaces
// for better speach output. Per the FAQ the d.value is either OPEN or CLOSED. | random_line_split |
vehicle.js | /* eslint-disable linebreak-style */
/* eslint-disable no-console */
const fordConnect = require('./fordConnect/fordConnect');
const { geo } = require('./geo');
let activeVehicle;
/**
* Updates the access token and sets the active vehicle to the vehicle with the
* vehicleAuthorizationIndicator set to 1.
*
* We have this routine, since we only have a single user (one Alexa developer account for
* the hack with the API access going away, so not publishing the Alexa skill publically).
* To support multiple users we would simply add a listener on port 3000 and use the state to
* do a user regisration lookup, with a quick expiry. We would store the results refresh
* tokens in a NoSQL database.
*/
async function init() {
// Try to use the FORD_CODE environment variable to refresh our access token and refresh token.
await fordConnect.updateTokenFromCode();
// Try to use the FORD_REFRESH environment variable to refresh our access token and refresh token.
await fordConnect.refreshToken(60);
// Get the list of vehicles (hopefully one of the above APIs set our access token.)
const vehicles = await fordConnect.getVehicles();
if (vehicles.statusCode === 200) {
// Grab the first vehicle that we have authorized (FordPass UI only lets you select 1 vehicle).
// eslint-disable-next-line prefer-destructuring
activeVehicle = vehicles.body.vehicles.filter((v) => v.vehicleAuthorizationIndicator === 1)[0];
if (activeVehicle && activeVehicle.vehicleId) {
console.log('\nAlexa commands will use the following vehicle:');
console.log(activeVehicle);
} else {
console.error(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('Did not get a vehicle back from getVehicles.');
console.error('Please provide a new FORD_CODE or MYFORD_REFRESH.');
process.exit(1);
}
} else if (vehicles.statusCode === 500) {
// We got HTTP 500 during the hack and the request from Ford was to get a new token.
// Refreshing the access token with the old refresh token would succeed OAuth calls,
// but all calls to the FordConnect API still failed with HTTP 500.
console.error(`500FORDCONNECT ${JSON.stringify(vehicles)}`);
console.error('GOT 500 (INTERNAL SERVER ERROR) from FordConnect API calling getVehicles!');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
process.exit(1);
} else if (vehicles.stautsCode === 401) {
console.error('Access deined.');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
} else {
console.log(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('*** Unexpected error calling getVehicles.');
process.exit(1);
}
}
/**
* This API should convert a userId into a vehicleId. For now this always just returns the single
* active vehicle. To support multiple users, we could use a NoSQL database to do the persistent
* mapping.
*
* @param {*} userId The user passed in the request.
* @returns The vehicleId to use for the request.
*/
function toVehicleId(userId) {
// TODO: Add mapping if we need to support multiple users.
const { vehicleId } = activeVehicle;
console.log(`User ${userId} is using vehicle ${vehicleId}.`);
return vehicleId;
}
/**
* Updates the cloud data by geting a doStatus followed by a getStatus, to know when it is complete.
* The timeout is set fairly tight, since we only have 8-10 seconds to return data to Alexa.
*
* @param {*} vehicleId The vehicle to push to the cloud.
* @returns The response object from the getStatus (or undefined if the doStatus call failed).
* For success the .statusCode should be 202 and the body.commandStatus should be COMPLETED.
* Because of agressive timeouts it may still be PENDINGRESPONSE.
*/
async function cloudPush(vehicleId) {
const response = await fordConnect.doStatus(vehicleId);
if (response.statusCode === 202
&& response.body
&& response.body.status === 'SUCCESS'
&& response.body.commandStatus === 'COMPLETED'
&& response.body.commandId) {
const { commandId } = response.body;
// NOTE: We get an HTTP 202 from the GET call not a 200.
const status = await fordConnect.getStatus(vehicleId, commandId);
return status;
}
return undefined;
}
/**
* Returns a message about the fuel and battery levels.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of charging.
*/
function checkFuel(vehicleInfo) {
const energy = {
fuelLevel: null,
fuelDTE: null,
batteryLevel: null,
batteryDTE: null,
};
// BUGBUG: I'm really unclear on what the values here should look like. I need
// to see real data from ICE, PHEV, HEV and BEV to understand all of the use
// cases. For now, I'm going with the API sends null if it isn't supported and
// it sends a float if it is & I'm not relying on engineType.
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.fuelLevel) {
energy.fuelLevel = vehicleInfo.vehicleDetails.fuelLevel.value;
energy.fuelDTE = vehicleInfo.vehicleDetails.fuelLevel.distanceToEmpty;
}
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.batteryChargeLevel) {
energy.batteryLevel = vehicleInfo.vehicleDetails.batteryChargeLevel.value;
energy.batteryDTE = vehicleInfo.vehicleDetails.batteryChargeLevel.distanceToEmpty;
}
let message;
if (energy.fuelLevel !== null && energy.fuelLevel <= 0.0) {
message = 'Fuel is empty. ';
} else if (energy.fuelLevel !== null && energy.fuelLevel) {
message = `Fuel is ${energy.fuelLevel} percent. `;
} else {
message = '';
}
if (energy.fuelDTE !== null && energy.fuelDTE >= 0) {
message += `You can travel ${geo.distance(energy.fuelDTE)} on fuel. `;
}
if (energy.batteryLevel !== null && energy.batteryLevel <= 0.0) {
message += 'Battery is empty. ';
} else if (energy.batteryLevel !== null && energy.batteryLevel) {
message += `Battery is ${energy.batteryLevel} percent. `;
}
if (energy.batteryDTE !== null && energy.batteryDTE >= 0) {
message += `You can travel ${geo.distance(energy.batteryDTE)} on battery. `;
}
return message;
}
/**
* Returns a message about the EV plug and the charging status.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of the plug.
*/
function checkPlug(vehicleInfo) {
let message;
if (vehicleInfo.vehicleStatus.plugStatus) {
message = `The EV plug is ${vehicleInfo.vehicleStatus.plugStatus.value === true ? 'connected' : 'disconnected'}. `;
} else {
message = 'Failed to get EV plug status. ';
}
if (vehicleInfo.vehicleStatus.chargingStatus) {
message += `The current charging status is ${vehicleInfo.vehicleStatus.chargingStatus.value}.`;
}
return message;
}
/**
* Charges an electric vehicle.
*
* @param {*} vehicleId The vehicle to charge.
* @returns String. A message to speak about the status of charging.
*/
async function | (vehicleId) {
let message;
// Start charging.
const response = await fordConnect.doStartCharge(vehicleId);
if (response.statusCode === 406) {
if (response.body && response.body.error && response.body.error.details) {
message = `Failed charging vehicle. ${response.body.error.details}.`;
} else {
console.error(`SPECBUG ${JSON.stringify(response)}`);
message = 'Failed charging vehicle. Only EV cars are supported.';
}
} else if (response.statusCode < 300) {
// Try to update the cloud with the latest and get the details. Due to aggressive timeouts
// it is possible that vehicleStatus.chargingStatus didn't get a chance to update, but likely
// plugStatus was already correct (e.g. the vehicle was typically plugged in a long time ago.)
await cloudPush(vehicleId);
const details = await fordConnect.getDetails(vehicleId);
if (details.statusCode === 200 && details.body.vehicle.vehicleStatus.plugStatus) {
if (details.body.vehicle.vehicleStatus.plugStatus.value === true) {
message = 'Request for charging sent.';
} else if (details.body.vehicle.vehicleStatus.plugStatus.value === false) {
message = 'Request for charging sent, but the plug is not connected.';
} else {
console.log(JSON.stringify(details));
message = "Request for charging sent, but I'm unable to determine if the vehicle is plugged in.";
}
} else {
console.log(JSON.stringify(details));
message = 'Request for charging sent, but getting vehicle status failed.';
}
} else {
message = `Failed charging vehicle. Got status code ${response.statusCode}.`;
}
return message;
}
/**
* Returns a message with the name (if known) and address where the vehicle is located.
*
* @param {*} vehicleId The vehicle to find.
* @returns String. A message to speak containing the name and address where the vehicle is located.
*/
async function locateVehicle(vehicleId) {
let message;
await fordConnect.doLocation(vehicleId);
// REVIEW: The GET /location API doesn't require a commandId, so how do we know it is updated.
const response = await fordConnect.getLocation(vehicleId);
if (response.statusCode === 200 && response.body && response.body.status === 'SUCCESS' && response.body.vehicleLocation) {
message = `The vehicle is at ${await geo.getLocation(response.body.vehicleLocation.latitude, response.body.vehicleLocation.longitude)}`;
} else if (response.body) {
message = `Failed to get location information with status code ${response.statusCode} and body status of ${response.body.status}. `;
} else {
message = `Failed to get location information with status ${response.statusCode}. `;
}
return message;
}
/**
* Returns a message with the status of door locks (LOCKED, UNLOCKED) and the
* alarm (SET, NOT SET, ACTIVE, ERROR).
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the status of the door locks and alarm.
*/
async function checkLocksAndAlarm(vehicleId) {
let message;
const cloud = await cloudPush(vehicleId);
if (cloud && cloud.statusCode === 202 && cloud.body && cloud.body.commandStatus === 'COMPLETED' && cloud.body.vehiclestatus) {
const status = cloud.body.vehiclestatus;
message = `The locks are ${status.lockStatus.value}. The alarm is ${status.alarm.value.replace('NOTSET', 'NOT SET')}. `;
} else {
console.error(JSON.stringify(cloud));
console.error('Failed to get lock and alarm status.');
message = 'Unable to check locks and alarm. ';
}
return message;
}
/**
* Returns a message with the status of fuel & battery level and EV plug connection.
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the fuel and EV plug status.
*/
async function checkFuelAndPlug(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about fuel level.
if (details.statusCode === 200 && details.body.vehicle) {
message = checkFuel(details.body.vehicle);
// Message about EV plug.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.engineType
&& details.body.vehicle.engineType.indexOf('EV') >= 0) {
message += checkPlug(details.body.vehicle);
}
} else {
console.error(JSON.stringify(details));
message = 'Unable to check fuel level. ';
}
return message;
}
/**
* Returns a message about any open doors (or confirms all doors are closed.)
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about any open doors.
*/
async function checkDoors(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about any open doors.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.vehicleStatus
&& details.body.vehicle.vehicleStatus.doorStatus) {
const doors = details.body.vehicle.vehicleStatus.doorStatus;
message = doors.map((d) => (d.value !== 'CLOSED'
// Delete the words "UNSPECIFIED_" AND "NOT_APPLICABLE". Replace underscore with spaces
// for better speach output. Per the FAQ the d.value is either OPEN or CLOSED.
//
// TODO: We could do a more user friendly mapping. Right now we have voice responses like
// "DRIVER FRONT", "PASSENGER FRONT", "PASSENGER REAR LEFT", "HOOD DOOR",
// "PASSENGER INNER TAILGATE", etc.
? `${d.vehicleOccupantRole} ${d.vehicleDoor} is ${d.value}. `.replace(/UNSPECIFIED_|NOT_APPLICABLE/g, '').replace('_', ' ')
: '')).join('');
if (doors.filter((d) => d.value !== 'CLOSED').length === 0) {
message = 'All doors are closed. ';
}
}
return message;
}
/**
* Returns a message about the weekday and weekend charge schedule.
*
* @param {*} vehicleId The vehicle to get the schedule of.
* @returns String. A message to speak about the chaging schedule.
*/
async function chargeSchedule(vehicleId) {
let message;
const response = await fordConnect.getChargeSchedule(vehicleId);
if (response.statusCode === 200 && response.body.chargeSchedules) {
if (response.body.chargeSchedules.length === 0) {
message = 'No charging schedule is set. ';
} else {
message = 'The charge schedule is ';
message += response.body.chargeSchedules.map(
// The Ford dash UI allows each schedule to have multiple charge windows.
// REVIEW: What does "00:00" to "00:00" mean? For now we just say it.
(sch) => sch.chargeWindows.map(
(cw) => `${sch.days}S from ${cw.startTime} to ${cw.endTime} at ${sch.desiredChargeLevel} percent. `,
).join(' '),
).join(' ');
}
} else {
message = `Failed getting charge schedule with status code ${response.statusCode}`;
}
return message;
}
exports.vehicle = {
init,
toVehicleId,
cloudPush,
chargeVehicle,
checkFuel,
checkPlug,
checkFuelAndPlug,
checkLocksAndAlarm,
checkDoors,
locateVehicle,
chargeSchedule,
};
| chargeVehicle | identifier_name |
vehicle.js | /* eslint-disable linebreak-style */
/* eslint-disable no-console */
const fordConnect = require('./fordConnect/fordConnect');
const { geo } = require('./geo');
let activeVehicle;
/**
* Updates the access token and sets the active vehicle to the vehicle with the
* vehicleAuthorizationIndicator set to 1.
*
* We have this routine, since we only have a single user (one Alexa developer account for
* the hack with the API access going away, so not publishing the Alexa skill publically).
* To support multiple users we would simply add a listener on port 3000 and use the state to
* do a user regisration lookup, with a quick expiry. We would store the results refresh
* tokens in a NoSQL database.
*/
async function init() {
// Try to use the FORD_CODE environment variable to refresh our access token and refresh token.
await fordConnect.updateTokenFromCode();
// Try to use the FORD_REFRESH environment variable to refresh our access token and refresh token.
await fordConnect.refreshToken(60);
// Get the list of vehicles (hopefully one of the above APIs set our access token.)
const vehicles = await fordConnect.getVehicles();
if (vehicles.statusCode === 200) {
// Grab the first vehicle that we have authorized (FordPass UI only lets you select 1 vehicle).
// eslint-disable-next-line prefer-destructuring
activeVehicle = vehicles.body.vehicles.filter((v) => v.vehicleAuthorizationIndicator === 1)[0];
if (activeVehicle && activeVehicle.vehicleId) {
console.log('\nAlexa commands will use the following vehicle:');
console.log(activeVehicle);
} else {
console.error(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('Did not get a vehicle back from getVehicles.');
console.error('Please provide a new FORD_CODE or MYFORD_REFRESH.');
process.exit(1);
}
} else if (vehicles.statusCode === 500) {
// We got HTTP 500 during the hack and the request from Ford was to get a new token.
// Refreshing the access token with the old refresh token would succeed OAuth calls,
// but all calls to the FordConnect API still failed with HTTP 500.
console.error(`500FORDCONNECT ${JSON.stringify(vehicles)}`);
console.error('GOT 500 (INTERNAL SERVER ERROR) from FordConnect API calling getVehicles!');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
process.exit(1);
} else if (vehicles.stautsCode === 401) {
console.error('Access deined.');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
} else {
console.log(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('*** Unexpected error calling getVehicles.');
process.exit(1);
}
}
/**
* This API should convert a userId into a vehicleId. For now this always just returns the single
* active vehicle. To support multiple users, we could use a NoSQL database to do the persistent
* mapping.
*
* @param {*} userId The user passed in the request.
* @returns The vehicleId to use for the request.
*/
function toVehicleId(userId) {
// TODO: Add mapping if we need to support multiple users.
const { vehicleId } = activeVehicle;
console.log(`User ${userId} is using vehicle ${vehicleId}.`);
return vehicleId;
}
/**
* Updates the cloud data by geting a doStatus followed by a getStatus, to know when it is complete.
* The timeout is set fairly tight, since we only have 8-10 seconds to return data to Alexa.
*
* @param {*} vehicleId The vehicle to push to the cloud.
* @returns The response object from the getStatus (or undefined if the doStatus call failed).
* For success the .statusCode should be 202 and the body.commandStatus should be COMPLETED.
* Because of agressive timeouts it may still be PENDINGRESPONSE.
*/
async function cloudPush(vehicleId) {
const response = await fordConnect.doStatus(vehicleId);
if (response.statusCode === 202
&& response.body
&& response.body.status === 'SUCCESS'
&& response.body.commandStatus === 'COMPLETED'
&& response.body.commandId) {
const { commandId } = response.body;
// NOTE: We get an HTTP 202 from the GET call not a 200.
const status = await fordConnect.getStatus(vehicleId, commandId);
return status;
}
return undefined;
}
/**
* Returns a message about the fuel and battery levels.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of charging.
*/
function checkFuel(vehicleInfo) {
const energy = {
fuelLevel: null,
fuelDTE: null,
batteryLevel: null,
batteryDTE: null,
};
// BUGBUG: I'm really unclear on what the values here should look like. I need
// to see real data from ICE, PHEV, HEV and BEV to understand all of the use
// cases. For now, I'm going with the API sends null if it isn't supported and
// it sends a float if it is & I'm not relying on engineType.
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.fuelLevel) {
energy.fuelLevel = vehicleInfo.vehicleDetails.fuelLevel.value;
energy.fuelDTE = vehicleInfo.vehicleDetails.fuelLevel.distanceToEmpty;
}
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.batteryChargeLevel) {
energy.batteryLevel = vehicleInfo.vehicleDetails.batteryChargeLevel.value;
energy.batteryDTE = vehicleInfo.vehicleDetails.batteryChargeLevel.distanceToEmpty;
}
let message;
if (energy.fuelLevel !== null && energy.fuelLevel <= 0.0) {
message = 'Fuel is empty. ';
} else if (energy.fuelLevel !== null && energy.fuelLevel) {
message = `Fuel is ${energy.fuelLevel} percent. `;
} else {
message = '';
}
if (energy.fuelDTE !== null && energy.fuelDTE >= 0) {
message += `You can travel ${geo.distance(energy.fuelDTE)} on fuel. `;
}
if (energy.batteryLevel !== null && energy.batteryLevel <= 0.0) {
message += 'Battery is empty. ';
} else if (energy.batteryLevel !== null && energy.batteryLevel) {
message += `Battery is ${energy.batteryLevel} percent. `;
}
if (energy.batteryDTE !== null && energy.batteryDTE >= 0) {
message += `You can travel ${geo.distance(energy.batteryDTE)} on battery. `;
}
return message;
}
/**
* Returns a message about the EV plug and the charging status.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of the plug.
*/
function checkPlug(vehicleInfo) {
let message;
if (vehicleInfo.vehicleStatus.plugStatus) {
message = `The EV plug is ${vehicleInfo.vehicleStatus.plugStatus.value === true ? 'connected' : 'disconnected'}. `;
} else {
message = 'Failed to get EV plug status. ';
}
if (vehicleInfo.vehicleStatus.chargingStatus) {
message += `The current charging status is ${vehicleInfo.vehicleStatus.chargingStatus.value}.`;
}
return message;
}
/**
* Charges an electric vehicle.
*
* @param {*} vehicleId The vehicle to charge.
* @returns String. A message to speak about the status of charging.
*/
async function chargeVehicle(vehicleId) {
let message;
// Start charging.
const response = await fordConnect.doStartCharge(vehicleId);
if (response.statusCode === 406) {
if (response.body && response.body.error && response.body.error.details) {
message = `Failed charging vehicle. ${response.body.error.details}.`;
} else {
console.error(`SPECBUG ${JSON.stringify(response)}`);
message = 'Failed charging vehicle. Only EV cars are supported.';
}
} else if (response.statusCode < 300) {
// Try to update the cloud with the latest and get the details. Due to aggressive timeouts
// it is possible that vehicleStatus.chargingStatus didn't get a chance to update, but likely
// plugStatus was already correct (e.g. the vehicle was typically plugged in a long time ago.)
await cloudPush(vehicleId);
const details = await fordConnect.getDetails(vehicleId);
if (details.statusCode === 200 && details.body.vehicle.vehicleStatus.plugStatus) {
if (details.body.vehicle.vehicleStatus.plugStatus.value === true) {
message = 'Request for charging sent.';
} else if (details.body.vehicle.vehicleStatus.plugStatus.value === false) {
message = 'Request for charging sent, but the plug is not connected.';
} else {
console.log(JSON.stringify(details));
message = "Request for charging sent, but I'm unable to determine if the vehicle is plugged in.";
}
} else {
console.log(JSON.stringify(details));
message = 'Request for charging sent, but getting vehicle status failed.';
}
} else {
message = `Failed charging vehicle. Got status code ${response.statusCode}.`;
}
return message;
}
/**
* Returns a message with the name (if known) and address where the vehicle is located.
*
* @param {*} vehicleId The vehicle to find.
* @returns String. A message to speak containing the name and address where the vehicle is located.
*/
async function locateVehicle(vehicleId) |
/**
* Returns a message with the status of door locks (LOCKED, UNLOCKED) and the
* alarm (SET, NOT SET, ACTIVE, ERROR).
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the status of the door locks and alarm.
*/
async function checkLocksAndAlarm(vehicleId) {
let message;
const cloud = await cloudPush(vehicleId);
if (cloud && cloud.statusCode === 202 && cloud.body && cloud.body.commandStatus === 'COMPLETED' && cloud.body.vehiclestatus) {
const status = cloud.body.vehiclestatus;
message = `The locks are ${status.lockStatus.value}. The alarm is ${status.alarm.value.replace('NOTSET', 'NOT SET')}. `;
} else {
console.error(JSON.stringify(cloud));
console.error('Failed to get lock and alarm status.');
message = 'Unable to check locks and alarm. ';
}
return message;
}
/**
* Returns a message with the status of fuel & battery level and EV plug connection.
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the fuel and EV plug status.
*/
async function checkFuelAndPlug(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about fuel level.
if (details.statusCode === 200 && details.body.vehicle) {
message = checkFuel(details.body.vehicle);
// Message about EV plug.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.engineType
&& details.body.vehicle.engineType.indexOf('EV') >= 0) {
message += checkPlug(details.body.vehicle);
}
} else {
console.error(JSON.stringify(details));
message = 'Unable to check fuel level. ';
}
return message;
}
/**
* Returns a message about any open doors (or confirms all doors are closed.)
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about any open doors.
*/
async function checkDoors(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about any open doors.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.vehicleStatus
&& details.body.vehicle.vehicleStatus.doorStatus) {
const doors = details.body.vehicle.vehicleStatus.doorStatus;
message = doors.map((d) => (d.value !== 'CLOSED'
// Delete the words "UNSPECIFIED_" AND "NOT_APPLICABLE". Replace underscore with spaces
// for better speach output. Per the FAQ the d.value is either OPEN or CLOSED.
//
// TODO: We could do a more user friendly mapping. Right now we have voice responses like
// "DRIVER FRONT", "PASSENGER FRONT", "PASSENGER REAR LEFT", "HOOD DOOR",
// "PASSENGER INNER TAILGATE", etc.
? `${d.vehicleOccupantRole} ${d.vehicleDoor} is ${d.value}. `.replace(/UNSPECIFIED_|NOT_APPLICABLE/g, '').replace('_', ' ')
: '')).join('');
if (doors.filter((d) => d.value !== 'CLOSED').length === 0) {
message = 'All doors are closed. ';
}
}
return message;
}
/**
* Returns a message about the weekday and weekend charge schedule.
*
* @param {*} vehicleId The vehicle to get the schedule of.
* @returns String. A message to speak about the chaging schedule.
*/
async function chargeSchedule(vehicleId) {
let message;
const response = await fordConnect.getChargeSchedule(vehicleId);
if (response.statusCode === 200 && response.body.chargeSchedules) {
if (response.body.chargeSchedules.length === 0) {
message = 'No charging schedule is set. ';
} else {
message = 'The charge schedule is ';
message += response.body.chargeSchedules.map(
// The Ford dash UI allows each schedule to have multiple charge windows.
// REVIEW: What does "00:00" to "00:00" mean? For now we just say it.
(sch) => sch.chargeWindows.map(
(cw) => `${sch.days}S from ${cw.startTime} to ${cw.endTime} at ${sch.desiredChargeLevel} percent. `,
).join(' '),
).join(' ');
}
} else {
message = `Failed getting charge schedule with status code ${response.statusCode}`;
}
return message;
}
exports.vehicle = {
init,
toVehicleId,
cloudPush,
chargeVehicle,
checkFuel,
checkPlug,
checkFuelAndPlug,
checkLocksAndAlarm,
checkDoors,
locateVehicle,
chargeSchedule,
};
| {
let message;
await fordConnect.doLocation(vehicleId);
// REVIEW: The GET /location API doesn't require a commandId, so how do we know it is updated.
const response = await fordConnect.getLocation(vehicleId);
if (response.statusCode === 200 && response.body && response.body.status === 'SUCCESS' && response.body.vehicleLocation) {
message = `The vehicle is at ${await geo.getLocation(response.body.vehicleLocation.latitude, response.body.vehicleLocation.longitude)}`;
} else if (response.body) {
message = `Failed to get location information with status code ${response.statusCode} and body status of ${response.body.status}. `;
} else {
message = `Failed to get location information with status ${response.statusCode}. `;
}
return message;
} | identifier_body |
vehicle.js | /* eslint-disable linebreak-style */
/* eslint-disable no-console */
const fordConnect = require('./fordConnect/fordConnect');
const { geo } = require('./geo');
let activeVehicle;
/**
* Updates the access token and sets the active vehicle to the vehicle with the
* vehicleAuthorizationIndicator set to 1.
*
* We have this routine, since we only have a single user (one Alexa developer account for
* the hack with the API access going away, so not publishing the Alexa skill publically).
* To support multiple users we would simply add a listener on port 3000 and use the state to
* do a user regisration lookup, with a quick expiry. We would store the results refresh
* tokens in a NoSQL database.
*/
async function init() {
// Try to use the FORD_CODE environment variable to refresh our access token and refresh token.
await fordConnect.updateTokenFromCode();
// Try to use the FORD_REFRESH environment variable to refresh our access token and refresh token.
await fordConnect.refreshToken(60);
// Get the list of vehicles (hopefully one of the above APIs set our access token.)
const vehicles = await fordConnect.getVehicles();
if (vehicles.statusCode === 200) {
// Grab the first vehicle that we have authorized (FordPass UI only lets you select 1 vehicle).
// eslint-disable-next-line prefer-destructuring
activeVehicle = vehicles.body.vehicles.filter((v) => v.vehicleAuthorizationIndicator === 1)[0];
if (activeVehicle && activeVehicle.vehicleId) {
console.log('\nAlexa commands will use the following vehicle:');
console.log(activeVehicle);
} else {
console.error(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('Did not get a vehicle back from getVehicles.');
console.error('Please provide a new FORD_CODE or MYFORD_REFRESH.');
process.exit(1);
}
} else if (vehicles.statusCode === 500) {
// We got HTTP 500 during the hack and the request from Ford was to get a new token.
// Refreshing the access token with the old refresh token would succeed OAuth calls,
// but all calls to the FordConnect API still failed with HTTP 500.
console.error(`500FORDCONNECT ${JSON.stringify(vehicles)}`);
console.error('GOT 500 (INTERNAL SERVER ERROR) from FordConnect API calling getVehicles!');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
process.exit(1);
} else if (vehicles.stautsCode === 401) {
console.error('Access deined.');
console.error('Please provide a new FORD_CODE or FORD_REFRESH.');
} else {
console.log(`SPECBUG ${JSON.stringify(vehicles)}`);
console.error('*** Unexpected error calling getVehicles.');
process.exit(1);
}
}
/**
* This API should convert a userId into a vehicleId. For now this always just returns the single
* active vehicle. To support multiple users, we could use a NoSQL database to do the persistent
* mapping.
*
* @param {*} userId The user passed in the request.
* @returns The vehicleId to use for the request.
*/
function toVehicleId(userId) {
// TODO: Add mapping if we need to support multiple users.
const { vehicleId } = activeVehicle;
console.log(`User ${userId} is using vehicle ${vehicleId}.`);
return vehicleId;
}
/**
* Updates the cloud data by geting a doStatus followed by a getStatus, to know when it is complete.
* The timeout is set fairly tight, since we only have 8-10 seconds to return data to Alexa.
*
* @param {*} vehicleId The vehicle to push to the cloud.
* @returns The response object from the getStatus (or undefined if the doStatus call failed).
* For success the .statusCode should be 202 and the body.commandStatus should be COMPLETED.
* Because of agressive timeouts it may still be PENDINGRESPONSE.
*/
async function cloudPush(vehicleId) {
const response = await fordConnect.doStatus(vehicleId);
if (response.statusCode === 202
&& response.body
&& response.body.status === 'SUCCESS'
&& response.body.commandStatus === 'COMPLETED'
&& response.body.commandId) |
return undefined;
}
/**
* Returns a message about the fuel and battery levels.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of charging.
*/
function checkFuel(vehicleInfo) {
const energy = {
fuelLevel: null,
fuelDTE: null,
batteryLevel: null,
batteryDTE: null,
};
// BUGBUG: I'm really unclear on what the values here should look like. I need
// to see real data from ICE, PHEV, HEV and BEV to understand all of the use
// cases. For now, I'm going with the API sends null if it isn't supported and
// it sends a float if it is & I'm not relying on engineType.
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.fuelLevel) {
energy.fuelLevel = vehicleInfo.vehicleDetails.fuelLevel.value;
energy.fuelDTE = vehicleInfo.vehicleDetails.fuelLevel.distanceToEmpty;
}
if (vehicleInfo.vehicleDetails && vehicleInfo.vehicleDetails.batteryChargeLevel) {
energy.batteryLevel = vehicleInfo.vehicleDetails.batteryChargeLevel.value;
energy.batteryDTE = vehicleInfo.vehicleDetails.batteryChargeLevel.distanceToEmpty;
}
let message;
if (energy.fuelLevel !== null && energy.fuelLevel <= 0.0) {
message = 'Fuel is empty. ';
} else if (energy.fuelLevel !== null && energy.fuelLevel) {
message = `Fuel is ${energy.fuelLevel} percent. `;
} else {
message = '';
}
if (energy.fuelDTE !== null && energy.fuelDTE >= 0) {
message += `You can travel ${geo.distance(energy.fuelDTE)} on fuel. `;
}
if (energy.batteryLevel !== null && energy.batteryLevel <= 0.0) {
message += 'Battery is empty. ';
} else if (energy.batteryLevel !== null && energy.batteryLevel) {
message += `Battery is ${energy.batteryLevel} percent. `;
}
if (energy.batteryDTE !== null && energy.batteryDTE >= 0) {
message += `You can travel ${geo.distance(energy.batteryDTE)} on battery. `;
}
return message;
}
/**
* Returns a message about the EV plug and the charging status.
*
* @param {*} vehicleInfo The .body.vehicle data from getDetails call.
* @returns String. A message to speak about the status of the plug.
*/
function checkPlug(vehicleInfo) {
let message;
if (vehicleInfo.vehicleStatus.plugStatus) {
message = `The EV plug is ${vehicleInfo.vehicleStatus.plugStatus.value === true ? 'connected' : 'disconnected'}. `;
} else {
message = 'Failed to get EV plug status. ';
}
if (vehicleInfo.vehicleStatus.chargingStatus) {
message += `The current charging status is ${vehicleInfo.vehicleStatus.chargingStatus.value}.`;
}
return message;
}
/**
* Charges an electric vehicle.
*
* @param {*} vehicleId The vehicle to charge.
* @returns String. A message to speak about the status of charging.
*/
async function chargeVehicle(vehicleId) {
let message;
// Start charging.
const response = await fordConnect.doStartCharge(vehicleId);
if (response.statusCode === 406) {
if (response.body && response.body.error && response.body.error.details) {
message = `Failed charging vehicle. ${response.body.error.details}.`;
} else {
console.error(`SPECBUG ${JSON.stringify(response)}`);
message = 'Failed charging vehicle. Only EV cars are supported.';
}
} else if (response.statusCode < 300) {
// Try to update the cloud with the latest and get the details. Due to aggressive timeouts
// it is possible that vehicleStatus.chargingStatus didn't get a chance to update, but likely
// plugStatus was already correct (e.g. the vehicle was typically plugged in a long time ago.)
await cloudPush(vehicleId);
const details = await fordConnect.getDetails(vehicleId);
if (details.statusCode === 200 && details.body.vehicle.vehicleStatus.plugStatus) {
if (details.body.vehicle.vehicleStatus.plugStatus.value === true) {
message = 'Request for charging sent.';
} else if (details.body.vehicle.vehicleStatus.plugStatus.value === false) {
message = 'Request for charging sent, but the plug is not connected.';
} else {
console.log(JSON.stringify(details));
message = "Request for charging sent, but I'm unable to determine if the vehicle is plugged in.";
}
} else {
console.log(JSON.stringify(details));
message = 'Request for charging sent, but getting vehicle status failed.';
}
} else {
message = `Failed charging vehicle. Got status code ${response.statusCode}.`;
}
return message;
}
/**
* Returns a message with the name (if known) and address where the vehicle is located.
*
* @param {*} vehicleId The vehicle to find.
* @returns String. A message to speak containing the name and address where the vehicle is located.
*/
async function locateVehicle(vehicleId) {
let message;
await fordConnect.doLocation(vehicleId);
// REVIEW: The GET /location API doesn't require a commandId, so how do we know it is updated.
const response = await fordConnect.getLocation(vehicleId);
if (response.statusCode === 200 && response.body && response.body.status === 'SUCCESS' && response.body.vehicleLocation) {
message = `The vehicle is at ${await geo.getLocation(response.body.vehicleLocation.latitude, response.body.vehicleLocation.longitude)}`;
} else if (response.body) {
message = `Failed to get location information with status code ${response.statusCode} and body status of ${response.body.status}. `;
} else {
message = `Failed to get location information with status ${response.statusCode}. `;
}
return message;
}
/**
* Returns a message with the status of door locks (LOCKED, UNLOCKED) and the
* alarm (SET, NOT SET, ACTIVE, ERROR).
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the status of the door locks and alarm.
*/
async function checkLocksAndAlarm(vehicleId) {
let message;
const cloud = await cloudPush(vehicleId);
if (cloud && cloud.statusCode === 202 && cloud.body && cloud.body.commandStatus === 'COMPLETED' && cloud.body.vehiclestatus) {
const status = cloud.body.vehiclestatus;
message = `The locks are ${status.lockStatus.value}. The alarm is ${status.alarm.value.replace('NOTSET', 'NOT SET')}. `;
} else {
console.error(JSON.stringify(cloud));
console.error('Failed to get lock and alarm status.');
message = 'Unable to check locks and alarm. ';
}
return message;
}
/**
* Returns a message with the status of fuel & battery level and EV plug connection.
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about the fuel and EV plug status.
*/
async function checkFuelAndPlug(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about fuel level.
if (details.statusCode === 200 && details.body.vehicle) {
message = checkFuel(details.body.vehicle);
// Message about EV plug.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.engineType
&& details.body.vehicle.engineType.indexOf('EV') >= 0) {
message += checkPlug(details.body.vehicle);
}
} else {
console.error(JSON.stringify(details));
message = 'Unable to check fuel level. ';
}
return message;
}
/**
* Returns a message about any open doors (or confirms all doors are closed.)
*
* @param {*} vehicleId The vehicle to check.
* @returns String. A message to speak about any open doors.
*/
async function checkDoors(vehicleId) {
let message;
const details = await fordConnect.getDetails(vehicleId);
// Message about any open doors.
if (details.body
&& details.body.vehicle
&& details.body.vehicle.vehicleStatus
&& details.body.vehicle.vehicleStatus.doorStatus) {
const doors = details.body.vehicle.vehicleStatus.doorStatus;
message = doors.map((d) => (d.value !== 'CLOSED'
// Delete the words "UNSPECIFIED_" AND "NOT_APPLICABLE". Replace underscore with spaces
// for better speach output. Per the FAQ the d.value is either OPEN or CLOSED.
//
// TODO: We could do a more user friendly mapping. Right now we have voice responses like
// "DRIVER FRONT", "PASSENGER FRONT", "PASSENGER REAR LEFT", "HOOD DOOR",
// "PASSENGER INNER TAILGATE", etc.
? `${d.vehicleOccupantRole} ${d.vehicleDoor} is ${d.value}. `.replace(/UNSPECIFIED_|NOT_APPLICABLE/g, '').replace('_', ' ')
: '')).join('');
if (doors.filter((d) => d.value !== 'CLOSED').length === 0) {
message = 'All doors are closed. ';
}
}
return message;
}
/**
* Returns a message about the weekday and weekend charge schedule.
*
* @param {*} vehicleId The vehicle to get the schedule of.
* @returns String. A message to speak about the chaging schedule.
*/
async function chargeSchedule(vehicleId) {
let message;
const response = await fordConnect.getChargeSchedule(vehicleId);
if (response.statusCode === 200 && response.body.chargeSchedules) {
if (response.body.chargeSchedules.length === 0) {
message = 'No charging schedule is set. ';
} else {
message = 'The charge schedule is ';
message += response.body.chargeSchedules.map(
// The Ford dash UI allows each schedule to have multiple charge windows.
// REVIEW: What does "00:00" to "00:00" mean? For now we just say it.
(sch) => sch.chargeWindows.map(
(cw) => `${sch.days}S from ${cw.startTime} to ${cw.endTime} at ${sch.desiredChargeLevel} percent. `,
).join(' '),
).join(' ');
}
} else {
message = `Failed getting charge schedule with status code ${response.statusCode}`;
}
return message;
}
exports.vehicle = {
init,
toVehicleId,
cloudPush,
chargeVehicle,
checkFuel,
checkPlug,
checkFuelAndPlug,
checkLocksAndAlarm,
checkDoors,
locateVehicle,
chargeSchedule,
};
| {
const { commandId } = response.body;
// NOTE: We get an HTTP 202 from the GET call not a 200.
const status = await fordConnect.getStatus(vehicleId, commandId);
return status;
} | conditional_block |
queryOECD.py | #!/usr/bin/env python
'''
@author : Mitchell Van Braeckel
@id : 1002297
@date : 10/10/2020
@version : python 3.8-32 / python 3.8.5
@course : CIS*4010 Cloud Computing
@brief : A1 Part 2 - AWS DynamoDB ; Q2 - Query OECD
@note :
Description: There are many CSV files containing info from the OECD about agricultural production, each for various regions around the world.
Queries all 4 tables (northamerica, canada, usa, mexico -table names) based on a commodity (code key or label),
looking for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
then output the specific NA definition 'hit' results and probable conclusion for NA definition per variable,
as well as an overall conclusion for NA definition
NOTE: forgot to add ability to specify commodity as cmd line arg instead of STDIN
NOTE: assume year range is 2010 to 2029 (inclusive)
NOTE: assume perfect user input for commodity and variables
- however, if input commodity that's not a valid commodity code or label, exits program with error message
NOTE: NA definition hit refers to if the calculated sum from different tables of CAN, USA, MEX are equal to that of NA (CAN+USA, CAN+USA+MEX, or Neither)
'''
'''
IMPROVEMENT: Use 'encodings' table instead of the CSV file
'''
############################################# IMPORTS #############################################
# IMPORTS - 'pip install <import-package>'
import boto3
import csv
import sys
from boto3.dynamodb.conditions import Key, Attr
############################################ CONSTANTS ############################################
# TABLE CONSTANTS
NORTH_AMERICA = "northamerica"
CANADA = "canada"
USA = "usa"
MEXICO = "mexico"
TABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]
YEAR_RANGE = range(2010, 2030)
# OTHER CONSTANTS
OUTPUT_FORMAT = "{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}"
ENCODINGS_CSV = "encodings.csv"
#ENCODINGS_TABLE_NAME = "encodings"
USAGE_STATEMENT = "Usage: py queryOECD.py <commodity-code|commodity-label>"
############################## STATE VARIABLES, INITIALIZATION, MAIN ##############################
# MAIN - Declares global vars and state here, then ask for commodity (check both key/label),
# look for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
# then output the specific NA definition 'hit' results and probable conclusion for NA definition
def main():
#globals
global dynamodb_client
global dynamodb_resource
global na_table
global canada_table
global usa_table
global mexico_table
global total_can_usa
global total_can_usa_mex
global total_neither
# ========== ARGUMENTS ==========
# Collect command line arguments when executing this python script
argc = len(sys.argv)
bad_usage_flag = False
# Check #of args (deal with it later tho)
# 1 optional arg for commodity, otherwise prompt user for it
if argc > 2:
bad_usage_flag = True
print("Error: Too many arguments.")
# Exit with usage statement if flag has been triggered for any reason
if bad_usage_flag:
sys.exit(USAGE_STATEMENT)
# ========== AWS DYNAMO DB ==========
# Init AWS DynamoDB client and resource (NOTE: these are global)
dynamodb_client = boto3.client("dynamodb")
dynamodb_resource = boto3.resource("dynamodb")
# Validate AWS DynamoDB credentials (by testing if 'list_tables()' works)
try:
dynamodb_client.list_tables()
except Exception as e:
print("Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')")
sys.exit(f"[ERROR] {e}")
# Check the 4 tables exist, then get them all
err_output = ""
table_list = dynamodb_client.list_tables()['TableNames']
print(f"Existing Tables: {table_list}")
for t in TABLE_LIST:
if t not in table_list:
err_output += f"Error: Invalid table name '{t}' - table does not exist.\n"
# Print all tables that did not exist, then exit
if err_output != "":
print(err_output.strip("\n"))
sys.exit("ERROR: Terminating program because unable to get table that does not exist.")
# Get all tables (after checking they exist) (NOTE: these are global)
na_table = dynamodb_resource.Table(NORTH_AMERICA)
canada_table = dynamodb_resource.Table(CANADA)
usa_table = dynamodb_resource.Table(USA)
mexico_table = dynamodb_resource.Table(MEXICO)
# Open the encodings CSV file and read its contents
commodity_encodings_dict = {}
variable_encodings_dict = {}
with open(ENCODINGS_CSV, "r", newline='') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',')
# if field is var or commodity, set a key-value pair between code and label (in the respective map)
for row in csv_content:
if row[2] == "variable":
variable_encodings_dict[row[0]] = row[1]
elif row[2] == "commodity":
commodity_encodings_dict[row[0]] = row[1]
csv_file.close()
# Check args for commodity now, otherwise prompt user
if argc == 2:
commodity_input = sys.argv[1]
else:
# Ask user for commodity
commodity_input = input("Commodity: ").strip()
# Check if input exists as code key, otherwise try to convert assumed label to code key (if not a label, code will be None after)
if commodity_input.upper() in commodity_encodings_dict:
commodity_code = commodity_input.upper()
else:
commodity_code = convert_dict_label_to_code_key(commodity_input, commodity_encodings_dict)
# Check if commodity found a code or None
print(f"ENCODING: {commodity_code}")
if commodity_code is None:
print(f"Error: Commodity '{commodity_input}' was not found.")
sys.exit("ERROR: Terminating program because input does not exist as an encoding commodity code or label.")
# Init total accumulators for each category
total_can_usa = 0
total_can_usa_mex = 0
total_neither = 0
# iterate through each variable and analyze data (if applicable)
for var in variable_encodings_dict.keys():
if is_common_variable(commodity_code, var):
output_table(commodity_code, var, variable_encodings_dict, commodity_encodings_dict)
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(total_can_usa, total_can_usa_mex, total_neither)
if total_can_usa == max_hits:
na_defn = "CAN+USA"
elif total_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither")
print(f"Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\n")
############################################ FUNCTIONS ############################################
# Converts the label of a dict into its code key, returns None if not a label
def convert_dict_label_to_code_key(label, encodings_dict):
# Get the key of the label if the label exists in the dict as a value
if label in list(encodings_dict.values()):
return list(encodings_dict.keys())[list(encodings_dict.values()).index(label)]
else:
return None
# Check if a commodity code + variable is common across all 4 tables, return true if it is
def is_common_variable(commodity_code, variable):
return (has_commodity_and_variable(na_table, commodity_code, variable) and
has_commodity_and_variable(canada_table, commodity_code, variable) and
has_commodity_and_variable(usa_table, commodity_code, variable) and
has_commodity_and_variable(mexico_table, commodity_code, variable))
# Check if a table has data for commodity code + variable (ie. scan table), returns true if at least 1 item is found
def | (table, commodity_code, variable):
response = table.scan(
FilterExpression = Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)
return response['Count'] > 0
# Retrieves and outputs table data based on commodity and variable and analyze for NA definition
def output_table(commodity_code, variable, variable_encodings_dict, commodity_encodings_dict):
# Bring in globals to modify
global total_can_usa
global total_can_usa_mex
global total_neither
# Init local accumulators
temp_can_usa = 0
temp_can_usa_mex = 0
temp_neither = 0
# Print table headers: common variable (for commodity code) across all 4 tables, and table column names
print(f"Variable: {variable_encodings_dict[variable]}")
print(OUTPUT_FORMAT.format("Year", "North America", "Canada", "USA", "Mexico", "CAN+USA", "CAN+USA+MEX", "NA Defn"))
# Retrieve all data, from all years (ie. the items from the scan)
na_scan_data = na_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
can_scan_data = canada_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
usa_scan_data = usa_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
mex_scan_data = mexico_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
# Sort each scan data by key
na_scan_data.sort(key=data_sort)
can_scan_data.sort(key=data_sort)
usa_scan_data.sort(key=data_sort)
mex_scan_data.sort(key=data_sort)
# Analyze data
for year in YEAR_RANGE:
# For each relevant year, calculate total value using multiplication factor
i = year - 2010
na_value = na_scan_data[i]['value'] * (10**na_scan_data[i]['mfactor'])
can_value = can_scan_data[i]['value'] * (10**can_scan_data[i]['mfactor'])
usa_value = usa_scan_data[i]['value'] * (10**usa_scan_data[i]['mfactor'])
mex_value = mex_scan_data[i]['value'] * (10**mex_scan_data[i]['mfactor'])
# Calc temp sums for the CAN+USA and CAN+USA+MEX columns
temp_can_usa_value = can_value + usa_value
temp_can_usa_mex_value = can_value + usa_value + mex_value
# Determine OECD def of NA, by checking if the temp calc sums from scan data calc values are equivalent to CAN+USA sum, CAN+USA+MEX sum, or Neither
# Note: accumulate the #of accurate NA def 'hits'
if temp_can_usa_value == na_value:
na_defn = 'CAN+USA'
temp_can_usa += 1
elif temp_can_usa_mex_value == na_value:
na_defn = 'CAN+USA+MEX'
temp_can_usa_mex += 1
else:
na_defn = 'Neither'
temp_neither += 1
# Print table row for current year
print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value, mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)
if temp_can_usa == max_hits:
na_defn = "CAN+USA"
elif temp_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither")
print(f"Therefore we can conclude North America = {na_defn}\n")
# Accumulate global totals using temp local accumulators for NA definition 'hits'
total_can_usa += temp_can_usa
total_can_usa_mex += temp_can_usa_mex
total_neither += temp_neither
# Sorter Helper for queried data by year
def data_sort(elem):
return elem['year']
###################################################################################################
main()
| has_commodity_and_variable | identifier_name |
queryOECD.py | #!/usr/bin/env python
'''
@author : Mitchell Van Braeckel
@id : 1002297
@date : 10/10/2020
@version : python 3.8-32 / python 3.8.5
@course : CIS*4010 Cloud Computing
@brief : A1 Part 2 - AWS DynamoDB ; Q2 - Query OECD
@note :
Description: There are many CSV files containing info from the OECD about agricultural production, each for various regions around the world.
Queries all 4 tables (northamerica, canada, usa, mexico -table names) based on a commodity (code key or label),
looking for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
then output the specific NA definition 'hit' results and probable conclusion for NA definition per variable,
as well as an overall conclusion for NA definition
NOTE: forgot to add ability to specify commodity as cmd line arg instead of STDIN
NOTE: assume year range is 2010 to 2029 (inclusive)
NOTE: assume perfect user input for commodity and variables
- however, if input commodity that's not a valid commodity code or label, exits program with error message
NOTE: NA definition hit refers to if the calculated sum from different tables of CAN, USA, MEX are equal to that of NA (CAN+USA, CAN+USA+MEX, or Neither)
'''
'''
IMPROVEMENT: Use 'encodings' table instead of the CSV file
'''
############################################# IMPORTS #############################################
# IMPORTS - 'pip install <import-package>'
import boto3
import csv
import sys
from boto3.dynamodb.conditions import Key, Attr
############################################ CONSTANTS ############################################
# TABLE CONSTANTS
NORTH_AMERICA = "northamerica"
CANADA = "canada"
USA = "usa"
MEXICO = "mexico"
TABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]
YEAR_RANGE = range(2010, 2030)
# OTHER CONSTANTS
OUTPUT_FORMAT = "{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}"
ENCODINGS_CSV = "encodings.csv"
#ENCODINGS_TABLE_NAME = "encodings"
USAGE_STATEMENT = "Usage: py queryOECD.py <commodity-code|commodity-label>"
############################## STATE VARIABLES, INITIALIZATION, MAIN ##############################
# MAIN - Declares global vars and state here, then ask for commodity (check both key/label),
# look for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
# then output the specific NA definition 'hit' results and probable conclusion for NA definition
def main():
#globals
global dynamodb_client
global dynamodb_resource
global na_table
global canada_table
global usa_table
global mexico_table
global total_can_usa
global total_can_usa_mex
global total_neither
# ========== ARGUMENTS ==========
# Collect command line arguments when executing this python script
argc = len(sys.argv)
bad_usage_flag = False
# Check #of args (deal with it later tho)
# 1 optional arg for commodity, otherwise prompt user for it
if argc > 2:
bad_usage_flag = True
print("Error: Too many arguments.")
# Exit with usage statement if flag has been triggered for any reason
if bad_usage_flag:
sys.exit(USAGE_STATEMENT)
# ========== AWS DYNAMO DB ==========
# Init AWS DynamoDB client and resource (NOTE: these are global)
dynamodb_client = boto3.client("dynamodb")
dynamodb_resource = boto3.resource("dynamodb")
# Validate AWS DynamoDB credentials (by testing if 'list_tables()' works)
try:
dynamodb_client.list_tables()
except Exception as e:
print("Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')")
sys.exit(f"[ERROR] {e}")
# Check the 4 tables exist, then get them all
err_output = ""
table_list = dynamodb_client.list_tables()['TableNames']
print(f"Existing Tables: {table_list}")
for t in TABLE_LIST:
if t not in table_list:
err_output += f"Error: Invalid table name '{t}' - table does not exist.\n"
# Print all tables that did not exist, then exit
if err_output != "":
print(err_output.strip("\n"))
sys.exit("ERROR: Terminating program because unable to get table that does not exist.")
# Get all tables (after checking they exist) (NOTE: these are global)
na_table = dynamodb_resource.Table(NORTH_AMERICA)
canada_table = dynamodb_resource.Table(CANADA)
usa_table = dynamodb_resource.Table(USA) | # Open the encodings CSV file and read its contents
commodity_encodings_dict = {}
variable_encodings_dict = {}
with open(ENCODINGS_CSV, "r", newline='') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',')
# if field is var or commodity, set a key-value pair between code and label (in the respective map)
for row in csv_content:
if row[2] == "variable":
variable_encodings_dict[row[0]] = row[1]
elif row[2] == "commodity":
commodity_encodings_dict[row[0]] = row[1]
csv_file.close()
# Check args for commodity now, otherwise prompt user
if argc == 2:
commodity_input = sys.argv[1]
else:
# Ask user for commodity
commodity_input = input("Commodity: ").strip()
# Check if input exists as code key, otherwise try to convert assumed label to code key (if not a label, code will be None after)
if commodity_input.upper() in commodity_encodings_dict:
commodity_code = commodity_input.upper()
else:
commodity_code = convert_dict_label_to_code_key(commodity_input, commodity_encodings_dict)
# Check if commodity found a code or None
print(f"ENCODING: {commodity_code}")
if commodity_code is None:
print(f"Error: Commodity '{commodity_input}' was not found.")
sys.exit("ERROR: Terminating program because input does not exist as an encoding commodity code or label.")
# Init total accumulators for each category
total_can_usa = 0
total_can_usa_mex = 0
total_neither = 0
# iterate through each variable and analyze data (if applicable)
for var in variable_encodings_dict.keys():
if is_common_variable(commodity_code, var):
output_table(commodity_code, var, variable_encodings_dict, commodity_encodings_dict)
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(total_can_usa, total_can_usa_mex, total_neither)
if total_can_usa == max_hits:
na_defn = "CAN+USA"
elif total_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither")
print(f"Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\n")
############################################ FUNCTIONS ############################################
# Converts the label of a dict into its code key, returns None if not a label
def convert_dict_label_to_code_key(label, encodings_dict):
# Get the key of the label if the label exists in the dict as a value
if label in list(encodings_dict.values()):
return list(encodings_dict.keys())[list(encodings_dict.values()).index(label)]
else:
return None
# Check if a commodity code + variable is common across all 4 tables, return true if it is
def is_common_variable(commodity_code, variable):
return (has_commodity_and_variable(na_table, commodity_code, variable) and
has_commodity_and_variable(canada_table, commodity_code, variable) and
has_commodity_and_variable(usa_table, commodity_code, variable) and
has_commodity_and_variable(mexico_table, commodity_code, variable))
# Check if a table has data for commodity code + variable (ie. scan table), returns true if at least 1 item is found
def has_commodity_and_variable(table, commodity_code, variable):
response = table.scan(
FilterExpression = Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)
return response['Count'] > 0
# Retrieves and outputs table data based on commodity and variable and analyze for NA definition
def output_table(commodity_code, variable, variable_encodings_dict, commodity_encodings_dict):
# Bring in globals to modify
global total_can_usa
global total_can_usa_mex
global total_neither
# Init local accumulators
temp_can_usa = 0
temp_can_usa_mex = 0
temp_neither = 0
# Print table headers: common variable (for commodity code) across all 4 tables, and table column names
print(f"Variable: {variable_encodings_dict[variable]}")
print(OUTPUT_FORMAT.format("Year", "North America", "Canada", "USA", "Mexico", "CAN+USA", "CAN+USA+MEX", "NA Defn"))
# Retrieve all data, from all years (ie. the items from the scan)
na_scan_data = na_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
can_scan_data = canada_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
usa_scan_data = usa_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
mex_scan_data = mexico_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
# Sort each scan data by key
na_scan_data.sort(key=data_sort)
can_scan_data.sort(key=data_sort)
usa_scan_data.sort(key=data_sort)
mex_scan_data.sort(key=data_sort)
# Analyze data
for year in YEAR_RANGE:
# For each relevant year, calculate total value using multiplication factor
i = year - 2010
na_value = na_scan_data[i]['value'] * (10**na_scan_data[i]['mfactor'])
can_value = can_scan_data[i]['value'] * (10**can_scan_data[i]['mfactor'])
usa_value = usa_scan_data[i]['value'] * (10**usa_scan_data[i]['mfactor'])
mex_value = mex_scan_data[i]['value'] * (10**mex_scan_data[i]['mfactor'])
# Calc temp sums for the CAN+USA and CAN+USA+MEX columns
temp_can_usa_value = can_value + usa_value
temp_can_usa_mex_value = can_value + usa_value + mex_value
# Determine OECD def of NA, by checking if the temp calc sums from scan data calc values are equivalent to CAN+USA sum, CAN+USA+MEX sum, or Neither
# Note: accumulate the #of accurate NA def 'hits'
if temp_can_usa_value == na_value:
na_defn = 'CAN+USA'
temp_can_usa += 1
elif temp_can_usa_mex_value == na_value:
na_defn = 'CAN+USA+MEX'
temp_can_usa_mex += 1
else:
na_defn = 'Neither'
temp_neither += 1
# Print table row for current year
print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value, mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)
if temp_can_usa == max_hits:
na_defn = "CAN+USA"
elif temp_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither")
print(f"Therefore we can conclude North America = {na_defn}\n")
# Accumulate global totals using temp local accumulators for NA definition 'hits'
total_can_usa += temp_can_usa
total_can_usa_mex += temp_can_usa_mex
total_neither += temp_neither
# Sorter Helper for queried data by year
def data_sort(elem):
return elem['year']
###################################################################################################
main() | mexico_table = dynamodb_resource.Table(MEXICO)
| random_line_split |
queryOECD.py | #!/usr/bin/env python
'''
@author : Mitchell Van Braeckel
@id : 1002297
@date : 10/10/2020
@version : python 3.8-32 / python 3.8.5
@course : CIS*4010 Cloud Computing
@brief : A1 Part 2 - AWS DynamoDB ; Q2 - Query OECD
@note :
Description: There are many CSV files containing info from the OECD about agricultural production, each for various regions around the world.
Queries all 4 tables (northamerica, canada, usa, mexico -table names) based on a commodity (code key or label),
looking for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
then output the specific NA definition 'hit' results and probable conclusion for NA definition per variable,
as well as an overall conclusion for NA definition
NOTE: forgot to add ability to specify commodity as cmd line arg instead of STDIN
NOTE: assume year range is 2010 to 2029 (inclusive)
NOTE: assume perfect user input for commodity and variables
- however, if input commodity that's not a valid commodity code or label, exits program with error message
NOTE: NA definition hit refers to if the calculated sum from different tables of CAN, USA, MEX are equal to that of NA (CAN+USA, CAN+USA+MEX, or Neither)
'''
'''
IMPROVEMENT: Use 'encodings' table instead of the CSV file
'''
############################################# IMPORTS #############################################
# IMPORTS - 'pip install <import-package>'
import boto3
import csv
import sys
from boto3.dynamodb.conditions import Key, Attr
############################################ CONSTANTS ############################################
# TABLE CONSTANTS
NORTH_AMERICA = "northamerica"
CANADA = "canada"
USA = "usa"
MEXICO = "mexico"
TABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]
YEAR_RANGE = range(2010, 2030)
# OTHER CONSTANTS
OUTPUT_FORMAT = "{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}"
ENCODINGS_CSV = "encodings.csv"
#ENCODINGS_TABLE_NAME = "encodings"
USAGE_STATEMENT = "Usage: py queryOECD.py <commodity-code|commodity-label>"
############################## STATE VARIABLES, INITIALIZATION, MAIN ##############################
# MAIN - Declares global vars and state here, then ask for commodity (check both key/label),
# look for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
# then output the specific NA definition 'hit' results and probable conclusion for NA definition
def main():
#globals
|
############################################ FUNCTIONS ############################################
# Converts the label of a dict into its code key, returns None if not a label
def convert_dict_label_to_code_key(label, encodings_dict):
# Get the key of the label if the label exists in the dict as a value
if label in list(encodings_dict.values()):
return list(encodings_dict.keys())[list(encodings_dict.values()).index(label)]
else:
return None
# Check if a commodity code + variable is common across all 4 tables, return true if it is
def is_common_variable(commodity_code, variable):
return (has_commodity_and_variable(na_table, commodity_code, variable) and
has_commodity_and_variable(canada_table, commodity_code, variable) and
has_commodity_and_variable(usa_table, commodity_code, variable) and
has_commodity_and_variable(mexico_table, commodity_code, variable))
# Check if a table has data for commodity code + variable (ie. scan table), returns true if at least 1 item is found
def has_commodity_and_variable(table, commodity_code, variable):
response = table.scan(
FilterExpression = Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)
return response['Count'] > 0
# Retrieves and outputs table data based on commodity and variable and analyze for NA definition
def output_table(commodity_code, variable, variable_encodings_dict, commodity_encodings_dict):
# Bring in globals to modify
global total_can_usa
global total_can_usa_mex
global total_neither
# Init local accumulators
temp_can_usa = 0
temp_can_usa_mex = 0
temp_neither = 0
# Print table headers: common variable (for commodity code) across all 4 tables, and table column names
print(f"Variable: {variable_encodings_dict[variable]}")
print(OUTPUT_FORMAT.format("Year", "North America", "Canada", "USA", "Mexico", "CAN+USA", "CAN+USA+MEX", "NA Defn"))
# Retrieve all data, from all years (ie. the items from the scan)
na_scan_data = na_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
can_scan_data = canada_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
usa_scan_data = usa_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
mex_scan_data = mexico_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
# Sort each scan data by key
na_scan_data.sort(key=data_sort)
can_scan_data.sort(key=data_sort)
usa_scan_data.sort(key=data_sort)
mex_scan_data.sort(key=data_sort)
# Analyze data
for year in YEAR_RANGE:
# For each relevant year, calculate total value using multiplication factor
i = year - 2010
na_value = na_scan_data[i]['value'] * (10**na_scan_data[i]['mfactor'])
can_value = can_scan_data[i]['value'] * (10**can_scan_data[i]['mfactor'])
usa_value = usa_scan_data[i]['value'] * (10**usa_scan_data[i]['mfactor'])
mex_value = mex_scan_data[i]['value'] * (10**mex_scan_data[i]['mfactor'])
# Calc temp sums for the CAN+USA and CAN+USA+MEX columns
temp_can_usa_value = can_value + usa_value
temp_can_usa_mex_value = can_value + usa_value + mex_value
# Determine OECD def of NA, by checking if the temp calc sums from scan data calc values are equivalent to CAN+USA sum, CAN+USA+MEX sum, or Neither
# Note: accumulate the #of accurate NA def 'hits'
if temp_can_usa_value == na_value:
na_defn = 'CAN+USA'
temp_can_usa += 1
elif temp_can_usa_mex_value == na_value:
na_defn = 'CAN+USA+MEX'
temp_can_usa_mex += 1
else:
na_defn = 'Neither'
temp_neither += 1
# Print table row for current year
print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value, mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)
if temp_can_usa == max_hits:
na_defn = "CAN+USA"
elif temp_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither")
print(f"Therefore we can conclude North America = {na_defn}\n")
# Accumulate global totals using temp local accumulators for NA definition 'hits'
total_can_usa += temp_can_usa
total_can_usa_mex += temp_can_usa_mex
total_neither += temp_neither
# Sorter Helper for queried data by year
def data_sort(elem):
return elem['year']
###################################################################################################
main()
| global dynamodb_client
global dynamodb_resource
global na_table
global canada_table
global usa_table
global mexico_table
global total_can_usa
global total_can_usa_mex
global total_neither
# ========== ARGUMENTS ==========
# Collect command line arguments when executing this python script
argc = len(sys.argv)
bad_usage_flag = False
# Check #of args (deal with it later tho)
# 1 optional arg for commodity, otherwise prompt user for it
if argc > 2:
bad_usage_flag = True
print("Error: Too many arguments.")
# Exit with usage statement if flag has been triggered for any reason
if bad_usage_flag:
sys.exit(USAGE_STATEMENT)
# ========== AWS DYNAMO DB ==========
# Init AWS DynamoDB client and resource (NOTE: these are global)
dynamodb_client = boto3.client("dynamodb")
dynamodb_resource = boto3.resource("dynamodb")
# Validate AWS DynamoDB credentials (by testing if 'list_tables()' works)
try:
dynamodb_client.list_tables()
except Exception as e:
print("Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')")
sys.exit(f"[ERROR] {e}")
# Check the 4 tables exist, then get them all
err_output = ""
table_list = dynamodb_client.list_tables()['TableNames']
print(f"Existing Tables: {table_list}")
for t in TABLE_LIST:
if t not in table_list:
err_output += f"Error: Invalid table name '{t}' - table does not exist.\n"
# Print all tables that did not exist, then exit
if err_output != "":
print(err_output.strip("\n"))
sys.exit("ERROR: Terminating program because unable to get table that does not exist.")
# Get all tables (after checking they exist) (NOTE: these are global)
na_table = dynamodb_resource.Table(NORTH_AMERICA)
canada_table = dynamodb_resource.Table(CANADA)
usa_table = dynamodb_resource.Table(USA)
mexico_table = dynamodb_resource.Table(MEXICO)
# Open the encodings CSV file and read its contents
commodity_encodings_dict = {}
variable_encodings_dict = {}
with open(ENCODINGS_CSV, "r", newline='') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',')
# if field is var or commodity, set a key-value pair between code and label (in the respective map)
for row in csv_content:
if row[2] == "variable":
variable_encodings_dict[row[0]] = row[1]
elif row[2] == "commodity":
commodity_encodings_dict[row[0]] = row[1]
csv_file.close()
# Check args for commodity now, otherwise prompt user
if argc == 2:
commodity_input = sys.argv[1]
else:
# Ask user for commodity
commodity_input = input("Commodity: ").strip()
# Check if input exists as code key, otherwise try to convert assumed label to code key (if not a label, code will be None after)
if commodity_input.upper() in commodity_encodings_dict:
commodity_code = commodity_input.upper()
else:
commodity_code = convert_dict_label_to_code_key(commodity_input, commodity_encodings_dict)
# Check if commodity found a code or None
print(f"ENCODING: {commodity_code}")
if commodity_code is None:
print(f"Error: Commodity '{commodity_input}' was not found.")
sys.exit("ERROR: Terminating program because input does not exist as an encoding commodity code or label.")
# Init total accumulators for each category
total_can_usa = 0
total_can_usa_mex = 0
total_neither = 0
# iterate through each variable and analyze data (if applicable)
for var in variable_encodings_dict.keys():
if is_common_variable(commodity_code, var):
output_table(commodity_code, var, variable_encodings_dict, commodity_encodings_dict)
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(total_can_usa, total_can_usa_mex, total_neither)
if total_can_usa == max_hits:
na_defn = "CAN+USA"
elif total_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither")
print(f"Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\n") | identifier_body |
queryOECD.py | #!/usr/bin/env python
'''
@author : Mitchell Van Braeckel
@id : 1002297
@date : 10/10/2020
@version : python 3.8-32 / python 3.8.5
@course : CIS*4010 Cloud Computing
@brief : A1 Part 2 - AWS DynamoDB ; Q2 - Query OECD
@note :
Description: There are many CSV files containing info from the OECD about agricultural production, each for various regions around the world.
Queries all 4 tables (northamerica, canada, usa, mexico -table names) based on a commodity (code key or label),
looking for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
then output the specific NA definition 'hit' results and probable conclusion for NA definition per variable,
as well as an overall conclusion for NA definition
NOTE: forgot to add ability to specify commodity as cmd line arg instead of STDIN
NOTE: assume year range is 2010 to 2029 (inclusive)
NOTE: assume perfect user input for commodity and variables
- however, if input commodity that's not a valid commodity code or label, exits program with error message
NOTE: NA definition hit refers to if the calculated sum from different tables of CAN, USA, MEX are equal to that of NA (CAN+USA, CAN+USA+MEX, or Neither)
'''
'''
IMPROVEMENT: Use 'encodings' table instead of the CSV file
'''
############################################# IMPORTS #############################################
# IMPORTS - 'pip install <import-package>'
import boto3
import csv
import sys
from boto3.dynamodb.conditions import Key, Attr
############################################ CONSTANTS ############################################
# TABLE CONSTANTS
NORTH_AMERICA = "northamerica"
CANADA = "canada"
USA = "usa"
MEXICO = "mexico"
TABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]
YEAR_RANGE = range(2010, 2030)
# OTHER CONSTANTS
OUTPUT_FORMAT = "{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}"
ENCODINGS_CSV = "encodings.csv"
#ENCODINGS_TABLE_NAME = "encodings"
USAGE_STATEMENT = "Usage: py queryOECD.py <commodity-code|commodity-label>"
############################## STATE VARIABLES, INITIALIZATION, MAIN ##############################
# MAIN - Declares global vars and state here, then ask for commodity (check both key/label),
# look for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
# then output the specific NA definition 'hit' results and probable conclusion for NA definition
def main():
#globals
global dynamodb_client
global dynamodb_resource
global na_table
global canada_table
global usa_table
global mexico_table
global total_can_usa
global total_can_usa_mex
global total_neither
# ========== ARGUMENTS ==========
# Collect command line arguments when executing this python script
argc = len(sys.argv)
bad_usage_flag = False
# Check #of args (deal with it later tho)
# 1 optional arg for commodity, otherwise prompt user for it
if argc > 2:
bad_usage_flag = True
print("Error: Too many arguments.")
# Exit with usage statement if flag has been triggered for any reason
if bad_usage_flag:
sys.exit(USAGE_STATEMENT)
# ========== AWS DYNAMO DB ==========
# Init AWS DynamoDB client and resource (NOTE: these are global)
dynamodb_client = boto3.client("dynamodb")
dynamodb_resource = boto3.resource("dynamodb")
# Validate AWS DynamoDB credentials (by testing if 'list_tables()' works)
try:
dynamodb_client.list_tables()
except Exception as e:
print("Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')")
sys.exit(f"[ERROR] {e}")
# Check the 4 tables exist, then get them all
err_output = ""
table_list = dynamodb_client.list_tables()['TableNames']
print(f"Existing Tables: {table_list}")
for t in TABLE_LIST:
if t not in table_list:
err_output += f"Error: Invalid table name '{t}' - table does not exist.\n"
# Print all tables that did not exist, then exit
if err_output != "":
print(err_output.strip("\n"))
sys.exit("ERROR: Terminating program because unable to get table that does not exist.")
# Get all tables (after checking they exist) (NOTE: these are global)
na_table = dynamodb_resource.Table(NORTH_AMERICA)
canada_table = dynamodb_resource.Table(CANADA)
usa_table = dynamodb_resource.Table(USA)
mexico_table = dynamodb_resource.Table(MEXICO)
# Open the encodings CSV file and read its contents
commodity_encodings_dict = {}
variable_encodings_dict = {}
with open(ENCODINGS_CSV, "r", newline='') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',')
# if field is var or commodity, set a key-value pair between code and label (in the respective map)
for row in csv_content:
if row[2] == "variable":
variable_encodings_dict[row[0]] = row[1]
elif row[2] == "commodity":
commodity_encodings_dict[row[0]] = row[1]
csv_file.close()
# Check args for commodity now, otherwise prompt user
if argc == 2:
commodity_input = sys.argv[1]
else:
# Ask user for commodity
commodity_input = input("Commodity: ").strip()
# Check if input exists as code key, otherwise try to convert assumed label to code key (if not a label, code will be None after)
if commodity_input.upper() in commodity_encodings_dict:
commodity_code = commodity_input.upper()
else:
commodity_code = convert_dict_label_to_code_key(commodity_input, commodity_encodings_dict)
# Check if commodity found a code or None
print(f"ENCODING: {commodity_code}")
if commodity_code is None:
print(f"Error: Commodity '{commodity_input}' was not found.")
sys.exit("ERROR: Terminating program because input does not exist as an encoding commodity code or label.")
# Init total accumulators for each category
total_can_usa = 0
total_can_usa_mex = 0
total_neither = 0
# iterate through each variable and analyze data (if applicable)
for var in variable_encodings_dict.keys():
if is_common_variable(commodity_code, var):
output_table(commodity_code, var, variable_encodings_dict, commodity_encodings_dict)
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(total_can_usa, total_can_usa_mex, total_neither)
if total_can_usa == max_hits:
na_defn = "CAN+USA"
elif total_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither")
print(f"Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\n")
############################################ FUNCTIONS ############################################
# Converts the label of a dict into its code key, returns None if not a label
def convert_dict_label_to_code_key(label, encodings_dict):
# Get the key of the label if the label exists in the dict as a value
if label in list(encodings_dict.values()):
return list(encodings_dict.keys())[list(encodings_dict.values()).index(label)]
else:
return None
# Check if a commodity code + variable is common across all 4 tables, return true if it is
def is_common_variable(commodity_code, variable):
return (has_commodity_and_variable(na_table, commodity_code, variable) and
has_commodity_and_variable(canada_table, commodity_code, variable) and
has_commodity_and_variable(usa_table, commodity_code, variable) and
has_commodity_and_variable(mexico_table, commodity_code, variable))
# Check if a table has data for commodity code + variable (ie. scan table), returns true if at least 1 item is found
def has_commodity_and_variable(table, commodity_code, variable):
response = table.scan(
FilterExpression = Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)
return response['Count'] > 0
# Retrieves and outputs table data based on commodity and variable and analyze for NA definition
def output_table(commodity_code, variable, variable_encodings_dict, commodity_encodings_dict):
# Bring in globals to modify
global total_can_usa
global total_can_usa_mex
global total_neither
# Init local accumulators
temp_can_usa = 0
temp_can_usa_mex = 0
temp_neither = 0
# Print table headers: common variable (for commodity code) across all 4 tables, and table column names
print(f"Variable: {variable_encodings_dict[variable]}")
print(OUTPUT_FORMAT.format("Year", "North America", "Canada", "USA", "Mexico", "CAN+USA", "CAN+USA+MEX", "NA Defn"))
# Retrieve all data, from all years (ie. the items from the scan)
na_scan_data = na_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
can_scan_data = canada_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
usa_scan_data = usa_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
mex_scan_data = mexico_table.scan(
FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
)['Items']
# Sort each scan data by key
na_scan_data.sort(key=data_sort)
can_scan_data.sort(key=data_sort)
usa_scan_data.sort(key=data_sort)
mex_scan_data.sort(key=data_sort)
# Analyze data
for year in YEAR_RANGE:
# For each relevant year, calculate total value using multiplication factor
i = year - 2010
na_value = na_scan_data[i]['value'] * (10**na_scan_data[i]['mfactor'])
can_value = can_scan_data[i]['value'] * (10**can_scan_data[i]['mfactor'])
usa_value = usa_scan_data[i]['value'] * (10**usa_scan_data[i]['mfactor'])
mex_value = mex_scan_data[i]['value'] * (10**mex_scan_data[i]['mfactor'])
# Calc temp sums for the CAN+USA and CAN+USA+MEX columns
temp_can_usa_value = can_value + usa_value
temp_can_usa_mex_value = can_value + usa_value + mex_value
# Determine OECD def of NA, by checking if the temp calc sums from scan data calc values are equivalent to CAN+USA sum, CAN+USA+MEX sum, or Neither
# Note: accumulate the #of accurate NA def 'hits'
if temp_can_usa_value == na_value:
|
elif temp_can_usa_mex_value == na_value:
na_defn = 'CAN+USA+MEX'
temp_can_usa_mex += 1
else:
na_defn = 'Neither'
temp_neither += 1
# Print table row for current year
print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value, mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))
# Determine the NA definition for this variable based on #of 'hits' per year
max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)
if temp_can_usa == max_hits:
na_defn = "CAN+USA"
elif temp_can_usa_mex == max_hits:
na_defn = "CAN+USA+MEX"
else:
na_defn = "Neither"
print(f"North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither")
print(f"Therefore we can conclude North America = {na_defn}\n")
# Accumulate global totals using temp local accumulators for NA definition 'hits'
total_can_usa += temp_can_usa
total_can_usa_mex += temp_can_usa_mex
total_neither += temp_neither
# Sorter Helper for queried data by year
def data_sort(elem):
return elem['year']
###################################################################################################
main()
| na_defn = 'CAN+USA'
temp_can_usa += 1 | conditional_block |
main.go | //go:generate statik -src=./ui
package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"time"
log "github.com/Sirupsen/logrus"
"github.com/abbot/go-http-auth"
"github.com/briandowns/spinner"
"github.com/fatih/color"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/rakyll/statik/fs"
"github.com/urfave/cli"
"github.com/xuqingfeng/mailman/account"
"github.com/xuqingfeng/mailman/contacts"
"github.com/xuqingfeng/mailman/lang"
"github.com/xuqingfeng/mailman/mail"
"github.com/xuqingfeng/mailman/smtp"
_ "github.com/xuqingfeng/mailman/statik"
"github.com/xuqingfeng/mailman/util"
)
const (
SPINNER_CHAR_INDEX = 14
READ_LOG_FILE_GAP = 5 // second
MAILMAN_IS_AWESOME = "mailman is awesome !"
MIN_TCP_PORT = 0
MAX_TCP_PORT = 65535
//maxReservedTCPPort = 1024
// 15M
MAX_MEMORY = 1024 * 1024 * 15
ASSETS_PREFIX = "ui"
)
var (
name = "mailman"
version = "master"
msg util.Msg
enableBasicAuth = false
previewContent = ""
unauthorized = "401 Unauthorized"
loopback = "127.0.0.1"
upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
ErrDataIsNotJson = errors.New("data is not json format")
)
type Key struct {
Key string `json:"key"`
}
func main() {
cyan := color.New(color.FgCyan).SprintFunc()
colorName := cyan("NAME:")
colorUsage := cyan("USAGE:")
colorVersion := cyan("VERSION:")
colorAuthor := cyan("AUTHOR")
colorCommands := cyan("COMMANDS")
colorGlobalOptions := cyan("GLOBAL OPTIONS")
cli.AppHelpTemplate = colorName + `
{{.Name}} - {{.Usage}}
` + colorUsage + `
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
{{if .Version}}
` + colorVersion + `
{{.Version}}
{{end}}{{if len .Authors}}
` + colorAuthor + `
{{range .Authors}}{{ . }}{{end}}
{{end}}{{if .Commands}}
` + colorCommands + `
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
` + colorGlobalOptions + `
{{range .Flags}}{{.}}
{{end}}{{end}}{{if .Copyright }}
COPYRIGHT:
{{.Copyright}}
{{end}}
`
app := cli.NewApp()
app.Name = name
app.Usage = "Web email client supporting HTML template and SMTP"
app.Version = version
app.Author = "xuqingfeng"
app.Action = func(c *cli.Context) {
portInUse := -1
portStart := 8000
portEnd := 8100
for portStart <= portEnd {
if isTCPPortAvailable(portStart) {
portInUse = portStart
break
}
portStart++
}
if -1 == portInUse {
log.Fatal("can't find available port")
}
localIP := getLocalIP()
if runtime.GOOS == "darwin" {
_, err := exec.Command("open", "http://"+localIP+":"+strconv.Itoa(portInUse)).Output()
if err != nil {
log.Fatalf("darwin open fail: %s", err.Error())
}
} else {
log.Info("Open " + localIP + ":" + strconv.Itoa(portInUse) + " in browser")
}
s := spinner.New(spinner.CharSets[SPINNER_CHAR_INDEX], 100*time.Millisecond)
s.Color("cyan")
s.Start()
// util init
util.CreateConfigDir()
// router
router := mux.NewRouter()
apiSubRouter := router.PathPrefix("/api").Subrouter()
apiSubRouter.HandleFunc("/ping", pingHandler)
apiSubRouter.HandleFunc("/lang", langHandler)
apiSubRouter.HandleFunc("/mail", mailHandler)
apiSubRouter.HandleFunc("/file", fileHandler)
apiSubRouter.HandleFunc("/account", accountHandler)
apiSubRouter.HandleFunc("/contacts", contactsHandler)
apiSubRouter.HandleFunc("/smtpServer", smtpServerHandler)
apiSubRouter.HandleFunc("/preview", previewHandler)
apiSubRouter.HandleFunc("/wslog", wsLogHandler)
statikFS, err := fs.New()
if err != nil {
log.Fatal(err)
}
router.PathPrefix("/").Handler(http.FileServer(statikFS))
http.ListenAndServe(":"+strconv.Itoa(portInUse), router)
}
app.Commands = []cli.Command{
{
Name: "clean",
Usage: "clean up tmp directory",
Description: "mailman clean",
Action: func(c *cli.Context) {
homeDir := util.GetHomeDir()
tmpPath := filepath.Join(homeDir, util.ConfigPath["tmpPath"])
err := os.RemoveAll(tmpPath)
if err != nil {
log.Error(err)
}
util.CreateConfigDir()
},
},
}
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "basic-auth",
Usage: "enable basic auth (~/.mailman/.htpasswd)",
Destination: &enableBasicAuth,
},
}
app.Run(os.Args)
}
func pingHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "pong")
}
func langHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
lg, _ := lang.GetLang()
switch lg {
case "en", "zh":
sendSuccess(w, lg, "I! get lang success")
default:
sendSuccess(w, "en", "I! get lang success")
}
} else if "POST" == r.Method {
var lg lang.Lang
err := json.NewDecoder(r.Body).Decode(&lg)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = lang.SaveLang(lg); err != nil {
sendError(w, "E! save lang fail: "+err.Error())
} else {
l, err := lang.GetLang()
if err != nil {
sendError(w, "E! get lang fail: "+err.Error())
} else {
sendSuccess(w, l, "I! save lang success")
}
}
}
}
func mailHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
} else if "POST" == r.Method {
var m mail.Mail
err := json.NewDecoder(r.Body).Decode(&m)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = mail.SendMail(m); err != nil {
sendError(w, "E! send mail fail: "+err.Error())
} else {
// empty struct
sendSuccess(w, struct{}{}, "I! send mail success")
}
}
}
func fileHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
} else if "POST" == r.Method {
if err := r.ParseMultipartForm(MAX_MEMORY); err != nil {
sendError(w, "E! parse posted file fail: "+err.Error())
}
token := ""
for k, vs := range r.MultipartForm.Value {
for _, v := range vs {
if "token" == k {
token += v
}
}
}
for _, fileHeaders := range r.MultipartForm.File {
for _, fileHeader := range fileHeaders {
f, _ := fileHeader.Open()
fileContent, _ := ioutil.ReadAll(f)
err := mail.SaveAttachment(fileContent, token, fileHeader.Filename)
if err != nil {
sendError(w, "E! save attachment fail")
// todo multi
break
}
}
}
}
}
func accountHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
// empty []string
sendSuccess(w, emails, "I! get account email success")
}
} else if "POST" == r.Method {
var at account.Account
err := json.NewDecoder(r.Body).Decode(&at)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = account.SaveAccount(at); err != nil {
sendError(w, "E! save account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! save account success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = account.DeleteAccount(k.Key); err != nil {
sendError(w, "E! delete account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! delete account success")
}
}
}
}
func contactsHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! get contacts success")
}
} else if "POST" == r.Method {
var ct contacts.Contacts
err := json.NewDecoder(r.Body).Decode(&ct)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = contacts.SaveContacts(ct); err != nil {
sendError(w, "E! save contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! save contacts success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = contacts.DeleteContacts(k.Key); err != nil {
sendError(w, "E! delete contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! delete contacts success")
}
}
}
}
func smtpServerHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! get custom SMTP Server success")
}
} else if "POST" == r.Method {
var smtpServer smtp.SMTPServer
err := json.NewDecoder(r.Body).Decode(&smtpServer)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = smtp.SaveSMTPServer(smtpServer); err != nil {
sendError(w, "E! "+err.Error())
} else {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! save SMTP Server success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = smtp.DeleteSMTPServer(k.Key); err != nil {
sendError(w, "E! delete SMTPServer fail: "+err.Error())
} else {
server, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, server, "I! delete SMTP server success")
}
}
}
}
func previewHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
w.Header().Set("Content-Type", "text/html") | } else if "POST" == r.Method {
type Body struct {
Body string `json:"body"`
}
var body Body
err := json.NewDecoder(r.Body).Decode(&body)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else {
previewContent = mail.ParseMailContent(body.Body)
sendSuccess(w, struct{}{}, previewContent)
}
}
}
func wsLogHandler(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Error(err.Error())
}
homeDir := util.GetHomeDir()
logFilePath := filepath.Join(homeDir, util.ConfigPath["logPath"], util.LogName)
logFile, err := os.Open(logFilePath)
if err != nil {
log.Error(err.Error())
}
reader := bufio.NewReader(logFile)
for {
line, err := reader.ReadString('\n')
if err != nil && err != io.EOF {
log.Fatal(err.Error())
} else if err == io.EOF {
// wait
time.Sleep(READ_LOG_FILE_GAP * time.Second)
} else {
if err = conn.WriteMessage(1, []byte(line)); err != nil {
log.Error(err.Error())
}
}
}
}
func basicAuth(w http.ResponseWriter, r *http.Request) bool {
if enableBasicAuth {
ba := auth.NewBasicAuthenticator(fmt.Sprintf("%s / %s", name, version), auth.HtpasswdFileProvider(filepath.Join(util.GetHomeDir(), util.ConfigPath["htpasswdPath"])))
if ba.CheckAuth(r) == "" {
w.Header().Set("WWW-Authenticate", `Basic realm="realm"`)
return false
}
}
return true
}
func sendSuccess(w http.ResponseWriter, data interface{}, message string) {
msg = util.Msg{
Success: true,
Data: data,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
func sendError(w http.ResponseWriter, message string) {
msg = util.Msg{
Success: false,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
func isTCPPortAvailable(port int) bool {
if port < MIN_TCP_PORT || port > MAX_TCP_PORT {
return false
}
conn, err := net.Listen("tcp", ":"+strconv.Itoa(port))
if err != nil {
return false
}
conn.Close()
return true
}
func getLocalIP() (ip string) {
ip = loopback
ifaces, err := net.Interfaces()
if err != nil {
return
}
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
}
return
} | w.Write([]byte(previewContent)) | random_line_split |
main.go | //go:generate statik -src=./ui
package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"time"
log "github.com/Sirupsen/logrus"
"github.com/abbot/go-http-auth"
"github.com/briandowns/spinner"
"github.com/fatih/color"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/rakyll/statik/fs"
"github.com/urfave/cli"
"github.com/xuqingfeng/mailman/account"
"github.com/xuqingfeng/mailman/contacts"
"github.com/xuqingfeng/mailman/lang"
"github.com/xuqingfeng/mailman/mail"
"github.com/xuqingfeng/mailman/smtp"
_ "github.com/xuqingfeng/mailman/statik"
"github.com/xuqingfeng/mailman/util"
)
const (
SPINNER_CHAR_INDEX = 14
READ_LOG_FILE_GAP = 5 // second
MAILMAN_IS_AWESOME = "mailman is awesome !"
MIN_TCP_PORT = 0
MAX_TCP_PORT = 65535
//maxReservedTCPPort = 1024
// 15M
MAX_MEMORY = 1024 * 1024 * 15
ASSETS_PREFIX = "ui"
)
var (
name = "mailman"
version = "master"
msg util.Msg
enableBasicAuth = false
previewContent = ""
unauthorized = "401 Unauthorized"
loopback = "127.0.0.1"
upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
ErrDataIsNotJson = errors.New("data is not json format")
)
type Key struct {
Key string `json:"key"`
}
func main() {
cyan := color.New(color.FgCyan).SprintFunc()
colorName := cyan("NAME:")
colorUsage := cyan("USAGE:")
colorVersion := cyan("VERSION:")
colorAuthor := cyan("AUTHOR")
colorCommands := cyan("COMMANDS")
colorGlobalOptions := cyan("GLOBAL OPTIONS")
cli.AppHelpTemplate = colorName + `
{{.Name}} - {{.Usage}}
` + colorUsage + `
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
{{if .Version}}
` + colorVersion + `
{{.Version}}
{{end}}{{if len .Authors}}
` + colorAuthor + `
{{range .Authors}}{{ . }}{{end}}
{{end}}{{if .Commands}}
` + colorCommands + `
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
` + colorGlobalOptions + `
{{range .Flags}}{{.}}
{{end}}{{end}}{{if .Copyright }}
COPYRIGHT:
{{.Copyright}}
{{end}}
`
app := cli.NewApp()
app.Name = name
app.Usage = "Web email client supporting HTML template and SMTP"
app.Version = version
app.Author = "xuqingfeng"
app.Action = func(c *cli.Context) {
portInUse := -1
portStart := 8000
portEnd := 8100
for portStart <= portEnd {
if isTCPPortAvailable(portStart) {
portInUse = portStart
break
}
portStart++
}
if -1 == portInUse {
log.Fatal("can't find available port")
}
localIP := getLocalIP()
if runtime.GOOS == "darwin" {
_, err := exec.Command("open", "http://"+localIP+":"+strconv.Itoa(portInUse)).Output()
if err != nil {
log.Fatalf("darwin open fail: %s", err.Error())
}
} else {
log.Info("Open " + localIP + ":" + strconv.Itoa(portInUse) + " in browser")
}
s := spinner.New(spinner.CharSets[SPINNER_CHAR_INDEX], 100*time.Millisecond)
s.Color("cyan")
s.Start()
// util init
util.CreateConfigDir()
// router
router := mux.NewRouter()
apiSubRouter := router.PathPrefix("/api").Subrouter()
apiSubRouter.HandleFunc("/ping", pingHandler)
apiSubRouter.HandleFunc("/lang", langHandler)
apiSubRouter.HandleFunc("/mail", mailHandler)
apiSubRouter.HandleFunc("/file", fileHandler)
apiSubRouter.HandleFunc("/account", accountHandler)
apiSubRouter.HandleFunc("/contacts", contactsHandler)
apiSubRouter.HandleFunc("/smtpServer", smtpServerHandler)
apiSubRouter.HandleFunc("/preview", previewHandler)
apiSubRouter.HandleFunc("/wslog", wsLogHandler)
statikFS, err := fs.New()
if err != nil {
log.Fatal(err)
}
router.PathPrefix("/").Handler(http.FileServer(statikFS))
http.ListenAndServe(":"+strconv.Itoa(portInUse), router)
}
app.Commands = []cli.Command{
{
Name: "clean",
Usage: "clean up tmp directory",
Description: "mailman clean",
Action: func(c *cli.Context) {
homeDir := util.GetHomeDir()
tmpPath := filepath.Join(homeDir, util.ConfigPath["tmpPath"])
err := os.RemoveAll(tmpPath)
if err != nil {
log.Error(err)
}
util.CreateConfigDir()
},
},
}
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "basic-auth",
Usage: "enable basic auth (~/.mailman/.htpasswd)",
Destination: &enableBasicAuth,
},
}
app.Run(os.Args)
}
func pingHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "pong")
}
func | (w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
lg, _ := lang.GetLang()
switch lg {
case "en", "zh":
sendSuccess(w, lg, "I! get lang success")
default:
sendSuccess(w, "en", "I! get lang success")
}
} else if "POST" == r.Method {
var lg lang.Lang
err := json.NewDecoder(r.Body).Decode(&lg)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = lang.SaveLang(lg); err != nil {
sendError(w, "E! save lang fail: "+err.Error())
} else {
l, err := lang.GetLang()
if err != nil {
sendError(w, "E! get lang fail: "+err.Error())
} else {
sendSuccess(w, l, "I! save lang success")
}
}
}
}
func mailHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
} else if "POST" == r.Method {
var m mail.Mail
err := json.NewDecoder(r.Body).Decode(&m)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = mail.SendMail(m); err != nil {
sendError(w, "E! send mail fail: "+err.Error())
} else {
// empty struct
sendSuccess(w, struct{}{}, "I! send mail success")
}
}
}
func fileHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
} else if "POST" == r.Method {
if err := r.ParseMultipartForm(MAX_MEMORY); err != nil {
sendError(w, "E! parse posted file fail: "+err.Error())
}
token := ""
for k, vs := range r.MultipartForm.Value {
for _, v := range vs {
if "token" == k {
token += v
}
}
}
for _, fileHeaders := range r.MultipartForm.File {
for _, fileHeader := range fileHeaders {
f, _ := fileHeader.Open()
fileContent, _ := ioutil.ReadAll(f)
err := mail.SaveAttachment(fileContent, token, fileHeader.Filename)
if err != nil {
sendError(w, "E! save attachment fail")
// todo multi
break
}
}
}
}
}
func accountHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
// empty []string
sendSuccess(w, emails, "I! get account email success")
}
} else if "POST" == r.Method {
var at account.Account
err := json.NewDecoder(r.Body).Decode(&at)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = account.SaveAccount(at); err != nil {
sendError(w, "E! save account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! save account success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = account.DeleteAccount(k.Key); err != nil {
sendError(w, "E! delete account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! delete account success")
}
}
}
}
func contactsHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! get contacts success")
}
} else if "POST" == r.Method {
var ct contacts.Contacts
err := json.NewDecoder(r.Body).Decode(&ct)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = contacts.SaveContacts(ct); err != nil {
sendError(w, "E! save contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! save contacts success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = contacts.DeleteContacts(k.Key); err != nil {
sendError(w, "E! delete contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! delete contacts success")
}
}
}
}
func smtpServerHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! get custom SMTP Server success")
}
} else if "POST" == r.Method {
var smtpServer smtp.SMTPServer
err := json.NewDecoder(r.Body).Decode(&smtpServer)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = smtp.SaveSMTPServer(smtpServer); err != nil {
sendError(w, "E! "+err.Error())
} else {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! save SMTP Server success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = smtp.DeleteSMTPServer(k.Key); err != nil {
sendError(w, "E! delete SMTPServer fail: "+err.Error())
} else {
server, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, server, "I! delete SMTP server success")
}
}
}
}
func previewHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
w.Header().Set("Content-Type", "text/html")
w.Write([]byte(previewContent))
} else if "POST" == r.Method {
type Body struct {
Body string `json:"body"`
}
var body Body
err := json.NewDecoder(r.Body).Decode(&body)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else {
previewContent = mail.ParseMailContent(body.Body)
sendSuccess(w, struct{}{}, previewContent)
}
}
}
func wsLogHandler(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Error(err.Error())
}
homeDir := util.GetHomeDir()
logFilePath := filepath.Join(homeDir, util.ConfigPath["logPath"], util.LogName)
logFile, err := os.Open(logFilePath)
if err != nil {
log.Error(err.Error())
}
reader := bufio.NewReader(logFile)
for {
line, err := reader.ReadString('\n')
if err != nil && err != io.EOF {
log.Fatal(err.Error())
} else if err == io.EOF {
// wait
time.Sleep(READ_LOG_FILE_GAP * time.Second)
} else {
if err = conn.WriteMessage(1, []byte(line)); err != nil {
log.Error(err.Error())
}
}
}
}
func basicAuth(w http.ResponseWriter, r *http.Request) bool {
if enableBasicAuth {
ba := auth.NewBasicAuthenticator(fmt.Sprintf("%s / %s", name, version), auth.HtpasswdFileProvider(filepath.Join(util.GetHomeDir(), util.ConfigPath["htpasswdPath"])))
if ba.CheckAuth(r) == "" {
w.Header().Set("WWW-Authenticate", `Basic realm="realm"`)
return false
}
}
return true
}
func sendSuccess(w http.ResponseWriter, data interface{}, message string) {
msg = util.Msg{
Success: true,
Data: data,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
func sendError(w http.ResponseWriter, message string) {
msg = util.Msg{
Success: false,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
func isTCPPortAvailable(port int) bool {
if port < MIN_TCP_PORT || port > MAX_TCP_PORT {
return false
}
conn, err := net.Listen("tcp", ":"+strconv.Itoa(port))
if err != nil {
return false
}
conn.Close()
return true
}
func getLocalIP() (ip string) {
ip = loopback
ifaces, err := net.Interfaces()
if err != nil {
return
}
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
}
return
}
| langHandler | identifier_name |
main.go | //go:generate statik -src=./ui
package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"time"
log "github.com/Sirupsen/logrus"
"github.com/abbot/go-http-auth"
"github.com/briandowns/spinner"
"github.com/fatih/color"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/rakyll/statik/fs"
"github.com/urfave/cli"
"github.com/xuqingfeng/mailman/account"
"github.com/xuqingfeng/mailman/contacts"
"github.com/xuqingfeng/mailman/lang"
"github.com/xuqingfeng/mailman/mail"
"github.com/xuqingfeng/mailman/smtp"
_ "github.com/xuqingfeng/mailman/statik"
"github.com/xuqingfeng/mailman/util"
)
const (
SPINNER_CHAR_INDEX = 14
READ_LOG_FILE_GAP = 5 // second
MAILMAN_IS_AWESOME = "mailman is awesome !"
MIN_TCP_PORT = 0
MAX_TCP_PORT = 65535
//maxReservedTCPPort = 1024
// 15M
MAX_MEMORY = 1024 * 1024 * 15
ASSETS_PREFIX = "ui"
)
var (
name = "mailman"
version = "master"
msg util.Msg
enableBasicAuth = false
previewContent = ""
unauthorized = "401 Unauthorized"
loopback = "127.0.0.1"
upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
ErrDataIsNotJson = errors.New("data is not json format")
)
type Key struct {
Key string `json:"key"`
}
func main() {
cyan := color.New(color.FgCyan).SprintFunc()
colorName := cyan("NAME:")
colorUsage := cyan("USAGE:")
colorVersion := cyan("VERSION:")
colorAuthor := cyan("AUTHOR")
colorCommands := cyan("COMMANDS")
colorGlobalOptions := cyan("GLOBAL OPTIONS")
cli.AppHelpTemplate = colorName + `
{{.Name}} - {{.Usage}}
` + colorUsage + `
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
{{if .Version}}
` + colorVersion + `
{{.Version}}
{{end}}{{if len .Authors}}
` + colorAuthor + `
{{range .Authors}}{{ . }}{{end}}
{{end}}{{if .Commands}}
` + colorCommands + `
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
` + colorGlobalOptions + `
{{range .Flags}}{{.}}
{{end}}{{end}}{{if .Copyright }}
COPYRIGHT:
{{.Copyright}}
{{end}}
`
app := cli.NewApp()
app.Name = name
app.Usage = "Web email client supporting HTML template and SMTP"
app.Version = version
app.Author = "xuqingfeng"
app.Action = func(c *cli.Context) {
portInUse := -1
portStart := 8000
portEnd := 8100
for portStart <= portEnd {
if isTCPPortAvailable(portStart) {
portInUse = portStart
break
}
portStart++
}
if -1 == portInUse {
log.Fatal("can't find available port")
}
localIP := getLocalIP()
if runtime.GOOS == "darwin" {
_, err := exec.Command("open", "http://"+localIP+":"+strconv.Itoa(portInUse)).Output()
if err != nil {
log.Fatalf("darwin open fail: %s", err.Error())
}
} else {
log.Info("Open " + localIP + ":" + strconv.Itoa(portInUse) + " in browser")
}
s := spinner.New(spinner.CharSets[SPINNER_CHAR_INDEX], 100*time.Millisecond)
s.Color("cyan")
s.Start()
// util init
util.CreateConfigDir()
// router
router := mux.NewRouter()
apiSubRouter := router.PathPrefix("/api").Subrouter()
apiSubRouter.HandleFunc("/ping", pingHandler)
apiSubRouter.HandleFunc("/lang", langHandler)
apiSubRouter.HandleFunc("/mail", mailHandler)
apiSubRouter.HandleFunc("/file", fileHandler)
apiSubRouter.HandleFunc("/account", accountHandler)
apiSubRouter.HandleFunc("/contacts", contactsHandler)
apiSubRouter.HandleFunc("/smtpServer", smtpServerHandler)
apiSubRouter.HandleFunc("/preview", previewHandler)
apiSubRouter.HandleFunc("/wslog", wsLogHandler)
statikFS, err := fs.New()
if err != nil {
log.Fatal(err)
}
router.PathPrefix("/").Handler(http.FileServer(statikFS))
http.ListenAndServe(":"+strconv.Itoa(portInUse), router)
}
app.Commands = []cli.Command{
{
Name: "clean",
Usage: "clean up tmp directory",
Description: "mailman clean",
Action: func(c *cli.Context) {
homeDir := util.GetHomeDir()
tmpPath := filepath.Join(homeDir, util.ConfigPath["tmpPath"])
err := os.RemoveAll(tmpPath)
if err != nil {
log.Error(err)
}
util.CreateConfigDir()
},
},
}
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "basic-auth",
Usage: "enable basic auth (~/.mailman/.htpasswd)",
Destination: &enableBasicAuth,
},
}
app.Run(os.Args)
}
func pingHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "pong")
}
func langHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
lg, _ := lang.GetLang()
switch lg {
case "en", "zh":
sendSuccess(w, lg, "I! get lang success")
default:
sendSuccess(w, "en", "I! get lang success")
}
} else if "POST" == r.Method {
var lg lang.Lang
err := json.NewDecoder(r.Body).Decode(&lg)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = lang.SaveLang(lg); err != nil {
sendError(w, "E! save lang fail: "+err.Error())
} else {
l, err := lang.GetLang()
if err != nil {
sendError(w, "E! get lang fail: "+err.Error())
} else {
sendSuccess(w, l, "I! save lang success")
}
}
}
}
func mailHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
} else if "POST" == r.Method {
var m mail.Mail
err := json.NewDecoder(r.Body).Decode(&m)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = mail.SendMail(m); err != nil {
sendError(w, "E! send mail fail: "+err.Error())
} else {
// empty struct
sendSuccess(w, struct{}{}, "I! send mail success")
}
}
}
func fileHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
} else if "POST" == r.Method {
if err := r.ParseMultipartForm(MAX_MEMORY); err != nil {
sendError(w, "E! parse posted file fail: "+err.Error())
}
token := ""
for k, vs := range r.MultipartForm.Value {
for _, v := range vs {
if "token" == k {
token += v
}
}
}
for _, fileHeaders := range r.MultipartForm.File {
for _, fileHeader := range fileHeaders {
f, _ := fileHeader.Open()
fileContent, _ := ioutil.ReadAll(f)
err := mail.SaveAttachment(fileContent, token, fileHeader.Filename)
if err != nil {
sendError(w, "E! save attachment fail")
// todo multi
break
}
}
}
}
}
func accountHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
// empty []string
sendSuccess(w, emails, "I! get account email success")
}
} else if "POST" == r.Method {
var at account.Account
err := json.NewDecoder(r.Body).Decode(&at)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = account.SaveAccount(at); err != nil {
sendError(w, "E! save account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! save account success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = account.DeleteAccount(k.Key); err != nil {
sendError(w, "E! delete account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! delete account success")
}
}
}
}
func contactsHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! get contacts success")
}
} else if "POST" == r.Method {
var ct contacts.Contacts
err := json.NewDecoder(r.Body).Decode(&ct)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = contacts.SaveContacts(ct); err != nil {
sendError(w, "E! save contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! save contacts success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = contacts.DeleteContacts(k.Key); err != nil {
sendError(w, "E! delete contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! delete contacts success")
}
}
}
}
func smtpServerHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! get custom SMTP Server success")
}
} else if "POST" == r.Method {
var smtpServer smtp.SMTPServer
err := json.NewDecoder(r.Body).Decode(&smtpServer)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = smtp.SaveSMTPServer(smtpServer); err != nil {
sendError(w, "E! "+err.Error())
} else {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! save SMTP Server success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = smtp.DeleteSMTPServer(k.Key); err != nil {
sendError(w, "E! delete SMTPServer fail: "+err.Error())
} else {
server, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, server, "I! delete SMTP server success")
}
}
}
}
func previewHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
w.Header().Set("Content-Type", "text/html")
w.Write([]byte(previewContent))
} else if "POST" == r.Method {
type Body struct {
Body string `json:"body"`
}
var body Body
err := json.NewDecoder(r.Body).Decode(&body)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else {
previewContent = mail.ParseMailContent(body.Body)
sendSuccess(w, struct{}{}, previewContent)
}
}
}
func wsLogHandler(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Error(err.Error())
}
homeDir := util.GetHomeDir()
logFilePath := filepath.Join(homeDir, util.ConfigPath["logPath"], util.LogName)
logFile, err := os.Open(logFilePath)
if err != nil {
log.Error(err.Error())
}
reader := bufio.NewReader(logFile)
for {
line, err := reader.ReadString('\n')
if err != nil && err != io.EOF {
log.Fatal(err.Error())
} else if err == io.EOF {
// wait
time.Sleep(READ_LOG_FILE_GAP * time.Second)
} else {
if err = conn.WriteMessage(1, []byte(line)); err != nil {
log.Error(err.Error())
}
}
}
}
func basicAuth(w http.ResponseWriter, r *http.Request) bool {
if enableBasicAuth {
ba := auth.NewBasicAuthenticator(fmt.Sprintf("%s / %s", name, version), auth.HtpasswdFileProvider(filepath.Join(util.GetHomeDir(), util.ConfigPath["htpasswdPath"])))
if ba.CheckAuth(r) == "" {
w.Header().Set("WWW-Authenticate", `Basic realm="realm"`)
return false
}
}
return true
}
func sendSuccess(w http.ResponseWriter, data interface{}, message string) {
msg = util.Msg{
Success: true,
Data: data,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
func sendError(w http.ResponseWriter, message string) {
msg = util.Msg{
Success: false,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
func isTCPPortAvailable(port int) bool |
func getLocalIP() (ip string) {
ip = loopback
ifaces, err := net.Interfaces()
if err != nil {
return
}
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
}
return
}
| {
if port < MIN_TCP_PORT || port > MAX_TCP_PORT {
return false
}
conn, err := net.Listen("tcp", ":"+strconv.Itoa(port))
if err != nil {
return false
}
conn.Close()
return true
} | identifier_body |
main.go | //go:generate statik -src=./ui
package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"time"
log "github.com/Sirupsen/logrus"
"github.com/abbot/go-http-auth"
"github.com/briandowns/spinner"
"github.com/fatih/color"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/rakyll/statik/fs"
"github.com/urfave/cli"
"github.com/xuqingfeng/mailman/account"
"github.com/xuqingfeng/mailman/contacts"
"github.com/xuqingfeng/mailman/lang"
"github.com/xuqingfeng/mailman/mail"
"github.com/xuqingfeng/mailman/smtp"
_ "github.com/xuqingfeng/mailman/statik"
"github.com/xuqingfeng/mailman/util"
)
const (
SPINNER_CHAR_INDEX = 14
READ_LOG_FILE_GAP = 5 // second
MAILMAN_IS_AWESOME = "mailman is awesome !"
MIN_TCP_PORT = 0
MAX_TCP_PORT = 65535
//maxReservedTCPPort = 1024
// 15M
MAX_MEMORY = 1024 * 1024 * 15
ASSETS_PREFIX = "ui"
)
var (
name = "mailman"
version = "master"
msg util.Msg
enableBasicAuth = false
previewContent = ""
unauthorized = "401 Unauthorized"
loopback = "127.0.0.1"
upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
ErrDataIsNotJson = errors.New("data is not json format")
)
type Key struct {
Key string `json:"key"`
}
func main() {
cyan := color.New(color.FgCyan).SprintFunc()
colorName := cyan("NAME:")
colorUsage := cyan("USAGE:")
colorVersion := cyan("VERSION:")
colorAuthor := cyan("AUTHOR")
colorCommands := cyan("COMMANDS")
colorGlobalOptions := cyan("GLOBAL OPTIONS")
cli.AppHelpTemplate = colorName + `
{{.Name}} - {{.Usage}}
` + colorUsage + `
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
{{if .Version}}
` + colorVersion + `
{{.Version}}
{{end}}{{if len .Authors}}
` + colorAuthor + `
{{range .Authors}}{{ . }}{{end}}
{{end}}{{if .Commands}}
` + colorCommands + `
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
` + colorGlobalOptions + `
{{range .Flags}}{{.}}
{{end}}{{end}}{{if .Copyright }}
COPYRIGHT:
{{.Copyright}}
{{end}}
`
app := cli.NewApp()
app.Name = name
app.Usage = "Web email client supporting HTML template and SMTP"
app.Version = version
app.Author = "xuqingfeng"
app.Action = func(c *cli.Context) {
portInUse := -1
portStart := 8000
portEnd := 8100
for portStart <= portEnd {
if isTCPPortAvailable(portStart) {
portInUse = portStart
break
}
portStart++
}
if -1 == portInUse {
log.Fatal("can't find available port")
}
localIP := getLocalIP()
if runtime.GOOS == "darwin" {
_, err := exec.Command("open", "http://"+localIP+":"+strconv.Itoa(portInUse)).Output()
if err != nil {
log.Fatalf("darwin open fail: %s", err.Error())
}
} else {
log.Info("Open " + localIP + ":" + strconv.Itoa(portInUse) + " in browser")
}
s := spinner.New(spinner.CharSets[SPINNER_CHAR_INDEX], 100*time.Millisecond)
s.Color("cyan")
s.Start()
// util init
util.CreateConfigDir()
// router
router := mux.NewRouter()
apiSubRouter := router.PathPrefix("/api").Subrouter()
apiSubRouter.HandleFunc("/ping", pingHandler)
apiSubRouter.HandleFunc("/lang", langHandler)
apiSubRouter.HandleFunc("/mail", mailHandler)
apiSubRouter.HandleFunc("/file", fileHandler)
apiSubRouter.HandleFunc("/account", accountHandler)
apiSubRouter.HandleFunc("/contacts", contactsHandler)
apiSubRouter.HandleFunc("/smtpServer", smtpServerHandler)
apiSubRouter.HandleFunc("/preview", previewHandler)
apiSubRouter.HandleFunc("/wslog", wsLogHandler)
statikFS, err := fs.New()
if err != nil {
log.Fatal(err)
}
router.PathPrefix("/").Handler(http.FileServer(statikFS))
http.ListenAndServe(":"+strconv.Itoa(portInUse), router)
}
app.Commands = []cli.Command{
{
Name: "clean",
Usage: "clean up tmp directory",
Description: "mailman clean",
Action: func(c *cli.Context) {
homeDir := util.GetHomeDir()
tmpPath := filepath.Join(homeDir, util.ConfigPath["tmpPath"])
err := os.RemoveAll(tmpPath)
if err != nil {
log.Error(err)
}
util.CreateConfigDir()
},
},
}
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "basic-auth",
Usage: "enable basic auth (~/.mailman/.htpasswd)",
Destination: &enableBasicAuth,
},
}
app.Run(os.Args)
}
func pingHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "pong")
}
func langHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
lg, _ := lang.GetLang()
switch lg {
case "en", "zh":
sendSuccess(w, lg, "I! get lang success")
default:
sendSuccess(w, "en", "I! get lang success")
}
} else if "POST" == r.Method {
var lg lang.Lang
err := json.NewDecoder(r.Body).Decode(&lg)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = lang.SaveLang(lg); err != nil {
sendError(w, "E! save lang fail: "+err.Error())
} else {
l, err := lang.GetLang()
if err != nil {
sendError(w, "E! get lang fail: "+err.Error())
} else {
sendSuccess(w, l, "I! save lang success")
}
}
}
}
func mailHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
} else if "POST" == r.Method |
}
// fileHandler stores mail attachments uploaded via a multipart POST.
// The form may carry a "token" value that groups attachments with a draft;
// every uploaded file is handed to mail.SaveAttachment under that token.
// GET just answers with the standard greeting payload.
func fileHandler(w http.ResponseWriter, r *http.Request) {
	if "GET" == r.Method {
		sendSuccess(w, struct{}{}, MAILMAN_IS_AWESOME)
	} else if "POST" == r.Method {
		if err := r.ParseMultipartForm(MAX_MEMORY); err != nil {
			sendError(w, "E! parse posted file fail: "+err.Error())
			// Bug fix: without this return, r.MultipartForm is nil and the
			// range loops below would panic.
			return
		}
		token := ""
		for k, vs := range r.MultipartForm.Value {
			for _, v := range vs {
				if "token" == k {
					token += v
				}
			}
		}
		for _, fileHeaders := range r.MultipartForm.File {
			for _, fileHeader := range fileHeaders {
				f, err := fileHeader.Open()
				if err != nil {
					sendError(w, "E! save attachment fail")
					break
				}
				fileContent, err := ioutil.ReadAll(f)
				f.Close() // fix: the file handle was previously leaked
				if err != nil {
					sendError(w, "E! save attachment fail")
					break
				}
				if err := mail.SaveAttachment(fileContent, token, fileHeader.Filename); err != nil {
					sendError(w, "E! save attachment fail")
					// todo multi
					break
				}
			}
		}
	}
}
// accountHandler is the CRUD endpoint for sender accounts.
// GET lists the stored account e-mail addresses; POST decodes an
// account.Account JSON body and saves it; DELETE removes the account named
// by a {"key": ...} body. Every successful mutation replies with the
// refreshed address list so the UI can re-render without a second request.
func accountHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
// empty []string
sendSuccess(w, emails, "I! get account email success")
}
} else if "POST" == r.Method {
var at account.Account
err := json.NewDecoder(r.Body).Decode(&at)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = account.SaveAccount(at); err != nil {
sendError(w, "E! save account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! save account success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = account.DeleteAccount(k.Key); err != nil {
sendError(w, "E! delete account fail: "+err.Error())
} else {
emails, err := account.GetAccountEmail()
if err != nil {
sendError(w, "E! get account email fail: "+err.Error())
} else {
sendSuccess(w, emails, "I! delete account success")
}
}
}
}
// contactsHandler is the CRUD endpoint for the contact list, mirroring
// accountHandler: GET lists contacts, POST saves a contacts.Contacts JSON
// body, DELETE removes the entry named by {"key": ...}; every successful
// mutation replies with the refreshed list.
func contactsHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! get contacts success")
}
} else if "POST" == r.Method {
var ct contacts.Contacts
err := json.NewDecoder(r.Body).Decode(&ct)
if err != nil {
sendError(w, ErrDataIsNotJson.Error())
} else if err = contacts.SaveContacts(ct); err != nil {
sendError(w, "E! save contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! save contacts success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = contacts.DeleteContacts(k.Key); err != nil {
sendError(w, "E! delete contacts fail: "+err.Error())
} else {
c, err := contacts.GetContacts()
if err != nil {
sendError(w, "E! get contacts fail: "+err.Error())
} else {
sendSuccess(w, c, "I! delete contacts success")
}
}
}
}
// smtpServerHandler is the CRUD endpoint for user-defined SMTP servers.
// GET lists the custom servers, POST saves an smtp.SMTPServer JSON body,
// DELETE removes the server named by {"key": ...}; mutations reply with the
// refreshed server list.
func smtpServerHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! get custom SMTP Server success")
}
} else if "POST" == r.Method {
var smtpServer smtp.SMTPServer
err := json.NewDecoder(r.Body).Decode(&smtpServer)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = smtp.SaveSMTPServer(smtpServer); err != nil {
sendError(w, "E! "+err.Error())
} else {
customSMTPServer, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! "+err.Error())
} else {
sendSuccess(w, customSMTPServer, "I! save SMTP Server success")
}
}
} else if "DELETE" == r.Method {
var k Key
err := json.NewDecoder(r.Body).Decode(&k)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error()+" "+err.Error())
} else if err = smtp.DeleteSMTPServer(k.Key); err != nil {
sendError(w, "E! delete SMTPServer fail: "+err.Error())
} else {
server, err := smtp.GetCustomSMTPServer()
if err != nil {
sendError(w, "E! get custom SMTP server fail: "+err.Error())
} else {
sendSuccess(w, server, "I! delete SMTP server success")
}
}
}
}
// previewHandler renders a mail-body preview.
// POST parses the submitted body text into HTML and caches it in the
// package-level previewContent; GET serves that cached HTML so the browser
// can display the preview in a separate tab/iframe.
func previewHandler(w http.ResponseWriter, r *http.Request) {
if "GET" == r.Method {
w.Header().Set("Content-Type", "text/html")
w.Write([]byte(previewContent))
} else if "POST" == r.Method {
// Local type: the request body is just {"body": "..."}.
type Body struct {
Body string `json:"body"`
}
var body Body
err := json.NewDecoder(r.Body).Decode(&body)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else {
previewContent = mail.ParseMailContent(body.Body)
sendSuccess(w, struct{}{}, previewContent)
}
}
}
// wsLogHandler streams the application log file to the client over a
// WebSocket, tail -f style: each existing and newly appended line is sent
// as a text message; on EOF it sleeps READ_LOG_FILE_GAP seconds and retries.
func wsLogHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Error(err.Error())
		// Bug fix: conn is nil after a failed upgrade; continuing would
		// panic on conn.WriteMessage below.
		return
	}
	homeDir := util.GetHomeDir()
	logFilePath := filepath.Join(homeDir, util.ConfigPath["logPath"], util.LogName)
	logFile, err := os.Open(logFilePath)
	if err != nil {
		log.Error(err.Error())
		// Bug fix: logFile is nil here; reading from it would panic.
		return
	}
	defer logFile.Close() // fix: the file was never closed
	reader := bufio.NewReader(logFile)
	for {
		line, err := reader.ReadString('\n')
		if err != nil && err != io.EOF {
			// Bug fix: log.Fatal killed the whole server because one
			// client's log read failed; report and end this stream instead.
			log.Error(err.Error())
			return
		} else if err == io.EOF {
			// No new data yet: wait before polling again.
			time.Sleep(READ_LOG_FILE_GAP * time.Second)
		} else {
			// 1 is the WebSocket text-message opcode.
			if err = conn.WriteMessage(1, []byte(line)); err != nil {
				log.Error(err.Error())
				// Client is gone: stop instead of spinning forever.
				return
			}
		}
	}
}
// basicAuth enforces optional HTTP Basic authentication.
// It returns true when auth is disabled or the request carries credentials
// that validate against ~/.mailman/.htpasswd; otherwise it sets the
// WWW-Authenticate challenge header and returns false.
// NOTE(review): no 401 status code is written here — presumably the caller
// is responsible for that; confirm at the call sites.
func basicAuth(w http.ResponseWriter, r *http.Request) bool {
if enableBasicAuth {
ba := auth.NewBasicAuthenticator(fmt.Sprintf("%s / %s", name, version), auth.HtpasswdFileProvider(filepath.Join(util.GetHomeDir(), util.ConfigPath["htpasswdPath"])))
if ba.CheckAuth(r) == "" {
w.Header().Set("WWW-Authenticate", `Basic realm="realm"`)
return false
}
}
return true
}
func sendSuccess(w http.ResponseWriter, data interface{}, message string) {
msg = util.Msg{
Success: true,
Data: data,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
func sendError(w http.ResponseWriter, message string) {
msg = util.Msg{
Success: false,
Message: message,
}
msgInByteSlice, _ := json.Marshal(msg)
w.Header().Set("Content-Type", "application/json")
w.Write(msgInByteSlice)
}
// isTCPPortAvailable reports whether port lies inside the allowed TCP range
// and can currently be bound on all interfaces.
func isTCPPortAvailable(port int) bool {
	if port >= MIN_TCP_PORT && port <= MAX_TCP_PORT {
		// Probe by actually binding; release the listener immediately.
		if listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)); err == nil {
			listener.Close()
			return true
		}
	}
	return false
}
// getLocalIP returns the first non-loopback IPv4 address found on any
// network interface, or the loopback address if none is found (or the
// interface list itself cannot be read).
func getLocalIP() (ip string) {
	ip = loopback
	ifaces, err := net.Interfaces()
	if err != nil {
		return
	}
	for _, i := range ifaces {
		addrs, err := i.Addrs()
		if err != nil {
			// Bug fix: a single faulty interface used to abort the whole
			// scan (early return with loopback); skip it and keep looking.
			continue
		}
		for _, addr := range addrs {
			if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
				if ipnet.IP.To4() != nil {
					return ipnet.IP.String()
				}
			}
		}
	}
	return
}
| {
var m mail.Mail
err := json.NewDecoder(r.Body).Decode(&m)
if err != nil {
sendError(w, "E! "+ErrDataIsNotJson.Error())
} else if err = mail.SendMail(m); err != nil {
sendError(w, "E! send mail fail: "+err.Error())
} else {
// empty struct
sendSuccess(w, struct{}{}, "I! send mail success")
}
} | conditional_block |
features.rs | use bio::io::{bed, gff};
use bio::utils::Strand;
use bio::utils::Strand::*;
use crate::lib::{Config, ConfigFeature};
use crate::lib::{Database, GeneNameEachReference, GeneNameTree, Region};
use rocks::rocksdb::*;
use std::collections::HashMap;
use std::error::Error;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::mem::*;
use std::path::Path;
use crate::vg::GraphDB;
use crate::vg::GraphDB::VG;
// NodeId to corresponding feature items.
type Features = HashMap<u64, Vec<Feature>>;
pub type FeatureDB = Vec<Features>;
// Move it to graph, needed.
type CoordToNodeId = HashMap<String, Vec<NodeId>>; // Vec<NodeId> required as sorted by coord.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct NodeId {
id: u64,
coord: u64,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Feature {
pub start_offset: u64,
pub stop_offset: u64,
pub id: u64,
pub name: String,
pub is_reverse: Option<bool>,
pub attributes: Vec<String>,
pub value: Option<f32>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct FeatureSet {
feature_set_id: u64,
dataset_id: Vec<u64>,
attributes: Option<String>,
}
fn opt_strand_to_opt_bool(strand: Option<Strand>) -> Option<bool> {
strand.and_then(|strand| match strand {
Forward => Some(false),
Reverse => Some(true),
Unknown => None,
})
}
fn record_to_nodes(
record: bed::Record,
coord_map: &CoordToNodeId,
bed_id: u64,
chr_prefix: &Option<String>,
) -> HashMap<u64, Feature> {
let mut hash_map: HashMap<u64, Feature> = HashMap::new();
let chr = match *chr_prefix {
Some(ref k) => record.chrom().replace(k, ""),
None => record.chrom().to_string(),
};
let ref vec = match coord_map.get(&chr) {
Some(k) => k,
None => return hash_map,
};
let lower_bound_index = match vec.binary_search_by_key(&record.start(), |b| b.coord) {
Ok(x) => x,
Err(x) => x,
};
hash_map.insert(
vec[lower_bound_index].id,
Feature {
start_offset: vec[lower_bound_index].coord - record.start(),
stop_offset: 0,
id: bed_id,
name: record.name().unwrap_or_default().to_string(),
is_reverse: opt_strand_to_opt_bool(record.strand()),
attributes: vec![],
value: None,
},
);
let mut index = lower_bound_index;
while vec.len() > index + 1 && vec[index + 1].coord < record.end() {
index += 1;
hash_map.insert(
vec[index].id,
Feature {
start_offset: 0,
stop_offset: 0,
id: bed_id,
name: record.name().unwrap_or_default().to_string(),
is_reverse: opt_strand_to_opt_bool(record.strand()),
attributes: vec![],
value: None,
},
);
}
return hash_map;
}
// tmp_new should be replaced with a novel implementation.
// Required input list is sorted by coordinates.
/// Builds the server's `Database`:
/// 1. (only when `rocksdb_init` is set or the DB is absent) rebuilds the
///    RocksDB index mapping node id -> region UUID from per-chromosome
///    node_index files;
/// 2. loads each configured BED file into the `FeatureDB`;
/// 3. loads gene coordinates from GFF3/GTF files into a per-reference tree.
pub fn tmp_new(graph: GraphDB, config: &Config, db_name: String, rocksdb_init: &bool) -> Database {
// Human chromosome names used to expand the node_index path template.
let chroms = vec![
"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16",
"17", "18", "19", "20", "21", "22", "X", "Y",
];
// NOTE(review): this map is never populated, so the BED branch below maps
// no records (record_to_nodes bails out when the chromosome is missing) —
// presumably coordinate lookup moved to RocksDB; confirm.
let hashmap = CoordToNodeId::new();
if *rocksdb_init || !Path::new(&db_name).exists() {
if let Ok(cf) = DB::open(
&Options::default().map_db_options(|db| db.create_if_missing(true)),
db_name.clone(),
) {
'iter: for chr in chroms.iter() {
if let Some(ref path) = config.data[0].source.node_index {
let ref prefix = config.data[0].chr_prefix;
let chr_name = prefix.clone() + chr;
// The template contains "{}" where the chromosome name goes.
let path_string = path.clone().replace("{}", &chr_name);
let path = Path::new(&path_string);
debug!("Chromosome: {:?}, {:?}", chr, path);
let file = match File::open(path) {
Ok(f) => f,
Err(e) => {
debug!("could not open {}; skipping.", e.description());
continue 'iter;
}
};
/*
let file_gz = match extract_file(path) {
Ok(f) => f,
Err(e) => {continue 'iter;}
};
*/
let br = BufReader::new(file);
// Each line is "<node_id>\t<coord>"; a node's region spans from its
// own coordinate to the next node's coordinate.
let mut last_node: Option<NodeId> = None;
for line in br.lines() {
match line {
Ok(l) => {
let items: Vec<u64> =
l.split("\t").map(|a| a.parse::<u64>().unwrap()).collect();
if items.len() > 1 {
if let Some(item) = last_node {
let reg = Region {
path: (*chr).to_string(),
start: item.coord,
stop: items[1],
};
// RocksDB key: the node id as 8 native-endian bytes.
let raw_bytes: [u8; 8] = unsafe { transmute(item.id) };
if let Err(err) = cf.put(
&WriteOptions::default(),
&raw_bytes,
reg.uuid().as_bytes(),
) {
debug!("{:?} at {}", err, item.id)
}
}
last_node = Some(NodeId {
id: items[0],
coord: items[1],
});
} else {
continue;
}
}
Err(e) => {
debug!("ignoring error {}", e);
continue;
}
};
}
// Flush the final node with an arbitrary 1000-base span (no
// "next node" exists to bound it).
if let Some(item) = last_node {
// coord.insert(item.id, Region{ path: (*chr).to_string(), start: item.coord, stop: item.coord + 1000 }); //Todo seems to wrong code.
let reg = Region {
path: (*chr).to_string(),
start: item.coord,
stop: item.coord + 1000,
};
let raw_bytes: [u8; 8] = unsafe { transmute(item.id) };
if let Err(err) =
cf.put(&WriteOptions::default(), &raw_bytes, reg.uuid().as_bytes())
{
debug!("{:?} at {}", err, item.id)
}
}
}
}
}
}
// Parse every configured feature file, dispatching on file extension.
let mut vec: FeatureDB = FeatureDB::new();
let mut gene_per_ref = GeneNameEachReference::new();
for data in config.reference.data.iter() {
let mut gene: GeneNameTree = GeneNameTree::new();
for feature in data.features.iter() {
// It limits only "config,reference Items."
let path = Path::new(&feature.url);
info!("Parsing: {:?}", path);
match path.extension().unwrap_or_default().to_str() {
Some("bed") => {
vec.push(tmp_new_internal(feature, &graph, &hashmap));
}
Some("gff3") => {
tmp_new_gene_internal(feature, &mut gene, gff::GffType::GFF3);
}
Some("gtf") => {
tmp_new_gene_internal(feature, &mut gene, gff::GffType::GTF2);
}
_ => println!("Unsupported format {:?}", path),
}
}
gene_per_ref.insert(data.name.clone(), gene);
}
// Only the VG backend exists today; assemble the final Database.
match graph {
VG(graph2) => {
let version = graph2.version(config);
println!("{}", version);
return Database {
features: vec,
//coordinates: coord,
rocks: db_name,
gene_name_tree: gene_per_ref,
graph: VG(graph2),
version: version,
};
}
};
}
// It includes only "gene" row.
fn tmp_new_gene_internal(feature: &ConfigFeature, gene: &mut GeneNameTree, gff_type: gff::GffType) {
let gff3 = &feature.url;
let path = Path::new(&gff3);
let mut reader = match gff::Reader::from_file(path, gff_type) {
Ok(f) => f,
Err(e) => {
debug!("could not open {}; skipping.", e.description());
//return result?;
return;
}
};
let mut index = 0; | Ok(rec) => match rec.feature_type() {
"gene" => {
let reg = match opt_strand_to_opt_bool(rec.strand()) {
Some(false) => Region {
path: rec.seqname().to_string(),
stop: *rec.start(),
start: *rec.end(),
},
_ => Region {
path: rec.seqname().to_string(),
start: *rec.start(),
stop: *rec.end(),
},
};
match rec.attributes().get("gene_name") {
Some(name) => gene.insert(name.clone().to_string(), reg),
None => continue,
};
}
_ => continue,
},
Err(_) => continue,
}
}
debug!("{} lines processed. end.", index);
}
/// Loads one BED file into a `Features` map (node id -> features on that
/// node). Returns an empty map if the file cannot be opened.
fn tmp_new_internal(
    feature: &ConfigFeature,
    _graph: &GraphDB,
    hashmap: &CoordToNodeId,
) -> Features {
    let path = Path::new(&feature.url);
    let mut features: Features = Features::new();
    let mut reader = match bed::Reader::from_file(path) {
        Ok(f) => f,
        Err(e) => {
            debug!("could not open {}; skipping.", e.description());
            return features;
        }
    };
    // `index` doubles as the per-file feature id handed to record_to_nodes.
    let mut index: u64 = 0;
    for record in reader.records() {
        // Bug fix: `.ok().expect(...)` used to panic the whole load on one
        // malformed record; skip it instead (matching the GFF loader's
        // skip-on-error behavior) while keeping the id sequence moving.
        let rec = match record {
            Ok(r) => r,
            Err(e) => {
                debug!("ignoring malformed BED record: {}", e);
                index += 1;
                continue;
            }
        };
        let nodes = record_to_nodes(rec, hashmap, index, &feature.chr_prefix);
        for (key, value) in nodes.into_iter() {
            features.entry(key).or_insert_with(Vec::new).push(value);
        }
        index += 1;
    }
    features
}
/*
fn extract_file(path_compressed: &Path) -> io::Result<Vec<u8>>{
let mut v = Vec::new();
let f = try!(File::open(path_compressed));
try!(try!(GzDecoder::new(f)).read_to_end(&mut v));
Ok(v)
}
fn decode_reader(string: &String) -> io::Result<String> {
let mut gz = GzDecoder::new(string.as_bytes())?;
let mut s = String::new();
gz.read_to_string(&mut s)?;
Ok(s)
}
*/ | for record in reader.records() {
index += 1;
match record { | random_line_split |
features.rs | use bio::io::{bed, gff};
use bio::utils::Strand;
use bio::utils::Strand::*;
use crate::lib::{Config, ConfigFeature};
use crate::lib::{Database, GeneNameEachReference, GeneNameTree, Region};
use rocks::rocksdb::*;
use std::collections::HashMap;
use std::error::Error;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::mem::*;
use std::path::Path;
use crate::vg::GraphDB;
use crate::vg::GraphDB::VG;
// NodeId to corresponding feature items.
type Features = HashMap<u64, Vec<Feature>>;
pub type FeatureDB = Vec<Features>;
// Move it to graph, needed.
type CoordToNodeId = HashMap<String, Vec<NodeId>>; // Vec<NodeId> required as sorted by coord.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct NodeId {
id: u64,
coord: u64,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Feature {
pub start_offset: u64,
pub stop_offset: u64,
pub id: u64,
pub name: String,
pub is_reverse: Option<bool>,
pub attributes: Vec<String>,
pub value: Option<f32>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct FeatureSet {
feature_set_id: u64,
dataset_id: Vec<u64>,
attributes: Option<String>,
}
/// Maps a bio `Strand` onto the reverse-flag convention used by `Feature`:
/// forward => `Some(false)`, reverse => `Some(true)`, unknown/absent => `None`.
fn opt_strand_to_opt_bool(strand: Option<Strand>) -> Option<bool> {
strand.and_then(|strand| match strand {
Forward => Some(false),
Reverse => Some(true),
Unknown => None,
})
}
fn | (
record: bed::Record,
coord_map: &CoordToNodeId,
bed_id: u64,
chr_prefix: &Option<String>,
) -> HashMap<u64, Feature> {
let mut hash_map: HashMap<u64, Feature> = HashMap::new();
let chr = match *chr_prefix {
Some(ref k) => record.chrom().replace(k, ""),
None => record.chrom().to_string(),
};
let ref vec = match coord_map.get(&chr) {
Some(k) => k,
None => return hash_map,
};
let lower_bound_index = match vec.binary_search_by_key(&record.start(), |b| b.coord) {
Ok(x) => x,
Err(x) => x,
};
hash_map.insert(
vec[lower_bound_index].id,
Feature {
start_offset: vec[lower_bound_index].coord - record.start(),
stop_offset: 0,
id: bed_id,
name: record.name().unwrap_or_default().to_string(),
is_reverse: opt_strand_to_opt_bool(record.strand()),
attributes: vec![],
value: None,
},
);
let mut index = lower_bound_index;
while vec.len() > index + 1 && vec[index + 1].coord < record.end() {
index += 1;
hash_map.insert(
vec[index].id,
Feature {
start_offset: 0,
stop_offset: 0,
id: bed_id,
name: record.name().unwrap_or_default().to_string(),
is_reverse: opt_strand_to_opt_bool(record.strand()),
attributes: vec![],
value: None,
},
);
}
return hash_map;
}
// tmpNew should be replecated with a novel implementation.
// Required input list is sorted by coordinates.
//pub fn tmp_new(graph: Arc<Graph>, config: &Config) -> Database {
pub fn tmp_new(graph: GraphDB, config: &Config, db_name: String, rocksdb_init: &bool) -> Database {
let chroms = vec![
"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16",
"17", "18", "19", "20", "21", "22", "X", "Y",
];
let hashmap = CoordToNodeId::new();
if *rocksdb_init || !Path::new(&db_name).exists() {
if let Ok(cf) = DB::open(
&Options::default().map_db_options(|db| db.create_if_missing(true)),
db_name.clone(),
) {
'iter: for chr in chroms.iter() {
if let Some(ref path) = config.data[0].source.node_index {
let ref prefix = config.data[0].chr_prefix;
let chr_name = prefix.clone() + chr;
let path_string = path.clone().replace("{}", &chr_name);
let path = Path::new(&path_string);
debug!("Chromosome: {:?}, {:?}", chr, path);
let file = match File::open(path) {
Ok(f) => f,
Err(e) => {
debug!("could not open {}; skipping.", e.description());
continue 'iter;
}
};
/*
let file_gz = match extract_file(path) {
Ok(f) => f,
Err(e) => {continue 'iter;}
};
*/
let br = BufReader::new(file);
let mut last_node: Option<NodeId> = None;
for line in br.lines() {
match line {
Ok(l) => {
let items: Vec<u64> =
l.split("\t").map(|a| a.parse::<u64>().unwrap()).collect();
if items.len() > 1 {
if let Some(item) = last_node {
let reg = Region {
path: (*chr).to_string(),
start: item.coord,
stop: items[1],
};
let raw_bytes: [u8; 8] = unsafe { transmute(item.id) };
if let Err(err) = cf.put(
&WriteOptions::default(),
&raw_bytes,
reg.uuid().as_bytes(),
) {
debug!("{:?} at {}", err, item.id)
}
}
last_node = Some(NodeId {
id: items[0],
coord: items[1],
});
} else {
continue;
}
}
Err(e) => {
debug!("ignoring error {}", e);
continue;
}
};
}
if let Some(item) = last_node {
// coord.insert(item.id, Region{ path: (*chr).to_string(), start: item.coord, stop: item.coord + 1000 }); //Todo seems to wrong code.
let reg = Region {
path: (*chr).to_string(),
start: item.coord,
stop: item.coord + 1000,
};
let raw_bytes: [u8; 8] = unsafe { transmute(item.id) };
if let Err(err) =
cf.put(&WriteOptions::default(), &raw_bytes, reg.uuid().as_bytes())
{
debug!("{:?} at {}", err, item.id)
}
}
}
}
}
}
let mut vec: FeatureDB = FeatureDB::new();
let mut gene_per_ref = GeneNameEachReference::new();
for data in config.reference.data.iter() {
let mut gene: GeneNameTree = GeneNameTree::new();
for feature in data.features.iter() {
// It limits only "config,reference Items."
let path = Path::new(&feature.url);
info!("Parsing: {:?}", path);
match path.extension().unwrap_or_default().to_str() {
Some("bed") => {
vec.push(tmp_new_internal(feature, &graph, &hashmap));
}
Some("gff3") => {
tmp_new_gene_internal(feature, &mut gene, gff::GffType::GFF3);
}
Some("gtf") => {
tmp_new_gene_internal(feature, &mut gene, gff::GffType::GTF2);
}
_ => println!("Unsupported format {:?}", path),
}
}
gene_per_ref.insert(data.name.clone(), gene);
}
match graph {
VG(graph2) => {
let version = graph2.version(config);
println!("{}", version);
return Database {
features: vec,
//coordinates: coord,
rocks: db_name,
gene_name_tree: gene_per_ref,
graph: VG(graph2),
version: version,
};
}
};
}
// It includes only "gene" row.
/// Loads gene records from a GFF3/GTF file into the gene-name tree.
/// Only rows with feature_type "gene" and a "gene_name" attribute are kept;
/// other rows and unreadable records are skipped silently.
fn tmp_new_gene_internal(feature: &ConfigFeature, gene: &mut GeneNameTree, gff_type: gff::GffType) {
let gff3 = &feature.url;
let path = Path::new(&gff3);
let mut reader = match gff::Reader::from_file(path, gff_type) {
Ok(f) => f,
Err(e) => {
debug!("could not open {}; skipping.", e.description());
//return result?;
return;
}
};
// Counts all records seen (including skipped ones) for the final log line.
let mut index = 0;
for record in reader.records() {
index += 1;
match record {
Ok(rec) => match rec.feature_type() {
"gene" => {
// NOTE(review): Some(false) is the FORWARD strand (see
// opt_strand_to_opt_bool), yet its start/stop are swapped,
// producing Region.start > Region.stop — looks inverted;
// confirm which orientation downstream consumers expect.
let reg = match opt_strand_to_opt_bool(rec.strand()) {
Some(false) => Region {
path: rec.seqname().to_string(),
stop: *rec.start(),
start: *rec.end(),
},
_ => Region {
path: rec.seqname().to_string(),
start: *rec.start(),
stop: *rec.end(),
},
};
match rec.attributes().get("gene_name") {
Some(name) => gene.insert(name.clone().to_string(), reg),
None => continue,
};
}
_ => continue,
},
Err(_) => continue,
}
}
debug!("{} lines processed. end.", index);
}
/// Loads one BED file into a `Features` map (node id -> features on that
/// node). Returns an empty map if the file cannot be opened.
/// NOTE(review): `.ok().expect(...)` panics on a single malformed record —
/// consider skipping bad lines like the GFF loader does.
fn tmp_new_internal(
feature: &ConfigFeature,
_graph: &GraphDB,
hashmap: &CoordToNodeId,
) -> Features {
let bed = &feature.url;
let path = Path::new(&bed);
let mut features: Features = Features::new();
let mut reader = match bed::Reader::from_file(path) {
Ok(f) => f,
Err(e) => {
debug!("could not open {}; skipping.", e.description());
return features;
}
};
// `index` doubles as the per-file feature id handed to record_to_nodes.
let mut index: u64 = 0;
for record in reader.records() {
let rec = record.ok().expect("Error reading record.");
let nodes = record_to_nodes(rec, &hashmap, index, &feature.chr_prefix);
for (key, value) in nodes.into_iter() {
features.entry(key).or_insert(Vec::new()).push(value);
}
index += 1;
}
return features;
}
/*
fn extract_file(path_compressed: &Path) -> io::Result<Vec<u8>>{
let mut v = Vec::new();
let f = try!(File::open(path_compressed));
try!(try!(GzDecoder::new(f)).read_to_end(&mut v));
Ok(v)
}
fn decode_reader(string: &String) -> io::Result<String> {
let mut gz = GzDecoder::new(string.as_bytes())?;
let mut s = String::new();
gz.read_to_string(&mut s)?;
Ok(s)
}
*/
| record_to_nodes | identifier_name |
majsoul.go | package main
import (
"fmt"
"github.com/fatih/color"
"github.com/EndlessCheng/mahjong-helper/util"
"github.com/EndlessCheng/mahjong-helper/util/model"
"sort"
"time"
"github.com/EndlessCheng/mahjong-helper/platform/majsoul/proto/lq"
)
// majsoulMessage is a catch-all decode target for the many JSON message
// shapes sent by the Majsoul server; every field is optional and which
// subset is populated depends on the message type (named in the comments).
type majsoulMessage struct {
// ID in the server's user database; smaller values mean an earlier registration.
AccountID int `json:"account_id"`
// Friend list.
Friends lq.FriendList `json:"friends"`
// Basic info of newly fetched game records.
RecordBaseInfoList []*majsoulRecordBaseInfo `json:"record_list"`
// Basic info of a shared game record.
SharedRecordBaseInfo *majsoulRecordBaseInfo `json:"shared_record_base_info"`
// UUID of the record currently being replayed.
CurrentRecordUUID string `json:"current_record_uuid"`
// All actions of the record currently being replayed.
RecordActions []*majsoulRecordAction `json:"record_actions"`
// The player's (click) action on the web page (that the page responded to).
RecordClickAction string `json:"record_click_action"`
RecordClickActionIndex int `json:"record_click_action_index"`
FastRecordTo int `json:"fast_record_to"` // inclusive upper bound
// Live spectating.
LiveBaseInfo *majsoulLiveRecordBaseInfo `json:"live_head"`
LiveFastAction *majsoulRecordAction `json:"live_fast_action"`
LiveAction *majsoulRecordAction `json:"live_action"`
// Seat change.
ChangeSeatTo *int `json:"change_seat_to"`
// Data received when reconnecting to a running game.
SyncGameActions []*majsoulRecordAction `json:"sync_game_actions"`
// ResAuthGame
// {"seat_list":[x,x,x,x],"is_game_start":false,"game_config":{"category":1,"mode":{"mode":1,"ai":true,"detail_rule":{"time_fixed":60,"time_add":0,"dora_count":3,"shiduan":1,"init_point":25000,"fandian":30000,"bianjietishi":true,"ai_level":1,"fanfu":1}},"meta":{"room_id":18269}},"ready_id_list":[0,0,0]}
IsGameStart *bool `json:"is_game_start"` // false = new game, true = reconnect
SeatList []int `json:"seat_list"`
ReadyIDList []int `json:"ready_id_list"`
GameConfig *majsoulGameConfig `json:"game_config"`
// NotifyPlayerLoadGameReady
//ReadyIDList []int `json:"ready_id_list"`
// ActionNewRound
// {"chang":0,"ju":0,"ben":0,"tiles":["1m","3m","7m","3p","6p","7p","6s","1z","1z","2z","3z","4z","7z"],"dora":"6m","scores":[25000,25000,25000,25000],"liqibang":0,"al":false,"md5":"","left_tile_count":69}
MD5 string `json:"md5"`
Chang *int `json:"chang"`
Ju *int `json:"ju"`
Ben *int `json:"ben"`
// Usually a []interface{} of tile strings; for a concealed kan it is a single string.
Tiles interface{} `json:"tiles"`
Dora string `json:"dora"`
// RecordNewRound — starting hands of all four seats.
Tiles0 []string `json:"tiles0"`
Tiles1 []string `json:"tiles1"`
Tiles2 []string `json:"tiles2"`
Tiles3 []string `json:"tiles3"`
// ActionDealTile
// {"seat":1,"tile":"5m","left_tile_count":23,"operation":{"seat":1,"operation_list":[{"type":1}],"time_add":0,"time_fixed":60000},"zhenting":false}
// Draw after another player's concealed kan:
// {"seat":1,"left_tile_count":3,"doras":["7m","0p"],"zhenting":false}
Seat *int `json:"seat"`
Tile string `json:"tile"`
// Set on a kan draw, when a new kan-dora indicator is revealed.
Doras []string `json:"doras"`
LeftTileCount *int `json:"left_tile_count"`
// ActionDiscardTile
// {"seat":0,"tile":"5z","is_liqi":false,"moqie":true,"zhenting":false,"is_wliqi":false}
// {"seat":0,"tile":"1z","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":3,"combination":["1z|1z"]}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":false,"is_wliqi":false}
// With chi/pon/ron available:
// {"seat":0,"tile":"6p","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":2,"combination":["7p|8p"]},{"type":3,"combination":["6p|6p"]},{"type":9}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":true,"is_wliqi":false}
IsLiqi *bool `json:"is_liqi"`
IsWliqi *bool `json:"is_wliqi"`
Moqie *bool `json:"moqie"`
Operation *struct{} `json:"operation"`
// ActionChiPengGang || ActionAnGangAddGang
// Another player's chi: {"seat":0,"type":0,"tiles":["2s","3s","4s"],"froms":[0,0,3],"zhenting":false}
// Another player's pon: {"seat":1,"type":1,"tiles":["1z","1z","1z"],"froms":[1,1,0],"operation":{"seat":1,"operation_list":[{"type":1,"combination":["1z"]}],"time_add":0,"time_fixed":60000},"zhenting":false,"tingpais":[{"tile":"4m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]},{"tile":"7m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]}]}
// Another player's open kan: {"seat":2,"type":2,"tiles":["3z","3z","3z","3z"],"froms":[2,2,2,0],"zhenting":false}
// Another player's added kan: {"seat":2,"type":2,"tiles":"3z"}
// Another player's concealed kan: {"seat":2,"type":3,"tiles":"3s"}
Type int `json:"type"`
Froms []int `json:"froms"`
// ActionLiqi
// ActionHule — win event; one entry per winner.
Hules []struct {
Seat int `json:"seat"`
Zimo bool `json:"zimo"`
PointRong int `json:"point_rong"`
PointZimoQin int `json:"point_zimo_qin"`
PointZimoXian int `json:"point_zimo_xian"`
} `json:"hules"`
// ActionLiuJu — exhaustive draw.
// {"liujumanguan":false,"players":[{"tingpai":true,"hand":["3s","3s","4s","5s","6s","1z","1z","7z","7z","7z"],"tings":[{"tile":"1z","haveyi":true},{"tile":"3s","haveyi":true}]},{"tingpai":false},{"tingpai":false},{"tingpai":true,"hand":["4m","0m","6m","6m","6m","4s","4s","4s","5s","7s"],"tings":[{"tile":"6s","haveyi":true}]}],"scores":[{"old_scores":[23000,29000,24000,24000],"delta_scores":[1500,-1500,-1500,1500]}],"gameend":false}
//Liujumanguan *bool `json:"liujumanguan"`
//Players *struct{ } `json:"players"`
//Gameend *bool `json:"gameend"`
// ActionBabei
}
// Meld codes of the "type" field in ActionChiPengGang/ActionAnGangAddGang.
const (
majsoulMeldTypeChi = iota
majsoulMeldTypePon
majsoulMeldTypeMinkanOrKakan
majsoulMeldTypeAnkan
)
// majsoulRoundData adapts Majsoul protocol messages onto the generic
// roundData round-state model shared by all platforms.
type majsoulRoundData struct {
*roundData
originJSON string // raw JSON of the message currently being handled
msg *majsoulMessage // decoded form of originJSON
selfSeat int // our initial seat: 0 = East of hand 1, 1 = South, 2 = West, 3 = North
}
// fatalParse aborts with diagnostics (message length, raw text, and raw
// bytes) for a payload that could not be parsed.
func (d *majsoulRoundData) fatalParse(info string, msg string) {
panic(fmt.Sprintln(info, len(msg), msg, []byte(msg)))
}
// normalTiles normalizes the polymorphic "tiles" JSON field into a string
// slice: it is normally a []interface{} of tile strings, but a single string
// for an added/concealed kan. Panics on any other shape.
func (d *majsoulRoundData) normalTiles(tiles interface{}) (majsoulTiles []string) {
	switch v := tiles.(type) {
	case string:
		return []string{v}
	case []interface{}:
		result := make([]string, len(v))
		for i, raw := range v {
			s, ok := raw.(string)
			if !ok {
				panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
			}
			result[i] = s
		}
		return result
	default:
		panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
	}
}
// parseWho converts an absolute Majsoul seat number into a relative player
// index: 0 = self, 1 = next (right), 2 = across, 3 = previous (left).
// Works for both 3- and 4-player games.
func (d *majsoulRoundData) parseWho(seat int) int {
who := (seat + d.dealer - d.roundNumber%4 + 4) % 4
return who
}
// mustParseMajsoulTile converts one Majsoul tile string (e.g. "5m"; "0p" is
// the red five) into a 34-tile index plus a red-five flag.
// Panics on unparsable input.
func (d *majsoulRoundData) mustParseMajsoulTile(humanTile string) (tile34 int, isRedFive bool) {
tile34, isRedFive, err := util.StrToTile34(humanTile)
if err != nil {
panic(err)
}
return
}
// mustParseMajsoulTiles converts a slice of Majsoul tile strings into
// 34-tile indices and counts how many of them are red fives.
// Panics on unparsable input (see mustParseMajsoulTile).
func (d *majsoulRoundData) mustParseMajsoulTiles(majsoulTiles []string) (tiles []int, numRedFive int) {
	tiles = make([]int, 0, len(majsoulTiles))
	for _, majsoulTile := range majsoulTiles {
		tile, red := d.mustParseMajsoulTile(majsoulTile)
		tiles = append(tiles, tile)
		if red {
			numRedFive++
		}
	}
	return
}
// isNewDora reports whether the message's dora list is longer than what we
// have recorded, i.e. a kan just revealed a new dora indicator.
func (d *majsoulRoundData) isNewDora(doras []string) bool {
return len(doras) > len(d.doraIndicators)
}
// GetDataSourceType identifies this adapter as the Majsoul data source.
func (d *majsoulRoundData) GetDataSourceType() int {
return dataSourceTypeMajsoul
}
// GetSelfSeat returns our initial seat (0 = East of the first hand).
func (d *majsoulRoundData) GetSelfSeat() int {
return d.selfSeat
}
// GetMessage returns the raw JSON of the message currently being handled.
func (d *majsoulRoundData) GetMessage() string {
return d.originJSON
}
// SkipMessage reports whether the current message carries nothing the
// analyzer needs. As a side effect it toggles old-yaku ("guyi") support
// when a game config arrives and prints player-ready progress.
func (d *majsoulRoundData) SkipMessage() bool {
msg := d.msg
// No account ID known yet: skip.
if gameConf.currentActiveMajsoulAccountID == -1 {
return true
}
// TODO: refactor
if msg.SeatList != nil {
// Special case: detect and enable the old-yaku ("guyi") rule set.
isGuyiMode := msg.GameConfig.isGuyiMode()
util.SetConsiderOldYaku(isGuyiMode)
if isGuyiMode {
color.HiGreen("古役模式已开启")
time.Sleep(2 * time.Second)
}
} else {
// msg.SeatList must be nil in this branch.
if msg.ReadyIDList != nil {
// Print "waiting for players to get ready" progress.
fmt.Printf("等待玩家准备 (%d/%d) %v\n", len(msg.ReadyIDList), d.playerNumber, msg.ReadyIDList)
}
}
return false
}
// IsLogin reports whether the message can identify the logged-in player:
// either an explicit account ID, or a seat list to match cached IDs against.
func (d *majsoulRoundData) IsLogin() bool {
msg := d.msg
return msg.AccountID > 0 || msg.SeatList != nil
}
// HandleLogin records the player's Majsoul account ID: directly from a login
// message when present, otherwise by matching the match's seat list against
// cached IDs, with a special case for games against AI.
func (d *majsoulRoundData) HandleLogin() {
msg := d.msg
if accountID := msg.AccountID; accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
if accountID != gameConf.currentActiveMajsoulAccountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
// Derive the account ID from the match's seat (player ID) list.
if seatList := msg.SeatList; seatList != nil {
// Look for a previously cached account ID among the seats.
for _, accountID := range seatList {
if accountID > 0 && gameConf.isIDExist(accountID) {
// Found one: make it the active account ID.
if gameConf.currentActiveMajsoulAccountID != accountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
}
// No cached ID matched.
if gameConf.currentActiveMajsoulAccountID > 0 {
color.HiRed("尚未获取到您的账号 ID,请您刷新网页,或开启一局人机对战(错误信息:您的账号 ID %d 不在对战列表 %v 中)", gameConf.currentActiveMajsoulAccountID, msg.SeatList)
return
}
// Check for a game against AI (AI seats have ID 0); in that case the
// only positive ID must be ours, so adopt it.
if !util.InInts(0, msg.SeatList) {
return
}
for _, accountID := range msg.SeatList {
if accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
return
}
}
}
}
// IsInit reports whether the message marks the start of a game or of a new
// round: ResAuthGame carries IsGameStart, ActionNewRound/RecordNewRound
// carry an MD5.
func (d *majsoulRoundData) IsInit() bool {
msg := d.msg
// ResAuthGame || ActionNewRound RecordNewRound
return msg.IsGameStart != nil || msg.MD5 != ""
}
// ParseInit extracts the initial round state: round/honba numbers, dealer,
// dora indicators, and our starting hand with red-five counts.
// When called on a ResAuthGame message (seat list present) it only resolves
// our seat and the dealer and returns early.
func (d *majsoulRoundData) ParseInit() (roundNumber int, benNumber int, dealer int, doraIndicators []int, handTiles []int, numRedFives []int) {
msg := d.msg
if playerNumber := len(msg.SeatList); playerNumber >= 3 {
d.playerNumber = playerNumber
// Resolve our initial seat: 0 = East of the first hand, 1 = South,
// 2 = West, 3 = North.
for i, accountID := range msg.SeatList {
if accountID == gameConf.currentActiveMajsoulAccountID {
d.selfSeat = i
break
}
}
// dealer: 0 = self, 1 = next, 2 = across, 3 = previous.
dealer = (4 - d.selfSeat) % 4
return
} else if len(msg.Tiles2) > 0 {
// Record replay: infer the player count from which hands are present.
if len(msg.Tiles3) > 0 {
d.playerNumber = 4
} else {
d.playerNumber = 3
}
}
dealer = -1
roundNumber = 4*(*msg.Chang) + *msg.Ju
benNumber = *msg.Ben
if msg.Dora != "" {
doraIndicator, _ := d.mustParseMajsoulTile(msg.Dora)
doraIndicators = append(doraIndicators, doraIndicator)
} else {
for _, dora := range msg.Doras {
doraIndicator, _ := d.mustParseMajsoulTile(dora)
doraIndicators = append(doraIndicators, doraIndicator)
}
}
// Red-five count per suit (man/pin/sou).
numRedFives = make([]int, 3)
var majsoulTiles []string
if msg.Tiles != nil { // live play
majsoulTiles = d.normalTiles(msg.Tiles)
} else { // record replay / spectating
majsoulTiles = [][]string{msg.Tiles0, msg.Tiles1, msg.Tiles2, msg.Tiles3}[d.selfSeat]
}
for _, majsoulTile := range majsoulTiles {
tile, isRedFive := d.mustParseMajsoulTile(majsoulTile)
handTiles = append(handTiles, tile)
if isRedFive {
numRedFives[tile/9]++
}
}
return
}
// IsSelfDraw reports whether the message is our own draw
// (ActionDealTile/RecordDealTile addressed to our seat; the Moqie field is
// only present on discard messages, so its absence rules those out).
func (d *majsoulRoundData) IsSelfDraw() bool {
msg := d.msg
// ActionDealTile RecordDealTile
return msg.Seat != nil && msg.Tile != "" && msg.Moqie == nil && d.parseWho(*msg.Seat) == 0
}
func (d *majsoulRoundData) ParseSelfDraw() (tile int, isRedFive bool, kanDoraIndicator int) {
msg := d.msg
tile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsDiscard() bool {
msg := d.msg
// ActionDiscardTile RecordDiscardTile
return msg.IsLiqi != nil
}
func (d *majsoulRoundData) ParseDiscard() (who int, discardTile int, isRedFive bool, isTsumogiri bool, isReach bool, canBeMeld bool, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
discardTile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
isTsumogiri = *msg.Moqie
isReach = *msg.IsLiqi
if msg.IsWliqi != nil && !isReach { // 兼容雀魂早期牌谱(无 IsWliqi 字段)
isReach = *msg.IsWliqi
}
canBeMeld = msg.Operation != nil // 注意:观战模式下无此选项
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsOpen() bool {
msg := d.msg
// ActionChiPengGang RecordChiPengGang || ActionAnGangAddGang RecordAnGangAddGang
return msg.Tiles != nil && len(d.normalTiles(msg.Tiles)) <= 4
}
func (d *majsoulRoundData) ParseOpen() (who int, meld *model.Meld, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) { // 暗杠(有时会在玩家摸牌后才发送 doras,可能是因为需要考虑抢暗杠的情况)
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
var meldType, calledTile int
majsoulTiles := d.normalTiles(msg.Tiles)
isSelfKan := len(majsoulTiles) == 1 // 自家加杠或暗杠
if isSelfKan {
majsoulTile := majsoulTiles[0]
majsoulTiles = []string{majsoulTile, majsoulTile, majsoulTile, majsoulTile}
}
meldTiles, numRedFive := d.mustParseMajsoulTiles(majsoulTiles)
containRedFive := numRedFive > 0
if len(majsoulTiles) == 4 && meldTiles[0] < 27 && meldTiles[0]%9 == 4 {
// 杠5意味着一定有赤5
containRedFive = true
}
if isSelfKan {
calledTile = meldTiles[0]
// 用 msg.Type 判断是加杠还是暗杠
// 也可以通过是否有相关碰副露来判断是加杠还是暗杠
if msg.Type == majsoulMeldTypeMinkanOrKakan {
meldType = meldTypeKakan // 加杠
} else if msg.Type == majsoulMeldTypeAnkan {
meldType = meldTypeAnkan // 暗杠
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
}
return
}
var rawCalledTile string
for i, seat := range msg.Froms {
fromWho := d.parseWho(seat)
if fromWho != who {
rawCalledTile = majsoulTiles[i]
}
}
if rawCalledTile == "" {
panic("数据解析异常: 未找到 rawCalledTile")
}
calledTile, redFiveFromOthers := d.mustParseMajsoulTile(rawCalledTile)
if len(meldTiles) == 3 {
if meldTiles[0] == meldTiles[1] {
meldType = meldTypePon // 碰
} else {
meldType = meldTypeChi // 吃
sort.Ints(meldTiles)
}
} else if len(meldTiles) == 4 {
meldType = meldTypeMinkan // 大明杠
} else {
panic("鸣牌数据解析失败!")
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
RedFiveFromOthers: redFiveFromOthers,
}
return
}
func (d *majsoulRoundData) IsReach() bool {
return false
}
func (d *majsoulRoundData) ParseReach() (who int) {
return 0
}
func (d *majsoulRoundData) IsFuriten() bool {
return false
}
func (d *majsoulRoundData) IsRoundWin() bool {
msg := d.msg
// ActionHule RecordHule
return msg.Hules != nil
}
func (d *majsoulRoundData) ParseRoundWin() (whos []int, points []int) {
msg := d.msg
for _, result := range msg.Hules {
who := d.parseWho(result.Seat)
whos = append(whos, d.parseWho(result.Seat))
point := result.PointRong
if result.Zimo {
if who == d.dealer {
point = 3 * result.PointZimoXian
} else {
point = result.PointZimoQin + 2*result.PointZimoXian
}
if d.playerNumber == 3 {
// 自摸损(一个子家)
point -= result.PointZimoXian
}
}
points = append(points, point)
}
return
}
func (d *majsoulRoundData) IsRyuukyoku() bool {
// TODO
// ActionLiuJu RecordLiuJu
return false
}
func (d *majsoulRoundData) ParseRyuukyoku() (type_ int, whos []int, points []int) {
// TODO
return
}
// 拔北宝牌
func (d *majsoulRoundData) IsNukiDora() bool {
msg := d.msg
// ActionBaBei RecordBaBei
return msg.Seat != nil && msg.Moqie != nil && msg.Tile == ""
}
func (d *majsoulRoundData) ParseNukiDora() (who int, is | l) {
msg := d.msg
return d.parseWho(*msg.Seat), *msg.Moqie
}
// 在最后处理该项
func (d *majsoulRoundData) IsNewDora() bool {
msg := d.msg
// ActionDealTile
return d.isNewDora(msg.Doras)
}
func (d *majsoulRoundData) ParseNewDora() (kanDoraIndicator int) {
msg := d.msg
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
return
}
| Tsumogiri boo | identifier_body |
majsoul.go | package main
import (
"fmt"
"github.com/fatih/color"
"github.com/EndlessCheng/mahjong-helper/util"
"github.com/EndlessCheng/mahjong-helper/util/model"
"sort"
"time"
"github.com/EndlessCheng/mahjong-helper/platform/majsoul/proto/lq"
)
type majsoulMessage struct {
// 对应到服务器用户数据库中的ID,该值越小表示您的注册时间越早
AccountID int `json:"account_id"`
// 友人列表
Friends lq.FriendList `json:"friends"`
// 新获取到的牌谱基本信息列表
RecordBaseInfoList []*majsoulRecordBaseInfo `json:"record_list"`
// 分享的牌谱基本信息
SharedRecordBaseInfo *majsoulRecordBaseInfo `json:"shared_record_base_info"`
// 当前正在观看的牌谱的 UUID
CurrentRecordUUID string `json:"current_record_uuid"`
// 当前正在观看的牌谱的全部操作
RecordActions []*majsoulRecordAction `json:"record_actions"`
// 玩家在网页上的(点击)操作(网页响应了的)
RecordClickAction string `json:"record_click_action"`
RecordClickActionIndex int `json:"record_click_action_index"`
FastRecordTo int `json:"fast_record_to"` // 闭区间
// 观战
LiveBaseInfo *majsoulLiveRecordBaseInfo `json:"live_head"`
LiveFastAction *majsoulRecordAction `json:"live_fast_action"`
LiveAction *majsoulRecordAction `json:"live_action"`
// 座位变更
ChangeSeatTo *int `json:"change_seat_to"`
// 游戏重连时收到的数据
SyncGameActions []*majsoulRecordAction `json:"sync_game_actions"`
// ResAuthGame
// {"seat_list":[x,x,x,x],"is_game_start":false,"game_config":{"category":1,"mode":{"mode":1,"ai":true,"detail_rule":{"time_fixed":60,"time_add":0,"dora_count":3,"shiduan":1,"init_point":25000,"fandian":30000,"bianjietishi":true,"ai_level":1,"fanfu":1}},"meta":{"room_id":18269}},"ready_id_list":[0,0,0]}
IsGameStart *bool `json:"is_game_start"` // false=新游戏,true=重连
SeatList []int `json:"seat_list"`
ReadyIDList []int `json:"ready_id_list"`
GameConfig *majsoulGameConfig `json:"game_config"`
// NotifyPlayerLoadGameReady
//ReadyIDList []int `json:"ready_id_list"`
// ActionNewRound
// {"chang":0,"ju":0,"ben":0,"tiles":["1m","3m","7m","3p","6p","7p","6s","1z","1z","2z","3z","4z","7z"],"dora":"6m","scores":[25000,25000,25000,25000],"liqibang":0,"al":false,"md5":"","left_tile_count":69}
MD5 string `json:"md5"`
Chang *int `json:"chang"`
Ju *int `json:"ju"`
Ben *int `json:"ben"`
Tiles interface{} `json:"tiles"` // 一般情况下为 []interface{}, interface{} 即 string,但是暗杠的情况下,该值为一个 string
Dora string `json:"dora"`
// RecordNewRound
Tiles0 []string `json:"tiles0"`
Tiles1 []string `json:"tiles1"`
Tiles2 []string `json:"tiles2"`
Tiles3 []string `json:"tiles3"`
// ActionDealTile
// {"seat":1,"tile":"5m","left_tile_count":23,"operation":{"seat":1,"operation_list":[{"type":1}],"time_add":0,"time_fixed":60000},"zhenting":false}
// 他家暗杠后的摸牌
// {"seat":1,"left_tile_count":3,"doras":["7m","0p"],"zhenting":false}
Seat *int `json:"seat"`
Tile string `json:"tile"`
Doras []string `json:"doras"` // 暗杠摸牌了,同时翻出杠宝牌指示牌
LeftTileCount *int `json:"left_tile_count"`
// ActionDiscardTile
// {"seat":0,"tile":"5z","is_liqi":false,"moqie":true,"zhenting":false,"is_wliqi":false}
// {"seat":0,"tile":"1z","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":3,"combination":["1z|1z"]}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":false,"is_wliqi":false}
// 吃 碰 和
// {"seat":0,"tile":"6p","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":2,"combination":["7p|8p"]},{"type":3,"combination":["6p|6p"]},{"type":9}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":true,"is_wliqi":false}
IsLiqi *bool `json:"is_liqi"`
IsWliqi *bool `json:"is_wliqi"`
Moqie *bool `json:"moqie"`
Operation *struct{} `json:"operation"`
// ActionChiPengGang || ActionAnGangAddGang
// 他家吃 {"seat":0,"type":0,"tiles":["2s","3s","4s"],"froms":[0,0,3],"zhenting":false}
// 他家碰 {"seat":1,"type":1,"tiles":["1z","1z","1z"],"froms":[1,1,0],"operation":{"seat":1,"operation_list":[{"type":1,"combination":["1z"]}],"time_add":0,"time_fixed":60000},"zhenting":false,"tingpais":[{"tile":"4m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]},{"tile":"7m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]}]}
// 他家大明杠 {"seat":2,"type":2,"tiles":["3z","3z","3z","3z"],"froms":[2,2,2,0],"zhenting":false}
// 他家加杠 {"seat":2,"type":2,"tiles":"3z"}
// 他家暗杠 {"seat":2,"type":3,"tiles":"3s"}
Type int `json:"type"`
Froms []int `json:"froms"`
// ActionLiqi
// ActionHule
Hules []struct {
Seat int `json:"seat"`
Zimo bool `json:"zimo"`
PointRong int `json:"point_rong"`
PointZimoQin int `json:"point_zimo_qin"`
PointZimoXian int `json:"point_zimo_xian"`
} `json:"hules"`
// ActionLiuJu
// {"liujumanguan":false,"players":[{"tingpai":true,"hand":["3s","3s","4s","5s","6s","1z","1z","7z","7z","7z"],"tings":[{"tile":"1z","haveyi":true},{"tile":"3s","haveyi":true}]},{"tingpai":false},{"tingpai":false},{"tingpai":true,"hand":["4m","0m","6m","6m","6m","4s","4s","4s","5s","7s"],"tings":[{"tile":"6s","haveyi":true}]}],"scores":[{"old_scores":[23000,29000,24000,24000],"delta_scores":[1500,-1500,-1500,1500]}],"gameend":false}
//Liujumanguan *bool `json:"liujumanguan"`
//Players *struct{ } `json:"players"`
//Gameend *bool `json:"gameend"`
// ActionBabei
}
const (
majsoulMeldTypeChi = iota
majsoulMeldTypePon
majsoulMeldTypeMinkanOrKakan
majsoulMeldTypeAnkan
)
type majsoulRoundData struct {
*roundData
originJSON string
msg *majsoulMessage
selfSeat int // 自家初始座位:0-第一局的东家 1-第一局的南家 2-第一局的西家 3-第一局的北家
}
func (d *majsoulRoundData) fatalParse(info string, msg string) {
panic(fmt.Sprintln(info, len(msg), msg, []byte(msg)))
}
func (d *majsoulRoundData) normalTiles(tiles interface{}) (majsoulTiles []string) {
_tiles, ok := tiles.([]interface{})
if !ok {
_tile, ok := tiles.(string)
if !ok {
panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
}
return []string{_tile}
}
majsoulTiles = make([]string, len(_tiles))
for i, _tile := range _tiles {
_t, ok := _tile.(string)
if !ok {
panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
}
majsoulTiles[i] = _t
}
return majsoulTiles
}
func (d *majsoulRoundData) parseWho(seat int) int {
// 转换成 0=自家, 1=下家, 2=对家, 3=上家
// 对三麻四麻均适用
who := (seat + d.dealer - d.roundNumber%4 + 4) % 4
return who
}
func (d *majsoulRoundData) mustParseMajsoulTile(humanTile string) (tile34 int, isRedFive bool) {
tile34, isRedFive, err := util.StrToTile34(humanTile)
if err != nil {
panic(err)
}
return
}
func (d *majsoulRoundData) mustParseMajsoulTiles(majsoulTiles []string) (tiles []int, numRedFive int) {
tiles = make([]int, len(majsoulTiles))
for i, majsoulTile := range majsoulTiles {
var isRedFive bool
tiles[i], isRedFive = d.mustParseMajsoulTile(majsoulTile)
if isRedFive {
numRedFive++
}
}
return
}
func (d *majsoulRoundData) isNewDora(doras []string) bool {
return len(doras) > len(d.doraIndicators)
}
func (d *majsoulRoundData) GetDataSourceType() int {
return dataSourceTypeMajsoul
}
func (d *majsoulRoundData) GetSelfSeat() int {
return d.selfSeat
}
func (d *majsoulRoundData) GetMessage() string {
return d.originJSON
}
func (d *majsoulRoundData) SkipMessage() bool {
msg := d.msg
// 没有账号 skip
if gameConf.currentActiveMajsoulAccountID == -1 {
return true
}
// TODO: 重构
if msg.SeatList != nil {
// 特判古役模式
isGuyiMode := msg.GameConfig.isGuyiMode()
util.SetConsiderOldYaku(isGuyiMode)
if isGuyiMode {
color.HiGreen("古役模式已开启")
time.Sleep(2 * time.Second)
}
} else {
// msg.SeatList 必须为 nil
if msg.ReadyIDList != nil {
// 打印准备信息
fmt.Printf("等待玩家准备 (%d/%d) %v\n", len(msg.ReadyIDList), d.playerNumber, msg.ReadyIDList)
}
}
return false
}
func (d *majsoulRoundData) IsLogin() bool {
msg := d.msg
return msg.AccountID > 0 || msg.SeatList != nil
}
func (d *majsoulRoundData) HandleLogin() {
msg := d.msg
if accountID := msg.AccountID; accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
if accountID != gameConf.currentActiveMajsoulAccountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
// | untID > 0 && gameConf.isIDExist(accountID) {
// 找到了,更新当前使用的账号 ID
if gameConf.currentActiveMajsoulAccountID != accountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
}
// 未找到缓存 ID
if gameConf.currentActiveMajsoulAccountID > 0 {
color.HiRed("尚未获取到您的账号 ID,请您刷新网页,或开启一局人机对战(错误信息:您的账号 ID %d 不在对战列表 %v 中)", gameConf.currentActiveMajsoulAccountID, msg.SeatList)
return
}
// 判断是否为人机对战,若为人机对战,则获取账号 ID
if !util.InInts(0, msg.SeatList) {
return
}
for _, accountID := range msg.SeatList {
if accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
return
}
}
}
}
func (d *majsoulRoundData) IsInit() bool {
msg := d.msg
// ResAuthGame || ActionNewRound RecordNewRound
return msg.IsGameStart != nil || msg.MD5 != ""
}
func (d *majsoulRoundData) ParseInit() (roundNumber int, benNumber int, dealer int, doraIndicators []int, handTiles []int, numRedFives []int) {
msg := d.msg
if playerNumber := len(msg.SeatList); playerNumber >= 3 {
d.playerNumber = playerNumber
// 获取自家初始座位:0-第一局的东家 1-第一局的南家 2-第一局的西家 3-第一局的北家
for i, accountID := range msg.SeatList {
if accountID == gameConf.currentActiveMajsoulAccountID {
d.selfSeat = i
break
}
}
// dealer: 0=自家, 1=下家, 2=对家, 3=上家
dealer = (4 - d.selfSeat) % 4
return
} else if len(msg.Tiles2) > 0 {
if len(msg.Tiles3) > 0 {
d.playerNumber = 4
} else {
d.playerNumber = 3
}
}
dealer = -1
roundNumber = 4*(*msg.Chang) + *msg.Ju
benNumber = *msg.Ben
if msg.Dora != "" {
doraIndicator, _ := d.mustParseMajsoulTile(msg.Dora)
doraIndicators = append(doraIndicators, doraIndicator)
} else {
for _, dora := range msg.Doras {
doraIndicator, _ := d.mustParseMajsoulTile(dora)
doraIndicators = append(doraIndicators, doraIndicator)
}
}
numRedFives = make([]int, 3)
var majsoulTiles []string
if msg.Tiles != nil { // 实战
majsoulTiles = d.normalTiles(msg.Tiles)
} else { // 牌谱、观战
majsoulTiles = [][]string{msg.Tiles0, msg.Tiles1, msg.Tiles2, msg.Tiles3}[d.selfSeat]
}
for _, majsoulTile := range majsoulTiles {
tile, isRedFive := d.mustParseMajsoulTile(majsoulTile)
handTiles = append(handTiles, tile)
if isRedFive {
numRedFives[tile/9]++
}
}
return
}
func (d *majsoulRoundData) IsSelfDraw() bool {
msg := d.msg
// ActionDealTile RecordDealTile
return msg.Seat != nil && msg.Tile != "" && msg.Moqie == nil && d.parseWho(*msg.Seat) == 0
}
func (d *majsoulRoundData) ParseSelfDraw() (tile int, isRedFive bool, kanDoraIndicator int) {
msg := d.msg
tile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsDiscard() bool {
msg := d.msg
// ActionDiscardTile RecordDiscardTile
return msg.IsLiqi != nil
}
func (d *majsoulRoundData) ParseDiscard() (who int, discardTile int, isRedFive bool, isTsumogiri bool, isReach bool, canBeMeld bool, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
discardTile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
isTsumogiri = *msg.Moqie
isReach = *msg.IsLiqi
if msg.IsWliqi != nil && !isReach { // 兼容雀魂早期牌谱(无 IsWliqi 字段)
isReach = *msg.IsWliqi
}
canBeMeld = msg.Operation != nil // 注意:观战模式下无此选项
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsOpen() bool {
msg := d.msg
// ActionChiPengGang RecordChiPengGang || ActionAnGangAddGang RecordAnGangAddGang
return msg.Tiles != nil && len(d.normalTiles(msg.Tiles)) <= 4
}
func (d *majsoulRoundData) ParseOpen() (who int, meld *model.Meld, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) { // 暗杠(有时会在玩家摸牌后才发送 doras,可能是因为需要考虑抢暗杠的情况)
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
var meldType, calledTile int
majsoulTiles := d.normalTiles(msg.Tiles)
isSelfKan := len(majsoulTiles) == 1 // 自家加杠或暗杠
if isSelfKan {
majsoulTile := majsoulTiles[0]
majsoulTiles = []string{majsoulTile, majsoulTile, majsoulTile, majsoulTile}
}
meldTiles, numRedFive := d.mustParseMajsoulTiles(majsoulTiles)
containRedFive := numRedFive > 0
if len(majsoulTiles) == 4 && meldTiles[0] < 27 && meldTiles[0]%9 == 4 {
// 杠5意味着一定有赤5
containRedFive = true
}
if isSelfKan {
calledTile = meldTiles[0]
// 用 msg.Type 判断是加杠还是暗杠
// 也可以通过是否有相关碰副露来判断是加杠还是暗杠
if msg.Type == majsoulMeldTypeMinkanOrKakan {
meldType = meldTypeKakan // 加杠
} else if msg.Type == majsoulMeldTypeAnkan {
meldType = meldTypeAnkan // 暗杠
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
}
return
}
var rawCalledTile string
for i, seat := range msg.Froms {
fromWho := d.parseWho(seat)
if fromWho != who {
rawCalledTile = majsoulTiles[i]
}
}
if rawCalledTile == "" {
panic("数据解析异常: 未找到 rawCalledTile")
}
calledTile, redFiveFromOthers := d.mustParseMajsoulTile(rawCalledTile)
if len(meldTiles) == 3 {
if meldTiles[0] == meldTiles[1] {
meldType = meldTypePon // 碰
} else {
meldType = meldTypeChi // 吃
sort.Ints(meldTiles)
}
} else if len(meldTiles) == 4 {
meldType = meldTypeMinkan // 大明杠
} else {
panic("鸣牌数据解析失败!")
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
RedFiveFromOthers: redFiveFromOthers,
}
return
}
func (d *majsoulRoundData) IsReach() bool {
return false
}
func (d *majsoulRoundData) ParseReach() (who int) {
return 0
}
func (d *majsoulRoundData) IsFuriten() bool {
return false
}
func (d *majsoulRoundData) IsRoundWin() bool {
msg := d.msg
// ActionHule RecordHule
return msg.Hules != nil
}
func (d *majsoulRoundData) ParseRoundWin() (whos []int, points []int) {
msg := d.msg
for _, result := range msg.Hules {
who := d.parseWho(result.Seat)
whos = append(whos, d.parseWho(result.Seat))
point := result.PointRong
if result.Zimo {
if who == d.dealer {
point = 3 * result.PointZimoXian
} else {
point = result.PointZimoQin + 2*result.PointZimoXian
}
if d.playerNumber == 3 {
// 自摸损(一个子家)
point -= result.PointZimoXian
}
}
points = append(points, point)
}
return
}
func (d *majsoulRoundData) IsRyuukyoku() bool {
// TODO
// ActionLiuJu RecordLiuJu
return false
}
func (d *majsoulRoundData) ParseRyuukyoku() (type_ int, whos []int, points []int) {
// TODO
return
}
// 拔北宝牌
func (d *majsoulRoundData) IsNukiDora() bool {
msg := d.msg
// ActionBaBei RecordBaBei
return msg.Seat != nil && msg.Moqie != nil && msg.Tile == ""
}
func (d *majsoulRoundData) ParseNukiDora() (who int, isTsumogiri bool) {
msg := d.msg
return d.parseWho(*msg.Seat), *msg.Moqie
}
// 在最后处理该项
func (d *majsoulRoundData) IsNewDora() bool {
msg := d.msg
// ActionDealTile
return d.isNewDora(msg.Doras)
}
func (d *majsoulRoundData) ParseNewDora() (kanDoraIndicator int) {
msg := d.msg
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
return
}
| 从对战 ID 列表中获取账号 ID
if seatList := msg.SeatList; seatList != nil {
// 尝试从中找到缓存账号 ID
for _, accountID := range seatList {
if acco | conditional_block |
majsoul.go | package main
import (
"fmt"
"github.com/fatih/color"
"github.com/EndlessCheng/mahjong-helper/util"
"github.com/EndlessCheng/mahjong-helper/util/model"
"sort"
"time"
"github.com/EndlessCheng/mahjong-helper/platform/majsoul/proto/lq"
)
type majsoulMessage struct {
// 对应到服务器用户数据库中的ID,该值越小表示您的注册时间越早
AccountID int `json:"account_id"`
// 友人列表
Friends lq.FriendList `json:"friends"`
// 新获取到的牌谱基本信息列表
RecordBaseInfoList []*majsoulRecordBaseInfo `json:"record_list"`
// 分享的牌谱基本信息
SharedRecordBaseInfo *majsoulRecordBaseInfo `json:"shared_record_base_info"`
// 当前正在观看的牌谱的 UUID
CurrentRecordUUID string `json:"current_record_uuid"`
// 当前正在观看的牌谱的全部操作
RecordActions []*majsoulRecordAction `json:"record_actions"`
// 玩家在网页上的(点击)操作(网页响应了的)
RecordClickAction string `json:"record_click_action"`
RecordClickActionIndex int `json:"record_click_action_index"`
FastRecordTo int `json:"fast_record_to"` // 闭区间
// 观战
LiveBaseInfo *majsoulLiveRecordBaseInfo `json:"live_head"`
LiveFastAction *majsoulRecordAction `json:"live_fast_action"`
LiveAction *majsoulRecordAction `json:"live_action"`
// 座位变更
ChangeSeatTo *int `json:"change_seat_to"`
// 游戏重连时收到的数据
SyncGameActions []*majsoulRecordAction `json:"sync_game_actions"`
// ResAuthGame
// {"seat_list":[x,x,x,x],"is_game_start":false,"game_config":{"category":1,"mode":{"mode":1,"ai":true,"detail_rule":{"time_fixed":60,"time_add":0,"dora_count":3,"shiduan":1,"init_point":25000,"fandian":30000,"bianjietishi":true,"ai_level":1,"fanfu":1}},"meta":{"room_id":18269}},"ready_id_list":[0,0,0]}
IsGameStart *bool `json:"is_game_start"` // false=新游戏,true=重连
SeatList []int `json:"seat_list"`
ReadyIDList []int `json:"ready_id_list"`
GameConfig *majsoulGameConfig `json:"game_config"`
// NotifyPlayerLoadGameReady
//ReadyIDList []int `json:"ready_id_list"`
// ActionNewRound
// {"chang":0,"ju":0,"ben":0,"tiles":["1m","3m","7m","3p","6p","7p","6s","1z","1z","2z","3z","4z","7z"],"dora":"6m","scores":[25000,25000,25000,25000],"liqibang":0,"al":false,"md5":"","left_tile_count":69}
MD5 string `json:"md5"`
Chang *int `json:"chang"`
Ju *int `json:"ju"`
Ben *int `json:"ben"`
Tiles interface{} `json:"tiles"` // 一般情况下为 []interface{}, interface{} 即 string,但是暗杠的情况下,该值为一个 string
Dora string `json:"dora"`
// RecordNewRound
Tiles0 []string `json:"tiles0"`
Tiles1 []string `json:"tiles1"`
Tiles2 []string `json:"tiles2"`
Tiles3 []string `json:"tiles3"`
// ActionDealTile
// {"seat":1,"tile":"5m","left_tile_count":23,"operation":{"seat":1,"operation_list":[{"type":1}],"time_add":0,"time_fixed":60000},"zhenting":false}
// 他家暗杠后的摸牌
// {"seat":1,"left_tile_count":3,"doras":["7m","0p"],"zhenting":false}
Seat *int `json:"seat"`
Tile string `json:"tile"`
Doras []string `json:"doras"` // 暗杠摸牌了,同时翻出杠宝牌指示牌
LeftTileCount *int `json:"left_tile_count"`
// ActionDiscardTile
// {"seat":0,"tile":"5z","is_liqi":false,"moqie":true,"zhenting":false,"is_wliqi":false}
// {"seat":0,"tile":"1z","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":3,"combination":["1z|1z"]}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":false,"is_wliqi":false}
// 吃 碰 和
// {"seat":0,"tile":"6p","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":2,"combination":["7p|8p"]},{"type":3,"combination":["6p|6p"]},{"type":9}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":true,"is_wliqi":false}
IsLiqi *bool `json:"is_liqi"`
IsWliqi *bool `json:"is_wliqi"`
Moqie *bool `json:"moqie"`
Operation *struct{} `json:"operation"`
// ActionChiPengGang || ActionAnGangAddGang
// 他家吃 {"seat":0,"type":0,"tiles":["2s","3s","4s"],"froms":[0,0,3],"zhenting":false}
// 他家碰 {"seat":1,"type":1,"tiles":["1z","1z","1z"],"froms":[1,1,0],"operation":{"seat":1,"operation_list":[{"type":1,"combination":["1z"]}],"time_add":0,"time_fixed":60000},"zhenting":false,"tingpais":[{"tile":"4m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]},{"tile":"7m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]}]}
// 他家大明杠 {"seat":2,"type":2,"tiles":["3z","3z","3z","3z"],"froms":[2,2,2,0],"zhenting":false}
// 他家加杠 {"seat":2,"type":2,"tiles":"3z"}
// 他家暗杠 {"seat":2,"type":3,"tiles":"3s"}
Type int `json:"type"`
Froms []int `json:"froms"`
// ActionLiqi
// ActionHule
Hules []struct {
Seat int `json:"seat"`
Zimo bool `json:"zimo"`
PointRong int `json:"point_rong"`
PointZimoQin int `json:"point_zimo_qin"`
PointZimoXian int `json:"point_zimo_xian"`
} `json:"hules"`
// ActionLiuJu
// {"liujumanguan":false,"players":[{"tingpai":true,"hand":["3s","3s","4s","5s","6s","1z","1z","7z","7z","7z"],"tings":[{"tile":"1z","haveyi":true},{"tile":"3s","haveyi":true}]},{"tingpai":false},{"tingpai":false},{"tingpai":true,"hand":["4m","0m","6m","6m","6m","4s","4s","4s","5s","7s"],"tings":[{"tile":"6s","haveyi":true}]}],"scores":[{"old_scores":[23000,29000,24000,24000],"delta_scores":[1500,-1500,-1500,1500]}],"gameend":false}
//Liujumanguan *bool `json:"liujumanguan"`
//Players *struct{ } `json:"players"`
//Gameend *bool `json:"gameend"`
// ActionBabei
}
const (
majsoulMeldTypeChi = iota
majsoulMeldTypePon
majsoulMeldTypeMinkanOrKakan
majsoulMeldTypeAnkan
)
type majsoulRoundData struct {
*roundData
originJSON string
msg *majsoulMessage
selfSeat int // 自家初始座位:0-第一局的东家 1-第一局的南家 2-第一局的西家 3-第一局的北家
}
func (d *majsoulRoundData) fatalParse(info string, msg string) {
panic(fmt.Sprintln(info, len(msg), msg, []byte(msg)))
}
func (d *majsoulRoundData) normalTiles(tiles interface{}) (majsoulTiles []string) {
_tiles, ok := tiles.([]interface{})
if !ok {
_tile, ok := tiles.(string)
if !ok {
panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
}
return []string{_tile}
}
majsoulTiles = make([]string, len(_tiles))
for i, _tile := range _tiles {
_t, ok := _tile.(string)
if !ok {
panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
}
majsoulTiles[i] = _t
}
return majsoulTiles
}
func (d *majsoulRoundData) parseWho(seat int) int {
// 转换成 0=自家, 1=下家, 2=对家, 3=上家
// 对三麻四麻均适用
who := (seat + d.dealer - d.roundNumber%4 + 4) % 4
return who
}
func (d *majsoulRoundData) mustParseMajsoulTile(humanTile string) (tile34 int, isRedFive bool) {
tile34, isRedFive, err := util.StrToTile34(humanTile)
if err != nil {
panic(err)
}
return
}
func (d *majsoulRoundData) mustParseMajsoulTiles(majsoulTiles []string) (tiles []int, numRedFive int) {
tiles = make([]int, len(majsoulTiles))
for i, majsoulTile := range majsoulTiles {
var isRedFive bool
tiles[i], isRedFive = d.mustParseMajsoulTile(majsoulTile)
if isRedFive {
numRedFive++
}
}
return
}
func (d *majsoulRoundData) isNewDora(doras []string) bool {
return len(doras) > len(d.doraIndicators)
}
func (d *majsoulRoundData) GetDataSourceType() int {
return dataSourceTypeMajsoul
}
func (d *majsoulRoundData) GetSelfSeat() int {
return d.selfSeat
}
func (d *majsoulRoundData) GetMessage() string {
return d.originJSON
}
func (d *majsoulRoundData) SkipMessage() bool {
msg := d.msg
// 没有账号 skip
if gameConf.currentActiveMajsoulAccountID == -1 {
return true
}
// TODO: 重构
if msg.SeatList != nil {
// 特判古役模式
isGuyiMode := msg.GameConfig.isGuyiMode()
util.SetConsiderOldYaku(isGuyiMode)
if isGuyiMode {
color.HiGreen("古役模式已开启")
time.Sleep(2 * time.Second)
}
} else {
// msg.SeatList 必须为 nil
if msg.ReadyIDList != nil {
// 打印准备信息
fmt.Printf("等待玩家准备 (%d/%d) %v\n", len(msg.ReadyIDList), d.playerNumber, msg.ReadyIDList)
}
}
return false
}
func (d *majsoulRoundData) IsLogin() bool {
msg := d.msg
return msg.AccountID > 0 || msg.SeatList != nil
}
func (d *majsoulRoundData) HandleLogin() {
msg := d.msg
if accountID := msg.AccountID; accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
if accountID != gameConf.currentActiveMajsoulAccountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
// 从对战 ID 列表中获取账号 ID
if seatList := msg.SeatList; seatList != nil {
// 尝试从中找到缓存账号 ID
for _, accountID := range seatList {
if accountID > 0 && gameConf.isIDExist(accountID) {
// 找到了,更新当前使用的账号 ID
if gameConf.currentActiveMajsoulAccountID != accountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
}
// 未找到缓存 ID
if gameConf.currentActiveMajsoulAccountID > 0 {
color.HiRed("尚未获取到您的账号 ID,请您刷新网页,或开启一局人机对战(错误信息:您的账号 ID %d 不在对战列表 %v 中)", gameConf.currentActiveMajsoulAccountID, msg.SeatList)
return
}
// 判断是否为人机对战,若为人机对战,则获取账号 ID
if !util.InInts(0, msg.SeatList) {
return
}
for _, accountID := range msg.SeatList {
if accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
return
}
}
}
}
func (d *majsoulRoundData) IsInit() bool {
msg := d.msg
// ResAuthGame || ActionNewRound RecordNewRound
return msg.IsGameStart != nil || msg.MD5 != ""
}
func (d *majsoulRoundData) ParseInit() (roundNumber int, benNumber int, dealer int, doraIndicators []int, handTiles []int, numRedFives []int) {
msg := d.msg
if playerNumber := len(msg.SeatList); playerNumber >= 3 {
d.playerNumber = playerNumber
// 获取自家初始座位:0-第一局的东家 1-第一局的南家 2-第一局的西家 3-第一局的北家
for i, accountID := range msg.SeatList {
if accountID == gameConf.currentActiveMajsoulAccountID {
d.selfSeat = i
break
}
}
// dealer: 0=自家, 1=下家, 2=对家, 3=上家
dealer = (4 - d.selfSeat) % 4
return
} else if len(msg.Tiles2) > 0 {
if len(msg.Tiles3) > 0 {
d.playerNumber = 4
} else {
d.playerNumber = 3
}
}
dealer = -1
roundNumber = 4*(*msg.Chang) + *msg.Ju
benNumber = *msg.Ben
if msg.Dora != "" {
doraIndicator, _ := d.mustParseMajsoulTile(msg.Dora)
doraIndicators = append(doraIndicators, doraIndicator)
} else {
for _, dora := range msg.Doras {
doraIndicator, _ := d.mustParseMajsoulTile(dora)
doraIndicators = append(doraIndicators, doraIndicator)
}
}
numRedFives = make([]int, 3)
var majsoulTiles []string
if msg.Tiles != nil { // 实战
majsoulTiles = d.normalTiles(msg.Tiles)
} else { // 牌谱、观战
majsoulTiles = [][]string{msg.Tiles0, msg.Tiles1, msg.Tiles2, msg.Tiles3}[d.selfSeat]
}
for _, majsoulTile := range majsoulTiles {
tile, isRedFive := d.mustParseMajsoulTile(majsoulTile)
handTiles = append(handTiles, tile)
if isRedFive {
numRedFives[tile/9]++
}
}
return
}
func (d *majsoulRoundData) IsSelfDraw() bool {
msg := d.msg
// ActionDealTile RecordDealTile
return msg.Seat != nil && msg.Tile != "" && msg.Moqie == nil && d.parseWho(*msg.Seat) == 0
}
func (d *majsoulRoundData) ParseSelfDraw() (tile int, isRedFive bool, kanDoraIndicator int) {
msg := d.msg
tile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsDiscard() bool {
msg := d.msg
// ActionDiscardTile RecordDiscardTile
return msg.IsLiqi != nil
}
func (d *majsoulRoundData) ParseDiscard() (who int, discardTile int, isRedFive bool, isTsumogiri bool, isReach bool, canBeMeld bool, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
discardTile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
isTsumogiri = *msg.Moqie
isReach = *msg.IsLiqi
if msg.IsWliqi != nil && !isReach { // 兼容雀魂早期牌谱(无 IsWliqi 字段)
isReach = *msg.IsWliqi
}
canBeMeld = msg.Operation != nil // 注意:观战模式下无此选项
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsOpen() bool {
msg := d.msg
// ActionChiPengGang RecordChiPengGang || ActionAnGangAddGang RecordAnGangAddGang
return msg.Tiles != nil && len(d.normalTiles(msg.Tiles)) <= 4
}
func (d *majsoulRoundData) ParseOpen() (who int, meld *model.Meld, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) { // 暗杠(有时会在玩家摸牌后才发送 doras,可能是因为需要考虑抢暗杠的情况)
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
var meldType, calledTile int
majsoulTiles := d.normalTiles(msg.Tiles)
isSelfKan := len(majsoulTiles) == 1 // 自家加杠或暗杠
if isSelfKan {
majsoulTile := majsoulTiles[0]
majsoulTiles = []string{majsoulTile, majsoulTile, majsoulTile, majsoulTile}
}
meldTiles, numRedFive := d.mustParseMajsoulTiles(majsoulTiles)
containRedFive := numRedFive > 0
if len(majsoulTiles) == 4 && meldTiles[0] < 27 && meldTiles[0]%9 == 4 {
// 杠5意味着一定有赤5
containRedFive = true
}
if isSelfKan {
calledTile = meldTiles[0]
// 用 msg.Type 判断是加杠还是暗杠
// 也可以通过是否有相关碰副露来判断是加杠还是暗杠
if msg.Type == majsoulMeldTypeMinkanOrKakan {
meldType = meldTypeKakan // 加杠
} else if msg.Type == majsoulMeldTypeAnkan {
meldType = meldTypeAnkan // 暗杠
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
}
return
}
var rawCalledTile string
for i, seat := range msg.Froms {
fromWho := d.parseWho(seat)
if fromWho != who {
rawCalledTile = majsoulTiles[i]
}
}
if rawCalledTile == "" {
panic("数据解析异常: 未找到 rawCalledTile")
}
calledTile, redFiveFromOthers := d.mustParseMajsoulTile(rawCalledTile)
if len(meldTiles) == 3 {
if meldTiles[0] == meldTiles[1] {
meldType = meldTypePon // 碰
} else {
meldType = meldTypeChi // 吃
sort.Ints(meldTiles)
}
} else if len(meldTiles) == 4 {
meldType = meldTypeMinkan // 大明杠
} else {
panic("鸣牌数据解析失败!")
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
RedFiveFromOthers: redFiveFromOthers,
}
return
}
func (d *majsoulRoundData) IsReach() bool {
return false
}
func (d *majsoulRoundData) ParseReach() (who int) {
return 0
}
func (d *majsoulRoundData) IsFuriten() bool {
return false |
func (d *majsoulRoundData) IsRoundWin() bool {
msg := d.msg
// ActionHule RecordHule
return msg.Hules != nil
}
func (d *majsoulRoundData) ParseRoundWin() (whos []int, points []int) {
msg := d.msg
for _, result := range msg.Hules {
who := d.parseWho(result.Seat)
whos = append(whos, d.parseWho(result.Seat))
point := result.PointRong
if result.Zimo {
if who == d.dealer {
point = 3 * result.PointZimoXian
} else {
point = result.PointZimoQin + 2*result.PointZimoXian
}
if d.playerNumber == 3 {
// 自摸损(一个子家)
point -= result.PointZimoXian
}
}
points = append(points, point)
}
return
}
func (d *majsoulRoundData) IsRyuukyoku() bool {
// TODO
// ActionLiuJu RecordLiuJu
return false
}
func (d *majsoulRoundData) ParseRyuukyoku() (type_ int, whos []int, points []int) {
// TODO
return
}
// 拔北宝牌
func (d *majsoulRoundData) IsNukiDora() bool {
msg := d.msg
// ActionBaBei RecordBaBei
return msg.Seat != nil && msg.Moqie != nil && msg.Tile == ""
}
func (d *majsoulRoundData) ParseNukiDora() (who int, isTsumogiri bool) {
msg := d.msg
return d.parseWho(*msg.Seat), *msg.Moqie
}
// 在最后处理该项
func (d *majsoulRoundData) IsNewDora() bool {
msg := d.msg
// ActionDealTile
return d.isNewDora(msg.Doras)
}
func (d *majsoulRoundData) ParseNewDora() (kanDoraIndicator int) {
msg := d.msg
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
return
} | } | random_line_split |
majsoul.go | package main
import (
"fmt"
"github.com/fatih/color"
"github.com/EndlessCheng/mahjong-helper/util"
"github.com/EndlessCheng/mahjong-helper/util/model"
"sort"
"time"
"github.com/EndlessCheng/mahjong-helper/platform/majsoul/proto/lq"
)
type majsoulMessage struct {
// 对应到服务器用户数据库中的ID,该值越小表示您的注册时间越早
AccountID int `json:"account_id"`
// 友人列表
Friends lq.FriendList `json:"friends"`
// 新获取到的牌谱基本信息列表
RecordBaseInfoList []*majsoulRecordBaseInfo `json:"record_list"`
// 分享的牌谱基本信息
SharedRecordBaseInfo *majsoulRecordBaseInfo `json:"shared_record_base_info"`
// 当前正在观看的牌谱的 UUID
CurrentRecordUUID string `json:"current_record_uuid"`
// 当前正在观看的牌谱的全部操作
RecordActions []*majsoulRecordAction `json:"record_actions"`
// 玩家在网页上的(点击)操作(网页响应了的)
RecordClickAction string `json:"record_click_action"`
RecordClickActionIndex int `json:"record_click_action_index"`
FastRecordTo int `json:"fast_record_to"` // 闭区间
// 观战
LiveBaseInfo *majsoulLiveRecordBaseInfo `json:"live_head"`
LiveFastAction *majsoulRecordAction `json:"live_fast_action"`
LiveAction *majsoulRecordAction `json:"live_action"`
// 座位变更
ChangeSeatTo *int `json:"change_seat_to"`
// 游戏重连时收到的数据
SyncGameActions []*majsoulRecordAction `json:"sync_game_actions"`
// ResAuthGame
// {"seat_list":[x,x,x,x],"is_game_start":false,"game_config":{"category":1,"mode":{"mode":1,"ai":true,"detail_rule":{"time_fixed":60,"time_add":0,"dora_count":3,"shiduan":1,"init_point":25000,"fandian":30000,"bianjietishi":true,"ai_level":1,"fanfu":1}},"meta":{"room_id":18269}},"ready_id_list":[0,0,0]}
IsGameStart *bool `json:"is_game_start"` // false=新游戏,true=重连
SeatList []int `json:"seat_list"`
ReadyIDList []int `json:"ready_id_list"`
GameConfig *majsoulGameConfig `json:"game_config"`
// NotifyPlayerLoadGameReady
//ReadyIDList []int `json:"ready_id_list"`
// ActionNewRound
// {"chang":0,"ju":0,"ben":0,"tiles":["1m","3m","7m","3p","6p","7p","6s","1z","1z","2z","3z","4z","7z"],"dora":"6m","scores":[25000,25000,25000,25000],"liqibang":0,"al":false,"md5":"","left_tile_count":69}
MD5 string `json:"md5"`
Chang *int `json:"chang"`
Ju *int `json:"ju"`
Ben *int `json:"ben"`
Tiles interface{} `json:"tiles"` // 一般情况下为 []interface{}, interface{} 即 string,但是暗杠的情况下,该值为一个 string
Dora string `json:"dora"`
// RecordNewRound
Tiles0 []string `json:"tiles0"`
Tiles1 []string `json:"tiles1"`
Tiles2 []string `json:"tiles2"`
Tiles3 []string `json:"tiles3"`
// ActionDealTile
// {"seat":1,"tile":"5m","left_tile_count":23,"operation":{"seat":1,"operation_list":[{"type":1}],"time_add":0,"time_fixed":60000},"zhenting":false}
// 他家暗杠后的摸牌
// {"seat":1,"left_tile_count":3,"doras":["7m","0p"],"zhenting":false}
Seat *int `json:"seat"`
Tile string `json:"tile"`
Doras []string `json:"doras"` // 暗杠摸牌了,同时翻出杠宝牌指示牌
LeftTileCount *int `json:"left_tile_count"`
// ActionDiscardTile
// {"seat":0,"tile":"5z","is_liqi":false,"moqie":true,"zhenting":false,"is_wliqi":false}
// {"seat":0,"tile":"1z","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":3,"combination":["1z|1z"]}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":false,"is_wliqi":false}
// 吃 碰 和
// {"seat":0,"tile":"6p","is_liqi":false,"operation":{"seat":1,"operation_list":[{"type":2,"combination":["7p|8p"]},{"type":3,"combination":["6p|6p"]},{"type":9}],"time_add":0,"time_fixed":60000},"moqie":false,"zhenting":true,"is_wliqi":false}
IsLiqi *bool `json:"is_liqi"`
IsWliqi *bool `json:"is_wliqi"`
Moqie *bool `json:"moqie"`
Operation *struct{} `json:"operation"`
// ActionChiPengGang || ActionAnGangAddGang
// 他家吃 {"seat":0,"type":0,"tiles":["2s","3s","4s"],"froms":[0,0,3],"zhenting":false}
// 他家碰 {"seat":1,"type":1,"tiles":["1z","1z","1z"],"froms":[1,1,0],"operation":{"seat":1,"operation_list":[{"type":1,"combination":["1z"]}],"time_add":0,"time_fixed":60000},"zhenting":false,"tingpais":[{"tile":"4m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]},{"tile":"7m","zhenting":false,"infos":[{"tile":"6s","haveyi":true},{"tile":"6p","haveyi":true}]}]}
// 他家大明杠 {"seat":2,"type":2,"tiles":["3z","3z","3z","3z"],"froms":[2,2,2,0],"zhenting":false}
// 他家加杠 {"seat":2,"type":2,"tiles":"3z"}
// 他家暗杠 {"seat":2,"type":3,"tiles":"3s"}
Type int `json:"type"`
Froms []int `json:"froms"`
// ActionLiqi
// ActionHule
Hules []struct {
Seat int `json:"seat"`
Zimo bool `json:"zimo"`
PointRong int `json:"point_rong"`
PointZimoQin int `json:"point_zimo_qin"`
PointZimoXian int `json:"point_zimo_xian"`
} `json:"hules"`
// ActionLiuJu
// {"liujumanguan":false,"players":[{"tingpai":true,"hand":["3s","3s","4s","5s","6s","1z","1z","7z","7z","7z"],"tings":[{"tile":"1z","haveyi":true},{"tile":"3s","haveyi":true}]},{"tingpai":false},{"tingpai":false},{"tingpai":true,"hand":["4m","0m","6m","6m","6m","4s","4s","4s","5s","7s"],"tings":[{"tile":"6s","haveyi":true}]}],"scores":[{"old_scores":[23000,29000,24000,24000],"delta_scores":[1500,-1500,-1500,1500]}],"gameend":false}
//Liujumanguan *bool `json:"liujumanguan"`
//Players *struct{ } `json:"players"`
//Gameend *bool `json:"gameend"`
// ActionBabei
}
const (
majsoulMeldTypeChi = iota
majsoulMeldTypePon
majsoulMeldTypeMinkanOrKakan
majsoulMeldTypeAnkan
)
type majsoulRoundData struct {
*roundData
originJSON string
msg *majsoulMessage
selfSeat int // 自家初始座位:0-第一局的东家 1-第一局的南家 2-第一局的西家 3-第一局的北家
}
func (d *majsoulRoundData) fatalParse(info string, msg string) {
panic(fmt.Sprintln(info, len(msg), msg, []byte(msg)))
}
func (d *majsoulRoundData) normalTiles(tiles interface{}) (majsoulTiles []string) {
_tiles, ok := tiles.([]interface{})
if !ok {
_tile, ok := tiles.(string)
if !ok {
panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
}
return []string{_tile}
}
majsoulTiles = make([]string, len(_tiles))
for i, _tile := range _tiles {
_t, ok := _tile.(string)
if !ok {
panic(fmt.Sprintln("[normalTiles] 解析错误", tiles))
}
majsoulTiles[i] = _t
}
return majsoulTiles
}
func (d *majsoulRoundData) parseWho(seat int) int {
// 转换成 0=自家, 1=下家, 2=对家, 3=上家
// 对三麻四麻均适用
who := (seat + d.dealer - d.roundNumber%4 + 4) % 4
return who
}
func (d *majsoulRoundData) mustParseMajsoulTile(humanTile string) (tile34 int, isRedFive bool) {
tile34, isRedFive, err := util.StrToTile34(humanTile)
if err != nil {
panic(err)
}
return
}
func (d *majsoulRoundData) mustParseMajsoulTiles(majsoulTiles []string) (tiles []int, numRedFive int) {
tiles = make([]int, len(majsoulTiles))
for i, majsoulTile := range majsoulTiles {
var isRedFive bool
tiles[i], isRedFive = d.mustParseMajsoulTile(majsoulTile)
if isRedFive {
numRedFive++
}
}
return
}
func (d *majsoulRoundData) isNewDora(doras []string) bool {
return len(doras) > len(d.doraIndicators)
}
func (d *majsoulRoundData) GetDataSourceType() int {
return dataSourceTypeMajsoul
}
func (d *majsoulRoundData) GetSelfSeat() int {
return d.selfSeat
}
func (d *majsoulRoundData) GetMessage() string {
return d.originJSON
}
func (d *majsoulRoundData) SkipMessage() bool {
msg := d.msg
// 没有账号 skip
if gameConf.currentActiveMajsoulAccountID == -1 {
return true
}
// TODO: 重构
if msg.SeatList != nil {
// 特判古役模式
isGuyiMode := msg.GameConfig.isGuyiMode()
util.SetConsiderOldYaku(isGuyiMode)
if isGuyiMode {
color.HiGreen("古役模式已开启")
time.Sleep(2 * time.Second)
}
} else {
// msg.SeatList 必须为 nil
if msg.ReadyIDList != nil {
// 打印准备信息
fmt.Printf("等待玩家准备 (%d/%d) %v\n", len(msg.ReadyIDList), d.playerNumber, msg.ReadyIDList)
}
}
return false
}
func (d *majsoulRoundData) IsLogin() bool {
msg := d.msg
return msg.AccountID > 0 || msg.SeatList != nil
}
func (d *majsoulRoundData) HandleLogin() {
msg := d.msg
if accountID := msg.AccountID; accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
if accountID != gameConf.currentActiveMajsoulAccountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
// 从对战 ID 列表中获取账号 ID
if seatList := msg.SeatList; seatList != nil {
// 尝试从中找到缓存账号 ID
for _, accountID := range seatList {
if accountID > 0 && gameConf.isIDExist(accountID) {
// 找到了,更新当前使用的账号 ID
if gameConf.currentActiveMajsoulAccountID != accountID {
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
}
return
}
}
// 未找到缓存 ID
if gameConf.currentActiveMajsoulAccountID > 0 {
color.HiRed("尚未获取到您的账号 ID,请您刷新网页,或开启一局人机对战(错误信息:您的账号 ID %d 不在对战列表 %v 中)", gameConf.currentActiveMajsoulAccountID, msg.SeatList)
return
}
// 判断是否为人机对战,若为人机对战,则获取账号 ID
if !util.InInts(0, msg.SeatList) {
return
}
for _, accountID := range msg.SeatList {
if accountID > 0 {
gameConf.addMajsoulAccountID(accountID)
printAccountInfo(accountID)
gameConf.setMajsoulAccountID(accountID)
return
}
}
}
}
func (d *majsoulRoundData) IsInit() bool {
msg := d.msg
// ResAuthGame || ActionNewRound RecordNewRound
return msg.IsGameStart != nil || msg.MD5 != ""
}
func (d *majsoulRoundData) ParseInit() (roundNumber int, benNumber int, dealer int, doraIndicators []int, handTiles []int, numRedFives []int) {
msg := d.msg
if playerNumber := len(msg.SeatList); playerNumber >= 3 {
d.playerNumber = playerNumber
// 获取自家初始座位:0-第一局的东家 1-第一局的南家 2-第一局的西家 3-第一局的北家
for i, accountID := range msg.SeatList {
if accountID == gameConf.currentActiveMajsoulAccountID {
d.selfSeat = i
break
}
}
// dealer: 0=自家, 1=下家, 2=对家, 3=上家
dealer = (4 - d.selfSeat) % 4
return
} else if len(msg.Tiles2) > 0 {
if len(msg.Tiles3) > 0 {
d.playerNumber = 4
} else {
d.playerNumber = 3
}
}
dealer = -1
roundNumber = 4*(*msg.Chang) + *msg.Ju
benNumber = *msg.Ben
if msg.Dora != "" {
doraIndicator, _ := d.mustParseMajsoulTile(msg.Dora)
doraIndicators = append(doraIndicators, doraIndicator)
} else {
for _, dora := range msg.Doras {
doraIndicator, _ := d.mustParseMajsoulTile(dora)
doraIndicators = append(doraIndicators, doraIndicator)
}
}
numRedFives = make([]int, 3)
var majsoulTiles []string
if msg.Tiles != nil { // 实战
majsoulTiles = d.normalTiles(msg.Tiles)
} else { // 牌谱、观战
majsoulTiles = [][]string{msg.Tiles0, msg.Tiles1, msg.Tiles2, msg.Tiles3}[d.selfSeat]
}
for _, majsoulTile := range majsoulTiles {
tile, isRedFive := d.mustParseMajsoulTile(majsoulTile)
handTiles = append(handTiles, tile)
if isRedFive {
numRedFives[tile/9]++
}
}
return
}
func (d *majsoulRoundData) IsSelfDraw() bool {
msg := d.msg
// ActionDealTile RecordDealTile
return msg.Seat != nil && msg.Tile != "" && msg.Moqie == nil && d.parseWho(*msg.Seat) == 0
}
func (d *majsoulRoundData) ParseSelfDraw() (tile int, isRedFive bool, kanDoraIndicator int) {
msg := d.msg
tile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsDiscard() bool {
msg := d.msg
// ActionDiscardTile RecordDiscardTile
return msg.IsLiqi != nil
}
func (d *majsoulRoundData) ParseDiscard() (who int, discardTile int, isRedFive bool, isTsumogiri bool, isReach bool, canBeMeld bool, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
discardTile, isRedFive = d.mustParseMajsoulTile(msg.Tile)
isTsumogiri = *msg.Moqie
isReach = *msg.IsLiqi
if msg.IsWliqi != nil && !isReach { // 兼容雀魂早期牌谱(无 IsWliqi 字段)
isReach = *msg.IsWliqi
}
canBeMeld = msg.Operation != nil // 注意:观战模式下无此选项
kanDoraIndicator = | ewDora(msg.Doras) {
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
return
}
func (d *majsoulRoundData) IsOpen() bool {
msg := d.msg
// ActionChiPengGang RecordChiPengGang || ActionAnGangAddGang RecordAnGangAddGang
return msg.Tiles != nil && len(d.normalTiles(msg.Tiles)) <= 4
}
func (d *majsoulRoundData) ParseOpen() (who int, meld *model.Meld, kanDoraIndicator int) {
msg := d.msg
who = d.parseWho(*msg.Seat)
kanDoraIndicator = -1
if d.isNewDora(msg.Doras) { // 暗杠(有时会在玩家摸牌后才发送 doras,可能是因为需要考虑抢暗杠的情况)
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
}
var meldType, calledTile int
majsoulTiles := d.normalTiles(msg.Tiles)
isSelfKan := len(majsoulTiles) == 1 // 自家加杠或暗杠
if isSelfKan {
majsoulTile := majsoulTiles[0]
majsoulTiles = []string{majsoulTile, majsoulTile, majsoulTile, majsoulTile}
}
meldTiles, numRedFive := d.mustParseMajsoulTiles(majsoulTiles)
containRedFive := numRedFive > 0
if len(majsoulTiles) == 4 && meldTiles[0] < 27 && meldTiles[0]%9 == 4 {
// 杠5意味着一定有赤5
containRedFive = true
}
if isSelfKan {
calledTile = meldTiles[0]
// 用 msg.Type 判断是加杠还是暗杠
// 也可以通过是否有相关碰副露来判断是加杠还是暗杠
if msg.Type == majsoulMeldTypeMinkanOrKakan {
meldType = meldTypeKakan // 加杠
} else if msg.Type == majsoulMeldTypeAnkan {
meldType = meldTypeAnkan // 暗杠
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
}
return
}
var rawCalledTile string
for i, seat := range msg.Froms {
fromWho := d.parseWho(seat)
if fromWho != who {
rawCalledTile = majsoulTiles[i]
}
}
if rawCalledTile == "" {
panic("数据解析异常: 未找到 rawCalledTile")
}
calledTile, redFiveFromOthers := d.mustParseMajsoulTile(rawCalledTile)
if len(meldTiles) == 3 {
if meldTiles[0] == meldTiles[1] {
meldType = meldTypePon // 碰
} else {
meldType = meldTypeChi // 吃
sort.Ints(meldTiles)
}
} else if len(meldTiles) == 4 {
meldType = meldTypeMinkan // 大明杠
} else {
panic("鸣牌数据解析失败!")
}
meld = &model.Meld{
MeldType: meldType,
Tiles: meldTiles,
CalledTile: calledTile,
ContainRedFive: containRedFive,
RedFiveFromOthers: redFiveFromOthers,
}
return
}
func (d *majsoulRoundData) IsReach() bool {
return false
}
func (d *majsoulRoundData) ParseReach() (who int) {
return 0
}
func (d *majsoulRoundData) IsFuriten() bool {
return false
}
func (d *majsoulRoundData) IsRoundWin() bool {
msg := d.msg
// ActionHule RecordHule
return msg.Hules != nil
}
func (d *majsoulRoundData) ParseRoundWin() (whos []int, points []int) {
msg := d.msg
for _, result := range msg.Hules {
who := d.parseWho(result.Seat)
whos = append(whos, d.parseWho(result.Seat))
point := result.PointRong
if result.Zimo {
if who == d.dealer {
point = 3 * result.PointZimoXian
} else {
point = result.PointZimoQin + 2*result.PointZimoXian
}
if d.playerNumber == 3 {
// 自摸损(一个子家)
point -= result.PointZimoXian
}
}
points = append(points, point)
}
return
}
func (d *majsoulRoundData) IsRyuukyoku() bool {
// TODO
// ActionLiuJu RecordLiuJu
return false
}
func (d *majsoulRoundData) ParseRyuukyoku() (type_ int, whos []int, points []int) {
// TODO
return
}
// 拔北宝牌
func (d *majsoulRoundData) IsNukiDora() bool {
msg := d.msg
// ActionBaBei RecordBaBei
return msg.Seat != nil && msg.Moqie != nil && msg.Tile == ""
}
func (d *majsoulRoundData) ParseNukiDora() (who int, isTsumogiri bool) {
msg := d.msg
return d.parseWho(*msg.Seat), *msg.Moqie
}
// 在最后处理该项
func (d *majsoulRoundData) IsNewDora() bool {
msg := d.msg
// ActionDealTile
return d.isNewDora(msg.Doras)
}
func (d *majsoulRoundData) ParseNewDora() (kanDoraIndicator int) {
msg := d.msg
kanDoraIndicator, _ = d.mustParseMajsoulTile(msg.Doras[len(msg.Doras)-1])
return
}
| -1
if d.isN | identifier_name |
v0.rs | use rustc::hir;
use rustc::hir::def_id::{CrateNum, DefId};
use rustc::hir::map::{DefPathData, DisambiguatedDefPathData};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Instance};
use rustc::ty::print::{Printer, Print};
use rustc::ty::subst::{GenericArg, Subst, GenericArgKind};
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_target::spec::abi::Abi;
use syntax::ast::{IntTy, UintTy, FloatTy};
use std::fmt::Write;
use std::ops::Range;
pub(super) fn mangle(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
instantiating_crate: Option<CrateNum>,
) -> String {
let def_id = instance.def_id();
// FIXME(eddyb) this should ideally not be needed.
let substs =
tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
let prefix = "_R";
let mut cx = SymbolMangler {
tcx,
compress: Some(Box::new(CompressionCaches {
start_offset: prefix.len(),
paths: FxHashMap::default(),
types: FxHashMap::default(),
consts: FxHashMap::default(),
})),
binders: vec![],
out: String::from(prefix),
};
cx = if instance.is_vtable_shim() {
cx.path_append_ns(
|cx| cx.print_def_path(def_id, substs),
'S',
0,
"",
).unwrap()
} else {
cx.print_def_path(def_id, substs).unwrap()
};
if let Some(instantiating_crate) = instantiating_crate {
cx = cx.print_def_path(instantiating_crate.as_def_id(), &[]).unwrap();
}
cx.out
}
struct CompressionCaches<'tcx> {
// The length of the prefix in `out` (e.g. 2 for `_R`).
start_offset: usize,
// The values are start positions in `out`, in bytes.
paths: FxHashMap<(DefId, &'tcx [GenericArg<'tcx>]), usize>,
types: FxHashMap<Ty<'tcx>, usize>,
consts: FxHashMap<&'tcx ty::Const<'tcx>, usize>,
}
struct BinderLevel {
/// The range of distances from the root of what's
/// being printed, to the lifetimes in a binder.
/// Specifically, a `BrAnon(i)` lifetime has depth
/// `lifetime_depths.start + i`, going away from the
/// the root and towards its use site, as `i` increases.
/// This is used to flatten rustc's pairing of `BrAnon`
/// (intra-binder disambiguation) with a `DebruijnIndex`
/// (binder addressing), to "true" de Bruijn indices,
/// by subtracting the depth of a certain lifetime, from
/// the innermost depth at its use site.
lifetime_depths: Range<u32>,
}
struct SymbolMangler<'tcx> {
tcx: TyCtxt<'tcx>,
compress: Option<Box<CompressionCaches<'tcx>>>,
binders: Vec<BinderLevel>,
out: String,
}
impl SymbolMangler<'tcx> {
fn push(&mut self, s: &str) {
self.out.push_str(s);
}
/// Push a `_`-terminated base 62 integer, using the format
/// specified in the RFC as `<base-62-number>`, that is:
/// * `x = 0` is encoded as just the `"_"` terminator
/// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`,
/// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc.
fn push_integer_62(&mut self, x: u64) {
if let Some(x) = x.checked_sub(1) {
base_n::push_str(x as u128, 62, &mut self.out);
}
self.push("_");
}
/// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is:
/// * `x = 0` is encoded as `""` (nothing)
/// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)`
/// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc.
fn push_opt_integer_62(&mut self, tag: &str, x: u64) {
if let Some(x) = x.checked_sub(1) {
self.push(tag);
self.push_integer_62(x);
}
}
fn push_disambiguator(&mut self, dis: u64) {
self.push_opt_integer_62("s", dis);
}
fn push_ident(&mut self, ident: &str) |
fn path_append_ns(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self, !>,
ns: char,
disambiguator: u64,
name: &str,
) -> Result<Self, !> {
self.push("N");
self.out.push(ns);
self = print_prefix(self)?;
self.push_disambiguator(disambiguator as u64);
self.push_ident(name);
Ok(self)
}
fn print_backref(mut self, i: usize) -> Result<Self, !> {
self.push("B");
self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64);
Ok(self)
}
fn in_binder<T>(
mut self,
value: &ty::Binder<T>,
print_value: impl FnOnce(Self, &T) -> Result<Self, !>
) -> Result<Self, !>
where T: TypeFoldable<'tcx>
{
let regions = if value.has_late_bound_regions() {
self.tcx.collect_referenced_late_bound_regions(value)
} else {
FxHashSet::default()
};
let mut lifetime_depths =
self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i);
let lifetimes = regions.into_iter().map(|br| {
match br {
ty::BrAnon(i) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
i - 1
},
_ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
}
}).max().map_or(0, |max| max + 1);
self.push_opt_integer_62("G", lifetimes as u64);
lifetime_depths.end += lifetimes;
self.binders.push(BinderLevel { lifetime_depths });
self = print_value(self, value.skip_binder())?;
self.binders.pop();
Ok(self)
}
}
impl Printer<'tcx> for SymbolMangler<'tcx> {
type Error = !;
type Path = Self;
type Region = Self;
type Type = Self;
type DynExistential = Self;
type Const = Self;
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn print_def_path(
mut self,
def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) {
return self.print_backref(i);
}
let start = self.out.len();
self = self.default_print_def_path(def_id, substs)?;
// Only cache paths that do not refer to an enclosing
// binder (which would change depending on context).
if !substs.iter().any(|k| k.has_escaping_bound_vars()) {
if let Some(c) = &mut self.compress {
c.paths.insert((def_id, substs), start);
}
}
Ok(self)
}
fn print_impl_path(
self,
impl_def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
mut self_ty: Ty<'tcx>,
mut impl_trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
let key = self.tcx.def_key(impl_def_id);
let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
let mut param_env = self.tcx.param_env(impl_def_id)
.with_reveal_all();
if !substs.is_empty() {
param_env = param_env.subst(self.tcx, substs);
}
match &mut impl_trait_ref {
Some(impl_trait_ref) => {
assert_eq!(impl_trait_ref.self_ty(), self_ty);
*impl_trait_ref =
self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref);
self_ty = impl_trait_ref.self_ty();
}
None => {
self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty);
}
}
self.path_append_impl(
|cx| cx.print_def_path(parent_def_id, &[]),
&key.disambiguated_data,
self_ty,
impl_trait_ref,
)
}
fn print_region(
mut self,
region: ty::Region<'_>,
) -> Result<Self::Region, Self::Error> {
let i = match *region {
// Erased lifetimes use the index 0, for a
// shorter mangling of `L_`.
ty::ReErased => 0,
// Late-bound lifetimes use indices starting at 1,
// see `BinderLevel` for more details.
ty::ReLateBound(debruijn, ty::BrAnon(i)) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
let i = i - 1;
let binder = &self.binders[self.binders.len() - 1 - debruijn.index()];
let depth = binder.lifetime_depths.start + i;
1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth)
}
_ => bug!("symbol_names: non-erased region `{:?}`", region),
};
self.push("L");
self.push_integer_62(i as u64);
Ok(self)
}
fn print_type(
mut self,
ty: Ty<'tcx>,
) -> Result<Self::Type, Self::Error> {
// Basic types, never cached (single-character).
let basic_type = match ty.kind {
ty::Bool => "b",
ty::Char => "c",
ty::Str => "e",
ty::Tuple(_) if ty.is_unit() => "u",
ty::Int(IntTy::I8) => "a",
ty::Int(IntTy::I16) => "s",
ty::Int(IntTy::I32) => "l",
ty::Int(IntTy::I64) => "x",
ty::Int(IntTy::I128) => "n",
ty::Int(IntTy::Isize) => "i",
ty::Uint(UintTy::U8) => "h",
ty::Uint(UintTy::U16) => "t",
ty::Uint(UintTy::U32) => "m",
ty::Uint(UintTy::U64) => "y",
ty::Uint(UintTy::U128) => "o",
ty::Uint(UintTy::Usize) => "j",
ty::Float(FloatTy::F32) => "f",
ty::Float(FloatTy::F64) => "d",
ty::Never => "z",
// Placeholders (should be demangled as `_`).
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => "p",
_ => "",
};
if !basic_type.is_empty() {
self.push(basic_type);
return Ok(self);
}
if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) {
return self.print_backref(i);
}
let start = self.out.len();
match ty.kind {
// Basic types, handled above.
ty::Bool | ty::Char | ty::Str |
ty::Int(_) | ty::Uint(_) | ty::Float(_) |
ty::Never => unreachable!(),
ty::Tuple(_) if ty.is_unit() => unreachable!(),
// Placeholders, also handled as part of basic types.
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => unreachable!(),
ty::Ref(r, ty, mutbl) => {
self.push(match mutbl {
hir::MutImmutable => "R",
hir::MutMutable => "Q",
});
if *r != ty::ReErased {
self = r.print(self)?;
}
self = ty.print(self)?;
}
ty::RawPtr(mt) => {
self.push(match mt.mutbl {
hir::MutImmutable => "P",
hir::MutMutable => "O",
});
self = mt.ty.print(self)?;
}
ty::Array(ty, len) => {
self.push("A");
self = ty.print(self)?;
self = self.print_const(len)?;
}
ty::Slice(ty) => {
self.push("S");
self = ty.print(self)?;
}
ty::Tuple(tys) => {
self.push("T");
for ty in tys.iter().map(|k| k.expect_ty()) {
self = ty.print(self)?;
}
self.push("E");
}
// Mangle all nominal types as paths.
ty::Adt(&ty::AdtDef { did: def_id, .. }, substs) |
ty::FnDef(def_id, substs) |
ty::Opaque(def_id, substs) |
ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::Closure(def_id, substs) |
ty::Generator(def_id, substs, _) => {
self = self.print_def_path(def_id, substs)?;
}
ty::Foreign(def_id) => {
self = self.print_def_path(def_id, &[])?;
}
ty::FnPtr(sig) => {
self.push("F");
self = self.in_binder(&sig, |mut cx, sig| {
if sig.unsafety == hir::Unsafety::Unsafe {
cx.push("U");
}
match sig.abi {
Abi::Rust => {}
Abi::C => cx.push("KC"),
abi => {
cx.push("K");
let name = abi.name();
if name.contains('-') {
cx.push_ident(&name.replace('-', "_"));
} else {
cx.push_ident(name);
}
}
}
for &ty in sig.inputs() {
cx = ty.print(cx)?;
}
if sig.c_variadic {
cx.push("v");
}
cx.push("E");
sig.output().print(cx)
})?;
}
ty::Dynamic(predicates, r) => {
self.push("D");
self = self.in_binder(&predicates, |cx, predicates| {
cx.print_dyn_existential(predicates)
})?;
self = r.print(self)?;
}
ty::GeneratorWitness(_) => {
bug!("symbol_names: unexpected `GeneratorWitness`")
}
}
// Only cache types that do not refer to an enclosing
// binder (which would change depending on context).
if !ty.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.types.insert(ty, start);
}
}
Ok(self)
}
fn print_dyn_existential(
mut self,
predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
) -> Result<Self::DynExistential, Self::Error> {
for predicate in predicates {
match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => {
// Use a type that can't appear in defaults of type parameters.
let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0));
let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self);
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
ty::ExistentialPredicate::Projection(projection) => {
let name = self.tcx.associated_item(projection.item_def_id).ident;
self.push("p");
self.push_ident(&name.as_str());
self = projection.ty.print(self)?;
}
ty::ExistentialPredicate::AutoTrait(def_id) => {
self = self.print_def_path(def_id, &[])?;
}
}
}
self.push("E");
Ok(self)
}
fn print_const(
mut self,
ct: &'tcx ty::Const<'tcx>,
) -> Result<Self::Const, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) {
return self.print_backref(i);
}
let start = self.out.len();
match ct.ty.kind {
ty::Uint(_) => {}
_ => {
bug!("symbol_names: unsupported constant of type `{}` ({:?})",
ct.ty, ct);
}
}
self = ct.ty.print(self)?;
if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) {
let _ = write!(self.out, "{:x}_", bits);
} else {
// NOTE(eddyb) despite having the path, we need to
// encode a placeholder, as the path could refer
// back to e.g. an `impl` using the constant.
self.push("p");
}
// Only cache consts that do not refer to an enclosing
// binder (which would change depending on context).
if !ct.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.consts.insert(ct, start);
}
}
Ok(self)
}
fn path_crate(
mut self,
cnum: CrateNum,
) -> Result<Self::Path, Self::Error> {
self.push("C");
let fingerprint = self.tcx.crate_disambiguator(cnum).to_fingerprint();
self.push_disambiguator(fingerprint.to_smaller_hash());
let name = self.tcx.original_crate_name(cnum).as_str();
self.push_ident(&name);
Ok(self)
}
fn path_qualified(
mut self,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
assert!(trait_ref.is_some());
let trait_ref = trait_ref.unwrap();
self.push("Y");
self = self_ty.print(self)?;
self.print_def_path(trait_ref.def_id, trait_ref.substs)
}
fn path_append_impl(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
self.push(match trait_ref {
Some(_) => "X",
None => "M",
});
self.push_disambiguator(disambiguated_data.disambiguator as u64);
self = print_prefix(self)?;
self = self_ty.print(self)?;
if let Some(trait_ref) = trait_ref {
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
Ok(self)
}
fn path_append(
self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
) -> Result<Self::Path, Self::Error> {
let ns = match disambiguated_data.data {
// Uppercase categories are more stable than lowercase ones.
DefPathData::TypeNs(_) => 't',
DefPathData::ValueNs(_) => 'v',
DefPathData::ClosureExpr => 'C',
DefPathData::Ctor => 'c',
DefPathData::AnonConst => 'k',
DefPathData::ImplTrait => 'i',
// These should never show up as `path_append` arguments.
DefPathData::CrateRoot
| DefPathData::Misc
| DefPathData::Impl
| DefPathData::MacroNs(_)
| DefPathData::LifetimeNs(_)
| DefPathData::GlobalMetaData(_) => {
bug!("symbol_names: unexpected DefPathData: {:?}", disambiguated_data.data)
}
};
let name = disambiguated_data.data.get_opt_name().map(|s| s.as_str());
self.path_append_ns(
print_prefix,
ns,
disambiguated_data.disambiguator as u64,
name.as_ref().map_or("", |s| &s[..])
)
}
fn path_generic_args(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
args: &[GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
// Don't print any regions if they're all erased.
let print_regions = args.iter().any(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(r) => *r != ty::ReErased,
_ => false,
}
});
let args = args.iter().cloned().filter(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(_) => print_regions,
_ => true,
}
});
if args.clone().next().is_none() {
return print_prefix(self);
}
self.push("I");
self = print_prefix(self)?;
for arg in args {
match arg.unpack() {
GenericArgKind::Lifetime(lt) => {
self = lt.print(self)?;
}
GenericArgKind::Type(ty) => {
self = ty.print(self)?;
}
GenericArgKind::Const(c) => {
self.push("K");
// FIXME(const_generics) implement `ty::print::Print` on `ty::Const`.
// self = c.print(self)?;
self = self.print_const(c)?;
}
}
}
self.push("E");
Ok(self)
}
}
| {
let mut use_punycode = false;
for b in ident.bytes() {
match b {
b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {}
0x80..=0xff => use_punycode = true,
_ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident),
}
}
let punycode_string;
let ident = if use_punycode {
self.push("u");
// FIXME(eddyb) we should probably roll our own punycode implementation.
let mut punycode_bytes = match ::punycode::encode(ident) {
Ok(s) => s.into_bytes(),
Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident),
};
// Replace `-` with `_`.
if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') {
*c = b'_';
}
// FIXME(eddyb) avoid rechecking UTF-8 validity.
punycode_string = String::from_utf8(punycode_bytes).unwrap();
&punycode_string
} else {
ident
};
let _ = write!(self.out, "{}", ident.len());
// Write a separating `_` if necessary (leading digit or `_`).
match ident.chars().next() {
Some('_') | Some('0'..='9') => {
self.push("_");
}
_ => {}
}
self.push(ident);
} | identifier_body |
v0.rs | use rustc::hir;
use rustc::hir::def_id::{CrateNum, DefId};
use rustc::hir::map::{DefPathData, DisambiguatedDefPathData};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Instance};
use rustc::ty::print::{Printer, Print};
use rustc::ty::subst::{GenericArg, Subst, GenericArgKind};
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_target::spec::abi::Abi;
use syntax::ast::{IntTy, UintTy, FloatTy};
use std::fmt::Write;
use std::ops::Range;
pub(super) fn mangle(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
instantiating_crate: Option<CrateNum>,
) -> String {
let def_id = instance.def_id();
// FIXME(eddyb) this should ideally not be needed.
let substs =
tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
let prefix = "_R";
let mut cx = SymbolMangler {
tcx,
compress: Some(Box::new(CompressionCaches {
start_offset: prefix.len(),
paths: FxHashMap::default(),
types: FxHashMap::default(),
consts: FxHashMap::default(),
})),
binders: vec![],
out: String::from(prefix),
};
cx = if instance.is_vtable_shim() {
cx.path_append_ns(
|cx| cx.print_def_path(def_id, substs),
'S',
0,
"",
).unwrap()
} else {
cx.print_def_path(def_id, substs).unwrap()
};
if let Some(instantiating_crate) = instantiating_crate {
cx = cx.print_def_path(instantiating_crate.as_def_id(), &[]).unwrap();
}
cx.out
}
struct CompressionCaches<'tcx> {
// The length of the prefix in `out` (e.g. 2 for `_R`).
start_offset: usize,
// The values are start positions in `out`, in bytes.
paths: FxHashMap<(DefId, &'tcx [GenericArg<'tcx>]), usize>,
types: FxHashMap<Ty<'tcx>, usize>,
consts: FxHashMap<&'tcx ty::Const<'tcx>, usize>,
}
struct BinderLevel {
/// The range of distances from the root of what's
/// being printed, to the lifetimes in a binder.
/// Specifically, a `BrAnon(i)` lifetime has depth
/// `lifetime_depths.start + i`, going away from the
/// the root and towards its use site, as `i` increases.
/// This is used to flatten rustc's pairing of `BrAnon`
/// (intra-binder disambiguation) with a `DebruijnIndex`
/// (binder addressing), to "true" de Bruijn indices,
/// by subtracting the depth of a certain lifetime, from
/// the innermost depth at its use site.
lifetime_depths: Range<u32>,
}
struct SymbolMangler<'tcx> {
tcx: TyCtxt<'tcx>,
compress: Option<Box<CompressionCaches<'tcx>>>,
binders: Vec<BinderLevel>,
out: String,
}
impl SymbolMangler<'tcx> {
fn push(&mut self, s: &str) {
self.out.push_str(s);
}
/// Push a `_`-terminated base 62 integer, using the format
/// specified in the RFC as `<base-62-number>`, that is:
/// * `x = 0` is encoded as just the `"_"` terminator
/// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`,
/// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc.
fn push_integer_62(&mut self, x: u64) {
if let Some(x) = x.checked_sub(1) {
base_n::push_str(x as u128, 62, &mut self.out);
}
self.push("_");
}
/// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is:
/// * `x = 0` is encoded as `""` (nothing)
/// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)`
/// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc.
fn push_opt_integer_62(&mut self, tag: &str, x: u64) {
if let Some(x) = x.checked_sub(1) {
self.push(tag);
self.push_integer_62(x);
}
}
fn push_disambiguator(&mut self, dis: u64) {
self.push_opt_integer_62("s", dis);
}
fn push_ident(&mut self, ident: &str) {
let mut use_punycode = false;
for b in ident.bytes() {
match b {
b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {}
0x80..=0xff => use_punycode = true,
_ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident),
}
}
let punycode_string;
let ident = if use_punycode {
self.push("u");
// FIXME(eddyb) we should probably roll our own punycode implementation.
let mut punycode_bytes = match ::punycode::encode(ident) {
Ok(s) => s.into_bytes(),
Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident),
};
// Replace `-` with `_`.
if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') {
*c = b'_';
}
// FIXME(eddyb) avoid rechecking UTF-8 validity.
punycode_string = String::from_utf8(punycode_bytes).unwrap();
&punycode_string
} else {
ident
};
let _ = write!(self.out, "{}", ident.len());
// Write a separating `_` if necessary (leading digit or `_`).
match ident.chars().next() {
Some('_') | Some('0'..='9') => {
self.push("_");
}
_ => {}
}
self.push(ident);
}
fn path_append_ns(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self, !>,
ns: char,
disambiguator: u64,
name: &str,
) -> Result<Self, !> {
self.push("N");
self.out.push(ns);
self = print_prefix(self)?;
self.push_disambiguator(disambiguator as u64);
self.push_ident(name);
Ok(self)
}
fn print_backref(mut self, i: usize) -> Result<Self, !> {
self.push("B");
self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64);
Ok(self)
}
fn in_binder<T>(
mut self,
value: &ty::Binder<T>,
print_value: impl FnOnce(Self, &T) -> Result<Self, !>
) -> Result<Self, !>
where T: TypeFoldable<'tcx>
{
let regions = if value.has_late_bound_regions() {
self.tcx.collect_referenced_late_bound_regions(value)
} else {
FxHashSet::default()
};
let mut lifetime_depths =
self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i);
let lifetimes = regions.into_iter().map(|br| {
match br {
ty::BrAnon(i) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
i - 1
},
_ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
}
}).max().map_or(0, |max| max + 1);
self.push_opt_integer_62("G", lifetimes as u64);
lifetime_depths.end += lifetimes;
self.binders.push(BinderLevel { lifetime_depths });
self = print_value(self, value.skip_binder())?;
self.binders.pop();
Ok(self)
}
}
impl Printer<'tcx> for SymbolMangler<'tcx> {
type Error = !;
type Path = Self;
type Region = Self;
type Type = Self;
type DynExistential = Self;
type Const = Self;
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn print_def_path(
mut self,
def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) {
return self.print_backref(i);
}
let start = self.out.len();
self = self.default_print_def_path(def_id, substs)?;
// Only cache paths that do not refer to an enclosing
// binder (which would change depending on context).
if !substs.iter().any(|k| k.has_escaping_bound_vars()) {
if let Some(c) = &mut self.compress {
c.paths.insert((def_id, substs), start);
}
}
Ok(self)
}
fn print_impl_path(
self,
impl_def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
mut self_ty: Ty<'tcx>,
mut impl_trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
let key = self.tcx.def_key(impl_def_id);
let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
let mut param_env = self.tcx.param_env(impl_def_id)
.with_reveal_all();
if !substs.is_empty() {
param_env = param_env.subst(self.tcx, substs);
}
match &mut impl_trait_ref {
Some(impl_trait_ref) => {
assert_eq!(impl_trait_ref.self_ty(), self_ty);
*impl_trait_ref =
self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref);
self_ty = impl_trait_ref.self_ty();
}
None => {
self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty);
}
}
self.path_append_impl(
|cx| cx.print_def_path(parent_def_id, &[]),
&key.disambiguated_data,
self_ty,
impl_trait_ref,
)
}
fn print_region(
mut self,
region: ty::Region<'_>,
) -> Result<Self::Region, Self::Error> {
let i = match *region {
// Erased lifetimes use the index 0, for a
// shorter mangling of `L_`.
ty::ReErased => 0,
// Late-bound lifetimes use indices starting at 1,
// see `BinderLevel` for more details.
ty::ReLateBound(debruijn, ty::BrAnon(i)) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
let i = i - 1;
let binder = &self.binders[self.binders.len() - 1 - debruijn.index()];
let depth = binder.lifetime_depths.start + i;
1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth)
}
_ => bug!("symbol_names: non-erased region `{:?}`", region),
};
self.push("L");
self.push_integer_62(i as u64);
Ok(self)
}
fn print_type(
mut self,
ty: Ty<'tcx>,
) -> Result<Self::Type, Self::Error> {
// Basic types, never cached (single-character).
let basic_type = match ty.kind {
ty::Bool => "b",
ty::Char => "c",
ty::Str => "e",
ty::Tuple(_) if ty.is_unit() => "u",
ty::Int(IntTy::I8) => "a",
ty::Int(IntTy::I16) => "s",
ty::Int(IntTy::I32) => "l",
ty::Int(IntTy::I64) => "x",
ty::Int(IntTy::I128) => "n",
ty::Int(IntTy::Isize) => "i",
ty::Uint(UintTy::U8) => "h",
ty::Uint(UintTy::U16) => "t",
ty::Uint(UintTy::U32) => "m",
ty::Uint(UintTy::U64) => "y",
ty::Uint(UintTy::U128) => "o",
ty::Uint(UintTy::Usize) => "j",
ty::Float(FloatTy::F32) => "f",
ty::Float(FloatTy::F64) => "d",
ty::Never => "z",
// Placeholders (should be demangled as `_`).
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => "p",
_ => "",
};
if !basic_type.is_empty() {
self.push(basic_type);
return Ok(self);
}
if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) {
return self.print_backref(i);
}
let start = self.out.len();
match ty.kind {
// Basic types, handled above.
ty::Bool | ty::Char | ty::Str |
ty::Int(_) | ty::Uint(_) | ty::Float(_) |
ty::Never => unreachable!(),
ty::Tuple(_) if ty.is_unit() => unreachable!(),
// Placeholders, also handled as part of basic types.
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => unreachable!(),
ty::Ref(r, ty, mutbl) => {
self.push(match mutbl {
hir::MutImmutable => "R",
hir::MutMutable => "Q",
});
if *r != ty::ReErased {
self = r.print(self)?;
}
self = ty.print(self)?;
}
ty::RawPtr(mt) => {
self.push(match mt.mutbl {
hir::MutImmutable => "P",
hir::MutMutable => "O",
});
self = mt.ty.print(self)?;
}
ty::Array(ty, len) => {
self.push("A");
self = ty.print(self)?;
self = self.print_const(len)?;
}
ty::Slice(ty) => {
self.push("S");
self = ty.print(self)?;
}
ty::Tuple(tys) => {
self.push("T");
for ty in tys.iter().map(|k| k.expect_ty()) {
self = ty.print(self)?;
}
self.push("E");
}
// Mangle all nominal types as paths.
ty::Adt(&ty::AdtDef { did: def_id, .. }, substs) |
ty::FnDef(def_id, substs) |
ty::Opaque(def_id, substs) |
ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::Closure(def_id, substs) |
ty::Generator(def_id, substs, _) => {
self = self.print_def_path(def_id, substs)?; | self = self.print_def_path(def_id, &[])?;
}
ty::FnPtr(sig) => {
self.push("F");
self = self.in_binder(&sig, |mut cx, sig| {
if sig.unsafety == hir::Unsafety::Unsafe {
cx.push("U");
}
match sig.abi {
Abi::Rust => {}
Abi::C => cx.push("KC"),
abi => {
cx.push("K");
let name = abi.name();
if name.contains('-') {
cx.push_ident(&name.replace('-', "_"));
} else {
cx.push_ident(name);
}
}
}
for &ty in sig.inputs() {
cx = ty.print(cx)?;
}
if sig.c_variadic {
cx.push("v");
}
cx.push("E");
sig.output().print(cx)
})?;
}
ty::Dynamic(predicates, r) => {
self.push("D");
self = self.in_binder(&predicates, |cx, predicates| {
cx.print_dyn_existential(predicates)
})?;
self = r.print(self)?;
}
ty::GeneratorWitness(_) => {
bug!("symbol_names: unexpected `GeneratorWitness`")
}
}
// Only cache types that do not refer to an enclosing
// binder (which would change depending on context).
if !ty.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.types.insert(ty, start);
}
}
Ok(self)
}
fn print_dyn_existential(
mut self,
predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
) -> Result<Self::DynExistential, Self::Error> {
for predicate in predicates {
match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => {
// Use a type that can't appear in defaults of type parameters.
let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0));
let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self);
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
ty::ExistentialPredicate::Projection(projection) => {
let name = self.tcx.associated_item(projection.item_def_id).ident;
self.push("p");
self.push_ident(&name.as_str());
self = projection.ty.print(self)?;
}
ty::ExistentialPredicate::AutoTrait(def_id) => {
self = self.print_def_path(def_id, &[])?;
}
}
}
self.push("E");
Ok(self)
}
fn print_const(
mut self,
ct: &'tcx ty::Const<'tcx>,
) -> Result<Self::Const, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) {
return self.print_backref(i);
}
let start = self.out.len();
match ct.ty.kind {
ty::Uint(_) => {}
_ => {
bug!("symbol_names: unsupported constant of type `{}` ({:?})",
ct.ty, ct);
}
}
self = ct.ty.print(self)?;
if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) {
let _ = write!(self.out, "{:x}_", bits);
} else {
// NOTE(eddyb) despite having the path, we need to
// encode a placeholder, as the path could refer
// back to e.g. an `impl` using the constant.
self.push("p");
}
// Only cache consts that do not refer to an enclosing
// binder (which would change depending on context).
if !ct.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.consts.insert(ct, start);
}
}
Ok(self)
}
fn path_crate(
mut self,
cnum: CrateNum,
) -> Result<Self::Path, Self::Error> {
self.push("C");
let fingerprint = self.tcx.crate_disambiguator(cnum).to_fingerprint();
self.push_disambiguator(fingerprint.to_smaller_hash());
let name = self.tcx.original_crate_name(cnum).as_str();
self.push_ident(&name);
Ok(self)
}
fn path_qualified(
mut self,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
assert!(trait_ref.is_some());
let trait_ref = trait_ref.unwrap();
self.push("Y");
self = self_ty.print(self)?;
self.print_def_path(trait_ref.def_id, trait_ref.substs)
}
fn path_append_impl(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
self.push(match trait_ref {
Some(_) => "X",
None => "M",
});
self.push_disambiguator(disambiguated_data.disambiguator as u64);
self = print_prefix(self)?;
self = self_ty.print(self)?;
if let Some(trait_ref) = trait_ref {
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
Ok(self)
}
fn path_append(
self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
) -> Result<Self::Path, Self::Error> {
let ns = match disambiguated_data.data {
// Uppercase categories are more stable than lowercase ones.
DefPathData::TypeNs(_) => 't',
DefPathData::ValueNs(_) => 'v',
DefPathData::ClosureExpr => 'C',
DefPathData::Ctor => 'c',
DefPathData::AnonConst => 'k',
DefPathData::ImplTrait => 'i',
// These should never show up as `path_append` arguments.
DefPathData::CrateRoot
| DefPathData::Misc
| DefPathData::Impl
| DefPathData::MacroNs(_)
| DefPathData::LifetimeNs(_)
| DefPathData::GlobalMetaData(_) => {
bug!("symbol_names: unexpected DefPathData: {:?}", disambiguated_data.data)
}
};
let name = disambiguated_data.data.get_opt_name().map(|s| s.as_str());
self.path_append_ns(
print_prefix,
ns,
disambiguated_data.disambiguator as u64,
name.as_ref().map_or("", |s| &s[..])
)
}
fn path_generic_args(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
args: &[GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
// Don't print any regions if they're all erased.
let print_regions = args.iter().any(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(r) => *r != ty::ReErased,
_ => false,
}
});
let args = args.iter().cloned().filter(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(_) => print_regions,
_ => true,
}
});
if args.clone().next().is_none() {
return print_prefix(self);
}
self.push("I");
self = print_prefix(self)?;
for arg in args {
match arg.unpack() {
GenericArgKind::Lifetime(lt) => {
self = lt.print(self)?;
}
GenericArgKind::Type(ty) => {
self = ty.print(self)?;
}
GenericArgKind::Const(c) => {
self.push("K");
// FIXME(const_generics) implement `ty::print::Print` on `ty::Const`.
// self = c.print(self)?;
self = self.print_const(c)?;
}
}
}
self.push("E");
Ok(self)
}
} | }
ty::Foreign(def_id) => { | random_line_split |
v0.rs | use rustc::hir;
use rustc::hir::def_id::{CrateNum, DefId};
use rustc::hir::map::{DefPathData, DisambiguatedDefPathData};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Instance};
use rustc::ty::print::{Printer, Print};
use rustc::ty::subst::{GenericArg, Subst, GenericArgKind};
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_target::spec::abi::Abi;
use syntax::ast::{IntTy, UintTy, FloatTy};
use std::fmt::Write;
use std::ops::Range;
pub(super) fn mangle(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
instantiating_crate: Option<CrateNum>,
) -> String {
let def_id = instance.def_id();
// FIXME(eddyb) this should ideally not be needed.
let substs =
tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
let prefix = "_R";
let mut cx = SymbolMangler {
tcx,
compress: Some(Box::new(CompressionCaches {
start_offset: prefix.len(),
paths: FxHashMap::default(),
types: FxHashMap::default(),
consts: FxHashMap::default(),
})),
binders: vec![],
out: String::from(prefix),
};
cx = if instance.is_vtable_shim() {
cx.path_append_ns(
|cx| cx.print_def_path(def_id, substs),
'S',
0,
"",
).unwrap()
} else {
cx.print_def_path(def_id, substs).unwrap()
};
if let Some(instantiating_crate) = instantiating_crate {
cx = cx.print_def_path(instantiating_crate.as_def_id(), &[]).unwrap();
}
cx.out
}
struct CompressionCaches<'tcx> {
// The length of the prefix in `out` (e.g. 2 for `_R`).
start_offset: usize,
// The values are start positions in `out`, in bytes.
paths: FxHashMap<(DefId, &'tcx [GenericArg<'tcx>]), usize>,
types: FxHashMap<Ty<'tcx>, usize>,
consts: FxHashMap<&'tcx ty::Const<'tcx>, usize>,
}
struct BinderLevel {
/// The range of distances from the root of what's
/// being printed, to the lifetimes in a binder.
/// Specifically, a `BrAnon(i)` lifetime has depth
/// `lifetime_depths.start + i`, going away from the
/// the root and towards its use site, as `i` increases.
/// This is used to flatten rustc's pairing of `BrAnon`
/// (intra-binder disambiguation) with a `DebruijnIndex`
/// (binder addressing), to "true" de Bruijn indices,
/// by subtracting the depth of a certain lifetime, from
/// the innermost depth at its use site.
lifetime_depths: Range<u32>,
}
struct SymbolMangler<'tcx> {
tcx: TyCtxt<'tcx>,
compress: Option<Box<CompressionCaches<'tcx>>>,
binders: Vec<BinderLevel>,
out: String,
}
impl SymbolMangler<'tcx> {
fn push(&mut self, s: &str) {
self.out.push_str(s);
}
/// Push a `_`-terminated base 62 integer, using the format
/// specified in the RFC as `<base-62-number>`, that is:
/// * `x = 0` is encoded as just the `"_"` terminator
/// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`,
/// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc.
fn push_integer_62(&mut self, x: u64) {
if let Some(x) = x.checked_sub(1) {
base_n::push_str(x as u128, 62, &mut self.out);
}
self.push("_");
}
/// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is:
/// * `x = 0` is encoded as `""` (nothing)
/// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)`
/// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc.
fn push_opt_integer_62(&mut self, tag: &str, x: u64) {
if let Some(x) = x.checked_sub(1) {
self.push(tag);
self.push_integer_62(x);
}
}
fn push_disambiguator(&mut self, dis: u64) {
self.push_opt_integer_62("s", dis);
}
fn push_ident(&mut self, ident: &str) {
let mut use_punycode = false;
for b in ident.bytes() {
match b {
b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {}
0x80..=0xff => use_punycode = true,
_ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident),
}
}
let punycode_string;
let ident = if use_punycode {
self.push("u");
// FIXME(eddyb) we should probably roll our own punycode implementation.
let mut punycode_bytes = match ::punycode::encode(ident) {
Ok(s) => s.into_bytes(),
Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident),
};
// Replace `-` with `_`.
if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') {
*c = b'_';
}
// FIXME(eddyb) avoid rechecking UTF-8 validity.
punycode_string = String::from_utf8(punycode_bytes).unwrap();
&punycode_string
} else {
ident
};
let _ = write!(self.out, "{}", ident.len());
// Write a separating `_` if necessary (leading digit or `_`).
match ident.chars().next() {
Some('_') | Some('0'..='9') => {
self.push("_");
}
_ => {}
}
self.push(ident);
}
fn path_append_ns(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self, !>,
ns: char,
disambiguator: u64,
name: &str,
) -> Result<Self, !> {
self.push("N");
self.out.push(ns);
self = print_prefix(self)?;
self.push_disambiguator(disambiguator as u64);
self.push_ident(name);
Ok(self)
}
fn print_backref(mut self, i: usize) -> Result<Self, !> {
self.push("B");
self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64);
Ok(self)
}
fn in_binder<T>(
mut self,
value: &ty::Binder<T>,
print_value: impl FnOnce(Self, &T) -> Result<Self, !>
) -> Result<Self, !>
where T: TypeFoldable<'tcx>
{
let regions = if value.has_late_bound_regions() {
self.tcx.collect_referenced_late_bound_regions(value)
} else {
FxHashSet::default()
};
let mut lifetime_depths =
self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i);
let lifetimes = regions.into_iter().map(|br| {
match br {
ty::BrAnon(i) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
i - 1
},
_ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
}
}).max().map_or(0, |max| max + 1);
self.push_opt_integer_62("G", lifetimes as u64);
lifetime_depths.end += lifetimes;
self.binders.push(BinderLevel { lifetime_depths });
self = print_value(self, value.skip_binder())?;
self.binders.pop();
Ok(self)
}
}
impl Printer<'tcx> for SymbolMangler<'tcx> {
type Error = !;
type Path = Self;
type Region = Self;
type Type = Self;
type DynExistential = Self;
type Const = Self;
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn print_def_path(
mut self,
def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) {
return self.print_backref(i);
}
let start = self.out.len();
self = self.default_print_def_path(def_id, substs)?;
// Only cache paths that do not refer to an enclosing
// binder (which would change depending on context).
if !substs.iter().any(|k| k.has_escaping_bound_vars()) {
if let Some(c) = &mut self.compress {
c.paths.insert((def_id, substs), start);
}
}
Ok(self)
}
fn print_impl_path(
self,
impl_def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
mut self_ty: Ty<'tcx>,
mut impl_trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
let key = self.tcx.def_key(impl_def_id);
let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
let mut param_env = self.tcx.param_env(impl_def_id)
.with_reveal_all();
if !substs.is_empty() {
param_env = param_env.subst(self.tcx, substs);
}
match &mut impl_trait_ref {
Some(impl_trait_ref) => {
assert_eq!(impl_trait_ref.self_ty(), self_ty);
*impl_trait_ref =
self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref);
self_ty = impl_trait_ref.self_ty();
}
None => {
self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty);
}
}
self.path_append_impl(
|cx| cx.print_def_path(parent_def_id, &[]),
&key.disambiguated_data,
self_ty,
impl_trait_ref,
)
}
fn print_region(
mut self,
region: ty::Region<'_>,
) -> Result<Self::Region, Self::Error> {
let i = match *region {
// Erased lifetimes use the index 0, for a
// shorter mangling of `L_`.
ty::ReErased => 0,
// Late-bound lifetimes use indices starting at 1,
// see `BinderLevel` for more details.
ty::ReLateBound(debruijn, ty::BrAnon(i)) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
let i = i - 1;
let binder = &self.binders[self.binders.len() - 1 - debruijn.index()];
let depth = binder.lifetime_depths.start + i;
1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth)
}
_ => bug!("symbol_names: non-erased region `{:?}`", region),
};
self.push("L");
self.push_integer_62(i as u64);
Ok(self)
}
fn print_type(
mut self,
ty: Ty<'tcx>,
) -> Result<Self::Type, Self::Error> {
// Basic types, never cached (single-character).
let basic_type = match ty.kind {
ty::Bool => "b",
ty::Char => "c",
ty::Str => "e",
ty::Tuple(_) if ty.is_unit() => "u",
ty::Int(IntTy::I8) => "a",
ty::Int(IntTy::I16) => "s",
ty::Int(IntTy::I32) => "l",
ty::Int(IntTy::I64) => "x",
ty::Int(IntTy::I128) => "n",
ty::Int(IntTy::Isize) => "i",
ty::Uint(UintTy::U8) => "h",
ty::Uint(UintTy::U16) => "t",
ty::Uint(UintTy::U32) => "m",
ty::Uint(UintTy::U64) => "y",
ty::Uint(UintTy::U128) => "o",
ty::Uint(UintTy::Usize) => "j",
ty::Float(FloatTy::F32) => "f",
ty::Float(FloatTy::F64) => "d",
ty::Never => "z",
// Placeholders (should be demangled as `_`).
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => "p",
_ => "",
};
if !basic_type.is_empty() {
self.push(basic_type);
return Ok(self);
}
if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) {
return self.print_backref(i);
}
let start = self.out.len();
match ty.kind {
// Basic types, handled above.
ty::Bool | ty::Char | ty::Str |
ty::Int(_) | ty::Uint(_) | ty::Float(_) |
ty::Never => unreachable!(),
ty::Tuple(_) if ty.is_unit() => unreachable!(),
// Placeholders, also handled as part of basic types.
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => unreachable!(),
ty::Ref(r, ty, mutbl) => {
self.push(match mutbl {
hir::MutImmutable => "R",
hir::MutMutable => "Q",
});
if *r != ty::ReErased {
self = r.print(self)?;
}
self = ty.print(self)?;
}
ty::RawPtr(mt) => {
self.push(match mt.mutbl {
hir::MutImmutable => "P",
hir::MutMutable => "O",
});
self = mt.ty.print(self)?;
}
ty::Array(ty, len) => {
self.push("A");
self = ty.print(self)?;
self = self.print_const(len)?;
}
ty::Slice(ty) => {
self.push("S");
self = ty.print(self)?;
}
ty::Tuple(tys) => {
self.push("T");
for ty in tys.iter().map(|k| k.expect_ty()) {
self = ty.print(self)?;
}
self.push("E");
}
// Mangle all nominal types as paths.
ty::Adt(&ty::AdtDef { did: def_id, .. }, substs) |
ty::FnDef(def_id, substs) |
ty::Opaque(def_id, substs) |
ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::Closure(def_id, substs) |
ty::Generator(def_id, substs, _) => {
self = self.print_def_path(def_id, substs)?;
}
ty::Foreign(def_id) => {
self = self.print_def_path(def_id, &[])?;
}
ty::FnPtr(sig) => {
self.push("F");
self = self.in_binder(&sig, |mut cx, sig| {
if sig.unsafety == hir::Unsafety::Unsafe {
cx.push("U");
}
match sig.abi {
Abi::Rust => {}
Abi::C => cx.push("KC"),
abi => {
cx.push("K");
let name = abi.name();
if name.contains('-') {
cx.push_ident(&name.replace('-', "_"));
} else {
cx.push_ident(name);
}
}
}
for &ty in sig.inputs() {
cx = ty.print(cx)?;
}
if sig.c_variadic {
cx.push("v");
}
cx.push("E");
sig.output().print(cx)
})?;
}
ty::Dynamic(predicates, r) => {
self.push("D");
self = self.in_binder(&predicates, |cx, predicates| {
cx.print_dyn_existential(predicates)
})?;
self = r.print(self)?;
}
ty::GeneratorWitness(_) => {
bug!("symbol_names: unexpected `GeneratorWitness`")
}
}
// Only cache types that do not refer to an enclosing
// binder (which would change depending on context).
if !ty.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.types.insert(ty, start);
}
}
Ok(self)
}
fn | (
mut self,
predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
) -> Result<Self::DynExistential, Self::Error> {
for predicate in predicates {
match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => {
// Use a type that can't appear in defaults of type parameters.
let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0));
let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self);
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
ty::ExistentialPredicate::Projection(projection) => {
let name = self.tcx.associated_item(projection.item_def_id).ident;
self.push("p");
self.push_ident(&name.as_str());
self = projection.ty.print(self)?;
}
ty::ExistentialPredicate::AutoTrait(def_id) => {
self = self.print_def_path(def_id, &[])?;
}
}
}
self.push("E");
Ok(self)
}
fn print_const(
mut self,
ct: &'tcx ty::Const<'tcx>,
) -> Result<Self::Const, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) {
return self.print_backref(i);
}
let start = self.out.len();
match ct.ty.kind {
ty::Uint(_) => {}
_ => {
bug!("symbol_names: unsupported constant of type `{}` ({:?})",
ct.ty, ct);
}
}
self = ct.ty.print(self)?;
if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) {
let _ = write!(self.out, "{:x}_", bits);
} else {
// NOTE(eddyb) despite having the path, we need to
// encode a placeholder, as the path could refer
// back to e.g. an `impl` using the constant.
self.push("p");
}
// Only cache consts that do not refer to an enclosing
// binder (which would change depending on context).
if !ct.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.consts.insert(ct, start);
}
}
Ok(self)
}
fn path_crate(
mut self,
cnum: CrateNum,
) -> Result<Self::Path, Self::Error> {
self.push("C");
let fingerprint = self.tcx.crate_disambiguator(cnum).to_fingerprint();
self.push_disambiguator(fingerprint.to_smaller_hash());
let name = self.tcx.original_crate_name(cnum).as_str();
self.push_ident(&name);
Ok(self)
}
fn path_qualified(
mut self,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
assert!(trait_ref.is_some());
let trait_ref = trait_ref.unwrap();
self.push("Y");
self = self_ty.print(self)?;
self.print_def_path(trait_ref.def_id, trait_ref.substs)
}
fn path_append_impl(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
self.push(match trait_ref {
Some(_) => "X",
None => "M",
});
self.push_disambiguator(disambiguated_data.disambiguator as u64);
self = print_prefix(self)?;
self = self_ty.print(self)?;
if let Some(trait_ref) = trait_ref {
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
Ok(self)
}
fn path_append(
self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
) -> Result<Self::Path, Self::Error> {
let ns = match disambiguated_data.data {
// Uppercase categories are more stable than lowercase ones.
DefPathData::TypeNs(_) => 't',
DefPathData::ValueNs(_) => 'v',
DefPathData::ClosureExpr => 'C',
DefPathData::Ctor => 'c',
DefPathData::AnonConst => 'k',
DefPathData::ImplTrait => 'i',
// These should never show up as `path_append` arguments.
DefPathData::CrateRoot
| DefPathData::Misc
| DefPathData::Impl
| DefPathData::MacroNs(_)
| DefPathData::LifetimeNs(_)
| DefPathData::GlobalMetaData(_) => {
bug!("symbol_names: unexpected DefPathData: {:?}", disambiguated_data.data)
}
};
let name = disambiguated_data.data.get_opt_name().map(|s| s.as_str());
self.path_append_ns(
print_prefix,
ns,
disambiguated_data.disambiguator as u64,
name.as_ref().map_or("", |s| &s[..])
)
}
fn path_generic_args(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
args: &[GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
// Don't print any regions if they're all erased.
let print_regions = args.iter().any(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(r) => *r != ty::ReErased,
_ => false,
}
});
let args = args.iter().cloned().filter(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(_) => print_regions,
_ => true,
}
});
if args.clone().next().is_none() {
return print_prefix(self);
}
self.push("I");
self = print_prefix(self)?;
for arg in args {
match arg.unpack() {
GenericArgKind::Lifetime(lt) => {
self = lt.print(self)?;
}
GenericArgKind::Type(ty) => {
self = ty.print(self)?;
}
GenericArgKind::Const(c) => {
self.push("K");
// FIXME(const_generics) implement `ty::print::Print` on `ty::Const`.
// self = c.print(self)?;
self = self.print_const(c)?;
}
}
}
self.push("E");
Ok(self)
}
}
| print_dyn_existential | identifier_name |
v0.rs | use rustc::hir;
use rustc::hir::def_id::{CrateNum, DefId};
use rustc::hir::map::{DefPathData, DisambiguatedDefPathData};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Instance};
use rustc::ty::print::{Printer, Print};
use rustc::ty::subst::{GenericArg, Subst, GenericArgKind};
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_target::spec::abi::Abi;
use syntax::ast::{IntTy, UintTy, FloatTy};
use std::fmt::Write;
use std::ops::Range;
pub(super) fn mangle(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
instantiating_crate: Option<CrateNum>,
) -> String {
let def_id = instance.def_id();
// FIXME(eddyb) this should ideally not be needed.
let substs =
tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
let prefix = "_R";
let mut cx = SymbolMangler {
tcx,
compress: Some(Box::new(CompressionCaches {
start_offset: prefix.len(),
paths: FxHashMap::default(),
types: FxHashMap::default(),
consts: FxHashMap::default(),
})),
binders: vec![],
out: String::from(prefix),
};
cx = if instance.is_vtable_shim() {
cx.path_append_ns(
|cx| cx.print_def_path(def_id, substs),
'S',
0,
"",
).unwrap()
} else {
cx.print_def_path(def_id, substs).unwrap()
};
if let Some(instantiating_crate) = instantiating_crate {
cx = cx.print_def_path(instantiating_crate.as_def_id(), &[]).unwrap();
}
cx.out
}
struct CompressionCaches<'tcx> {
// The length of the prefix in `out` (e.g. 2 for `_R`).
start_offset: usize,
// The values are start positions in `out`, in bytes.
paths: FxHashMap<(DefId, &'tcx [GenericArg<'tcx>]), usize>,
types: FxHashMap<Ty<'tcx>, usize>,
consts: FxHashMap<&'tcx ty::Const<'tcx>, usize>,
}
struct BinderLevel {
/// The range of distances from the root of what's
/// being printed, to the lifetimes in a binder.
/// Specifically, a `BrAnon(i)` lifetime has depth
/// `lifetime_depths.start + i`, going away from the
/// the root and towards its use site, as `i` increases.
/// This is used to flatten rustc's pairing of `BrAnon`
/// (intra-binder disambiguation) with a `DebruijnIndex`
/// (binder addressing), to "true" de Bruijn indices,
/// by subtracting the depth of a certain lifetime, from
/// the innermost depth at its use site.
lifetime_depths: Range<u32>,
}
struct SymbolMangler<'tcx> {
tcx: TyCtxt<'tcx>,
compress: Option<Box<CompressionCaches<'tcx>>>,
binders: Vec<BinderLevel>,
out: String,
}
impl SymbolMangler<'tcx> {
fn push(&mut self, s: &str) {
self.out.push_str(s);
}
/// Push a `_`-terminated base 62 integer, using the format
/// specified in the RFC as `<base-62-number>`, that is:
/// * `x = 0` is encoded as just the `"_"` terminator
/// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`,
/// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc.
fn push_integer_62(&mut self, x: u64) {
if let Some(x) = x.checked_sub(1) {
base_n::push_str(x as u128, 62, &mut self.out);
}
self.push("_");
}
/// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is:
/// * `x = 0` is encoded as `""` (nothing)
/// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)`
/// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc.
fn push_opt_integer_62(&mut self, tag: &str, x: u64) {
if let Some(x) = x.checked_sub(1) {
self.push(tag);
self.push_integer_62(x);
}
}
fn push_disambiguator(&mut self, dis: u64) {
self.push_opt_integer_62("s", dis);
}
fn push_ident(&mut self, ident: &str) {
let mut use_punycode = false;
for b in ident.bytes() {
match b {
b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {}
0x80..=0xff => use_punycode = true,
_ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident),
}
}
let punycode_string;
let ident = if use_punycode {
self.push("u");
// FIXME(eddyb) we should probably roll our own punycode implementation.
let mut punycode_bytes = match ::punycode::encode(ident) {
Ok(s) => s.into_bytes(),
Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident),
};
// Replace `-` with `_`.
if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') |
// FIXME(eddyb) avoid rechecking UTF-8 validity.
punycode_string = String::from_utf8(punycode_bytes).unwrap();
&punycode_string
} else {
ident
};
let _ = write!(self.out, "{}", ident.len());
// Write a separating `_` if necessary (leading digit or `_`).
match ident.chars().next() {
Some('_') | Some('0'..='9') => {
self.push("_");
}
_ => {}
}
self.push(ident);
}
fn path_append_ns(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self, !>,
ns: char,
disambiguator: u64,
name: &str,
) -> Result<Self, !> {
self.push("N");
self.out.push(ns);
self = print_prefix(self)?;
self.push_disambiguator(disambiguator as u64);
self.push_ident(name);
Ok(self)
}
fn print_backref(mut self, i: usize) -> Result<Self, !> {
self.push("B");
self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64);
Ok(self)
}
fn in_binder<T>(
mut self,
value: &ty::Binder<T>,
print_value: impl FnOnce(Self, &T) -> Result<Self, !>
) -> Result<Self, !>
where T: TypeFoldable<'tcx>
{
let regions = if value.has_late_bound_regions() {
self.tcx.collect_referenced_late_bound_regions(value)
} else {
FxHashSet::default()
};
let mut lifetime_depths =
self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i);
let lifetimes = regions.into_iter().map(|br| {
match br {
ty::BrAnon(i) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
i - 1
},
_ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
}
}).max().map_or(0, |max| max + 1);
self.push_opt_integer_62("G", lifetimes as u64);
lifetime_depths.end += lifetimes;
self.binders.push(BinderLevel { lifetime_depths });
self = print_value(self, value.skip_binder())?;
self.binders.pop();
Ok(self)
}
}
impl Printer<'tcx> for SymbolMangler<'tcx> {
type Error = !;
type Path = Self;
type Region = Self;
type Type = Self;
type DynExistential = Self;
type Const = Self;
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn print_def_path(
mut self,
def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) {
return self.print_backref(i);
}
let start = self.out.len();
self = self.default_print_def_path(def_id, substs)?;
// Only cache paths that do not refer to an enclosing
// binder (which would change depending on context).
if !substs.iter().any(|k| k.has_escaping_bound_vars()) {
if let Some(c) = &mut self.compress {
c.paths.insert((def_id, substs), start);
}
}
Ok(self)
}
fn print_impl_path(
self,
impl_def_id: DefId,
substs: &'tcx [GenericArg<'tcx>],
mut self_ty: Ty<'tcx>,
mut impl_trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
let key = self.tcx.def_key(impl_def_id);
let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
let mut param_env = self.tcx.param_env(impl_def_id)
.with_reveal_all();
if !substs.is_empty() {
param_env = param_env.subst(self.tcx, substs);
}
match &mut impl_trait_ref {
Some(impl_trait_ref) => {
assert_eq!(impl_trait_ref.self_ty(), self_ty);
*impl_trait_ref =
self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref);
self_ty = impl_trait_ref.self_ty();
}
None => {
self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty);
}
}
self.path_append_impl(
|cx| cx.print_def_path(parent_def_id, &[]),
&key.disambiguated_data,
self_ty,
impl_trait_ref,
)
}
fn print_region(
mut self,
region: ty::Region<'_>,
) -> Result<Self::Region, Self::Error> {
let i = match *region {
// Erased lifetimes use the index 0, for a
// shorter mangling of `L_`.
ty::ReErased => 0,
// Late-bound lifetimes use indices starting at 1,
// see `BinderLevel` for more details.
ty::ReLateBound(debruijn, ty::BrAnon(i)) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
let i = i - 1;
let binder = &self.binders[self.binders.len() - 1 - debruijn.index()];
let depth = binder.lifetime_depths.start + i;
1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth)
}
_ => bug!("symbol_names: non-erased region `{:?}`", region),
};
self.push("L");
self.push_integer_62(i as u64);
Ok(self)
}
fn print_type(
mut self,
ty: Ty<'tcx>,
) -> Result<Self::Type, Self::Error> {
// Basic types, never cached (single-character).
let basic_type = match ty.kind {
ty::Bool => "b",
ty::Char => "c",
ty::Str => "e",
ty::Tuple(_) if ty.is_unit() => "u",
ty::Int(IntTy::I8) => "a",
ty::Int(IntTy::I16) => "s",
ty::Int(IntTy::I32) => "l",
ty::Int(IntTy::I64) => "x",
ty::Int(IntTy::I128) => "n",
ty::Int(IntTy::Isize) => "i",
ty::Uint(UintTy::U8) => "h",
ty::Uint(UintTy::U16) => "t",
ty::Uint(UintTy::U32) => "m",
ty::Uint(UintTy::U64) => "y",
ty::Uint(UintTy::U128) => "o",
ty::Uint(UintTy::Usize) => "j",
ty::Float(FloatTy::F32) => "f",
ty::Float(FloatTy::F64) => "d",
ty::Never => "z",
// Placeholders (should be demangled as `_`).
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => "p",
_ => "",
};
if !basic_type.is_empty() {
self.push(basic_type);
return Ok(self);
}
if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) {
return self.print_backref(i);
}
let start = self.out.len();
match ty.kind {
// Basic types, handled above.
ty::Bool | ty::Char | ty::Str |
ty::Int(_) | ty::Uint(_) | ty::Float(_) |
ty::Never => unreachable!(),
ty::Tuple(_) if ty.is_unit() => unreachable!(),
// Placeholders, also handled as part of basic types.
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => unreachable!(),
ty::Ref(r, ty, mutbl) => {
self.push(match mutbl {
hir::MutImmutable => "R",
hir::MutMutable => "Q",
});
if *r != ty::ReErased {
self = r.print(self)?;
}
self = ty.print(self)?;
}
ty::RawPtr(mt) => {
self.push(match mt.mutbl {
hir::MutImmutable => "P",
hir::MutMutable => "O",
});
self = mt.ty.print(self)?;
}
ty::Array(ty, len) => {
self.push("A");
self = ty.print(self)?;
self = self.print_const(len)?;
}
ty::Slice(ty) => {
self.push("S");
self = ty.print(self)?;
}
ty::Tuple(tys) => {
self.push("T");
for ty in tys.iter().map(|k| k.expect_ty()) {
self = ty.print(self)?;
}
self.push("E");
}
// Mangle all nominal types as paths.
ty::Adt(&ty::AdtDef { did: def_id, .. }, substs) |
ty::FnDef(def_id, substs) |
ty::Opaque(def_id, substs) |
ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::Closure(def_id, substs) |
ty::Generator(def_id, substs, _) => {
self = self.print_def_path(def_id, substs)?;
}
ty::Foreign(def_id) => {
self = self.print_def_path(def_id, &[])?;
}
ty::FnPtr(sig) => {
self.push("F");
self = self.in_binder(&sig, |mut cx, sig| {
if sig.unsafety == hir::Unsafety::Unsafe {
cx.push("U");
}
match sig.abi {
Abi::Rust => {}
Abi::C => cx.push("KC"),
abi => {
cx.push("K");
let name = abi.name();
if name.contains('-') {
cx.push_ident(&name.replace('-', "_"));
} else {
cx.push_ident(name);
}
}
}
for &ty in sig.inputs() {
cx = ty.print(cx)?;
}
if sig.c_variadic {
cx.push("v");
}
cx.push("E");
sig.output().print(cx)
})?;
}
ty::Dynamic(predicates, r) => {
self.push("D");
self = self.in_binder(&predicates, |cx, predicates| {
cx.print_dyn_existential(predicates)
})?;
self = r.print(self)?;
}
ty::GeneratorWitness(_) => {
bug!("symbol_names: unexpected `GeneratorWitness`")
}
}
// Only cache types that do not refer to an enclosing
// binder (which would change depending on context).
if !ty.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.types.insert(ty, start);
}
}
Ok(self)
}
fn print_dyn_existential(
mut self,
predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
) -> Result<Self::DynExistential, Self::Error> {
for predicate in predicates {
match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => {
// Use a type that can't appear in defaults of type parameters.
let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0));
let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self);
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
ty::ExistentialPredicate::Projection(projection) => {
let name = self.tcx.associated_item(projection.item_def_id).ident;
self.push("p");
self.push_ident(&name.as_str());
self = projection.ty.print(self)?;
}
ty::ExistentialPredicate::AutoTrait(def_id) => {
self = self.print_def_path(def_id, &[])?;
}
}
}
self.push("E");
Ok(self)
}
fn print_const(
mut self,
ct: &'tcx ty::Const<'tcx>,
) -> Result<Self::Const, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) {
return self.print_backref(i);
}
let start = self.out.len();
match ct.ty.kind {
ty::Uint(_) => {}
_ => {
bug!("symbol_names: unsupported constant of type `{}` ({:?})",
ct.ty, ct);
}
}
self = ct.ty.print(self)?;
if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) {
let _ = write!(self.out, "{:x}_", bits);
} else {
// NOTE(eddyb) despite having the path, we need to
// encode a placeholder, as the path could refer
// back to e.g. an `impl` using the constant.
self.push("p");
}
// Only cache consts that do not refer to an enclosing
// binder (which would change depending on context).
if !ct.has_escaping_bound_vars() {
if let Some(c) = &mut self.compress {
c.consts.insert(ct, start);
}
}
Ok(self)
}
fn path_crate(
mut self,
cnum: CrateNum,
) -> Result<Self::Path, Self::Error> {
self.push("C");
let fingerprint = self.tcx.crate_disambiguator(cnum).to_fingerprint();
self.push_disambiguator(fingerprint.to_smaller_hash());
let name = self.tcx.original_crate_name(cnum).as_str();
self.push_ident(&name);
Ok(self)
}
fn path_qualified(
mut self,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
assert!(trait_ref.is_some());
let trait_ref = trait_ref.unwrap();
self.push("Y");
self = self_ty.print(self)?;
self.print_def_path(trait_ref.def_id, trait_ref.substs)
}
fn path_append_impl(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
self_ty: Ty<'tcx>,
trait_ref: Option<ty::TraitRef<'tcx>>,
) -> Result<Self::Path, Self::Error> {
self.push(match trait_ref {
Some(_) => "X",
None => "M",
});
self.push_disambiguator(disambiguated_data.disambiguator as u64);
self = print_prefix(self)?;
self = self_ty.print(self)?;
if let Some(trait_ref) = trait_ref {
self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
}
Ok(self)
}
fn path_append(
self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
disambiguated_data: &DisambiguatedDefPathData,
) -> Result<Self::Path, Self::Error> {
let ns = match disambiguated_data.data {
// Uppercase categories are more stable than lowercase ones.
DefPathData::TypeNs(_) => 't',
DefPathData::ValueNs(_) => 'v',
DefPathData::ClosureExpr => 'C',
DefPathData::Ctor => 'c',
DefPathData::AnonConst => 'k',
DefPathData::ImplTrait => 'i',
// These should never show up as `path_append` arguments.
DefPathData::CrateRoot
| DefPathData::Misc
| DefPathData::Impl
| DefPathData::MacroNs(_)
| DefPathData::LifetimeNs(_)
| DefPathData::GlobalMetaData(_) => {
bug!("symbol_names: unexpected DefPathData: {:?}", disambiguated_data.data)
}
};
let name = disambiguated_data.data.get_opt_name().map(|s| s.as_str());
self.path_append_ns(
print_prefix,
ns,
disambiguated_data.disambiguator as u64,
name.as_ref().map_or("", |s| &s[..])
)
}
fn path_generic_args(
mut self,
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
args: &[GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
// Don't print any regions if they're all erased.
let print_regions = args.iter().any(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(r) => *r != ty::ReErased,
_ => false,
}
});
let args = args.iter().cloned().filter(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(_) => print_regions,
_ => true,
}
});
if args.clone().next().is_none() {
return print_prefix(self);
}
self.push("I");
self = print_prefix(self)?;
for arg in args {
match arg.unpack() {
GenericArgKind::Lifetime(lt) => {
self = lt.print(self)?;
}
GenericArgKind::Type(ty) => {
self = ty.print(self)?;
}
GenericArgKind::Const(c) => {
self.push("K");
// FIXME(const_generics) implement `ty::print::Print` on `ty::Const`.
// self = c.print(self)?;
self = self.print_const(c)?;
}
}
}
self.push("E");
Ok(self)
}
}
| {
*c = b'_';
} | conditional_block |
executor.rs | //! The executor runs on all nodes and is resonsible for reconciling the requested state (from the
//! cluster's master), and the locally running containers.
//!
//! When a new state is received, it queries Docker Engine to see if:
//! 1) any scheduled containers are not currently running
//! 2) and running containers aren't still scheduled
//!
//! This actor is not responsible for modifying the cluster state. For that, see Scheduler.
use crate::scheduler::*;
use actix::fut::{ActorFuture, WrapFuture};
use actix::prelude::*;
use actix::registry::SystemService;
use actix_web::client;
use failure::{err_msg, Error};
use futures::future::{err, join_all, ok, Future};
use futures::stream::Stream;
use shiplift::builder::*;
use shiplift::Docker;
use std::collections::HashMap;
use std::fmt::Debug;
use std::net::{SocketAddr, ToSocketAddrs};
use sysinfo::{ProcessorExt, SystemExt};
// Labels to apply to containers
const LABEL_NODE_ID: &str = "com.aluminous.cacophony.node-id";
const LABEL_ALLOCATION_ID: &str = "com.aluminous.cacophony.allocation-id";
/// Details about the current resource usage of a node
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct NodeResources {
pub total_memory: u64,
pub used_memory: u64,
pub cpu_usage: Vec<f32>,
}
/// Updates the local state (running containers) to match the cluster state.
#[derive(Default)]
pub struct | {
state: ClusterState,
node_id: NodeId,
system: sysinfo::System,
}
impl Executor {
fn join<S: ToSocketAddrs>(
&mut self,
join_host: S,
local_port: u16,
) -> impl Future<Item = (), Error = Error> {
let join_host = join_host.to_socket_addrs().unwrap().next().unwrap();
info!("Attempting to join cluster at {}", join_host);
client::post(format!(
"http://{}/cluster/node/{}",
join_host, self.node_id
))
.json(local_port)
.unwrap()
.send()
.from_err()
.and_then(|res| {
if res.status().is_success() {
info!("Announced our presence to cluster.");
Ok(())
} else {
Err(format_err!("Failed to join cluster: {:?}", res))
}
})
}
/// Update currently running containers to match allocated services.
fn update_state(
&mut self,
state: ClusterState,
) -> impl ActorFuture<Item = (), Error = (), Actor = Self> {
self.state = state;
let docker = Docker::new();
docker
.containers()
.list(
// Get all the containers created by this node
&ContainerListOptions::builder()
.filter(vec![ContainerFilter::Label(
String::from(LABEL_NODE_ID),
self.node_id.clone(),
)])
.all()
.build(),
)
.map_err(|_| ())
.into_actor(self)
.and_then(move |containers, executor, _ctx| {
let desired_allocs: HashMap<&str, &Allocation> = executor
.state
.allocations
.values()
.filter(|a| a.node_id == executor.node_id)
.map(|a| (&*a.allocation_id, a))
.collect();
let current_containers: HashMap<&str, &str> = containers
.iter()
.map(|c| (&*c.labels[LABEL_ALLOCATION_ID], &*c.id))
.collect();
// Remove containers which exist but aren't part of an allocataion
let remove_fut: Vec<_> = containers
.iter()
.filter(|container| {
!desired_allocs.contains_key(&*container.labels[LABEL_ALLOCATION_ID])
})
.map(|container| {
docker
.containers()
.get(&container.id)
.remove(RmContainerOptions::builder().force(true).build())
.from_err()
})
.collect();
// Create containers which are allocated but aren't known to Docker
let create_fut: Vec<_> = desired_allocs
.iter()
.filter(|(id, _)| !current_containers.contains_key(*id))
.map(|(_, alloc)| executor.create_container(alloc))
.collect();
info!(
"Updating running containers: {} -> {} (create {}, kill {})",
containers.len(),
desired_allocs.len(),
create_fut.len(),
remove_fut.len()
);
join_all(create_fut)
.join(join_all(remove_fut))
.then(|res| check_err("Execute containers", res))
.into_actor(executor)
})
}
/// Create and start a container for an allocation. Pulls the image if needed.
fn create_container(&self, alloc: &Allocation) -> Box<Future<Item = (), Error = Error>> {
let docker = Docker::new();
let job_services = &self.state.jobs.get(&alloc.job_id);
let service = match job_services.and_then(|j| j.services.get(&alloc.service_name)) {
Some(service) => service,
None => {
return Box::new(err(format_err!(
"Service '{}' '{}' allocated but not defined",
alloc.job_id,
alloc.service_name
)));
}
};
let image = if service.image.contains(':') {
service.image.clone()
} else {
format!("{}:latest", service.image)
};
let mut labels = HashMap::new();
labels.insert(LABEL_ALLOCATION_ID, &*alloc.allocation_id);
labels.insert(LABEL_NODE_ID, &self.node_id);
let pull_opts = PullOptions::builder().image(&*image).build();
let create_opts = service
.build_container_options()
.unwrap()
.labels(&labels)
.restart_policy("on-failure", 4)
.build();
Box::new(
docker
.images()
.get(&image)
.inspect()
.map(move |_| info!("Image already pulled: {:?}", image))
.or_else(move |_| {
docker.images().pull(&pull_opts).for_each(|p| {
debug!("Pull: {:?}", p);
Ok(())
})
})
.and_then(move |_| Docker::new().containers().create(&create_opts))
.and_then(|res| Docker::new().containers().get(&*res.id).start())
.from_err(),
)
}
}
impl Actor for Executor {
type Context = Context<Self>;
}
impl Supervised for Executor {}
impl SystemService for Executor {}
/// Fire-and-forget command messages for Executor
#[derive(Clone, Debug)]
pub enum ExecutorCommand {
UpdateState(ClusterState),
JoinCluster {
local_port: u16,
join_addr: SocketAddr,
},
SetNodeId(NodeId),
}
impl Message for ExecutorCommand {
type Result = Result<(), Error>;
}
impl Handler<ExecutorCommand> for Executor {
type Result = ResponseFuture<(), Error>;
fn handle(&mut self, cmd: ExecutorCommand, ctx: &mut Context<Self>) -> Self::Result {
debug!("Executor handling command: {:?}", cmd);
match cmd {
ExecutorCommand::UpdateState(state) => {
ctx.spawn(self.update_state(state));
Box::new(ok(()))
}
ExecutorCommand::JoinCluster {
local_port,
join_addr,
} => Box::new(self.join(join_addr, local_port)),
ExecutorCommand::SetNodeId(node_id) => {
self.node_id = node_id;
Box::new(ok(()))
}
}
}
}
/// Get the address of the master node (if this node is not master)
pub struct GetRemoteMaster;
impl Message for GetRemoteMaster {
type Result = Result<Option<SocketAddr>, Error>;
}
impl Handler<GetRemoteMaster> for Executor {
type Result = Result<Option<SocketAddr>, Error>;
fn handle(&mut self, _: GetRemoteMaster, _: &mut Context<Self>) -> Self::Result {
match self.state.master_node() {
Some(master) => {
if master.node_id == self.node_id {
Ok(None)
} else {
Ok(Some(master.cluster_address))
}
}
None => Err(err_msg("Master unknown.")),
}
}
}
/// Message requesting resource usage of the local node
pub struct GetNodeResources;
impl Message for GetNodeResources {
type Result = Result<NodeResources, Error>;
}
impl Handler<GetNodeResources> for Executor {
type Result = Result<NodeResources, Error>;
fn handle(&mut self, _: GetNodeResources, _: &mut Context<Self>) -> Self::Result {
self.system.refresh_system();
Ok(NodeResources {
total_memory: self.system.get_total_memory(),
used_memory: self.system.get_used_memory(),
cpu_usage: self.system.get_processor_list()[1..]
.iter()
.map(|p| p.get_cpu_usage())
.collect(),
})
}
}
/// Logs the result of async fire-and-forget futures.
pub fn check_err<T, U>(msg: &str, res: Result<T, U>) -> impl Future<Item = (), Error = ()>
where
T: Debug,
U: Debug,
{
match res {
Ok(ok_res) => debug!("{}: {:?}", msg, ok_res),
Err(err_res) => error!("{}: {:?}", msg, err_res),
};
ok(())
}
#[cfg(test)]
mod test {
use crate::executor::*;
use crate::test_support::*;
#[test]
fn test_node_resources() {
with_node("127.0.0.1:9001", || {
Executor::from_registry()
.send(GetNodeResources)
.and_then(|res| {
let resources = res.expect("Get resources failed");
assert!(resources.total_memory - resources.used_memory > 0);
assert!(!resources.cpu_usage.is_empty());
Ok(())
})
});
}
}
| Executor | identifier_name |
executor.rs | //! The executor runs on all nodes and is resonsible for reconciling the requested state (from the
//! cluster's master), and the locally running containers.
//!
//! When a new state is received, it queries Docker Engine to see if:
//! 1) any scheduled containers are not currently running
//! 2) and running containers aren't still scheduled
//!
//! This actor is not responsible for modifying the cluster state. For that, see Scheduler.
use crate::scheduler::*;
use actix::fut::{ActorFuture, WrapFuture};
use actix::prelude::*;
use actix::registry::SystemService;
use actix_web::client;
use failure::{err_msg, Error};
use futures::future::{err, join_all, ok, Future};
use futures::stream::Stream;
use shiplift::builder::*;
use shiplift::Docker;
use std::collections::HashMap;
use std::fmt::Debug;
use std::net::{SocketAddr, ToSocketAddrs};
use sysinfo::{ProcessorExt, SystemExt};
// Labels to apply to containers
const LABEL_NODE_ID: &str = "com.aluminous.cacophony.node-id";
const LABEL_ALLOCATION_ID: &str = "com.aluminous.cacophony.allocation-id";
/// Details about the current resource usage of a node
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct NodeResources {
pub total_memory: u64,
pub used_memory: u64,
pub cpu_usage: Vec<f32>,
}
/// Updates the local state (running containers) to match the cluster state.
#[derive(Default)]
pub struct Executor {
state: ClusterState,
node_id: NodeId,
system: sysinfo::System,
}
impl Executor {
fn join<S: ToSocketAddrs>(
&mut self,
join_host: S,
local_port: u16,
) -> impl Future<Item = (), Error = Error> {
let join_host = join_host.to_socket_addrs().unwrap().next().unwrap();
info!("Attempting to join cluster at {}", join_host);
client::post(format!(
"http://{}/cluster/node/{}",
join_host, self.node_id
))
.json(local_port)
.unwrap()
.send()
.from_err()
.and_then(|res| {
if res.status().is_success() {
info!("Announced our presence to cluster.");
Ok(())
} else {
Err(format_err!("Failed to join cluster: {:?}", res))
}
})
}
/// Update currently running containers to match allocated services.
fn update_state(
&mut self,
state: ClusterState,
) -> impl ActorFuture<Item = (), Error = (), Actor = Self> {
self.state = state;
let docker = Docker::new();
docker
.containers()
.list(
// Get all the containers created by this node
&ContainerListOptions::builder()
.filter(vec![ContainerFilter::Label(
String::from(LABEL_NODE_ID),
self.node_id.clone(),
)])
.all()
.build(),
)
.map_err(|_| ())
.into_actor(self)
.and_then(move |containers, executor, _ctx| {
let desired_allocs: HashMap<&str, &Allocation> = executor
.state
.allocations
.values()
.filter(|a| a.node_id == executor.node_id)
.map(|a| (&*a.allocation_id, a))
.collect();
let current_containers: HashMap<&str, &str> = containers
.iter()
.map(|c| (&*c.labels[LABEL_ALLOCATION_ID], &*c.id))
.collect();
// Remove containers which exist but aren't part of an allocataion
let remove_fut: Vec<_> = containers
.iter()
.filter(|container| {
!desired_allocs.contains_key(&*container.labels[LABEL_ALLOCATION_ID])
})
.map(|container| {
docker
.containers()
.get(&container.id)
.remove(RmContainerOptions::builder().force(true).build())
.from_err()
})
.collect();
// Create containers which are allocated but aren't known to Docker
let create_fut: Vec<_> = desired_allocs
.iter()
.filter(|(id, _)| !current_containers.contains_key(*id))
.map(|(_, alloc)| executor.create_container(alloc))
.collect();
info!(
"Updating running containers: {} -> {} (create {}, kill {})",
containers.len(),
desired_allocs.len(),
create_fut.len(),
remove_fut.len()
);
join_all(create_fut)
.join(join_all(remove_fut))
.then(|res| check_err("Execute containers", res))
.into_actor(executor)
})
}
/// Create and start a container for an allocation. Pulls the image if needed.
fn create_container(&self, alloc: &Allocation) -> Box<Future<Item = (), Error = Error>> {
let docker = Docker::new();
let job_services = &self.state.jobs.get(&alloc.job_id);
let service = match job_services.and_then(|j| j.services.get(&alloc.service_name)) {
Some(service) => service,
None => {
return Box::new(err(format_err!(
"Service '{}' '{}' allocated but not defined",
alloc.job_id,
alloc.service_name
)));
}
};
let image = if service.image.contains(':') {
service.image.clone()
} else {
format!("{}:latest", service.image)
};
let mut labels = HashMap::new();
labels.insert(LABEL_ALLOCATION_ID, &*alloc.allocation_id);
labels.insert(LABEL_NODE_ID, &self.node_id);
let pull_opts = PullOptions::builder().image(&*image).build();
let create_opts = service
.build_container_options()
.unwrap()
.labels(&labels)
.restart_policy("on-failure", 4)
.build();
Box::new(
docker
.images()
.get(&image)
.inspect()
.map(move |_| info!("Image already pulled: {:?}", image))
.or_else(move |_| {
docker.images().pull(&pull_opts).for_each(|p| {
debug!("Pull: {:?}", p);
Ok(())
})
})
.and_then(move |_| Docker::new().containers().create(&create_opts))
.and_then(|res| Docker::new().containers().get(&*res.id).start())
.from_err(),
)
}
}
impl Actor for Executor {
type Context = Context<Self>;
}
impl Supervised for Executor {}
impl SystemService for Executor {}
/// Fire-and-forget command messages for Executor
#[derive(Clone, Debug)]
pub enum ExecutorCommand {
UpdateState(ClusterState),
JoinCluster {
local_port: u16,
join_addr: SocketAddr,
},
SetNodeId(NodeId),
}
impl Message for ExecutorCommand {
type Result = Result<(), Error>;
}
impl Handler<ExecutorCommand> for Executor {
type Result = ResponseFuture<(), Error>;
fn handle(&mut self, cmd: ExecutorCommand, ctx: &mut Context<Self>) -> Self::Result {
debug!("Executor handling command: {:?}", cmd);
match cmd {
ExecutorCommand::UpdateState(state) => {
ctx.spawn(self.update_state(state));
Box::new(ok(()))
}
ExecutorCommand::JoinCluster {
local_port,
join_addr,
} => Box::new(self.join(join_addr, local_port)),
ExecutorCommand::SetNodeId(node_id) => {
self.node_id = node_id;
Box::new(ok(()))
}
}
}
}
/// Get the address of the master node (if this node is not master)
pub struct GetRemoteMaster;
impl Message for GetRemoteMaster {
type Result = Result<Option<SocketAddr>, Error>;
}
impl Handler<GetRemoteMaster> for Executor {
type Result = Result<Option<SocketAddr>, Error>;
fn handle(&mut self, _: GetRemoteMaster, _: &mut Context<Self>) -> Self::Result {
match self.state.master_node() {
Some(master) => {
if master.node_id == self.node_id {
Ok(None)
} else |
}
None => Err(err_msg("Master unknown.")),
}
}
}
/// Message requesting resource usage of the local node
pub struct GetNodeResources;
impl Message for GetNodeResources {
type Result = Result<NodeResources, Error>;
}
impl Handler<GetNodeResources> for Executor {
type Result = Result<NodeResources, Error>;
fn handle(&mut self, _: GetNodeResources, _: &mut Context<Self>) -> Self::Result {
self.system.refresh_system();
Ok(NodeResources {
total_memory: self.system.get_total_memory(),
used_memory: self.system.get_used_memory(),
cpu_usage: self.system.get_processor_list()[1..]
.iter()
.map(|p| p.get_cpu_usage())
.collect(),
})
}
}
/// Logs the result of async fire-and-forget futures.
pub fn check_err<T, U>(msg: &str, res: Result<T, U>) -> impl Future<Item = (), Error = ()>
where
T: Debug,
U: Debug,
{
match res {
Ok(ok_res) => debug!("{}: {:?}", msg, ok_res),
Err(err_res) => error!("{}: {:?}", msg, err_res),
};
ok(())
}
#[cfg(test)]
mod test {
use crate::executor::*;
use crate::test_support::*;
#[test]
fn test_node_resources() {
with_node("127.0.0.1:9001", || {
Executor::from_registry()
.send(GetNodeResources)
.and_then(|res| {
let resources = res.expect("Get resources failed");
assert!(resources.total_memory - resources.used_memory > 0);
assert!(!resources.cpu_usage.is_empty());
Ok(())
})
});
}
}
| {
Ok(Some(master.cluster_address))
} | conditional_block |
executor.rs | //! The executor runs on all nodes and is resonsible for reconciling the requested state (from the
//! cluster's master), and the locally running containers.
//!
//! When a new state is received, it queries Docker Engine to see if:
//! 1) any scheduled containers are not currently running
//! 2) and running containers aren't still scheduled
//!
//! This actor is not responsible for modifying the cluster state. For that, see Scheduler.
use crate::scheduler::*;
use actix::fut::{ActorFuture, WrapFuture};
use actix::prelude::*;
use actix::registry::SystemService;
use actix_web::client;
use failure::{err_msg, Error};
use futures::future::{err, join_all, ok, Future};
use futures::stream::Stream;
use shiplift::builder::*;
use shiplift::Docker;
use std::collections::HashMap;
use std::fmt::Debug;
use std::net::{SocketAddr, ToSocketAddrs};
use sysinfo::{ProcessorExt, SystemExt};
// Labels to apply to containers
const LABEL_NODE_ID: &str = "com.aluminous.cacophony.node-id";
const LABEL_ALLOCATION_ID: &str = "com.aluminous.cacophony.allocation-id";
/// Details about the current resource usage of a node
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct NodeResources {
pub total_memory: u64,
pub used_memory: u64,
pub cpu_usage: Vec<f32>,
}
/// Updates the local state (running containers) to match the cluster state.
#[derive(Default)]
pub struct Executor {
state: ClusterState,
node_id: NodeId,
system: sysinfo::System,
}
impl Executor {
fn join<S: ToSocketAddrs>(
&mut self,
join_host: S,
local_port: u16,
) -> impl Future<Item = (), Error = Error> {
let join_host = join_host.to_socket_addrs().unwrap().next().unwrap();
info!("Attempting to join cluster at {}", join_host);
client::post(format!(
"http://{}/cluster/node/{}",
join_host, self.node_id
))
.json(local_port)
.unwrap()
.send()
.from_err()
.and_then(|res| {
if res.status().is_success() {
info!("Announced our presence to cluster.");
Ok(())
} else {
Err(format_err!("Failed to join cluster: {:?}", res))
}
})
}
/// Update currently running containers to match allocated services.
fn update_state(
&mut self,
state: ClusterState,
) -> impl ActorFuture<Item = (), Error = (), Actor = Self> {
self.state = state;
let docker = Docker::new();
docker
.containers()
.list(
// Get all the containers created by this node
&ContainerListOptions::builder()
.filter(vec![ContainerFilter::Label(
String::from(LABEL_NODE_ID),
self.node_id.clone(),
)])
.all()
.build(),
)
.map_err(|_| ())
.into_actor(self)
.and_then(move |containers, executor, _ctx| {
let desired_allocs: HashMap<&str, &Allocation> = executor
.state
.allocations
.values()
.filter(|a| a.node_id == executor.node_id)
.map(|a| (&*a.allocation_id, a))
.collect();
let current_containers: HashMap<&str, &str> = containers
.iter()
.map(|c| (&*c.labels[LABEL_ALLOCATION_ID], &*c.id))
.collect();
// Remove containers which exist but aren't part of an allocataion
let remove_fut: Vec<_> = containers
.iter()
.filter(|container| {
!desired_allocs.contains_key(&*container.labels[LABEL_ALLOCATION_ID])
})
.map(|container| {
docker
.containers()
.get(&container.id)
.remove(RmContainerOptions::builder().force(true).build())
.from_err()
})
.collect();
// Create containers which are allocated but aren't known to Docker
let create_fut: Vec<_> = desired_allocs
.iter()
.filter(|(id, _)| !current_containers.contains_key(*id))
.map(|(_, alloc)| executor.create_container(alloc))
.collect();
info!(
"Updating running containers: {} -> {} (create {}, kill {})",
containers.len(),
desired_allocs.len(),
create_fut.len(),
remove_fut.len()
);
join_all(create_fut)
.join(join_all(remove_fut))
.then(|res| check_err("Execute containers", res))
.into_actor(executor)
})
}
/// Create and start a container for an allocation. Pulls the image if needed.
fn create_container(&self, alloc: &Allocation) -> Box<Future<Item = (), Error = Error>> {
let docker = Docker::new();
let job_services = &self.state.jobs.get(&alloc.job_id);
let service = match job_services.and_then(|j| j.services.get(&alloc.service_name)) {
Some(service) => service,
None => {
return Box::new(err(format_err!(
"Service '{}' '{}' allocated but not defined",
alloc.job_id,
alloc.service_name
)));
}
};
let image = if service.image.contains(':') {
service.image.clone()
} else {
format!("{}:latest", service.image)
};
let mut labels = HashMap::new();
labels.insert(LABEL_ALLOCATION_ID, &*alloc.allocation_id);
labels.insert(LABEL_NODE_ID, &self.node_id);
let pull_opts = PullOptions::builder().image(&*image).build();
let create_opts = service
.build_container_options()
.unwrap()
.labels(&labels)
.restart_policy("on-failure", 4)
.build();
Box::new(
docker
.images()
.get(&image) | .map(move |_| info!("Image already pulled: {:?}", image))
.or_else(move |_| {
docker.images().pull(&pull_opts).for_each(|p| {
debug!("Pull: {:?}", p);
Ok(())
})
})
.and_then(move |_| Docker::new().containers().create(&create_opts))
.and_then(|res| Docker::new().containers().get(&*res.id).start())
.from_err(),
)
}
}
impl Actor for Executor {
type Context = Context<Self>;
}
impl Supervised for Executor {}
impl SystemService for Executor {}
/// Fire-and-forget command messages for Executor
#[derive(Clone, Debug)]
pub enum ExecutorCommand {
UpdateState(ClusterState),
JoinCluster {
local_port: u16,
join_addr: SocketAddr,
},
SetNodeId(NodeId),
}
impl Message for ExecutorCommand {
type Result = Result<(), Error>;
}
impl Handler<ExecutorCommand> for Executor {
type Result = ResponseFuture<(), Error>;
fn handle(&mut self, cmd: ExecutorCommand, ctx: &mut Context<Self>) -> Self::Result {
debug!("Executor handling command: {:?}", cmd);
match cmd {
ExecutorCommand::UpdateState(state) => {
ctx.spawn(self.update_state(state));
Box::new(ok(()))
}
ExecutorCommand::JoinCluster {
local_port,
join_addr,
} => Box::new(self.join(join_addr, local_port)),
ExecutorCommand::SetNodeId(node_id) => {
self.node_id = node_id;
Box::new(ok(()))
}
}
}
}
/// Get the address of the master node (if this node is not master)
pub struct GetRemoteMaster;
impl Message for GetRemoteMaster {
type Result = Result<Option<SocketAddr>, Error>;
}
impl Handler<GetRemoteMaster> for Executor {
type Result = Result<Option<SocketAddr>, Error>;
fn handle(&mut self, _: GetRemoteMaster, _: &mut Context<Self>) -> Self::Result {
match self.state.master_node() {
Some(master) => {
if master.node_id == self.node_id {
Ok(None)
} else {
Ok(Some(master.cluster_address))
}
}
None => Err(err_msg("Master unknown.")),
}
}
}
/// Message requesting resource usage of the local node
pub struct GetNodeResources;
impl Message for GetNodeResources {
type Result = Result<NodeResources, Error>;
}
impl Handler<GetNodeResources> for Executor {
type Result = Result<NodeResources, Error>;
fn handle(&mut self, _: GetNodeResources, _: &mut Context<Self>) -> Self::Result {
self.system.refresh_system();
Ok(NodeResources {
total_memory: self.system.get_total_memory(),
used_memory: self.system.get_used_memory(),
cpu_usage: self.system.get_processor_list()[1..]
.iter()
.map(|p| p.get_cpu_usage())
.collect(),
})
}
}
/// Logs the result of async fire-and-forget futures.
pub fn check_err<T, U>(msg: &str, res: Result<T, U>) -> impl Future<Item = (), Error = ()>
where
T: Debug,
U: Debug,
{
match res {
Ok(ok_res) => debug!("{}: {:?}", msg, ok_res),
Err(err_res) => error!("{}: {:?}", msg, err_res),
};
ok(())
}
#[cfg(test)]
mod test {
use crate::executor::*;
use crate::test_support::*;
#[test]
fn test_node_resources() {
with_node("127.0.0.1:9001", || {
Executor::from_registry()
.send(GetNodeResources)
.and_then(|res| {
let resources = res.expect("Get resources failed");
assert!(resources.total_memory - resources.used_memory > 0);
assert!(!resources.cpu_usage.is_empty());
Ok(())
})
});
}
} | .inspect() | random_line_split |
core.js | const DB = require("./database.js");
//var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
//const Rooms = require("./roomsModel.js");
var CORE = {
num_clients: 0, //amount of connected clients
chatRooms: [], //has each room and the clients inside each
DB: require("./database.js"), //get the abstracted database
init: function() {
console.log("Inizialising CORE...");
},
onClientConnect: function(connection) {
connection.username = false; //starts as false until user logIn
let rooms = [];
CORE.chatRooms.forEach(room => {
rooms.push(room.roomName);
});
msg = {
type: "rooms",
rooms: rooms
}
connection.sendUTF(JSON.stringify(msg));
this.num_clients++;
},
onNewMessage: async function(connection, message) {
//console.log(` - ${connection.username || "'Unidentified User'"} sent us: ${JSON.stringify(message.utf8Data)}`);
let msg = JSON.parse(message.utf8Data);
switch (msg.type) {
case 'aut': //autentication to enter a room
// if (connection.username) //case if was in a different room before
// leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection); **no hace falta si no cambia de sala
let validUser = await this.onLogIn(msg.username, msg.password);
let alreadyIn = false; //token true if username already inside any chat room
let otherClientsIn = []; //get the other users already in room
this.chatRooms.forEach(room => {
if (room) {
room.clients.forEach(client => {
if (client.username == msg.username) {
alreadyIn = true;
console.log(" > but the same username already is online.");
}
});
}
if (room.roomName == msg.room) {
room.clients.forEach(client => {
otherClientsIn.push({ username: client.username, cData: client.cData, avatar: client.avatar }); //save every client info
});
}
});
let validClient = false;
if ((otherClientsIn.length == 0 && msg.clientType == 'DJ') || (otherClientsIn.length > 0 && msg.clientType == 'partier')) {
validClient = true;
}
let validation = { type: "login", validConnection: (validUser != undefined) && !alreadyIn && validClient, validUser: validUser,
alreadyIn: alreadyIn, pos: validUser ? validUser.position : false, clients: otherClientsIn, avatar: validUser ? validUser.avatar : false, }; //sends if was a valid user
connection.sendUTF(JSON.stringify(validation)); //debugg **
if (validUser != undefined && !alreadyIn && validClient) {
setRoom(msg.room, msg.username, connection, validUser.avatar, validUser.position, msg.clientType);
let info = infoMsg(msg.username, `joined the room as "${msg.clientType}"`, true, validUser.position);
broadcastFromUser(info, connection);
}
break;
case 'create':
let validCreation = await this.onSignIn(msg.username, msg.password, msg.avatar);
console.log(validCreation);
let creation = { type: "signin", content: validCreation };
connection.sendUTF(JSON.stringify(creation));
break;
case 'text': //on text message
broadcastFromUser(msg, connection);
break;
case 'update': //updating data
let clients = this.chatRooms[connection.roomIndex].clients;
if (clients.length)
clients.forEach(client => {
if (client.connection == connection) {
client.pos = msg.cData.pos;
client.cData = msg.cData;
}
});
break;
case 'setsettings':
let validIdentification = await this.onLogIn(connection.username, msg.password); //not login but the same confirmation
let usernameTaken = await DB.findByUsername(msg.update.username); //check if already taken
let canUpdate = { type: "validUpdate", content: false};
if (validIdentification && (!usernameTaken.length || !msg.update.username) ) { //if valid old password and not taking a new username already taken
DB.updateUserData(validIdentification.username, false, msg.update.avatar, msg.update.password, msg.update.username);
updateInfo(connection, msg.update.username, msg.update.avatar); //pass the elements to update if unidentified checked inside
console.log(`User "${validIdentification.username}" updated: ${JSON.stringify(msg.update)}`);
canUpdate.content = true;
//message to broadcast other users with the updates
let userUpdate = { type: "userUpdate", oldUsername: validIdentification.username, updates: {username: msg.update.username, avatar: msg.update.avatar} };
broadcastFromUser(userUpdate, connection);
}
connection.sendUTF(JSON.stringify(canUpdate));
break;
case 'info-music':
broadcastFromUser(msg, connection);
console.log(msg); //music info mesage
break;
/* case 'room-info':
let clients = CORE.chatRooms[connection.roomIndex].clients; //clients in his same room
msg={
type: "list-clients",
content: clients
}
connection.sendUTF(JSON.stringify(msg))
break; */
default:
break;
}
},
onClientDisconnect: function(connection) {
if (connection.username)
leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection);
this.num_clients--;
},
onLogIn: async function(username, password) {
let validUser = await this.DB.verifyLogIn(username, password); //needs await-async to handle the pending promise (also out if not wrong/ may be )
//console.log(validUser); //debugg see the valid user
return validUser; //return the user found to be valid (or none)
},
onSignIn: async function(username, password, avatar) {
let wasCreated = await this.DB.createUser(username, password, avatar);
console.log(wasCreated);
if (wasCreated) {
console.log(`The Client "${username}" was created`);
return true;
} else
return false;
}
};
module.exports = CORE;
// FUNCTIONS
function updateInfo(connection, username, avatar) {
CORE.chatRooms[connection.roomIndex].clients.forEach(client => {
if (client.connection == connection) {
if (username != undefined) {
client.username = connection.username = username;
}
if (avatar != undefined)
client.avatar = avatar;
}
});
}
function setRoom(room, username, connection, avatar, position, clientType) { //sets a connected user to a room
let roomIndex = CORE.chatRooms.findIndex(element => element.roomName == room);
let index = 0; //basic index if is a new room
let pos = position ? position : [0, 0.1, 0];
let cData = {};
if (roomIndex == -1) {
roomIndex = CORE.chatRooms.length; //if is a new room added at the end
CORE.chatRooms.push({
roomName: room,
clients: [{ connection, username, avatar, pos, cData, clientType }]
});
} else {
index = CORE.chatRooms[roomIndex].clients.push({ connection, username, avatar, pos, cData, clientType }) - 1;
}
//save the data used on connection
connection.username = username;
connection.roomName = room;
connection.roomIndex = roomIndex;
console.log(`<- User "${connection.username}" [avatar: ${avatar}] connected to room "${connection.roomName}" [${index}, ${connection.roomIndex}] in the position "${pos}"`);
}
async function leaveRoom(username, roomName, roomIndex, connection) { // remove user from it room connected clients
let userIndex = -1;
let i = 0;
let clients = CORE.chatRooms[roomIndex].clients;
clients.forEach(element => {
if (element.connection == connection)
userIndex = i;
i++;
//console.log(element.connection == connection, userIndex); //see the actual user leaving connection
});
if (userIndex != -1) {
if (DB.updateUserData(username, position = clients[userIndex].pos)) { //on leave we update the data
console.log(` on leave, user "${username}" position was updated to ${position}.`);
}
let outClientType = clients[userIndex].clientType;
await clients.splice(userIndex, 1);
console.log(`-> User "${username}" disconected from room "${roomName}".`);
if(clients.length && outClientType == "DJ") { //if there is clients remaining in the room
clients[userIndex].clientType = "DJ"; //update next client after old DJ as the actual one
broadcastFromUser({type: "newDJ", username: clients[userIndex].username}, connection);
console.log(` - User "${clients[userIndex].username}" is the new DJ.`);
}
let info = infoMsg(username, "left the room.", false);
broadcastFromUser(info, connection);
}
}
function infoMsg(user, text, inRoom, position) { //username, message, bool is in room
let pos = {};
if (position) {
pos = position
}
let info = { type: "info", username: user, content: text, exists: inRoom, pos: pos };
return info;
}
function broadcastFromUser(msg, connection) { //broadcas all people except the incoming user
let clients = CORE.chatRooms[connection.roomIndex].clients; //clients in his same room
//var sendPos = clients.findIndex(client => client.connection == connection); //sender position
if (msg.type == "text") {
if (msg.to_user != "general-chat") {
var receiver = clients.findIndex(client => client.username == msg.to_user);
clients[receiver].connection.sendUTF(JSON.stringify(msg));
} else {
for (var k = 0; k < clients.length; k++) {
if (clients[k].connection != connection) |
}
}
} else {
for (var k = 0; k < clients.length; k++) {
if (clients[k].connection != connection) {
clients[k].connection.sendUTF(JSON.stringify(msg)); //*only stringify for chat messages
}
}
}
} | {
//if (msg.type != "text" || validDistance(clients[sendPos].pos, clients[k].pos)) { //only see if is in a valid distance for messages **Not used
clients[k].connection.sendUTF(JSON.stringify(msg)); //*only stringify for chat messages
//}
} | conditional_block |
core.js | const DB = require("./database.js");
//var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
//const Rooms = require("./roomsModel.js");
var CORE = {
num_clients: 0, //amount of connected clients
chatRooms: [], //has each room and the clients inside each
DB: require("./database.js"), //get the abstracted database
init: function() {
console.log("Inizialising CORE...");
},
onClientConnect: function(connection) {
connection.username = false; //starts as false until user logIn
let rooms = [];
CORE.chatRooms.forEach(room => {
rooms.push(room.roomName);
});
msg = {
type: "rooms",
rooms: rooms
}
connection.sendUTF(JSON.stringify(msg));
this.num_clients++;
},
onNewMessage: async function(connection, message) {
//console.log(` - ${connection.username || "'Unidentified User'"} sent us: ${JSON.stringify(message.utf8Data)}`);
let msg = JSON.parse(message.utf8Data);
switch (msg.type) {
case 'aut': //autentication to enter a room
// if (connection.username) //case if was in a different room before
// leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection); **no hace falta si no cambia de sala
let validUser = await this.onLogIn(msg.username, msg.password);
let alreadyIn = false; //token true if username already inside any chat room
let otherClientsIn = []; //get the other users already in room
this.chatRooms.forEach(room => {
if (room) {
room.clients.forEach(client => {
if (client.username == msg.username) {
alreadyIn = true;
console.log(" > but the same username already is online.");
}
});
}
if (room.roomName == msg.room) {
room.clients.forEach(client => {
otherClientsIn.push({ username: client.username, cData: client.cData, avatar: client.avatar }); //save every client info
});
}
});
let validClient = false;
if ((otherClientsIn.length == 0 && msg.clientType == 'DJ') || (otherClientsIn.length > 0 && msg.clientType == 'partier')) {
validClient = true;
}
let validation = { type: "login", validConnection: (validUser != undefined) && !alreadyIn && validClient, validUser: validUser,
alreadyIn: alreadyIn, pos: validUser ? validUser.position : false, clients: otherClientsIn, avatar: validUser ? validUser.avatar : false, }; //sends if was a valid user
connection.sendUTF(JSON.stringify(validation)); //debugg **
if (validUser != undefined && !alreadyIn && validClient) {
setRoom(msg.room, msg.username, connection, validUser.avatar, validUser.position, msg.clientType);
let info = infoMsg(msg.username, `joined the room as "${msg.clientType}"`, true, validUser.position);
broadcastFromUser(info, connection);
}
break;
case 'create':
let validCreation = await this.onSignIn(msg.username, msg.password, msg.avatar);
console.log(validCreation);
let creation = { type: "signin", content: validCreation };
connection.sendUTF(JSON.stringify(creation));
break;
case 'text': //on text message
broadcastFromUser(msg, connection);
break;
case 'update': //updating data
let clients = this.chatRooms[connection.roomIndex].clients;
if (clients.length)
clients.forEach(client => {
if (client.connection == connection) {
client.pos = msg.cData.pos;
client.cData = msg.cData;
}
});
break;
case 'setsettings':
let validIdentification = await this.onLogIn(connection.username, msg.password); //not login but the same confirmation
let usernameTaken = await DB.findByUsername(msg.update.username); //check if already taken
let canUpdate = { type: "validUpdate", content: false};
if (validIdentification && (!usernameTaken.length || !msg.update.username) ) { //if valid old password and not taking a new username already taken
DB.updateUserData(validIdentification.username, false, msg.update.avatar, msg.update.password, msg.update.username);
updateInfo(connection, msg.update.username, msg.update.avatar); //pass the elements to update if unidentified checked inside
console.log(`User "${validIdentification.username}" updated: ${JSON.stringify(msg.update)}`);
canUpdate.content = true;
//message to broadcast other users with the updates
let userUpdate = { type: "userUpdate", oldUsername: validIdentification.username, updates: {username: msg.update.username, avatar: msg.update.avatar} };
broadcastFromUser(userUpdate, connection);
}
connection.sendUTF(JSON.stringify(canUpdate));
break;
case 'info-music':
broadcastFromUser(msg, connection);
console.log(msg); //music info mesage
break;
/* case 'room-info':
let clients = CORE.chatRooms[connection.roomIndex].clients; //clients in his same room
msg={
type: "list-clients",
content: clients
}
connection.sendUTF(JSON.stringify(msg))
break; */
default:
break;
}
},
onClientDisconnect: function(connection) {
if (connection.username)
leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection);
this.num_clients--;
},
onLogIn: async function(username, password) {
let validUser = await this.DB.verifyLogIn(username, password); //needs await-async to handle the pending promise (also out if not wrong/ may be )
//console.log(validUser); //debugg see the valid user
return validUser; //return the user found to be valid (or none)
},
onSignIn: async function(username, password, avatar) {
let wasCreated = await this.DB.createUser(username, password, avatar);
console.log(wasCreated);
if (wasCreated) {
console.log(`The Client "${username}" was created`);
return true;
} else
return false;
}
};
module.exports = CORE;
// FUNCTIONS
function updateInfo(connection, username, avatar) |
function setRoom(room, username, connection, avatar, position, clientType) { //sets a connected user to a room
let roomIndex = CORE.chatRooms.findIndex(element => element.roomName == room);
let index = 0; //basic index if is a new room
let pos = position ? position : [0, 0.1, 0];
let cData = {};
if (roomIndex == -1) {
roomIndex = CORE.chatRooms.length; //if is a new room added at the end
CORE.chatRooms.push({
roomName: room,
clients: [{ connection, username, avatar, pos, cData, clientType }]
});
} else {
index = CORE.chatRooms[roomIndex].clients.push({ connection, username, avatar, pos, cData, clientType }) - 1;
}
//save the data used on connection
connection.username = username;
connection.roomName = room;
connection.roomIndex = roomIndex;
console.log(`<- User "${connection.username}" [avatar: ${avatar}] connected to room "${connection.roomName}" [${index}, ${connection.roomIndex}] in the position "${pos}"`);
}
async function leaveRoom(username, roomName, roomIndex, connection) { // remove user from it room connected clients
let userIndex = -1;
let i = 0;
let clients = CORE.chatRooms[roomIndex].clients;
clients.forEach(element => {
if (element.connection == connection)
userIndex = i;
i++;
//console.log(element.connection == connection, userIndex); //see the actual user leaving connection
});
if (userIndex != -1) {
if (DB.updateUserData(username, position = clients[userIndex].pos)) { //on leave we update the data
console.log(` on leave, user "${username}" position was updated to ${position}.`);
}
let outClientType = clients[userIndex].clientType;
await clients.splice(userIndex, 1);
console.log(`-> User "${username}" disconected from room "${roomName}".`);
if(clients.length && outClientType == "DJ") { //if there is clients remaining in the room
clients[userIndex].clientType = "DJ"; //update next client after old DJ as the actual one
broadcastFromUser({type: "newDJ", username: clients[userIndex].username}, connection);
console.log(` - User "${clients[userIndex].username}" is the new DJ.`);
}
let info = infoMsg(username, "left the room.", false);
broadcastFromUser(info, connection);
}
}
function infoMsg(user, text, inRoom, position) { //username, message, bool is in room
let pos = {};
if (position) {
pos = position
}
let info = { type: "info", username: user, content: text, exists: inRoom, pos: pos };
return info;
}
function broadcastFromUser(msg, connection) { //broadcas all people except the incoming user
let clients = CORE.chatRooms[connection.roomIndex].clients; //clients in his same room
//var sendPos = clients.findIndex(client => client.connection == connection); //sender position
if (msg.type == "text") {
if (msg.to_user != "general-chat") {
var receiver = clients.findIndex(client => client.username == msg.to_user);
clients[receiver].connection.sendUTF(JSON.stringify(msg));
} else {
for (var k = 0; k < clients.length; k++) {
if (clients[k].connection != connection) {
//if (msg.type != "text" || validDistance(clients[sendPos].pos, clients[k].pos)) { //only see if is in a valid distance for messages **Not used
clients[k].connection.sendUTF(JSON.stringify(msg)); //*only stringify for chat messages
//}
}
}
}
} else {
for (var k = 0; k < clients.length; k++) {
if (clients[k].connection != connection) {
clients[k].connection.sendUTF(JSON.stringify(msg)); //*only stringify for chat messages
}
}
}
} | {
CORE.chatRooms[connection.roomIndex].clients.forEach(client => {
if (client.connection == connection) {
if (username != undefined) {
client.username = connection.username = username;
}
if (avatar != undefined)
client.avatar = avatar;
}
});
} | identifier_body |
core.js | const DB = require("./database.js");
//var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
//const Rooms = require("./roomsModel.js");
var CORE = {
num_clients: 0, //amount of connected clients
chatRooms: [], //has each room and the clients inside each
DB: require("./database.js"), //get the abstracted database
init: function() {
console.log("Inizialising CORE...");
},
onClientConnect: function(connection) {
connection.username = false; //starts as false until user logIn
let rooms = [];
CORE.chatRooms.forEach(room => {
rooms.push(room.roomName);
});
msg = {
type: "rooms",
rooms: rooms
}
connection.sendUTF(JSON.stringify(msg));
this.num_clients++;
},
| //console.log(` - ${connection.username || "'Unidentified User'"} sent us: ${JSON.stringify(message.utf8Data)}`);
let msg = JSON.parse(message.utf8Data);
switch (msg.type) {
case 'aut': //autentication to enter a room
// if (connection.username) //case if was in a different room before
// leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection); **no hace falta si no cambia de sala
let validUser = await this.onLogIn(msg.username, msg.password);
let alreadyIn = false; //token true if username already inside any chat room
let otherClientsIn = []; //get the other users already in room
this.chatRooms.forEach(room => {
if (room) {
room.clients.forEach(client => {
if (client.username == msg.username) {
alreadyIn = true;
console.log(" > but the same username already is online.");
}
});
}
if (room.roomName == msg.room) {
room.clients.forEach(client => {
otherClientsIn.push({ username: client.username, cData: client.cData, avatar: client.avatar }); //save every client info
});
}
});
let validClient = false;
if ((otherClientsIn.length == 0 && msg.clientType == 'DJ') || (otherClientsIn.length > 0 && msg.clientType == 'partier')) {
validClient = true;
}
let validation = { type: "login", validConnection: (validUser != undefined) && !alreadyIn && validClient, validUser: validUser,
alreadyIn: alreadyIn, pos: validUser ? validUser.position : false, clients: otherClientsIn, avatar: validUser ? validUser.avatar : false, }; //sends if was a valid user
connection.sendUTF(JSON.stringify(validation)); //debugg **
if (validUser != undefined && !alreadyIn && validClient) {
setRoom(msg.room, msg.username, connection, validUser.avatar, validUser.position, msg.clientType);
let info = infoMsg(msg.username, `joined the room as "${msg.clientType}"`, true, validUser.position);
broadcastFromUser(info, connection);
}
break;
case 'create':
let validCreation = await this.onSignIn(msg.username, msg.password, msg.avatar);
console.log(validCreation);
let creation = { type: "signin", content: validCreation };
connection.sendUTF(JSON.stringify(creation));
break;
case 'text': //on text message
broadcastFromUser(msg, connection);
break;
case 'update': //updating data
let clients = this.chatRooms[connection.roomIndex].clients;
if (clients.length)
clients.forEach(client => {
if (client.connection == connection) {
client.pos = msg.cData.pos;
client.cData = msg.cData;
}
});
break;
case 'setsettings':
let validIdentification = await this.onLogIn(connection.username, msg.password); //not login but the same confirmation
let usernameTaken = await DB.findByUsername(msg.update.username); //check if already taken
let canUpdate = { type: "validUpdate", content: false};
if (validIdentification && (!usernameTaken.length || !msg.update.username) ) { //if valid old password and not taking a new username already taken
DB.updateUserData(validIdentification.username, false, msg.update.avatar, msg.update.password, msg.update.username);
updateInfo(connection, msg.update.username, msg.update.avatar); //pass the elements to update if unidentified checked inside
console.log(`User "${validIdentification.username}" updated: ${JSON.stringify(msg.update)}`);
canUpdate.content = true;
//message to broadcast other users with the updates
let userUpdate = { type: "userUpdate", oldUsername: validIdentification.username, updates: {username: msg.update.username, avatar: msg.update.avatar} };
broadcastFromUser(userUpdate, connection);
}
connection.sendUTF(JSON.stringify(canUpdate));
break;
case 'info-music':
broadcastFromUser(msg, connection);
console.log(msg); //music info mesage
break;
/* case 'room-info':
let clients = CORE.chatRooms[connection.roomIndex].clients; //clients in his same room
msg={
type: "list-clients",
content: clients
}
connection.sendUTF(JSON.stringify(msg))
break; */
default:
break;
}
},
// Called when a websocket client disconnects: remove the user from its room
// (only if it had logged in) and update the connected-clients counter.
onClientDisconnect: function(connection) {
    if (connection.username)
        leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection);
    this.num_clients--;
},
// Verify a username/password pair against the database.
// Returns the matching user record, or undefined when the login is invalid.
onLogIn: async function(username, password) {
    let validUser = await this.DB.verifyLogIn(username, password); // await resolves the pending DB promise
    //console.log(validUser); //debugg see the valid user
    return validUser; // the user found to be valid (or none)
},
// Create a new user account. Returns true when the account was created,
// false otherwise (e.g. username already exists -- TODO confirm in DB.createUser).
onSignIn: async function(username, password, avatar) {
    let wasCreated = await this.DB.createUser(username, password, avatar);
    console.log(wasCreated);
    if (wasCreated) {
        console.log(`The Client "${username}" was created`);
        return true;
    } else
        return false;
}
};
module.exports = CORE;
// FUNCTIONS
// Apply a username and/or avatar change to the client record bound to
// `connection` in its current room. Undefined arguments are left untouched.
function updateInfo(connection, username, avatar) {
    const roomClients = CORE.chatRooms[connection.roomIndex].clients;
    for (const entry of roomClients) {
        if (entry.connection != connection) {
            continue; // only the record of this very connection is updated
        }
        if (username != undefined) {
            // keep the room record and the connection cache in sync
            entry.username = username;
            connection.username = username;
        }
        if (avatar != undefined) {
            entry.avatar = avatar;
        }
    }
}
// Register a connected user in a chat room, creating the room on first use.
// `position` falls back to [0, 0.1, 0] when falsy; the room name/index and the
// username are also cached on the websocket connection for later lookups.
function setRoom(room, username, connection, avatar, position, clientType) { //sets a connected user to a room
    let roomIndex = CORE.chatRooms.findIndex(element => element.roomName == room);
    let index = 0; //index of this client inside the room (0 for a brand-new room)
    let pos = position ? position : [0, 0.1, 0]; //default spawn position
    let cData = {}; //per-client data, filled later by 'update' messages
    if (roomIndex == -1) {
        roomIndex = CORE.chatRooms.length; //new room: appended at the end
        CORE.chatRooms.push({
            roomName: room,
            clients: [{ connection, username, avatar, pos, cData, clientType }]
        });
    } else {
        //existing room: push() returns the new length, so length-1 is this client's index
        index = CORE.chatRooms[roomIndex].clients.push({ connection, username, avatar, pos, cData, clientType }) - 1;
    }
    //save the data used on connection
    connection.username = username;
    connection.roomName = room;
    connection.roomIndex = roomIndex;
    console.log(`<- User "${connection.username}" [avatar: ${avatar}] connected to room "${connection.roomName}" [${index}, ${connection.roomIndex}] in the position "${pos}"`);
}
// Remove a disconnecting user from its room's client list.
// Persists the user's last position and, if the leaver was the DJ, promotes
// another remaining client to DJ and broadcasts the change.
async function leaveRoom(username, roomName, roomIndex, connection) {
    const clients = CORE.chatRooms[roomIndex].clients;
    // locate the entry belonging to this exact connection
    const userIndex = clients.findIndex(element => element.connection == connection);
    if (userIndex == -1) {
        return; // connection was not registered in this room
    }
    const lastPos = clients[userIndex].pos;
    // BUG FIX: the original wrote `position = clients[userIndex].pos` inside the
    // call, creating an implicit global (a ReferenceError in strict mode).
    if (DB.updateUserData(username, lastPos)) { //on leave we persist the position
        console.log(` on leave, user "${username}" position was updated to ${lastPos}.`);
    }
    const outClientType = clients[userIndex].clientType;
    clients.splice(userIndex, 1); // splice is synchronous; no await needed
    console.log(`-> User "${username}" disconected from room "${roomName}".`);
    if (clients.length && outClientType == "DJ") { //clients remain and the DJ left
        // BUG FIX: after the splice, userIndex may point past the end of the
        // array (the DJ was the last entry); wrap around to the first client.
        const djIndex = userIndex < clients.length ? userIndex : 0;
        clients[djIndex].clientType = "DJ"; //next client becomes the actual DJ
        broadcastFromUser({ type: "newDJ", username: clients[djIndex].username }, connection);
        console.log(` - User "${clients[djIndex].username}" is the new DJ.`);
    }
    let info = infoMsg(username, "left the room.", false);
    broadcastFromUser(info, connection);
}
// Build an "info" broadcast payload.
// `position` is optional; when absent/falsy the pos field is an empty object.
function infoMsg(user, text, inRoom, position) {
    const pos = position ? position : {};
    return { type: "info", username: user, content: text, exists: inRoom, pos: pos };
}
// Broadcast `msg` to every client in the sender's room except the sender.
// Private text messages (msg.to_user != "general-chat") are delivered only to
// the named recipient; everything else is fanned out to the whole room.
// BUG FIX: the closing line carried dataset-table residue; it is removed here.
function broadcastFromUser(msg, connection) {
    const clients = CORE.chatRooms[connection.roomIndex].clients; //clients in the sender's room
    // helper: send to everyone in the room except the originating connection
    const fanOut = function() {
        for (let k = 0; k < clients.length; k++) {
            if (clients[k].connection != connection) {
                clients[k].connection.sendUTF(JSON.stringify(msg));
            }
        }
    };
    if (msg.type == "text" && msg.to_user != "general-chat") {
        // private message: deliver to the addressed user only
        const receiver = clients.findIndex(client => client.username == msg.to_user);
        // BUG FIX: guard against an unknown recipient -- findIndex returns -1
        // and clients[-1].connection would crash the server
        if (receiver != -1) {
            clients[receiver].connection.sendUTF(JSON.stringify(msg));
        }
    } else {
        fanOut();
    }
}
core.js | const DB = require("./database.js");
//var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
//const Rooms = require("./roomsModel.js");
var CORE = {
num_clients: 0, //amount of connected clients
chatRooms: [], //has each room and the clients inside each
DB: require("./database.js"), //get the abstracted database
// One-time startup hook; currently only logs that the core is starting.
init: function() {
    console.log("Inizialising CORE...");
},
onClientConnect: function(connection) {
connection.username = false; //starts as false until user logIn
let rooms = [];
CORE.chatRooms.forEach(room => {
rooms.push(room.roomName);
});
msg = {
type: "rooms",
rooms: rooms
}
connection.sendUTF(JSON.stringify(msg));
this.num_clients++;
},
// Central message dispatcher: parses an incoming websocket frame and routes it
// by msg.type ('aut', 'create', 'text', 'update', 'setsettings', 'info-music').
onNewMessage: async function(connection, message) {
    //console.log(` - ${connection.username || "'Unidentified User'"} sent us: ${JSON.stringify(message.utf8Data)}`);
    let msg = JSON.parse(message.utf8Data);
    switch (msg.type) {
        case 'aut': //authentication to enter a room
            // if (connection.username) //case if the user was in a different room before
            // leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection); **not needed if the user does not change rooms
            let validUser = await this.onLogIn(msg.username, msg.password);
            let alreadyIn = false; //true if the username is already inside any chat room
            let otherClientsIn = []; //the other users already in the target room
            this.chatRooms.forEach(room => {
                if (room) {
                    room.clients.forEach(client => {
                        if (client.username == msg.username) {
                            alreadyIn = true; //reject duplicate logins
                            console.log(" > but the same username already is online.");
                        }
                    });
                }
                if (room.roomName == msg.room) {
                    room.clients.forEach(client => {
                        otherClientsIn.push({ username: client.username, cData: client.cData, avatar: client.avatar }); //save every client info
                    });
                }
            });
            // the first client of a room must be a DJ; later ones must be partiers
            let validClient = false;
            if ((otherClientsIn.length == 0 && msg.clientType == 'DJ') || (otherClientsIn.length > 0 && msg.clientType == 'partier')) {
                validClient = true;
            }
            let validation = { type: "login", validConnection: (validUser != undefined) && !alreadyIn && validClient, validUser: validUser,
                alreadyIn: alreadyIn, pos: validUser ? validUser.position : false, clients: otherClientsIn, avatar: validUser ? validUser.avatar : false, }; //tells the client whether the login was valid
            connection.sendUTF(JSON.stringify(validation)); //debugg **
            if (validUser != undefined && !alreadyIn && validClient) {
                setRoom(msg.room, msg.username, connection, validUser.avatar, validUser.position, msg.clientType);
                let info = infoMsg(msg.username, `joined the room as "${msg.clientType}"`, true, validUser.position);
                broadcastFromUser(info, connection);
            }
            break;
        case 'create': //account creation request
            let validCreation = await this.onSignIn(msg.username, msg.password, msg.avatar);
            console.log(validCreation);
            let creation = { type: "signin", content: validCreation };
            connection.sendUTF(JSON.stringify(creation));
            break;
        case 'text': //on text message
            broadcastFromUser(msg, connection);
            break;
        case 'update': //updating client data (position etc.)
            let clients = this.chatRooms[connection.roomIndex].clients;
            if (clients.length)
                clients.forEach(client => {
                    if (client.connection == connection) {
                        client.pos = msg.cData.pos;
                        client.cData = msg.cData;
                    }
                });
            break;
        case 'setsettings': //change username/avatar/password
            let validIdentification = await this.onLogIn(connection.username, msg.password); //not a login but the same confirmation
            let usernameTaken = await DB.findByUsername(msg.update.username); //check if already taken
            let canUpdate = { type: "validUpdate", content: false};
            if (validIdentification && (!usernameTaken.length || !msg.update.username) ) { //valid old password and not taking a username already in use
                DB.updateUserData(validIdentification.username, false, msg.update.avatar, msg.update.password, msg.update.username);
                updateInfo(connection, msg.update.username, msg.update.avatar); //pass the elements to update; undefined values are skipped inside
                console.log(`User "${validIdentification.username}" updated: ${JSON.stringify(msg.update)}`);
                canUpdate.content = true;
                //message to broadcast the updates to the other users
                let userUpdate = { type: "userUpdate", oldUsername: validIdentification.username, updates: {username: msg.update.username, avatar: msg.update.avatar} };
                broadcastFromUser(userUpdate, connection);
            }
            connection.sendUTF(JSON.stringify(canUpdate));
            break;
        case 'info-music': //music info message relayed to the room
            broadcastFromUser(msg, connection);
            console.log(msg); //music info mesage
            break;
        /* case 'room-info':
            let clients = CORE.chatRooms[connection.roomIndex].clients; //clients in his same room
            msg={
                type: "list-clients",
                content: clients
            }
            connection.sendUTF(JSON.stringify(msg))
            break; */
        default: //unknown message types are ignored
            break;
    }
},
// Remove a disconnecting client from its room (only if logged in) and
// update the connected-clients counter.
onClientDisconnect: function(connection) {
    if (connection.username)
        leaveRoom(connection.username, connection.roomName, connection.roomIndex, connection);
    this.num_clients--;
},
// Verify a username/password pair against the database.
// Returns the matching user record, or undefined when the login is invalid.
onLogIn: async function(username, password) {
    let validUser = await this.DB.verifyLogIn(username, password); // await resolves the pending DB promise
    //console.log(validUser); //debugg see the valid user
    return validUser; // the user found to be valid (or none)
},
// Create a new user account. Returns true when the account was created,
// false otherwise (e.g. username already exists -- TODO confirm in DB.createUser).
onSignIn: async function(username, password, avatar) {
    let wasCreated = await this.DB.createUser(username, password, avatar);
    console.log(wasCreated);
    if (wasCreated) {
        console.log(`The Client "${username}" was created`);
        return true;
    } else
        return false;
}
};
module.exports = CORE;
// FUNCTIONS
// Update the stored username/avatar of the client bound to `connection`
// in its current room; undefined arguments are skipped.
function updateInfo(connection, username, avatar) {
    CORE.chatRooms[connection.roomIndex].clients.forEach(client => {
        if (client.connection == connection) {
            if (username != undefined) {
                //keep the room record and the connection cache in sync
                client.username = connection.username = username;
            }
            if (avatar != undefined)
                client.avatar = avatar;
        }
    });
}
// Attach a logged-in user to a chat room, creating the room when it does not
// exist yet, and cache the room data on the websocket connection.
function setRoom(room, username, connection, avatar, position, clientType) {
    const pos = position ? position : [0, 0.1, 0]; // default spawn position
    const cData = {}; // per-client data, filled later by 'update' messages
    const record = { connection, username, avatar, pos, cData, clientType };
    let roomIndex = CORE.chatRooms.findIndex(element => element.roomName == room);
    let index = 0; // position of this client inside the room
    if (roomIndex == -1) {
        // unknown room: create it with this user as its first client
        roomIndex = CORE.chatRooms.length;
        CORE.chatRooms.push({ roomName: room, clients: [record] });
    } else {
        // push() returns the new length, so length-1 is this client's index
        index = CORE.chatRooms[roomIndex].clients.push(record) - 1;
    }
    // remember where this connection lives for later messages
    connection.username = username;
    connection.roomName = room;
    connection.roomIndex = roomIndex;
    console.log(`<- User "${connection.username}" [avatar: ${avatar}] connected to room "${connection.roomName}" [${index}, ${connection.roomIndex}] in the position "${pos}"`);
}
// Remove a disconnecting user from its room's client list.
// Persists the user's last position and, if the leaver was the DJ, promotes
// another remaining client to DJ and broadcasts the change.
async function leaveRoom(username, roomName, roomIndex, connection) {
    const clients = CORE.chatRooms[roomIndex].clients;
    // locate the entry belonging to this exact connection
    const userIndex = clients.findIndex(element => element.connection == connection);
    if (userIndex == -1) {
        return; // connection was not registered in this room
    }
    const lastPos = clients[userIndex].pos;
    // BUG FIX: the original wrote `position = clients[userIndex].pos` inside the
    // call, creating an implicit global (a ReferenceError in strict mode).
    if (DB.updateUserData(username, lastPos)) { //on leave we persist the position
        console.log(` on leave, user "${username}" position was updated to ${lastPos}.`);
    }
    const outClientType = clients[userIndex].clientType;
    clients.splice(userIndex, 1); // splice is synchronous; no await needed
    console.log(`-> User "${username}" disconected from room "${roomName}".`);
    if (clients.length && outClientType == "DJ") { //clients remain and the DJ left
        // BUG FIX: after the splice, userIndex may point past the end of the
        // array (the DJ was the last entry); wrap around to the first client.
        const djIndex = userIndex < clients.length ? userIndex : 0;
        clients[djIndex].clientType = "DJ"; //next client becomes the actual DJ
        broadcastFromUser({ type: "newDJ", username: clients[djIndex].username }, connection);
        console.log(` - User "${clients[djIndex].username}" is the new DJ.`);
    }
    let info = infoMsg(username, "left the room.", false);
    broadcastFromUser(info, connection);
}
// Build an "info" message payload for broadcasting.
// BUG FIX: the function name was lost in the corrupted source (`function | (...)`).
// Restored as infoMsg from the call sites in leaveRoom and the 'aut' handler.
function infoMsg(user, text, inRoom, position) { //username, message, bool is in room
    let pos = {};
    if (position) {
        pos = position;
    }
    let info = { type: "info", username: user, content: text, exists: inRoom, pos: pos };
    return info;
}
// Broadcast `msg` to every client in the sender's room except the sender.
// Private text messages (msg.to_user != "general-chat") are delivered only to
// the named recipient; everything else is fanned out to the whole room.
// BUG FIX: the closing line carried dataset-table residue; it is removed here.
function broadcastFromUser(msg, connection) {
    const clients = CORE.chatRooms[connection.roomIndex].clients; //clients in the sender's room
    // helper: send to everyone in the room except the originating connection
    const fanOut = function() {
        for (let k = 0; k < clients.length; k++) {
            if (clients[k].connection != connection) {
                clients[k].connection.sendUTF(JSON.stringify(msg));
            }
        }
    };
    if (msg.type == "text" && msg.to_user != "general-chat") {
        // private message: deliver to the addressed user only
        const receiver = clients.findIndex(client => client.username == msg.to_user);
        // BUG FIX: guard against an unknown recipient -- findIndex returns -1
        // and clients[-1].connection would crash the server
        if (receiver != -1) {
            clients[receiver].connection.sendUTF(JSON.stringify(msg));
        }
    } else {
        fanOut();
    }
}
ann_utils.py | import sklearn
import datetime
from os import listdir
from os.path import isfile, join
from nlp_to_phenome import EDIRDoc
from annotation_docs import EDIRAnn
import reportreader as rr
import re
import utils
import logging
from operator import itemgetter
import xml.etree.ElementTree as ET
class eHostGenedDoc(EDIRDoc):
    """An eHost annotation document holding machine-generated annotations.

    Parses classMention/mention XML elements into EDIRAnn objects.
    """

    def __init__(self, file_path):
        # EDIRDoc loads and parses the XML file at file_path
        super(eHostGenedDoc, self).__init__(file_path)

    def get_ess_entities(self):
        """Return this document's annotations as a cached list of EDIRAnn."""
        if self._entities is not None:
            return self._entities  # already parsed
        root = self._root
        entities = []
        for e in root.findall('.//classMention'):
            mcs = e.findall('./mentionClass')
            mention_id = e.attrib['id']
            if len(mcs) > 0:
                mc = mcs[0]
                cls = mc.attrib['id']  # the class label of this mention
                # find the annotation element that owns the mention with this id
                mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
                if len(mentions) > 0:
                    span = mentions[0].findall('./span')
                    ent_start = span[0].attrib['start']
                    ent_end = span[0].attrib['end']
                    spannedText = mentions[0].findall('./spannedText')
                    str = spannedText[0].text  # NOTE: shadows the builtin str
                    ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
                    ann.id = len(entities)  # sequential id within this document
                    entities.append(ann)
        self._entities = entities
        return self._entities
class eHostAnnDoc(EDIRDoc):
    """
    a document class for ehost annotation file (human-validated annotations)
    """
    def __init__(self, file_path):
        super(eHostAnnDoc, self).__init__(file_path)

    def get_ess_entities(self, no_context=False):
        """Return validated annotations as a cached list of EDIRAnn.

        Only mention classes matching VERIFIED_*, IRRELEVANT_LABELS or ADDED_*
        are kept. With no_context=True, the leading context prefix (text before
        the first underscore) is stripped from every label except
        IRRELEVANT_LABELS.
        """
        if self._entities is not None:
            return self._entities  # already parsed
        root = self._root
        entities = []
        for e in root.findall('.//classMention'):
            mcs = e.findall('./mentionClass')
            mention_id = e.attrib['id']
            if len(mcs) > 0:
                mc = mcs[0]
                # try the three accepted label patterns in priority order
                m = re.match(r'VERIFIED\_([^\(]+)', mc.attrib['id'])
                if m is None:
                    m = re.match(r'(IRRELEVANT_LABELS)', mc.attrib['id'])
                if m is None:
                    m = re.match(r'(ADDED)\_([^\(]+)', mc.attrib['id'])
                if m is not None:
                    cls = m.group(1)
                    if no_context and cls != 'IRRELEVANT_LABELS':
                        if cls.find('_') >= 0:
                            cls = cls[cls.find('_')+1:]  # drop the context prefix
                    mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
                    if len(mentions) > 0:
                        span = mentions[0].findall('./span')
                        ent_start = span[0].attrib['start']
                        ent_end = span[0].attrib['end']
                        spannedText = mentions[0].findall('./spannedText')
                        str = spannedText[0].text  # NOTE: shadows the builtin str
                        ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
                        ann.id = len(entities)
                        entities.append(ann)
        self._entities = entities
        return self._entities
def ehost_iaa_compute(folder1, folder2, no_context=False):
    """Compute inter-annotator agreement (Cohen's kappa) between two eHost
    annotation folders.

    :param folder1: folder with the first annotator's XML files
    :param folder2: folder with the second annotator's XML files
    :param no_context: passed through to the annotation reader (strip
        context prefixes from labels)
    :return: the kappa score over the spans annotated in both folders
    """
    # BUG FIX: `import sklearn` at module level does not import the
    # `sklearn.metrics` submodule; import the scorer explicitly here.
    from sklearn.metrics import cohen_kappa_score
    annotator1 = read_ehost_annotated_result(folder1, no_context=no_context)
    annotator2 = read_ehost_annotated_result(folder2, no_context=no_context)
    merged_keys = list(set(annotator1.keys()) | set(annotator2.keys()))
    y1 = []
    y2 = []
    for key in merged_keys:
        if key in annotator1 and key in annotator2:
            y1.append(annotator1[key])
            y2.append(annotator2[key])
        else:
            # span annotated in only one folder: excluded from the kappa
            print('%s not matched in all' % key)
    iaa = cohen_kappa_score(y1, y2)
    print('IAA is %s on %s' % (iaa, len(annotator1)))
    return iaa
def read_ehost_annotated_result(folder, no_context=False):
    """
    read ehost annotated documents as a dictionary object: id -> entity label

    The id is '<file>-<start>-<end>' so that spans can be matched across
    annotators.
    :param folder: folder of eHost annotation XML files
    :param no_context: strip context prefixes from labels (see eHostAnnDoc)
    :return: dict mapping span id to its annotated label
    """
    id2label = {}
    files = [f for f in listdir(folder) if isfile(join(folder, f))]
    for f in files:
        d = eHostAnnDoc(join(folder, f))
        for e in d.get_ess_entities(no_context=no_context):
            id = '%s-%s-%s' % (f, e.start, e.end)  # NOTE: shadows the builtin id()
            id2label[id] = e.label  # NOTE(review): assumes EDIRAnn exposes .label -- confirm
    print(id2label)
    return id2label
def get_what_is_changing(ann_folder, text_folder, output_file, eHostAnnFile=True):
    """
    get what is getting better/worse: collect the abstracted sentence context
    of every annotation, grouped by annotation label, and save it as JSON.
    :param ann_folder: folder of annotation XML files
    :param text_folder: folder with the corresponding report text files
    :param output_file: output JSON path (label -> list of abstraction dicts)
    :param eHostAnnFile: True to read validated eHost files, False for generated ones
    :return:
    """
    nlp = rr.get_nlp_instance()
    files = [f for f in listdir(ann_folder) if isfile(join(ann_folder, f))]
    type2abstractions = {}
    for f in files:
        anns = []
        text_file = join(text_folder, f[0:-14])  # NOTE(review): unused -- the path is rebuilt below
        if eHostAnnFile:
            d = eHostAnnDoc(join(ann_folder, f))
            anns = d.get_ess_entities(no_context=True)
        else:
            d = eHostGenedDoc(join(ann_folder, f))
            anns = d.get_ess_entities()
        if len(anns) == 0:
            logging.info('anns is empty for [{:s}]'.format(f))
        # f[0:-14] strips the 14-char annotation-file suffix to get the text file name
        text = utils.read_text_file_as_string(join(text_folder, f[0:-14]), encoding='cp1252')
        sents = rr.get_sentences_as_anns(nlp, text)
        for ann in anns:
            for s in sents:
                if ann.overlap(s):
                    abss = rr.AbstractedSentence(1)
                    abss.text = s.str
                    result = abss.get_abstaction_by_pos(abss.locate_pos(ann.str), nlp)
                    if result is None:
                        logging.info('%s not found in %s' % (ann.str, f))
                        continue
                    type = ann.label  # NOTE: shadows the builtin type
                    if type not in type2abstractions:
                        type2abstractions[type] = []
                    type2abstractions[type].append(result.to_dict())
    logging.debug(type2abstractions)
    utils.save_json_array(type2abstractions, output_file)
def compute_iaa():
    """Run inter-annotator agreement over the two overlap folders."""
    annotator_k_dir = "S:/NLP/annotation_it02/overlaps/k"
    annotator_s_dir = "S:/NLP/annotation_it02/overlaps/s"
    unused_nadia_dir = "nadia"  # kept from the original; currently unused
    ehost_iaa_compute(annotator_k_dir, annotator_s_dir, no_context=True)
def analysing_label_performance(folder, output_file):
    """Count validation labels (CORRECT / IRRELEVANT_LABELS / ADDED) per
    annotated string across a folder of eHost files and save a TSV summary
    sorted by the IRRELEVANT_LABELS count, descending."""
    text2counts = {}  # annotated string -> {label type -> count}
    for file_name in listdir(folder):
        if not isfile(join(folder, file_name)):
            continue
        doc = eHostAnnDoc(join(folder, file_name))
        for ann in doc.get_ess_entities():
            counts = text2counts.setdefault(ann.str, {})
            counts[ann.type] = counts.get(ann.type, 0) + 1
    rows = []
    for text, counts in text2counts.items():
        rows.append((text,
                     counts.get('CORRECT', 0),
                     counts.get('IRRELEVANT_LABELS', 0),
                     counts.get('ADDED', 0)))
    rows.sort(key=itemgetter(2), reverse=True)
    tsv = '\n'.join('%s\t%s\t%s\t%s' % row for row in rows)
    utils.save_string(tsv, output_file)
def generate_gold_stand_from_validation(generated_ann_folder, validated_ann_folder, gold_standard_folder):
    """Build gold-standard eHost XML files from validated annotations.

    For each generated annotation file, keep only the annotations whose span
    was marked CORRECT in the matching validated file, then write them back
    out in eHost's annotation/classMention XML format.

    :param generated_ann_folder: folder with machine-generated annotation XML
    :param validated_ann_folder: folder with human-validated annotation XML
    :param gold_standard_folder: output folder for the gold-standard XML
    """
    files = [f for f in listdir(generated_ann_folder) if isfile(join(generated_ann_folder, f))]
    for f in files:
        logging.debug('processing: %s / %s' % (generated_ann_folder, f))
        # ignore added annotations for now
        gd_anns = []
        gen_doc = eHostGenedDoc(join(generated_ann_folder, f))
        logging.debug('ann number: %s' % len(gen_doc.get_ess_entities()))
        val_doc = eHostAnnDoc(join(validated_ann_folder, f))
        for g in gen_doc.get_ess_entities():
            logging.debug('validation label: %s' % g.type)
            for v in val_doc.get_ess_entities():
                # match generated and validated annotations by exact span
                if g.start == v.start and g.end == v.end:
                    logging.debug('validation label: %s' % v.type)
                    if v.type == 'CORRECT':
                        gd_anns.append(g)
        elem_annotations = ET.Element("annotations")
        elem_annotations.set('textSource', f)
        idx = 0
        for ann in gd_anns:
            # domain-specific filter: 'haematoma' mentions are excluded
            if ann.str.lower() == 'haematoma':
                continue
            idx += 1
            mention_id = '%s-%s' % (f, idx)
            elem_ann = ET.SubElement(elem_annotations, "annotation")
            elem_mention = ET.SubElement(elem_ann, "mention")
            elem_mention.set('id', mention_id)
            elem_annotator = ET.SubElement(elem_ann, "annotator")
            elem_annotator.set('id', 'semehr')
            elem_annotator.text = 'semehr'
            elem_span = ET.SubElement(elem_ann, "span")
            elem_span.set('start', '%s' % ann.start)
            elem_span.set('end', '%s' % ann.end)
            elem_spanText = ET.SubElement(elem_ann, "spannedText")
            elem_spanText.text = ann.str
            elem_date = ET.SubElement(elem_ann, "creationDate")
            elem_date.text = datetime.datetime.now().strftime("%a %B %d %X %Z %Y")
            # classMention links the mention id to its class label
            elem_class = ET.SubElement(elem_annotations, "classMention")
            elem_class.set('id', mention_id)
            elem_mention_class = ET.SubElement(elem_class, "mentionClass")
            # domain-specific remap: bleeding-related surface forms are
            # collapsed into the 'bleeding' class
            if ann.str.lower() == 'haemorrhage' or ann.str.lower() == 'blood' or ann.str.lower() == 'bleed' or ann.str.lower().startswith('collection'):
                ann.type = 'bleeding'
            elem_mention_class.set('id', ann.type)
            elem_mention_class.text = ann.str
        tree = ET.ElementTree(elem_annotations)  # NOTE(review): built but unused; tostring is used below
        logging.info('gd file saved to %s - %s' % (gold_standard_folder, f))
        utils.save_string(ET.tostring(elem_annotations, encoding='utf8', method='xml'), join(gold_standard_folder, f))
def analyse_trajectory_subjects(file, output_file):
    """Summarise subject/root frequencies per trajectory type from a JSON file
    and write the report to ``output_file`` (also logged at INFO level)."""
    traj2subs = utils.load_json_data(file)
    traj2freq = {}
    for traj, subs in traj2subs.items():
        freqs = traj2freq.setdefault(traj, {'subject': {}, 'root': {}})
        for sub in subs:
            # subjects are joined into a single comma-separated key
            add_key_freq(freqs['subject'], ','.join(sub['subject']))
            add_key_freq(freqs['root'], sub['root'])
    report = ''
    for traj, freqs in traj2freq.items():
        for kind in ('subject', 'root'):
            # most frequent entries first
            ranked = sorted(freqs[kind].items(), key=itemgetter(1), reverse=True)
            report += '***%s [%ss]***\n%s\n\n' % (traj, kind, freq_to_str(ranked))
    logging.info(report)
    utils.save_string(report, output_file)
def freq_to_str(freq):
    """Render (key, count) pairs as tab-separated lines, one pair per line."""
    lines = []
    for t in freq:
        lines.append('%s\t%s' % (t[0], t[1]))
    return '\n'.join(lines)
def add_key_freq(d, key):
    """Increment the counter stored under ``key`` in dict ``d`` (from 0)."""
    # idiomatic single-lookup form of the original if/else counter
    d[key] = d.get(key, 0) + 1
def summarise_validation_results(folder):
    """Count generated-annotation labels over all eHost files in ``folder`` and
    return (and log) a tab-separated 'label<TAB>count' summary.

    BUG FIX: the function name was lost in the corrupted source
    (``def | (folder):``); restored from the call site
    ``summarise_validation_results('/data/val/it2')`` at the end of the module.
    """
    files = [f for f in listdir(folder) if isfile(join(folder, f))]
    t2freq = {}
    for f in files:
        gen_doc = eHostGenedDoc(join(folder, f))
        logging.debug('processing: %s / %s' % (folder, f))
        for g in gen_doc.get_ess_entities():
            logging.debug('validation label: %s' % g.type)
            if g.type not in t2freq:
                t2freq[g.type] = 0
            t2freq[g.type] += 1
    s = '\n'.join(['%s\t%s' % (t, t2freq[t]) for t in t2freq])
    logging.info(s)
    return s
if __name__ == "__main__":
    # Ad-hoc driver: configure DEBUG logging, then run the trajectory-subject analysis.
    log_level = 'DEBUG'  # NOTE(review): unused -- basicConfig below uses a literal
    log_format = '[%(filename)s:%(lineno)d] %(name)s %(asctime)s %(message)s'
    logging.basicConfig(level='DEBUG', format=log_format)
    # compute_iaa()
    # analysing_label_performance('S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
    #                             'P:/wuh/label2performce_steve.tsv')
    # generate_gold_stand_from_validation('P:/wuh/SemEHR-working/outputs_it2/nlp2phenome',
    #                                     'S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
    #                                     'P:/wuh/SemEHR-working/outputs_it2/gold_stand_results')
    sub_json_file = './diabetes_subs.json'
    analyse_trajectory_subjects(sub_json_file, './traject_sub_analysis_result.txt')
    # if len(sys.argv) != 4:
    #     print('the syntax is [python ann_utils.py ann_folder, text_folder, result_file]')
    # else:
    #     logging.info('working...')
    #     get_what_is_changing(sys.argv[1], sys.argv[2], sys.argv[3], eHostAnnFile=False)
    # summarise_validation_results('/data/val/it2')
ann_utils.py | import sklearn
import datetime
from os import listdir
from os.path import isfile, join
from nlp_to_phenome import EDIRDoc
from annotation_docs import EDIRAnn
import reportreader as rr
import re
import utils
import logging
from operator import itemgetter
import xml.etree.ElementTree as ET
class eHostGenedDoc(EDIRDoc):
    """An eHost annotation document holding machine-generated annotations.

    Parses classMention/mention XML elements into EDIRAnn objects.
    """

    def __init__(self, file_path):
        # EDIRDoc loads and parses the XML file at file_path
        super(eHostGenedDoc, self).__init__(file_path)

    def get_ess_entities(self):
        """Return this document's annotations as a cached list of EDIRAnn."""
        if self._entities is not None:
            return self._entities  # already parsed
        root = self._root
        entities = []
        for e in root.findall('.//classMention'):
            mcs = e.findall('./mentionClass')
            mention_id = e.attrib['id']
            if len(mcs) > 0:
                mc = mcs[0]
                cls = mc.attrib['id']  # the class label of this mention
                # find the annotation element that owns the mention with this id
                mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
                if len(mentions) > 0:
                    span = mentions[0].findall('./span')
                    ent_start = span[0].attrib['start']
                    ent_end = span[0].attrib['end']
                    spannedText = mentions[0].findall('./spannedText')
                    str = spannedText[0].text  # NOTE: shadows the builtin str
                    ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
                    ann.id = len(entities)  # sequential id within this document
                    entities.append(ann)
        self._entities = entities
        return self._entities
class eHostAnnDoc(EDIRDoc):
    """
    a document class for ehost annotation file (human-validated annotations)
    """
    def __init__(self, file_path):
        super(eHostAnnDoc, self).__init__(file_path)

    def get_ess_entities(self, no_context=False):
        """Return validated annotations as a cached list of EDIRAnn.

        Only mention classes matching VERIFIED_*, IRRELEVANT_LABELS or ADDED_*
        are kept. With no_context=True, the leading context prefix (text before
        the first underscore) is stripped from every label except
        IRRELEVANT_LABELS.
        """
        if self._entities is not None:
            return self._entities  # already parsed
        root = self._root
        entities = []
        for e in root.findall('.//classMention'):
            mcs = e.findall('./mentionClass')
            mention_id = e.attrib['id']
            if len(mcs) > 0:
                mc = mcs[0]
                # try the three accepted label patterns in priority order
                m = re.match(r'VERIFIED\_([^\(]+)', mc.attrib['id'])
                if m is None:
                    m = re.match(r'(IRRELEVANT_LABELS)', mc.attrib['id'])
                if m is None:
                    m = re.match(r'(ADDED)\_([^\(]+)', mc.attrib['id'])
                if m is not None:
                    cls = m.group(1)
                    if no_context and cls != 'IRRELEVANT_LABELS':
                        if cls.find('_') >= 0:
                            cls = cls[cls.find('_')+1:]  # drop the context prefix
                    mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
                    if len(mentions) > 0:
                        span = mentions[0].findall('./span')
                        ent_start = span[0].attrib['start']
                        ent_end = span[0].attrib['end']
                        spannedText = mentions[0].findall('./spannedText')
                        str = spannedText[0].text  # NOTE: shadows the builtin str
                        ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
                        ann.id = len(entities)
                        entities.append(ann)
        self._entities = entities
        return self._entities
def ehost_iaa_compute(folder1, folder2, no_context=False):
    """Compute inter-annotator agreement (Cohen's kappa) between two eHost
    annotation folders.

    :param folder1: folder with the first annotator's XML files
    :param folder2: folder with the second annotator's XML files
    :param no_context: passed through to the annotation reader (strip
        context prefixes from labels)
    :return: the kappa score over the spans annotated in both folders
    """
    # BUG FIX: `import sklearn` at module level does not import the
    # `sklearn.metrics` submodule; import the scorer explicitly here.
    from sklearn.metrics import cohen_kappa_score
    annotator1 = read_ehost_annotated_result(folder1, no_context=no_context)
    annotator2 = read_ehost_annotated_result(folder2, no_context=no_context)
    merged_keys = list(set(annotator1.keys()) | set(annotator2.keys()))
    y1 = []
    y2 = []
    for key in merged_keys:
        if key in annotator1 and key in annotator2:
            y1.append(annotator1[key])
            y2.append(annotator2[key])
        else:
            # span annotated in only one folder: excluded from the kappa
            print('%s not matched in all' % key)
    iaa = cohen_kappa_score(y1, y2)
    print('IAA is %s on %s' % (iaa, len(annotator1)))
    return iaa
def read_ehost_annotated_result(folder, no_context=False):
    """
    read ehost annotated documents as a dictionary object: id -> entity label

    The id is '<file>-<start>-<end>' so that spans can be matched across
    annotators.
    :param folder: folder of eHost annotation XML files
    :param no_context: strip context prefixes from labels (see eHostAnnDoc)
    :return: dict mapping span id to its annotated label
    """
    id2label = {}
    files = [f for f in listdir(folder) if isfile(join(folder, f))]
    for f in files:
        d = eHostAnnDoc(join(folder, f))
        for e in d.get_ess_entities(no_context=no_context):
            id = '%s-%s-%s' % (f, e.start, e.end)  # NOTE: shadows the builtin id()
            id2label[id] = e.label  # NOTE(review): assumes EDIRAnn exposes .label -- confirm
    print(id2label)
    return id2label
def get_what_is_changing(ann_folder, text_folder, output_file, eHostAnnFile=True):
    """Collect the abstracted sentence context of every annotation, grouped by
    annotation label, and save the mapping as JSON.

    BUG FIX: this copy of the function was corrupted by dataset extraction --
    one line was fused with table residue and the empty-annotation logging
    lines were spliced out of the body; restored to the intact form.

    :param ann_folder: folder of annotation XML files
    :param text_folder: folder with the corresponding report text files
    :param output_file: output JSON path (label -> list of abstraction dicts)
    :param eHostAnnFile: True to read validated eHost files, False for generated ones
    """
    nlp = rr.get_nlp_instance()
    files = [f for f in listdir(ann_folder) if isfile(join(ann_folder, f))]
    type2abstractions = {}
    for f in files:
        anns = []
        text_file = join(text_folder, f[0:-14])  # NOTE(review): unused -- the path is rebuilt below
        if eHostAnnFile:
            d = eHostAnnDoc(join(ann_folder, f))
            anns = d.get_ess_entities(no_context=True)
        else:
            d = eHostGenedDoc(join(ann_folder, f))
            anns = d.get_ess_entities()
        if len(anns) == 0:
            logging.info('anns is empty for [{:s}]'.format(f))
        # f[0:-14] strips the 14-char annotation-file suffix to get the text file name
        text = utils.read_text_file_as_string(join(text_folder, f[0:-14]), encoding='cp1252')
        sents = rr.get_sentences_as_anns(nlp, text)
        for ann in anns:
            for s in sents:
                if ann.overlap(s):
                    abss = rr.AbstractedSentence(1)
                    abss.text = s.str
                    result = abss.get_abstaction_by_pos(abss.locate_pos(ann.str), nlp)
                    if result is None:
                        logging.info('%s not found in %s' % (ann.str, f))
                        continue
                    type = ann.label  # NOTE: shadows the builtin type
                    if type not in type2abstractions:
                        type2abstractions[type] = []
                    type2abstractions[type].append(result.to_dict())
    logging.debug(type2abstractions)
    utils.save_json_array(type2abstractions, output_file)
def compute_iaa():
    """Run inter-annotator agreement over the two overlap folders."""
    annotator_k_dir = "S:/NLP/annotation_it02/overlaps/k"
    annotator_s_dir = "S:/NLP/annotation_it02/overlaps/s"
    unused_nadia_dir = "nadia"  # kept from the original; currently unused
    ehost_iaa_compute(annotator_k_dir, annotator_s_dir, no_context=True)
def analysing_label_performance(folder, output_file):
    """Tally validation labels per annotated string and save a TSV summary
    sorted by the IRRELEVANT_LABELS count, descending."""
    s2t = {}  # annotated string -> {label type -> count}
    files = [f for f in listdir(folder) if isfile(join(folder, f))]
    for f in files:
        d = eHostAnnDoc(join(folder, f))
        for ann in d.get_ess_entities():
            s = ann.str
            if not (s in s2t):
                s2t[s] = {}
            if ann.type in s2t[s]:
                s2t[s][ann.type] = s2t[s][ann.type] + 1
            else:
                s2t[s][ann.type] = 1
    # rows: (string, #CORRECT, #IRRELEVANT_LABELS, #ADDED), sorted by column 2
    sts = sorted([(s, s2t[s]['CORRECT'] if 'CORRECT' in s2t[s] else 0, s2t[s]['IRRELEVANT_LABELS'] if 'IRRELEVANT_LABELS' in s2t[s] else 0, s2t[s]['ADDED'] if 'ADDED' in s2t[s] else 0) for s in s2t], key=itemgetter(2), reverse=True)
    s = ('\n'.join(['%s\t%s\t%s\t%s' % (t[0], t[1], t[2], t[3]) for t in sts]))
    utils.save_string(s, output_file)
def generate_gold_stand_from_validation(generated_ann_folder, validated_ann_folder, gold_standard_folder):
    """Build gold-standard eHost XML files from validated annotations.

    For each generated annotation file, keep only the annotations whose span
    was marked CORRECT in the matching validated file, then write them back
    out in eHost's annotation/classMention XML format.

    :param generated_ann_folder: folder with machine-generated annotation XML
    :param validated_ann_folder: folder with human-validated annotation XML
    :param gold_standard_folder: output folder for the gold-standard XML
    """
    files = [f for f in listdir(generated_ann_folder) if isfile(join(generated_ann_folder, f))]
    for f in files:
        logging.debug('processing: %s / %s' % (generated_ann_folder, f))
        # ignore added annotations for now
        gd_anns = []
        gen_doc = eHostGenedDoc(join(generated_ann_folder, f))
        logging.debug('ann number: %s' % len(gen_doc.get_ess_entities()))
        val_doc = eHostAnnDoc(join(validated_ann_folder, f))
        for g in gen_doc.get_ess_entities():
            logging.debug('validation label: %s' % g.type)
            for v in val_doc.get_ess_entities():
                # match generated and validated annotations by exact span
                if g.start == v.start and g.end == v.end:
                    logging.debug('validation label: %s' % v.type)
                    if v.type == 'CORRECT':
                        gd_anns.append(g)
        elem_annotations = ET.Element("annotations")
        elem_annotations.set('textSource', f)
        idx = 0
        for ann in gd_anns:
            # domain-specific filter: 'haematoma' mentions are excluded
            if ann.str.lower() == 'haematoma':
                continue
            idx += 1
            mention_id = '%s-%s' % (f, idx)
            elem_ann = ET.SubElement(elem_annotations, "annotation")
            elem_mention = ET.SubElement(elem_ann, "mention")
            elem_mention.set('id', mention_id)
            elem_annotator = ET.SubElement(elem_ann, "annotator")
            elem_annotator.set('id', 'semehr')
            elem_annotator.text = 'semehr'
            elem_span = ET.SubElement(elem_ann, "span")
            elem_span.set('start', '%s' % ann.start)
            elem_span.set('end', '%s' % ann.end)
            elem_spanText = ET.SubElement(elem_ann, "spannedText")
            elem_spanText.text = ann.str
            elem_date = ET.SubElement(elem_ann, "creationDate")
            elem_date.text = datetime.datetime.now().strftime("%a %B %d %X %Z %Y")
            # classMention links the mention id to its class label
            elem_class = ET.SubElement(elem_annotations, "classMention")
            elem_class.set('id', mention_id)
            elem_mention_class = ET.SubElement(elem_class, "mentionClass")
            # domain-specific remap: bleeding-related surface forms are
            # collapsed into the 'bleeding' class
            if ann.str.lower() == 'haemorrhage' or ann.str.lower() == 'blood' or ann.str.lower() == 'bleed' or ann.str.lower().startswith('collection'):
                ann.type = 'bleeding'
            elem_mention_class.set('id', ann.type)
            elem_mention_class.text = ann.str
        tree = ET.ElementTree(elem_annotations)  # NOTE(review): built but unused; tostring is used below
        logging.info('gd file saved to %s - %s' % (gold_standard_folder, f))
        utils.save_string(ET.tostring(elem_annotations, encoding='utf8', method='xml'), join(gold_standard_folder, f))
def analyse_trajectory_subjects(file, output_file):
    """Aggregate subject/root frequencies per trajectory type from a JSON
    mapping and save a sorted, human-readable report."""
    t2subs = utils.load_json_data(file)
    t2freq = {}
    for t in t2subs:
        if t not in t2freq:
            t2freq[t] = {'subject': {}, 'root': {}}
        for sub in t2subs[t]:
            # subjects are joined into a single comma-separated key
            add_key_freq(t2freq[t]['subject'], ','.join(sub['subject']))
            add_key_freq(t2freq[t]['root'], sub['root'])
    s = ''
    for t in t2freq:
        freqs = t2freq[t]
        # most frequent entries first
        subs = sorted([(k, freqs['subject'][k]) for k in freqs['subject']], key=itemgetter(1), reverse=True)
        s += '***%s [subjects]***\n%s\n\n' % (t, freq_to_str(subs))
        roots = sorted([(k, freqs['root'][k]) for k in freqs['root']], key=itemgetter(1), reverse=True)
        s += '***%s [roots]***\n%s\n\n' % (t, freq_to_str(roots))
    logging.info(s)
    utils.save_string(s, output_file)
def freq_to_str(freq):
return '\n'.join(['%s\t%s' % (t[0], t[1]) for t in freq])
def add_key_freq(d, key):
if key in d:
d[key] += 1
else:
d[key] = 1
def summarise_validation_results(folder):
files = [f for f in listdir(folder) if isfile(join(folder, f))]
t2freq = {}
for f in files:
gen_doc = eHostGenedDoc(join(folder, f))
logging.debug('processing: %s / %s' % (folder, f))
for g in gen_doc.get_ess_entities():
logging.debug('validation label: %s' % g.type)
if g.type not in t2freq:
t2freq[g.type] = 0
t2freq[g.type] += 1
s = '\n'.join(['%s\t%s' % (t, t2freq[t]) for t in t2freq])
logging.info(s)
return s
if __name__ == "__main__":
log_level = 'DEBUG'
log_format = '[%(filename)s:%(lineno)d] %(name)s %(asctime)s %(message)s'
logging.basicConfig(level='DEBUG', format=log_format)
# compute_iaa()
# analysing_label_performance('S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
# 'P:/wuh/label2performce_steve.tsv')
# generate_gold_stand_from_validation('P:/wuh/SemEHR-working/outputs_it2/nlp2phenome',
# 'S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
# 'P:/wuh/SemEHR-working/outputs_it2/gold_stand_results')
sub_json_file = './diabetes_subs.json'
analyse_trajectory_subjects(sub_json_file, './traject_sub_analysis_result.txt')
# if len(sys.argv) != 4:
# print('the syntax is [python ann_utils.py ann_folder, text_folder, result_file]')
# else:
# logging.info('working...')
# get_what_is_changing(sys.argv[1], sys.argv[2], sys.argv[3], eHostAnnFile=False)
# summarise_validation_results('/data/val/it2') | if len(anns) == 0:
logging.info('anns is empty for [{:s}]'.format(f)) | random_line_split |
ann_utils.py | import sklearn
import datetime
from os import listdir
from os.path import isfile, join
from nlp_to_phenome import EDIRDoc
from annotation_docs import EDIRAnn
import reportreader as rr
import re
import utils
import logging
from operator import itemgetter
import xml.etree.ElementTree as ET
class eHostGenedDoc(EDIRDoc):
def __init__(self, file_path):
super(eHostGenedDoc, self).__init__(file_path)
def get_ess_entities(self):
if self._entities is not None:
return self._entities
root = self._root
entities = []
for e in root.findall('.//classMention'):
mcs = e.findall('./mentionClass')
mention_id = e.attrib['id']
if len(mcs) > 0:
mc = mcs[0]
cls = mc.attrib['id']
mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
if len(mentions) > 0:
span = mentions[0].findall('./span')
ent_start = span[0].attrib['start']
ent_end = span[0].attrib['end']
spannedText = mentions[0].findall('./spannedText')
str = spannedText[0].text
ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
ann.id = len(entities)
entities.append(ann)
self._entities = entities
return self._entities
class eHostAnnDoc(EDIRDoc):
"""
a document class for ehost annotation file
"""
def __init__(self, file_path):
super(eHostAnnDoc, self).__init__(file_path)
def get_ess_entities(self, no_context=False):
if self._entities is not None:
return self._entities
root = self._root
entities = []
for e in root.findall('.//classMention'):
mcs = e.findall('./mentionClass')
mention_id = e.attrib['id']
if len(mcs) > 0:
mc = mcs[0]
m = re.match(r'VERIFIED\_([^\(]+)', mc.attrib['id'])
if m is None:
m = re.match(r'(IRRELEVANT_LABELS)', mc.attrib['id'])
if m is None:
m = re.match(r'(ADDED)\_([^\(]+)', mc.attrib['id'])
if m is not None:
cls = m.group(1)
if no_context and cls != 'IRRELEVANT_LABELS':
if cls.find('_') >= 0:
cls = cls[cls.find('_')+1:]
mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
if len(mentions) > 0:
span = mentions[0].findall('./span')
ent_start = span[0].attrib['start']
ent_end = span[0].attrib['end']
spannedText = mentions[0].findall('./spannedText')
str = spannedText[0].text
ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
ann.id = len(entities)
entities.append(ann)
self._entities = entities
return self._entities
def ehost_iaa_compute(folder1, folder2, no_context=False):
"""
compute inter annotator agreement
:param folder1:
:param folder2:
:param no_context:
:return:
"""
annotator1 = read_ehost_annotated_result(folder1, no_context=no_context)
annotator2 = read_ehost_annotated_result(folder2, no_context=no_context)
merged_keys = list(set(annotator1.keys()) | set(annotator2.keys()))
y1 = []
y2 = []
for key in merged_keys:
if key in annotator1 and key in annotator2:
y1.append(annotator1[key])
y2.append(annotator2[key])
else:
print('%s not matched in all' % key)
iaa = sklearn.metrics.cohen_kappa_score(y1, y2)
print('IAA is %s on %s' % (iaa, len(annotator1)))
return iaa
def read_ehost_annotated_result(folder, no_context=False):
"""
read ehost annotated documents as a dictionary object: id -> entity label
:param folder:
:param no_context:
:return:
"""
id2label = {}
files = [f for f in listdir(folder) if isfile(join(folder, f))]
for f in files:
d = eHostAnnDoc(join(folder, f))
for e in d.get_ess_entities(no_context=no_context):
id = '%s-%s-%s' % (f, e.start, e.end)
id2label[id] = e.label
print(id2label)
return id2label
def get_what_is_changing(ann_folder, text_folder, output_file, eHostAnnFile=True):
"""
get what is getting better/worse
:param ann_folder:
:param text_folder:
:param output_file:
:return:
"""
nlp = rr.get_nlp_instance()
files = [f for f in listdir(ann_folder) if isfile(join(ann_folder, f))]
type2abstractions = {}
for f in files:
anns = []
text_file = join(text_folder, f[0:-14])
if eHostAnnFile:
d = eHostAnnDoc(join(ann_folder, f))
anns = d.get_ess_entities(no_context=True)
else:
d = eHostGenedDoc(join(ann_folder, f))
anns = d.get_ess_entities()
if len(anns) == 0:
logging.info('anns is empty for [{:s}]'.format(f))
text = utils.read_text_file_as_string(join(text_folder, f[0:-14]), encoding='cp1252')
sents = rr.get_sentences_as_anns(nlp, text)
for ann in anns:
for s in sents:
if ann.overlap(s):
abss = rr.AbstractedSentence(1)
abss.text = s.str
result = abss.get_abstaction_by_pos(abss.locate_pos(ann.str), nlp)
if result is None:
logging.info('%s not found in %s' % (ann.str, f))
continue
type = ann.label
if type not in type2abstractions:
type2abstractions[type] = []
type2abstractions[type].append(result.to_dict())
logging.debug(type2abstractions)
utils.save_json_array(type2abstractions, output_file)
def compute_iaa():
folder_lia = "S:/NLP/annotation_it02/overlaps/k"
folder_rob = "S:/NLP/annotation_it02/overlaps/s"
folder_nadia = "nadia"
ehost_iaa_compute(folder_lia, folder_rob, no_context=True)
def analysing_label_performance(folder, output_file):
s2t = {}
files = [f for f in listdir(folder) if isfile(join(folder, f))]
for f in files:
d = eHostAnnDoc(join(folder, f))
for ann in d.get_ess_entities():
s = ann.str
if not (s in s2t):
s2t[s] = {}
if ann.type in s2t[s]:
s2t[s][ann.type] = s2t[s][ann.type] + 1
else:
s2t[s][ann.type] = 1
sts = sorted([(s, s2t[s]['CORRECT'] if 'CORRECT' in s2t[s] else 0, s2t[s]['IRRELEVANT_LABELS'] if 'IRRELEVANT_LABELS' in s2t[s] else 0, s2t[s]['ADDED'] if 'ADDED' in s2t[s] else 0) for s in s2t], key=itemgetter(2), reverse=True)
s = ('\n'.join(['%s\t%s\t%s\t%s' % (t[0], t[1], t[2], t[3]) for t in sts]))
utils.save_string(s, output_file)
def generate_gold_stand_from_validation(generated_ann_folder, validated_ann_folder, gold_standard_folder):
files = [f for f in listdir(generated_ann_folder) if isfile(join(generated_ann_folder, f))]
for f in files:
logging.debug('processing: %s / %s' % (generated_ann_folder, f))
# ignore added annotations for now
gd_anns = []
gen_doc = eHostGenedDoc(join(generated_ann_folder, f))
logging.debug('ann number: %s' % len(gen_doc.get_ess_entities()))
val_doc = eHostAnnDoc(join(validated_ann_folder, f))
for g in gen_doc.get_ess_entities():
logging.debug('validation label: %s' % g.type)
for v in val_doc.get_ess_entities():
if g.start == v.start and g.end == v.end:
logging.debug('validation label: %s' % v.type)
if v.type == 'CORRECT':
gd_anns.append(g)
elem_annotations = ET.Element("annotations")
elem_annotations.set('textSource', f)
idx = 0
for ann in gd_anns:
if ann.str.lower() == 'haematoma':
continue
idx += 1
mention_id = '%s-%s' % (f, idx)
elem_ann = ET.SubElement(elem_annotations, "annotation")
elem_mention = ET.SubElement(elem_ann, "mention")
elem_mention.set('id', mention_id)
elem_annotator = ET.SubElement(elem_ann, "annotator")
elem_annotator.set('id', 'semehr')
elem_annotator.text = 'semehr'
elem_span = ET.SubElement(elem_ann, "span")
elem_span.set('start', '%s' % ann.start)
elem_span.set('end', '%s' % ann.end)
elem_spanText = ET.SubElement(elem_ann, "spannedText")
elem_spanText.text = ann.str
elem_date = ET.SubElement(elem_ann, "creationDate")
elem_date.text = datetime.datetime.now().strftime("%a %B %d %X %Z %Y")
#
elem_class = ET.SubElement(elem_annotations, "classMention")
elem_class.set('id', mention_id)
elem_mention_class = ET.SubElement(elem_class, "mentionClass")
if ann.str.lower() == 'haemorrhage' or ann.str.lower() == 'blood' or ann.str.lower() == 'bleed' or ann.str.lower().startswith('collection'):
ann.type = 'bleeding'
elem_mention_class.set('id', ann.type)
elem_mention_class.text = ann.str
tree = ET.ElementTree(elem_annotations)
logging.info('gd file saved to %s - %s' % (gold_standard_folder, f))
utils.save_string(ET.tostring(elem_annotations, encoding='utf8', method='xml'), join(gold_standard_folder, f))
def analyse_trajectory_subjects(file, output_file):
t2subs = utils.load_json_data(file)
t2freq = {}
for t in t2subs:
if t not in t2freq:
t2freq[t] = {'subject': {}, 'root': {}}
for sub in t2subs[t]:
add_key_freq(t2freq[t]['subject'], ','.join(sub['subject']))
add_key_freq(t2freq[t]['root'], sub['root'])
s = ''
for t in t2freq:
freqs = t2freq[t]
subs = sorted([(k, freqs['subject'][k]) for k in freqs['subject']], key=itemgetter(1), reverse=True)
s += '***%s [subjects]***\n%s\n\n' % (t, freq_to_str(subs))
roots = sorted([(k, freqs['root'][k]) for k in freqs['root']], key=itemgetter(1), reverse=True)
s += '***%s [roots]***\n%s\n\n' % (t, freq_to_str(roots))
logging.info(s)
utils.save_string(s, output_file)
def freq_to_str(freq):
return '\n'.join(['%s\t%s' % (t[0], t[1]) for t in freq])
def add_key_freq(d, key):
|
def summarise_validation_results(folder):
files = [f for f in listdir(folder) if isfile(join(folder, f))]
t2freq = {}
for f in files:
gen_doc = eHostGenedDoc(join(folder, f))
logging.debug('processing: %s / %s' % (folder, f))
for g in gen_doc.get_ess_entities():
logging.debug('validation label: %s' % g.type)
if g.type not in t2freq:
t2freq[g.type] = 0
t2freq[g.type] += 1
s = '\n'.join(['%s\t%s' % (t, t2freq[t]) for t in t2freq])
logging.info(s)
return s
if __name__ == "__main__":
log_level = 'DEBUG'
log_format = '[%(filename)s:%(lineno)d] %(name)s %(asctime)s %(message)s'
logging.basicConfig(level='DEBUG', format=log_format)
# compute_iaa()
# analysing_label_performance('S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
# 'P:/wuh/label2performce_steve.tsv')
# generate_gold_stand_from_validation('P:/wuh/SemEHR-working/outputs_it2/nlp2phenome',
# 'S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
# 'P:/wuh/SemEHR-working/outputs_it2/gold_stand_results')
sub_json_file = './diabetes_subs.json'
analyse_trajectory_subjects(sub_json_file, './traject_sub_analysis_result.txt')
# if len(sys.argv) != 4:
# print('the syntax is [python ann_utils.py ann_folder, text_folder, result_file]')
# else:
# logging.info('working...')
# get_what_is_changing(sys.argv[1], sys.argv[2], sys.argv[3], eHostAnnFile=False)
# summarise_validation_results('/data/val/it2') | if key in d:
d[key] += 1
else:
d[key] = 1 | identifier_body |
ann_utils.py | import sklearn
import datetime
from os import listdir
from os.path import isfile, join
from nlp_to_phenome import EDIRDoc
from annotation_docs import EDIRAnn
import reportreader as rr
import re
import utils
import logging
from operator import itemgetter
import xml.etree.ElementTree as ET
class eHostGenedDoc(EDIRDoc):
def __init__(self, file_path):
super(eHostGenedDoc, self).__init__(file_path)
def get_ess_entities(self):
if self._entities is not None:
return self._entities
root = self._root
entities = []
for e in root.findall('.//classMention'):
mcs = e.findall('./mentionClass')
mention_id = e.attrib['id']
if len(mcs) > 0:
mc = mcs[0]
cls = mc.attrib['id']
mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
if len(mentions) > 0:
span = mentions[0].findall('./span')
ent_start = span[0].attrib['start']
ent_end = span[0].attrib['end']
spannedText = mentions[0].findall('./spannedText')
str = spannedText[0].text
ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
ann.id = len(entities)
entities.append(ann)
self._entities = entities
return self._entities
class eHostAnnDoc(EDIRDoc):
"""
a document class for ehost annotation file
"""
def __init__(self, file_path):
super(eHostAnnDoc, self).__init__(file_path)
def get_ess_entities(self, no_context=False):
if self._entities is not None:
return self._entities
root = self._root
entities = []
for e in root.findall('.//classMention'):
mcs = e.findall('./mentionClass')
mention_id = e.attrib['id']
if len(mcs) > 0:
mc = mcs[0]
m = re.match(r'VERIFIED\_([^\(]+)', mc.attrib['id'])
if m is None:
m = re.match(r'(IRRELEVANT_LABELS)', mc.attrib['id'])
if m is None:
m = re.match(r'(ADDED)\_([^\(]+)', mc.attrib['id'])
if m is not None:
cls = m.group(1)
if no_context and cls != 'IRRELEVANT_LABELS':
if cls.find('_') >= 0:
|
mentions = root.findall('.//mention[@id="' + mention_id + '"]/..')
if len(mentions) > 0:
span = mentions[0].findall('./span')
ent_start = span[0].attrib['start']
ent_end = span[0].attrib['end']
spannedText = mentions[0].findall('./spannedText')
str = spannedText[0].text
ann = EDIRAnn(str=str, start=int(ent_start), end=int(ent_end), type=cls)
ann.id = len(entities)
entities.append(ann)
self._entities = entities
return self._entities
def ehost_iaa_compute(folder1, folder2, no_context=False):
"""
compute inter annotator agreement
:param folder1:
:param folder2:
:param no_context:
:return:
"""
annotator1 = read_ehost_annotated_result(folder1, no_context=no_context)
annotator2 = read_ehost_annotated_result(folder2, no_context=no_context)
merged_keys = list(set(annotator1.keys()) | set(annotator2.keys()))
y1 = []
y2 = []
for key in merged_keys:
if key in annotator1 and key in annotator2:
y1.append(annotator1[key])
y2.append(annotator2[key])
else:
print('%s not matched in all' % key)
iaa = sklearn.metrics.cohen_kappa_score(y1, y2)
print('IAA is %s on %s' % (iaa, len(annotator1)))
return iaa
def read_ehost_annotated_result(folder, no_context=False):
"""
read ehost annotated documents as a dictionary object: id -> entity label
:param folder:
:param no_context:
:return:
"""
id2label = {}
files = [f for f in listdir(folder) if isfile(join(folder, f))]
for f in files:
d = eHostAnnDoc(join(folder, f))
for e in d.get_ess_entities(no_context=no_context):
id = '%s-%s-%s' % (f, e.start, e.end)
id2label[id] = e.label
print(id2label)
return id2label
def get_what_is_changing(ann_folder, text_folder, output_file, eHostAnnFile=True):
"""
get what is getting better/worse
:param ann_folder:
:param text_folder:
:param output_file:
:return:
"""
nlp = rr.get_nlp_instance()
files = [f for f in listdir(ann_folder) if isfile(join(ann_folder, f))]
type2abstractions = {}
for f in files:
anns = []
text_file = join(text_folder, f[0:-14])
if eHostAnnFile:
d = eHostAnnDoc(join(ann_folder, f))
anns = d.get_ess_entities(no_context=True)
else:
d = eHostGenedDoc(join(ann_folder, f))
anns = d.get_ess_entities()
if len(anns) == 0:
logging.info('anns is empty for [{:s}]'.format(f))
text = utils.read_text_file_as_string(join(text_folder, f[0:-14]), encoding='cp1252')
sents = rr.get_sentences_as_anns(nlp, text)
for ann in anns:
for s in sents:
if ann.overlap(s):
abss = rr.AbstractedSentence(1)
abss.text = s.str
result = abss.get_abstaction_by_pos(abss.locate_pos(ann.str), nlp)
if result is None:
logging.info('%s not found in %s' % (ann.str, f))
continue
type = ann.label
if type not in type2abstractions:
type2abstractions[type] = []
type2abstractions[type].append(result.to_dict())
logging.debug(type2abstractions)
utils.save_json_array(type2abstractions, output_file)
def compute_iaa():
folder_lia = "S:/NLP/annotation_it02/overlaps/k"
folder_rob = "S:/NLP/annotation_it02/overlaps/s"
folder_nadia = "nadia"
ehost_iaa_compute(folder_lia, folder_rob, no_context=True)
def analysing_label_performance(folder, output_file):
s2t = {}
files = [f for f in listdir(folder) if isfile(join(folder, f))]
for f in files:
d = eHostAnnDoc(join(folder, f))
for ann in d.get_ess_entities():
s = ann.str
if not (s in s2t):
s2t[s] = {}
if ann.type in s2t[s]:
s2t[s][ann.type] = s2t[s][ann.type] + 1
else:
s2t[s][ann.type] = 1
sts = sorted([(s, s2t[s]['CORRECT'] if 'CORRECT' in s2t[s] else 0, s2t[s]['IRRELEVANT_LABELS'] if 'IRRELEVANT_LABELS' in s2t[s] else 0, s2t[s]['ADDED'] if 'ADDED' in s2t[s] else 0) for s in s2t], key=itemgetter(2), reverse=True)
s = ('\n'.join(['%s\t%s\t%s\t%s' % (t[0], t[1], t[2], t[3]) for t in sts]))
utils.save_string(s, output_file)
def generate_gold_stand_from_validation(generated_ann_folder, validated_ann_folder, gold_standard_folder):
files = [f for f in listdir(generated_ann_folder) if isfile(join(generated_ann_folder, f))]
for f in files:
logging.debug('processing: %s / %s' % (generated_ann_folder, f))
# ignore added annotations for now
gd_anns = []
gen_doc = eHostGenedDoc(join(generated_ann_folder, f))
logging.debug('ann number: %s' % len(gen_doc.get_ess_entities()))
val_doc = eHostAnnDoc(join(validated_ann_folder, f))
for g in gen_doc.get_ess_entities():
logging.debug('validation label: %s' % g.type)
for v in val_doc.get_ess_entities():
if g.start == v.start and g.end == v.end:
logging.debug('validation label: %s' % v.type)
if v.type == 'CORRECT':
gd_anns.append(g)
elem_annotations = ET.Element("annotations")
elem_annotations.set('textSource', f)
idx = 0
for ann in gd_anns:
if ann.str.lower() == 'haematoma':
continue
idx += 1
mention_id = '%s-%s' % (f, idx)
elem_ann = ET.SubElement(elem_annotations, "annotation")
elem_mention = ET.SubElement(elem_ann, "mention")
elem_mention.set('id', mention_id)
elem_annotator = ET.SubElement(elem_ann, "annotator")
elem_annotator.set('id', 'semehr')
elem_annotator.text = 'semehr'
elem_span = ET.SubElement(elem_ann, "span")
elem_span.set('start', '%s' % ann.start)
elem_span.set('end', '%s' % ann.end)
elem_spanText = ET.SubElement(elem_ann, "spannedText")
elem_spanText.text = ann.str
elem_date = ET.SubElement(elem_ann, "creationDate")
elem_date.text = datetime.datetime.now().strftime("%a %B %d %X %Z %Y")
#
elem_class = ET.SubElement(elem_annotations, "classMention")
elem_class.set('id', mention_id)
elem_mention_class = ET.SubElement(elem_class, "mentionClass")
if ann.str.lower() == 'haemorrhage' or ann.str.lower() == 'blood' or ann.str.lower() == 'bleed' or ann.str.lower().startswith('collection'):
ann.type = 'bleeding'
elem_mention_class.set('id', ann.type)
elem_mention_class.text = ann.str
tree = ET.ElementTree(elem_annotations)
logging.info('gd file saved to %s - %s' % (gold_standard_folder, f))
utils.save_string(ET.tostring(elem_annotations, encoding='utf8', method='xml'), join(gold_standard_folder, f))
def analyse_trajectory_subjects(file, output_file):
t2subs = utils.load_json_data(file)
t2freq = {}
for t in t2subs:
if t not in t2freq:
t2freq[t] = {'subject': {}, 'root': {}}
for sub in t2subs[t]:
add_key_freq(t2freq[t]['subject'], ','.join(sub['subject']))
add_key_freq(t2freq[t]['root'], sub['root'])
s = ''
for t in t2freq:
freqs = t2freq[t]
subs = sorted([(k, freqs['subject'][k]) for k in freqs['subject']], key=itemgetter(1), reverse=True)
s += '***%s [subjects]***\n%s\n\n' % (t, freq_to_str(subs))
roots = sorted([(k, freqs['root'][k]) for k in freqs['root']], key=itemgetter(1), reverse=True)
s += '***%s [roots]***\n%s\n\n' % (t, freq_to_str(roots))
logging.info(s)
utils.save_string(s, output_file)
def freq_to_str(freq):
return '\n'.join(['%s\t%s' % (t[0], t[1]) for t in freq])
def add_key_freq(d, key):
if key in d:
d[key] += 1
else:
d[key] = 1
def summarise_validation_results(folder):
files = [f for f in listdir(folder) if isfile(join(folder, f))]
t2freq = {}
for f in files:
gen_doc = eHostGenedDoc(join(folder, f))
logging.debug('processing: %s / %s' % (folder, f))
for g in gen_doc.get_ess_entities():
logging.debug('validation label: %s' % g.type)
if g.type not in t2freq:
t2freq[g.type] = 0
t2freq[g.type] += 1
s = '\n'.join(['%s\t%s' % (t, t2freq[t]) for t in t2freq])
logging.info(s)
return s
if __name__ == "__main__":
log_level = 'DEBUG'
log_format = '[%(filename)s:%(lineno)d] %(name)s %(asctime)s %(message)s'
logging.basicConfig(level='DEBUG', format=log_format)
# compute_iaa()
# analysing_label_performance('S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
# 'P:/wuh/label2performce_steve.tsv')
# generate_gold_stand_from_validation('P:/wuh/SemEHR-working/outputs_it2/nlp2phenome',
# 'S:/NLP/annotation_it02/annotation_Steven/iteration_02/saved',
# 'P:/wuh/SemEHR-working/outputs_it2/gold_stand_results')
sub_json_file = './diabetes_subs.json'
analyse_trajectory_subjects(sub_json_file, './traject_sub_analysis_result.txt')
# if len(sys.argv) != 4:
# print('the syntax is [python ann_utils.py ann_folder, text_folder, result_file]')
# else:
# logging.info('working...')
# get_what_is_changing(sys.argv[1], sys.argv[2], sys.argv[3], eHostAnnFile=False)
# summarise_validation_results('/data/val/it2') | cls = cls[cls.find('_')+1:] | conditional_block |
hap_to_domo.py | """"
Skrypt stanowi brame pomiedzy domoticzem a siecia HAPCAN - interfejs Ethernet
uruchomiony mosquitto
W domoticzu w sekcji sprzet dodajemy - MQTT Client Gateway with LAN interface port 1883 ip 127.0.0.1
uruchamiamy z konsoli: python3 hapcan_domo.py
"""
from __future__ import print_function
import paho.mqtt.client as mqtt
import json
import threading
import os
import socket
import binascii
import time
# dla Things
import http.client
import urllib
from urllib.parse import urlparse
import parser
import happroc
def setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop():
while not stopped.wait(interval):
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True
t.start()
return stopped
return wrapper
return decorator
#Tutaj umieszczamy spis modułów naszego systemu
#formant (moduł,grupa):opis - możliwy zapis dziesiętny lub HEX
MAPOWANIE_MOD = {
(1,10):'BUT kotłownia',
}
# słownik opisujący nasz system Hapcan (moduł,grupa,kanał):(idx w Domoticzu, typ w Domoticzu, podtyp w Domoticzu
# dodatkowo można wykorzystać niektóre czujniki np. temeratury do zapisu do bazy ThingSpeac - wtedy należy dopisać dane pole_th - zgodne z nazwą w kanale i klucz czyli API key
#- możliwy zapis dziesiętny lub HEX
MAPOWANIE_HAP ={
# butony
(0x01, 0x0a, 0x11): {'idx': 26, 'pole_th': 'field1', 'klucz_th': ''},
(0xcf, 0x0b, 0x11): {'idx': 19},
# SW T1
(25,11,1):{'idx':37,'dtype': 'Light/Switch', 'switchType': 'On/Off','nazwa':'Lampa Gabinet PN'},
# termostat
(0x01, 0x0a, 0x14): {'idx': 70},
# rolety
(0x28, 0x0e, 0x01): {"idx": 62, "nvalue": 2, "svalue": "90",'czas_rol':20},
# dimmer
(32,6,1):{'idx':81,'dtype': 'Light/Switch', 'switchType': 'Dimmer','nazwa':'Dimmer'},
}
MAPOWANIE_DOM ={}
MAPOWANIE_MOD_SPS = {}
MAPOWANIE_THING = {}
IGNOROWANIE = {}
INDEKSY = {
1:0 # # lista modułów do sprawdzenia
}
OKRES_CZASU = {
1:1, # flaga okresowego odczytu podstawowego - na początku i potem co x minut
2:0, # flaga okresowego odczytu 1x na dobę
3:0,
}
FLAGI = {
1:0,
2:{'flaga':0,'nodes':(0,0,0),'procent':0}, # flaga sprawdzania rolety
}
# utworzenie słownika idx Domoticza
ks = list(MAPOWANIE_HAP.keys())
#ks. sort()
#indeks=0
for key in ks:
komendy = MAPOWANIE_HAP.get(key, None)
idx = komendy['idx']
map_temp = MAPOWANIE_HAP[key]
map_temp2 = {'nodes':key}
map_temp.update(map_temp2)
#print("Klucze MAP to ::::::", MAPOWANIE_HAP[key], 'klucz', key)
MAPOWANIE_DOM[idx]= map_temp
#MAPOWANIE_HAP.update(map_temp)
#indeks = indeks +1
#komendy = MAPOWANIE_DOM.get(key, None)
#print("Komenda to ...", komendy)
print("MAP HAP", MAPOWANIE_DOM)
indeks = 0
ks = list(MAPOWANIE_MOD.keys())
for key in ks:
indeks = indeks +1
map_temp ={'komunikat': 0x1090}
list_temp = [0xf0, 0xf0, 0xff, 0xff]
list_temp.append(key[0])
list_temp.append(key[1])
list_temp += [0xff, 0xff, 0xff, 0xff]
map_temp2 = {'dane': list_temp}
map_temp.update(map_temp2)
#map_temp2.update(hex(key[0]))
MAPOWANIE_MOD_SPS[indeks] = map_temp
print(MAPOWANIE_MOD_SPS)
@setInterval(2)
def odczyt_mod():
okres_czasu = OKRES_CZASU.get(1)
indeks_mod = INDEKSY.get(1)
if OKRES_CZASU.get(1):
indeks_mod = indeks_mod +1
komenda = MAPOWANIE_MOD_SPS.get(indeks_mod, None)
if komenda is not None:
#print("komenda do wysłania do Hapcana", komenda)
wyslij(komenda['komunikat'], komenda['dane'])
INDEKSY[1] = indeks_mod
else:
INDEKSY[1]=0 # kasujemy licznik listy modułów do odczytu
OKRES_CZASU[1]=0 # ustawiamy flagę następnego odczytu za 10 minut
@setInterval(600) # Wysylanie zapytania do 100 sekund
def pytanie_o_status():
print("pytanie_o_status do Hapcana",OKRES_CZASU, "Ignoruj", IGNOROWANIE)
OKRES_CZASU[1]=1
def on_connect(client, userdata, flags, rc):
print("Połączony z moskitem czyta domoticza... " + str(rc))
client.subscribe("domoticz/out")
def on_message(client, userdata, msg):
try:
payload = json.loads(msg.payload.decode('ascii'))
#print("wiadomosc od Domoticza ", payload)
idx = payload['idx']
typ_idx = payload['dtype']
nvalue = payload['nvalue']
svalue1 = payload['svalue1']
except ValueError:
print("Błąd formatu json", str(msg))
return
#print("wiadomosc od Domoticza ", payload)
if typ_idx == 'Light/Switch':
typ_switch = payload['switchType']
if typ_switch == 'Blinds Percentage':
print ('a to była roleta')
#print(IGNOROWANIE)
# ignorowanie jest potrzebna gdzy nie ma mozliwosci rozroznienia z wiadowmosci czy dostajemy odpowiedz na nasza wiadomosc
ignoruj = IGNOROWANIE.get(idx, 0)
# print("ignoruj", ignoruj)
if ignoruj is 0:
komunikat= 0x10A0
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
# dane': [0xF0, 0xF0, 0x01, 0x01, 0x19, 0x0b, 0x00, 0xFF, 0xFF, 0xFF]},
# znajdz komenda dla danego idx i nvalue
#print("Otrzymałem od Domoticza", "idx", idx, "nvalue", nvalue, "svalue1", svalue1, payload)
komendy = MAPOWANIE_DOM.get(idx, None)
#klucz = (nvalue, svalue1)
#print("komendy", komendy, "Klucz",klucz)
if komendy is not None:
nodes = komendy.get(('nodes'), None)
#print("komenda do wysłania do Hapcana od Domoticza", komenda, "Payload", payload)
if nodes is not None:
# sprawdzenie jaki rodzaj urządzenia w Domoticzu
if typ_idx == 'Light/Switch': # sprawdzamy czy switch
if typ_switch == 'On/Off': # tylko ON/OFF
dane[2]= nvalue
dane[3]=2**(nodes[2]-1)
dane[4]=nodes[0]
dane[5]=nodes[1]
print (dane)
wyslij(komunikat,dane)
if typ_switch == 'Blinds Percentage':
print('moja roleta !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', payload)
FLAGI[1] =1
ustaw_roleta(nodes,nvalue,svalue1)
#print("Wysylem ", komenda['komunikat'], komenda['dane'])
# print("Wysylem ",komenda['komunikat'], komenda['dane'])
#wyslij(komenda['komunikat'], komenda['dane'])
if typ_switch == 'Dimmer': # tylko ON/OFF
dane[2]= 0
dane[3]= int(payload['Level']*2.55)
dane[4]=nodes[0]
dane[5]=nodes[1]
wyslij(komunikat,dane)
else:
IGNOROWANIE[idx] = ignoruj - 1
def ustaw_roleta(nodes,nvalue,svalue1):
map_temp = {'nodes': nodes}
if nvalue < 2:
procent = 100 * nvalue
else:
procent = int(svalue1)
map_temp2 = {'procent': procent}
map_temp.update(map_temp2)
FLAGI[2] = map_temp
# FLAGI = {
#'dane': [0xf0, 0xf0, 0xff, 0xff, 0x28, 0x0e, 0xff, 0xff, 0xff, 0xff]},
print("@@@ a roleta to flagi: ",FLAGI)
komunikat = 0x1090
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
dane[4] = nodes[0]
dane[5] = nodes[1]
wyslij(komunikat,dane)
def hap_crc(ramka):
h_crc = 0
for x in range(12):
h_crc = h_crc + ramka[x + 1]
h_crc = h_crc % 256
return h_crc
def wyslij(id_komunikatu, dane):
try:
proto = socket.getprotobyname('tcp')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
sock.connect(("192.168.1.201", 1001))
msg = bytearray()
msg.append(0xAA)
b2 = (id_komunikatu >> 8) & 0xFF;
msg.append(b2)
b1 = (id_komunikatu) & 0xFF;
msg.append(b1)
for val in dane:
msg.append(val)
msg.append(hap_crc(msg))
msg.append(0xA5)
sock.sendall(msg)
print('wyslano =', binascii.hexlify(msg))
except socket.error:
pass
finally:
sock.close()
def toHex(val):
return '0x{:02x}'.format(val)
def czytaj():
# główna pętla odczytująca status Hapcana z bramki LAN
proto = socket.getprotobyname('tcp')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
conn = http.client.HTTPConnection("api.thingspeak.com:80")
try:
# 8. Ip i port modulu HAPCAN ethernet
sock.connect(("192.168.1.1", 1002))
while True:
#resp = bytearray()
resp = sock.recv(1)
# teraz sprawdzi czy początek odebranego ciągu to początek ramki
if resp[0] == 0xaa:
# pobranie pozostałej części ramki
for i in range(14):
resp += sock.recv(1)
# sprawdzenie sumy kontrolnej
if hap_crc(resp) == resp[13]:
modul = resp[3]
grupa = resp[4]
id_urzadzenia = resp[7]
stan = resp[8]
if resp[1] == 0x30: #rozkaz stanu
#print("Rozkaz stanu", "to hex",toHex(resp[2]))
if resp[2] == 0x00: # ramka czasu
print("Ramka czasu",toHex(resp[3]),toHex(resp[4]) )
czas_pracy = OKRES_CZASU.get(3)
czas_pracy = czas_pracy+1
OKRES_CZASU[3] = czas_pracy
if resp[2] == 0x20 or resp[2] == 0x21: # ramka przekaźnika
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
nazwa = komendy['nazwa']
print("Stan switcha",nazwa," ",str(stan & 1))
komenda = '{"idx": ' + str(idx) + ', "nvalue" : ' +str(stan &1) + ', "svalue" : "0"}'
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
client.publish("domoticz/in", komenda)
else:
print('Brak opisu przekaźnika !!!')
if resp[2] == 0x40 or resp[2] == 0x41: # ramka przycisku
#print("Ramka przycisku",)
if resp[7] == 0x11: # ramka temperatury
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
tt1 = happroc.spr_temp(resp[8], resp[9])
komenda = '{"idx": '+ str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
#print("Komenda to ...",komenda)
pole_th = komendy.get('pole_th',None)
if pole_th is not None:
print("Temp THING to ....",tt1,)
klucz_th = komendy.get('klucz_th',None)
params = urllib.parse.urlencode({pole_th : tt1, 'key': klucz_th})
try:
conn.request("POST", "/update", params, headers)
response = conn.getresponse()
data = response.read()
#print('odpowiedź od Th',data)
except Exception as bld:
print("connection failed z Thingiem")
| komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
tt1 = happroc.spr_temp(resp[10], resp[11])
komenda = '{"idx": ' + str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
#print("Komenda to ...", komenda)
client.publish("domoticz/in", komenda)
if resp[2] == 0x70 or resp[2] == 0x71: # ramka rolety
#print("Ramka rolety", procent)
if FLAGI[1]:
map_temp = FLAGI[2]
nodes = map_temp.get('nodes')
procent = map_temp.get('procent')
if id_urzadzenia == nodes[2]:
if resp[9] == 0: # sprawdza czy nie jest w ruchu
map_temp2 = MAPOWANIE_HAP.get(nodes)
czas_rol = map_temp2.get('czas_rol')
komunikat = 0x10A0
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
dane[3] = 2 ** (nodes[2] - 1)
dane[4] = nodes[0]
dane[5] = nodes[1]
#stan = stan / 255 * 100
ile_czasu = czas_rol*((stan/2.55) - procent)/100
if ile_czasu < 0:
dane[2]=0x04
else:
dane[2]=0x03
print("**********************************************", komunikat, "dane", dane,"; procent", procent, "; stan rolety", stan, "; ile czasu", ile_czasu)
wyslij(komunikat,dane)
dane[2]=0
dane[6]=int(round(abs(ile_czasu),0))
print("**********************************************", komunikat, "dane", dane)
wyslij(komunikat, dane)
FLAGI[1] = 0
if resp[2] == 0x60 or resp[2] == 0x61: # ramka Dimmera
#print("Ramka dimera nr kanału", resp[7], resp[8])
if resp[7] == 1:
proc_dimer = int((resp[8]/255)*100)
print("Żarówka na %", proc_dimer)
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
nazwa = komendy['nazwa']
print("Stan dimera", nazwa, " ", proc_dimer)
komenda = '{"command": "switchlight", "idx": ' + str(idx) + ', "switchcmd": "Set Level", "level": '+ str(proc_dimer) + ' }'
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
client.publish("domoticz/in", komenda)
else:
print('Brak opisu dimera !!!')
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
procent = stan / 255 * 100
komenda = '{"idx": ' + str(idx) + ', "nvalue" : 2, "svalue" : "' + str(procent) + '"}'
#print("Komenda to ...", komenda)
client.publish("domoticz/in", komenda)
# teraz tu umieszczę dalszy program :)
except socket.error as bld:
print("Error?")
plik = open("errory.log", mode="a+")
plik.write(time.asctime()+ ',' + str(bld)+ '\n')
except Exception as bld:
plik = open("errory.log", mode="a+")
plik.write(time.asctime() + ',' + str(bld) + '\n')
#pass
finally:
plik.close()
sock.close()
conn.close()
if __name__ == "__main__":
print("Start")
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# 7. ip i port mosquitto (domyslne ustawiania)
#client.connect("127.0.0.1", 1883, 60)
client.connect("", 1883, 60)
#http: // vps354642.ovh.net /
client.loop_start()
odczyt_mod() # wywołanie proedury odpytującej wszystkie moduły co 2
pytanie_o_status() # wywołanie procedury wykonywanej co 10 minut
czytaj() | plik = open("errory.log", mode="a+")
plik.write(time.asctime() + ',' + str(bld) + '\n')
client.publish("domoticz/in", komenda)
# teraz odczyt termostatu (id_urzadzenia ustawiony na 0x14)
id_urzadzenia = 0x14
| random_line_split |
hap_to_domo.py | """"
Skrypt stanowi brame pomiedzy domoticzem a siecia HAPCAN - interfejs Ethernet
uruchomiony mosquitto
W domoticzu w sekcji sprzet dodajemy - MQTT Client Gateway with LAN interface port 1883 ip 127.0.0.1
uruchamiamy z konsoli: python3 hapcan_domo.py
"""
from __future__ import print_function
import paho.mqtt.client as mqtt
import json
import threading
import os
import socket
import binascii
import time
# dla Things
import http.client
import urllib
from urllib.parse import urlparse
import parser
import happroc
def setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop():
while not stopped.wait(interval):
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True
t.start()
return stopped
return wrapper
return decorator
#Tutaj umieszczamy spis modułów naszego systemu
#formant (moduł,grupa):opis - możliwy zapis dziesiętny lub HEX
MAPOWANIE_MOD = {
(1,10):'BUT kotłownia',
}
# słownik opisujący nasz system Hapcan (moduł,grupa,kanał):(idx w Domoticzu, typ w Domoticzu, podtyp w Domoticzu
# dodatkowo można wykorzystać niektóre czujniki np. temeratury do zapisu do bazy ThingSpeac - wtedy należy dopisać dane pole_th - zgodne z nazwą w kanale i klucz czyli API key
#- możliwy zapis dziesiętny lub HEX
MAPOWANIE_HAP ={
# butony
(0x01, 0x0a, 0x11): {'idx': 26, 'pole_th': 'field1', 'klucz_th': ''},
(0xcf, 0x0b, 0x11): {'idx': 19},
# SW T1
(25,11,1):{'idx':37,'dtype': 'Light/Switch', 'switchType': 'On/Off','nazwa':'Lampa Gabinet PN'},
# termostat
(0x01, 0x0a, 0x14): {'idx': 70},
# rolety
(0x28, 0x0e, 0x01): {"idx": 62, "nvalue": 2, "svalue": "90",'czas_rol':20},
# dimmer
(32,6,1):{'idx':81,'dtype': 'Light/Switch', 'switchType': 'Dimmer','nazwa':'Dimmer'},
}
MAPOWANIE_DOM ={}
MAPOWANIE_MOD_SPS = {}
MAPOWANIE_THING = {}
IGNOROWANIE = {}
INDEKSY = {
1:0 # # lista modułów do sprawdzenia
}
OKRES_CZASU = {
1:1, # flaga okresowego odczytu podstawowego - na początku i potem co x minut
2:0, # flaga okresowego odczytu 1x na dobę
3:0,
}
FLAGI = {
1:0,
2:{'flaga':0,'nodes':(0,0,0),'procent':0}, # flaga sprawdzania rolety
}
# utworzenie słownika idx Domoticza
ks = list(MAPOWANIE_HAP.keys())
#ks. sort()
#indeks=0
for key in ks:
komendy = MAPOWANIE_HAP.get(key, None)
idx = komendy['idx']
map_temp = MAPOWANIE_HAP[key]
map_temp2 = {'nodes':key}
map_temp.update(map_temp2)
#print("Klucze MAP to ::::::", MAPOWANIE_HAP[key], 'klucz', key)
MAPOWANIE_DOM[idx]= map_temp
#MAPOWANIE_HAP.update(map_temp)
#indeks = indeks +1
#komendy = MAPOWANIE_DOM.get(key, None)
#print("Komenda to ...", komendy)
print("MAP HAP", MAPOWANIE_DOM)
indeks = 0
ks = list(MAPOWANIE_MOD.keys())
for key in ks:
indeks = indeks +1
map_temp ={'komunikat': 0x1090}
list_temp = [0xf0, 0xf0, 0xff, 0xff]
list_temp.append(key[0])
list_temp.append(key[1])
list_temp += [0xff, 0xff, 0xff, 0xff]
map_temp2 = {'dane': list_temp}
map_temp.update(map_temp2)
#map_temp2.update(hex(key[0]))
MAPOWANIE_MOD_SPS[indeks] = map_temp
print(MAPOWANIE_MOD_SPS)
@setInterval(2)
def odczyt_mod():
okres_czasu = OKRES_CZASU.get(1)
indeks_mod = INDEKSY.get(1)
if OKRES_CZASU.get(1):
indeks_mod = indeks_mod +1
komenda = MAPOWANIE_MOD_SPS.get(indeks_mod, None)
if komenda is not None:
#print("komenda do wysłania do Hapcana", komenda)
wyslij(komenda['komunikat'], komenda['dane'])
INDEKSY[1] = indeks_mod
else:
INDEKSY[1]=0 # kasujemy licznik listy modułów do odczytu
OKRES_CZASU[1]=0 # ustawiamy flagę następnego odczytu za 10 minut
@setInterval(600) # Wysylanie zapytania do 100 sekund
def pytanie_o_status():
print("pytanie_o_status do Hapcana",OKRES_CZASU, "Ignoruj", IGNOROWANIE)
OKRES_CZASU[1]=1
def on_connect(client, userdata, flags, rc):
print("Połączony z moskitem czyta domoticza... " + str(rc))
client.subscribe("domoticz/out")
def on_message(client, userdata, msg):
try:
payload = json.loads(msg.payload.decode('ascii'))
#print("wiadomosc od Domoticza ", payload)
idx = payload['idx']
typ_idx = payload['dtype']
nvalue = payload['nvalue']
svalue1 = payload['svalue1']
except ValueError:
print("Błąd formatu json", str(msg))
return
#print("wiadomosc od Domoticza ", payload)
if typ_idx == 'Light/Switch':
typ_switch = payload['switchType']
if typ_switch == 'Blinds Percentage':
print ('a to była roleta')
#print(IGNOROWANIE)
# ignorowanie jest potrzebna gdzy nie ma mozliwosci rozroznienia z wiadowmosci czy dostajemy odpowiedz na nasza wiadomosc
ignoruj = IGNOROWANIE.get(idx, 0)
# print("ignoruj", ignoruj)
if ignoruj is 0:
komunikat= 0x10A0
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
# dane': [0xF0, 0xF0, 0x01, 0x01, 0x19, 0x0b, 0x00, 0xFF, 0xFF, 0xFF]},
# znajdz komenda dla danego idx i nvalue
#print("Otrzymałem od Domoticza", "idx", idx, "nvalue", nvalue, "svalue1", svalue1, payload)
komendy = MAPOWANIE_DOM.get(idx, None)
#klucz = (nvalue, svalue1)
#print("komendy", komendy, "Klucz",klucz)
if komendy is not None:
nodes = komendy.get(('nodes'), None)
#print("komenda do wysłania do Hapcana od Domoticza", komenda, "Payload", payload)
if nodes is not None:
# sprawdzenie jaki rodzaj urządzenia w Domoticzu
if typ_idx == 'Light/Switch': # sprawdzamy czy switch
if typ_switch == 'On/Off': # tylko ON/OFF
dane[2]= nvalue
dane[3]=2**(nodes[2]-1)
dane[4]=nodes[0]
dane[5]=nodes[1]
print (dane)
wyslij(komunikat,dane)
if typ_switch == 'Blinds Percentage':
print('moja roleta !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', payload)
FLAGI[1] =1
ustaw_roleta(nodes,nvalue,svalue1)
#print("Wysylem ", komenda['komunikat'], komenda['dane'])
# print("Wysylem ",komenda['komunikat'], komenda['dane'])
#wyslij(komenda['komunikat'], komenda['dane'])
if typ_switch == 'Dimmer': # tylko ON/OFF
dane[2]= 0
dane[3]= int(payload['Level']*2.55)
dane[4]=nodes[0]
dane[5]=nodes[1]
wyslij(komunikat,dane)
else:
IGNOROWANIE[idx] = ignoruj - 1
def ustaw_roleta(nodes,nvalue,svalue1):
map_temp = {'nodes': nodes}
if nvalue < 2:
procent = 100 * nvalue
else:
procent = int(svalue1)
map_temp2 = {'procent': procent}
map_temp.update(map_temp2)
FLAGI[2] = map_temp
# FLAGI = {
#'dane': [0xf0, 0xf0, 0xff, 0xff, 0x28, 0x0e, 0xff, 0xff, 0xff, 0xff]},
print("@@@ a roleta to flagi: ",FLAGI)
komunikat = 0x1090
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
dane[4] = nodes[0]
dane[5] = nodes[1]
wyslij(komunikat,dane)
def hap_crc(ramka):
h_crc = 0
for x in range(12):
h_crc = h_crc + ramka[x + 1]
h_crc = h_crc % 256
return h_crc
def wyslij(id_komunikatu, dane):
try:
proto = socket.getprotobyname('tcp')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
sock.connect(("192.168.1.201", 1001))
msg = bytearray()
msg.append(0xAA)
b2 = (id_komunikatu >> 8) & 0xFF;
msg.append(b2)
b1 = (id_komunikatu) & 0xFF;
msg.append(b1)
for val in dane:
msg.append(val)
msg.append(hap_crc(msg))
msg.append(0xA5)
sock.sendall(msg)
print('wyslano =', binascii.hexlify(msg))
except socket.error:
pass
finally:
sock.close()
def toHex(val):
return '0x{:02x}'.format(val)
def czytaj():
# główna pętla odczytująca status Hapcana z bramki LAN
proto = socket.getprotobyname('tcp')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
conn = http.client.HTTPConnection("api.thingspeak.com:80")
try:
# 8. Ip i port modulu HAPCAN ethernet
sock.connect(("192.168.1.1", 1002))
while True:
#resp = bytearray()
resp = sock.recv(1)
# teraz sprawdzi czy początek odebranego ciągu to początek ramki
if resp[0] == 0xaa:
# pobranie pozostałej części ramki
for i in range(14):
resp += sock.recv(1)
# sprawdze |
if hap_crc(resp) == resp[13]:
modul = resp[3]
grupa = resp[4]
id_urzadzenia = resp[7]
stan = resp[8]
if resp[1] == 0x30: #rozkaz stanu
#print("Rozkaz stanu", "to hex",toHex(resp[2]))
if resp[2] == 0x00: # ramka czasu
print("Ramka czasu",toHex(resp[3]),toHex(resp[4]) )
czas_pracy = OKRES_CZASU.get(3)
czas_pracy = czas_pracy+1
OKRES_CZASU[3] = czas_pracy
if resp[2] == 0x20 or resp[2] == 0x21: # ramka przekaźnika
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
nazwa = komendy['nazwa']
print("Stan switcha",nazwa," ",str(stan & 1))
komenda = '{"idx": ' + str(idx) + ', "nvalue" : ' +str(stan &1) + ', "svalue" : "0"}'
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
client.publish("domoticz/in", komenda)
else:
print('Brak opisu przekaźnika !!!')
if resp[2] == 0x40 or resp[2] == 0x41: # ramka przycisku
#print("Ramka przycisku",)
if resp[7] == 0x11: # ramka temperatury
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
tt1 = happroc.spr_temp(resp[8], resp[9])
komenda = '{"idx": '+ str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
#print("Komenda to ...",komenda)
pole_th = komendy.get('pole_th',None)
if pole_th is not None:
print("Temp THING to ....",tt1,)
klucz_th = komendy.get('klucz_th',None)
params = urllib.parse.urlencode({pole_th : tt1, 'key': klucz_th})
try:
conn.request("POST", "/update", params, headers)
response = conn.getresponse()
data = response.read()
#print('odpowiedź od Th',data)
except Exception as bld:
print("connection failed z Thingiem")
plik = open("errory.log", mode="a+")
plik.write(time.asctime() + ',' + str(bld) + '\n')
client.publish("domoticz/in", komenda)
# teraz odczyt termostatu (id_urzadzenia ustawiony na 0x14)
id_urzadzenia = 0x14
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
tt1 = happroc.spr_temp(resp[10], resp[11])
komenda = '{"idx": ' + str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
#print("Komenda to ...", komenda)
client.publish("domoticz/in", komenda)
if resp[2] == 0x70 or resp[2] == 0x71: # ramka rolety
#print("Ramka rolety", procent)
if FLAGI[1]:
map_temp = FLAGI[2]
nodes = map_temp.get('nodes')
procent = map_temp.get('procent')
if id_urzadzenia == nodes[2]:
if resp[9] == 0: # sprawdza czy nie jest w ruchu
map_temp2 = MAPOWANIE_HAP.get(nodes)
czas_rol = map_temp2.get('czas_rol')
komunikat = 0x10A0
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
dane[3] = 2 ** (nodes[2] - 1)
dane[4] = nodes[0]
dane[5] = nodes[1]
#stan = stan / 255 * 100
ile_czasu = czas_rol*((stan/2.55) - procent)/100
if ile_czasu < 0:
dane[2]=0x04
else:
dane[2]=0x03
print("**********************************************", komunikat, "dane", dane,"; procent", procent, "; stan rolety", stan, "; ile czasu", ile_czasu)
wyslij(komunikat,dane)
dane[2]=0
dane[6]=int(round(abs(ile_czasu),0))
print("**********************************************", komunikat, "dane", dane)
wyslij(komunikat, dane)
FLAGI[1] = 0
if resp[2] == 0x60 or resp[2] == 0x61: # ramka Dimmera
#print("Ramka dimera nr kanału", resp[7], resp[8])
if resp[7] == 1:
proc_dimer = int((resp[8]/255)*100)
print("Żarówka na %", proc_dimer)
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
nazwa = komendy['nazwa']
print("Stan dimera", nazwa, " ", proc_dimer)
komenda = '{"command": "switchlight", "idx": ' + str(idx) + ', "switchcmd": "Set Level", "level": '+ str(proc_dimer) + ' }'
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
client.publish("domoticz/in", komenda)
else:
print('Brak opisu dimera !!!')
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
procent = stan / 255 * 100
komenda = '{"idx": ' + str(idx) + ', "nvalue" : 2, "svalue" : "' + str(procent) + '"}'
#print("Komenda to ...", komenda)
client.publish("domoticz/in", komenda)
# teraz tu umieszczę dalszy program :)
except socket.error as bld:
print("Error?")
plik = open("errory.log", mode="a+")
plik.write(time.asctime()+ ',' + str(bld)+ '\n')
except Exception as bld:
plik = open("errory.log", mode="a+")
plik.write(time.asctime() + ',' + str(bld) + '\n')
#pass
finally:
plik.close()
sock.close()
conn.close()
if __name__ == "__main__":
print("Start")
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# 7. ip i port mosquitto (domyslne ustawiania)
#client.connect("127.0.0.1", 1883, 60)
client.connect("", 1883, 60)
#http: // vps354642.ovh.net /
client.loop_start()
odczyt_mod() # wywołanie proedury odpytującej wszystkie moduły co 2
pytanie_o_status() # wywołanie procedury wykonywanej co 10 minut
czytaj()
| nie sumy kontrolnej
| conditional_block |
hap_to_domo.py | """"
Skrypt stanowi brame pomiedzy domoticzem a siecia HAPCAN - interfejs Ethernet
uruchomiony mosquitto
W domoticzu w sekcji sprzet dodajemy - MQTT Client Gateway with LAN interface port 1883 ip 127.0.0.1
uruchamiamy z konsoli: python3 hapcan_domo.py
"""
from __future__ import print_function
import paho.mqtt.client as mqtt
import json
import threading
import os
import socket
import binascii
import time
# dla Things
import http.client
import urllib
from urllib.parse import urlparse
import parser
import happroc
def setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop():
while not stopped.wait(interval):
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True
t.start()
return stopped
return wrapper
return decorator
#Tutaj umieszczamy spis modułów naszego systemu
#formant (moduł,grupa):opis - możliwy zapis dziesiętny lub HEX
MAPOWANIE_MOD = {
(1,10):'BUT kotłownia',
}
# słownik opisujący nasz system Hapcan (moduł,grupa,kanał):(idx w Domoticzu, typ w Domoticzu, podtyp w Domoticzu
# dodatkowo można wykorzystać niektóre czujniki np. temeratury do zapisu do bazy ThingSpeac - wtedy należy dopisać dane pole_th - zgodne z nazwą w kanale i klucz czyli API key
#- możliwy zapis dziesiętny lub HEX
MAPOWANIE_HAP ={
# butony
(0x01, 0x0a, 0x11): {'idx': 26, 'pole_th': 'field1', 'klucz_th': ''},
(0xcf, 0x0b, 0x11): {'idx': 19},
# SW T1
(25,11,1):{'idx':37,'dtype': 'Light/Switch', 'switchType': 'On/Off','nazwa':'Lampa Gabinet PN'},
# termostat
(0x01, 0x0a, 0x14): {'idx': 70},
# rolety
(0x28, 0x0e, 0x01): {"idx": 62, "nvalue": 2, "svalue": "90",'czas_rol':20},
# dimmer
(32,6,1):{'idx':81,'dtype': 'Light/Switch', 'switchType': 'Dimmer','nazwa':'Dimmer'},
}
MAPOWANIE_DOM ={}
MAPOWANIE_MOD_SPS = {}
MAPOWANIE_THING = {}
IGNOROWANIE = {}
INDEKSY = {
1:0 # # lista modułów do sprawdzenia
}
OKRES_CZASU = {
1:1, # flaga okresowego odczytu podstawowego - na początku i potem co x minut
2:0, # flaga okresowego odczytu 1x na dobę
3:0,
}
FLAGI = {
1:0,
2:{'flaga':0,'nodes':(0,0,0),'procent':0}, # flaga sprawdzania rolety
}
# utworzenie słownika idx Domoticza
ks = list(MAPOWANIE_HAP.keys())
#ks. sort()
#indeks=0
for key in ks:
komendy = MAPOWANIE_HAP.get(key, None)
idx = komendy['idx']
map_temp = MAPOWANIE_HAP[key]
map_temp2 = {'nodes':key}
map_temp.update(map_temp2)
#print("Klucze MAP to ::::::", MAPOWANIE_HAP[key], 'klucz', key)
MAPOWANIE_DOM[idx]= map_temp
#MAPOWANIE_HAP.update(map_temp)
#indeks = indeks +1
#komendy = MAPOWANIE_DOM.get(key, None)
#print("Komenda to ...", komendy)
print("MAP HAP", MAPOWANIE_DOM)
indeks = 0
ks = list(MAPOWANIE_MOD.keys())
for key in ks:
indeks = indeks +1
map_temp ={'komunikat': 0x1090}
list_temp = [0xf0, 0xf0, 0xff, 0xff]
list_temp.append(key[0])
list_temp.append(key[1])
list_temp += [0xff, 0xff, 0xff, 0xff]
map_temp2 = {'dane': list_temp}
map_temp.update(map_temp2)
#map_temp2.update(hex(key[0]))
MAPOWANIE_MOD_SPS[indeks] = map_temp
print(MAPOWANIE_MOD_SPS)
@setInterval(2)
def odczyt_mod():
okres_czasu = OKRES_CZASU.get(1)
indeks_mod = INDEKSY.get(1)
if OKRES_CZASU.get(1):
indeks_mod = indeks_mod +1
komenda = MAPOWANIE_MOD_SPS.get(indeks_mod, None)
if komenda is not None:
#print("komenda do wysłania do Hapcana", komenda)
wyslij(komenda['komunikat'], komenda['dane'])
INDEKSY[1] = indeks_mod
else:
INDEKSY[1]=0 # kasujemy licznik listy modułów do odczytu
OKRES_CZASU[1]=0 # ustawiamy flagę następnego odczytu za 10 minut
@setInterval(600) # Wysylanie zapytania do 100 sekund
def pytanie_o_status():
print("pytanie_o_status do Hapcana",OKRES_CZASU, "Ignoruj", IGNOROWANIE)
OKRES_CZASU[1]=1
def on_connect(client, userdata, flags, rc):
print("Połączony z moskitem czyta domoticza... " + str(rc))
client.subscribe("domoticz/out")
def on_message(client, userdata, msg):
try:
payload = json.loads(msg.payload.decode('ascii'))
#print("wiadomosc od Domoticza ", payload)
idx = payload['idx']
typ_idx = payload['dtype']
nvalue = payload['nvalue']
svalue1 = payload['svalue1']
except ValueError:
print("Błąd formatu json", str(msg))
return
#print("wiadomosc od Domoticza ", payload)
if typ_idx == 'Light/Switch':
typ_switch = payload['switchType']
if typ_switch == 'Blinds Percentage':
print ('a to była roleta')
#print(IGNOROWANIE)
# ignorowanie jest potrzebna gdzy nie ma mozliwosci rozroznienia z wiadowmosci czy dostajemy odpowiedz na nasza wiadomosc
ignoruj = IGNOROWANIE.get(idx, 0)
# print("ignoruj", ignoruj)
if ignoruj is 0:
komunikat= 0x10A0
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
# dane': [0xF0, 0xF0, 0x01, 0x01, 0x19, 0x0b, 0x00, 0xFF, 0xFF, 0xFF]},
# znajdz komenda dla danego idx i nvalue
#print("Otrzymałem od Domoticza", "idx", idx, "nvalue", nvalue, "svalue1", svalue1, payload)
komendy = MAPOWANIE_DOM.get(idx, None)
#klucz = (nvalue, svalue1)
#print("komendy", komendy, "Klucz",klucz)
if komendy is not None:
nodes = komendy.get(('nodes'), None)
#print("komenda do wysłania do Hapcana od Domoticza", komenda, "Payload", payload)
if nodes is not None:
# sprawdzenie jaki rodzaj urządzenia w Domoticzu
if typ_idx == 'Light/Switch': # sprawdzamy czy switch
if typ_switch == 'On/Off': # tylko ON/OFF
dane[2]= nvalue
dane[3]=2**(nodes[2]-1)
dane[4]=nodes[0]
dane[5]=nodes[1]
print (dane)
wyslij(komunikat,dane)
if typ_switch == 'Blinds Percentage':
print('moja roleta !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', payload)
FLAGI[1] =1
ustaw_roleta(nodes,nvalue,svalue1)
#print("Wysylem ", komenda['komunikat'], komenda['dane'])
# print("Wysylem ",komenda['komunikat'], komenda['dane'])
#wyslij(komenda['komunikat'], komenda['dane'])
if typ_switch == 'Dimmer': # tylko ON/OFF
dane[2]= 0
dane[3]= int(payload['Level']*2.55)
dane[4]=nodes[0]
dane[5]=nodes[1]
wyslij(komunikat,dane)
else:
IGNOROWANIE[idx] = ignoruj - 1
def ustaw_roleta(nodes,nvalue,svalue1):
| = {'nodes': nodes}
if nvalue < 2:
procent = 100 * nvalue
else:
procent = int(svalue1)
map_temp2 = {'procent': procent}
map_temp.update(map_temp2)
FLAGI[2] = map_temp
# FLAGI = {
#'dane': [0xf0, 0xf0, 0xff, 0xff, 0x28, 0x0e, 0xff, 0xff, 0xff, 0xff]},
print("@@@ a roleta to flagi: ",FLAGI)
komunikat = 0x1090
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
dane[4] = nodes[0]
dane[5] = nodes[1]
wyslij(komunikat,dane)
def hap_crc(ramka):
h_crc = 0
for x in range(12):
h_crc = h_crc + ramka[x + 1]
h_crc = h_crc % 256
return h_crc
def wyslij(id_komunikatu, dane):
try:
proto = socket.getprotobyname('tcp')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
sock.connect(("192.168.1.201", 1001))
msg = bytearray()
msg.append(0xAA)
b2 = (id_komunikatu >> 8) & 0xFF;
msg.append(b2)
b1 = (id_komunikatu) & 0xFF;
msg.append(b1)
for val in dane:
msg.append(val)
msg.append(hap_crc(msg))
msg.append(0xA5)
sock.sendall(msg)
print('wyslano =', binascii.hexlify(msg))
except socket.error:
pass
finally:
sock.close()
def toHex(val):
return '0x{:02x}'.format(val)
def czytaj():
# główna pętla odczytująca status Hapcana z bramki LAN
proto = socket.getprotobyname('tcp')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
conn = http.client.HTTPConnection("api.thingspeak.com:80")
try:
# 8. Ip i port modulu HAPCAN ethernet
sock.connect(("192.168.1.1", 1002))
while True:
#resp = bytearray()
resp = sock.recv(1)
# teraz sprawdzi czy początek odebranego ciągu to początek ramki
if resp[0] == 0xaa:
# pobranie pozostałej części ramki
for i in range(14):
resp += sock.recv(1)
# sprawdzenie sumy kontrolnej
if hap_crc(resp) == resp[13]:
modul = resp[3]
grupa = resp[4]
id_urzadzenia = resp[7]
stan = resp[8]
if resp[1] == 0x30: #rozkaz stanu
#print("Rozkaz stanu", "to hex",toHex(resp[2]))
if resp[2] == 0x00: # ramka czasu
print("Ramka czasu",toHex(resp[3]),toHex(resp[4]) )
czas_pracy = OKRES_CZASU.get(3)
czas_pracy = czas_pracy+1
OKRES_CZASU[3] = czas_pracy
if resp[2] == 0x20 or resp[2] == 0x21: # ramka przekaźnika
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
nazwa = komendy['nazwa']
print("Stan switcha",nazwa," ",str(stan & 1))
komenda = '{"idx": ' + str(idx) + ', "nvalue" : ' +str(stan &1) + ', "svalue" : "0"}'
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
client.publish("domoticz/in", komenda)
else:
print('Brak opisu przekaźnika !!!')
if resp[2] == 0x40 or resp[2] == 0x41: # ramka przycisku
#print("Ramka przycisku",)
if resp[7] == 0x11: # ramka temperatury
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
tt1 = happroc.spr_temp(resp[8], resp[9])
komenda = '{"idx": '+ str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
#print("Komenda to ...",komenda)
pole_th = komendy.get('pole_th',None)
if pole_th is not None:
print("Temp THING to ....",tt1,)
klucz_th = komendy.get('klucz_th',None)
params = urllib.parse.urlencode({pole_th : tt1, 'key': klucz_th})
try:
conn.request("POST", "/update", params, headers)
response = conn.getresponse()
data = response.read()
#print('odpowiedź od Th',data)
except Exception as bld:
print("connection failed z Thingiem")
plik = open("errory.log", mode="a+")
plik.write(time.asctime() + ',' + str(bld) + '\n')
client.publish("domoticz/in", komenda)
# teraz odczyt termostatu (id_urzadzenia ustawiony na 0x14)
id_urzadzenia = 0x14
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
tt1 = happroc.spr_temp(resp[10], resp[11])
komenda = '{"idx": ' + str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
#print("Komenda to ...", komenda)
client.publish("domoticz/in", komenda)
if resp[2] == 0x70 or resp[2] == 0x71: # ramka rolety
#print("Ramka rolety", procent)
if FLAGI[1]:
map_temp = FLAGI[2]
nodes = map_temp.get('nodes')
procent = map_temp.get('procent')
if id_urzadzenia == nodes[2]:
if resp[9] == 0: # sprawdza czy nie jest w ruchu
map_temp2 = MAPOWANIE_HAP.get(nodes)
czas_rol = map_temp2.get('czas_rol')
komunikat = 0x10A0
dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
dane[3] = 2 ** (nodes[2] - 1)
dane[4] = nodes[0]
dane[5] = nodes[1]
#stan = stan / 255 * 100
ile_czasu = czas_rol*((stan/2.55) - procent)/100
if ile_czasu < 0:
dane[2]=0x04
else:
dane[2]=0x03
print("**********************************************", komunikat, "dane", dane,"; procent", procent, "; stan rolety", stan, "; ile czasu", ile_czasu)
wyslij(komunikat,dane)
dane[2]=0
dane[6]=int(round(abs(ile_czasu),0))
print("**********************************************", komunikat, "dane", dane)
wyslij(komunikat, dane)
FLAGI[1] = 0
if resp[2] == 0x60 or resp[2] == 0x61: # ramka Dimmera
#print("Ramka dimera nr kanału", resp[7], resp[8])
if resp[7] == 1:
proc_dimer = int((resp[8]/255)*100)
print("Żarówka na %", proc_dimer)
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
nazwa = komendy['nazwa']
print("Stan dimera", nazwa, " ", proc_dimer)
komenda = '{"command": "switchlight", "idx": ' + str(idx) + ', "switchcmd": "Set Level", "level": '+ str(proc_dimer) + ' }'
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
client.publish("domoticz/in", komenda)
else:
print('Brak opisu dimera !!!')
komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
if komendy is not None:
idx = komendy['idx']
IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
procent = stan / 255 * 100
komenda = '{"idx": ' + str(idx) + ', "nvalue" : 2, "svalue" : "' + str(procent) + '"}'
#print("Komenda to ...", komenda)
client.publish("domoticz/in", komenda)
# teraz tu umieszczę dalszy program :)
except socket.error as bld:
print("Error?")
plik = open("errory.log", mode="a+")
plik.write(time.asctime()+ ',' + str(bld)+ '\n')
except Exception as bld:
plik = open("errory.log", mode="a+")
plik.write(time.asctime() + ',' + str(bld) + '\n')
#pass
finally:
plik.close()
sock.close()
conn.close()
if __name__ == "__main__":
print("Start")
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# 7. ip i port mosquitto (domyslne ustawiania)
#client.connect("127.0.0.1", 1883, 60)
client.connect("", 1883, 60)
#http: // vps354642.ovh.net /
client.loop_start()
odczyt_mod() # wywołanie proedury odpytującej wszystkie moduły co 2
pytanie_o_status() # wywołanie procedury wykonywanej co 10 minut
czytaj()
| map_temp | identifier_name |
hap_to_domo.py | """"
Skrypt stanowi brame pomiedzy domoticzem a siecia HAPCAN - interfejs Ethernet
uruchomiony mosquitto
W domoticzu w sekcji sprzet dodajemy - MQTT Client Gateway with LAN interface port 1883 ip 127.0.0.1
uruchamiamy z konsoli: python3 hapcan_domo.py
"""
from __future__ import print_function
import paho.mqtt.client as mqtt
import json
import threading
import os
import socket
import binascii
import time
# dla Things
import http.client
import urllib
from urllib.parse import urlparse
import parser
import happroc
def setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop():
while not stopped.wait(interval):
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True
t.start()
return stopped
return wrapper
return decorator
#Tutaj umieszczamy spis modułów naszego systemu
#formant (moduł,grupa):opis - możliwy zapis dziesiętny lub HEX
MAPOWANIE_MOD = {
(1,10):'BUT kotłownia',
}
# słownik opisujący nasz system Hapcan (moduł,grupa,kanał):(idx w Domoticzu, typ w Domoticzu, podtyp w Domoticzu
# dodatkowo można wykorzystać niektóre czujniki np. temeratury do zapisu do bazy ThingSpeac - wtedy należy dopisać dane pole_th - zgodne z nazwą w kanale i klucz czyli API key
#- możliwy zapis dziesiętny lub HEX
MAPOWANIE_HAP ={
# butony
(0x01, 0x0a, 0x11): {'idx': 26, 'pole_th': 'field1', 'klucz_th': ''},
(0xcf, 0x0b, 0x11): {'idx': 19},
# SW T1
(25,11,1):{'idx':37,'dtype': 'Light/Switch', 'switchType': 'On/Off','nazwa':'Lampa Gabinet PN'},
# termostat
(0x01, 0x0a, 0x14): {'idx': 70},
# rolety
(0x28, 0x0e, 0x01): {"idx": 62, "nvalue": 2, "svalue": "90",'czas_rol':20},
# dimmer
(32,6,1):{'idx':81,'dtype': 'Light/Switch', 'switchType': 'Dimmer','nazwa':'Dimmer'},
}
MAPOWANIE_DOM ={}
MAPOWANIE_MOD_SPS = {}
MAPOWANIE_THING = {}
IGNOROWANIE = {}
INDEKSY = {
1:0 # # lista modułów do sprawdzenia
}
OKRES_CZASU = {
1:1, # flaga okresowego odczytu podstawowego - na początku i potem co x minut
2:0, # flaga okresowego odczytu 1x na dobę
3:0,
}
FLAGI = {
1:0,
2:{'flaga':0,'nodes':(0,0,0),'procent':0}, # flaga sprawdzania rolety
}
# utworzenie słownika idx Domoticza
ks = list(MAPOWANIE_HAP.keys())
#ks. sort()
#indeks=0
for key in ks:
komendy = MAPOWANIE_HAP.get(key, None)
idx = komendy['idx']
map_temp = MAPOWANIE_HAP[key]
map_temp2 = {'nodes':key}
map_temp.update(map_temp2)
#print("Klucze MAP to ::::::", MAPOWANIE_HAP[key], 'klucz', key)
MAPOWANIE_DOM[idx]= map_temp
#MAPOWANIE_HAP.update(map_temp)
#indeks = indeks +1
#komendy = MAPOWANIE_DOM.get(key, None)
#print("Komenda to ...", komendy)
print("MAP HAP", MAPOWANIE_DOM)
indeks = 0
ks = list(MAPOWANIE_MOD.keys())
for key in ks:
indeks = indeks +1
map_temp ={'komunikat': 0x1090}
list_temp = [0xf0, 0xf0, 0xff, 0xff]
list_temp.append(key[0])
list_temp.append(key[1])
list_temp += [0xff, 0xff, 0xff, 0xff]
map_temp2 = {'dane': list_temp}
map_temp.update(map_temp2)
#map_temp2.update(hex(key[0]))
MAPOWANIE_MOD_SPS[indeks] = map_temp
print(MAPOWANIE_MOD_SPS)
@setInterval(2)
def odczyt_mod():
okres_czasu = OKRES_CZASU.get(1)
indeks_mod = INDEKSY.get(1)
if OKRES_CZASU.get(1):
indeks_mod = indeks_mod +1
komenda = MAPOWANIE_MOD_SPS.get(indeks_mod, None)
if komenda is not None:
#print("komenda do wysłania do Hapcana", komenda)
wyslij(komenda['komunikat'], komenda['dane'])
INDEKSY[1] = indeks_mod
else:
INDEKSY[1]=0 # kasujemy licznik listy modułów do odczytu
OKRES_CZASU[1]=0 # ustawiamy flagę następnego odczytu za 10 minut
@setInterval(600) # Wysylanie zapytania do 100 sekund
def pytanie_o_status():
print("pytanie_o_status do Hap | serdata, flags, rc):
print("Połączony z moskitem czyta domoticza... " + str(rc))
client.subscribe("domoticz/out")
def on_message(client, userdata, msg):
    """MQTT callback for ``domoticz/out``: translate a Domoticz state change
    into a Hapcan frame and send it via ``wyslij``.

    Uses module-level maps: IGNOROWANIE (echo suppression counters),
    MAPOWANIE_DOM (Domoticz idx -> Hapcan node info) and FLAGI.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    try:
        payload = json.loads(msg.payload.decode('ascii'))
        idx = payload['idx']
        typ_idx = payload['dtype']
        nvalue = payload['nvalue']
        svalue1 = payload['svalue1']
    # KeyError added: a payload missing a field used to crash the MQTT loop.
    except (ValueError, KeyError):
        print("Błąd formatu json", str(msg))
        return
    if typ_idx == 'Light/Switch':
        typ_switch = payload['switchType']
        if typ_switch == 'Blinds Percentage':
            print('a to była roleta')
    # Echo suppression: when we published a state to Domoticz ourselves,
    # IGNOROWANIE[idx] > 0 and the mirrored message must not be re-sent to Hapcan.
    ignoruj = IGNOROWANIE.get(idx, 0)
    if ignoruj == 0:  # was `is 0` — identity test on an int is incorrect
        komunikat = 0x10A0
        dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
        komendy = MAPOWANIE_DOM.get(idx, None)
        if komendy is not None:
            nodes = komendy.get('nodes', None)
            if nodes is not None:
                # Dispatch on the Domoticz device type.
                if typ_idx == 'Light/Switch':
                    if typ_switch == 'On/Off':
                        dane[2] = nvalue
                        dane[3] = 2 ** (nodes[2] - 1)  # channel bit mask
                        dane[4] = nodes[0]
                        dane[5] = nodes[1]
                        print(dane)
                        wyslij(komunikat, dane)
                    elif typ_switch == 'Blinds Percentage':
                        print('moja roleta !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', payload)
                        FLAGI[1] = 1  # czytaj() finishes the move once status arrives
                        ustaw_roleta(nodes, nvalue, svalue1)
                    elif typ_switch == 'Dimmer':
                        dane[2] = 0
                        dane[3] = int(payload['Level'] * 2.55)  # 0-100 % -> 0-255
                        dane[4] = nodes[0]
                        dane[5] = nodes[1]
                        wyslij(komunikat, dane)
    else:
        IGNOROWANIE[idx] = ignoruj - 1
def ustaw_roleta(nodes, nvalue, svalue1):
    """Remember a requested blind position in FLAGI[2] and query the module
    for its current status (frame 0x1090); czytaj() completes the move."""
    # nvalue 0/1 map straight to 0 %/100 %; any other value carries the
    # percentage in svalue1.
    docelowy = 100 * nvalue if nvalue < 2 else int(svalue1)
    FLAGI[2] = {'nodes': nodes, 'procent': docelowy}
    print("@@@ a roleta to flagi: ", FLAGI)
    # Status-request frame for this node.
    ramka = [0xF0, 0xF0, 0x00, 0x00, nodes[0], nodes[1], 0x00, 0xFF, 0xFF, 0xFF]
    wyslij(0x1090, ramka)
def hap_crc(ramka):
    """Hapcan frame checksum: sum of bytes 1..12 (inclusive), modulo 256."""
    return sum(ramka[1:13]) % 256
def wyslij(id_komunikatu, dane):
    """Send one Hapcan frame to the ethernet gateway at 192.168.1.201:1001.

    Frame layout: 0xAA, id high byte, id low byte, payload bytes, checksum, 0xA5.
    Best-effort by design: socket errors are swallowed (the caller cannot retry).
    """
    sock = None  # fix: if socket creation raises, `finally` used to NameError
    try:
        proto = socket.getprotobyname('tcp')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
        sock.connect(("192.168.1.201", 1001))
        msg = bytearray()
        msg.append(0xAA)                         # start-of-frame marker
        msg.append((id_komunikatu >> 8) & 0xFF)  # message id, high byte
        msg.append(id_komunikatu & 0xFF)         # message id, low byte
        msg.extend(dane)
        msg.append(hap_crc(msg))                 # checksum over bytes 1..12
        msg.append(0xA5)                         # end-of-frame marker
        sock.sendall(msg)
        print('wyslano =', binascii.hexlify(msg))
    except socket.error:
        # Deliberate best-effort send; keep the original silent behaviour.
        pass
    finally:
        if sock is not None:
            sock.close()
def toHex(val):
    """Format an integer as a zero-padded two-digit hex literal, e.g. 10 -> '0x0a'."""
    return '0x' + format(val, '02x')
def czytaj():
    # Main loop: reads Hapcan status frames from the LAN gateway, validates the
    # checksum and forwards decoded state changes to Domoticz (via MQTT on the
    # module-level `client`) and temperatures to ThingSpeak (HTTP POST).
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump —
    # nesting of the frame handlers below should be verified against the original.
    proto = socket.getprotobyname('tcp')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    conn = http.client.HTTPConnection("api.thingspeak.com:80")
    try:
        # 8. IP and port of the HAPCAN ethernet module
        sock.connect(("192.168.1.1", 1002))
        while True:
            #resp = bytearray()
            resp = sock.recv(1)
            # check whether the received byte is the start-of-frame marker
            if resp[0] == 0xaa:
                # fetch the remaining part of the frame (14 more bytes)
                for i in range(14):
                    resp += sock.recv(1)
                # checksum verification
                if hap_crc(resp) == resp[13]:
                    modul = resp[3]
                    grupa = resp[4]
                    id_urzadzenia = resp[7]
                    stan = resp[8]
                    if resp[1] == 0x30: # status message
                        #print("Rozkaz stanu", "to hex",toHex(resp[2]))
                        if resp[2] == 0x00: # time frame
                            print("Ramka czasu",toHex(resp[3]),toHex(resp[4]) )
                            czas_pracy = OKRES_CZASU.get(3)
                            czas_pracy = czas_pracy+1
                            OKRES_CZASU[3] = czas_pracy
                        if resp[2] == 0x20 or resp[2] == 0x21: # relay frame
                            komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
                            if komendy is not None:
                                idx = komendy['idx']
                                nazwa = komendy['nazwa']
                                print("Stan switcha",nazwa," ",str(stan & 1))
                                komenda = '{"idx": ' + str(idx) + ', "nvalue" : ' +str(stan &1) + ', "svalue" : "0"}'
                                # bump echo-suppression so on_message ignores our own publish
                                IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
                                client.publish("domoticz/in", komenda)
                            else:
                                print('Brak opisu przekaźnika !!!')
                        if resp[2] == 0x40 or resp[2] == 0x41: # button-module frame
                            #print("Ramka przycisku",)
                            if resp[7] == 0x11: # temperature frame
                                komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
                                if komendy is not None:
                                    idx = komendy['idx']
                                    IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
                                    tt1 = happroc.spr_temp(resp[8], resp[9])
                                    komenda = '{"idx": '+ str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
                                    #print("Komenda to ...",komenda)
                                    # optional ThingSpeak upload when a field is mapped
                                    pole_th = komendy.get('pole_th',None)
                                    if pole_th is not None:
                                        print("Temp THING to ....",tt1,)
                                        klucz_th = komendy.get('klucz_th',None)
                                        params = urllib.parse.urlencode({pole_th : tt1, 'key': klucz_th})
                                        try:
                                            conn.request("POST", "/update", params, headers)
                                            response = conn.getresponse()
                                            data = response.read()
                                            #print('odpowiedź od Th',data)
                                        except Exception as bld:
                                            print("connection failed z Thingiem")
                                            plik = open("errory.log", mode="a+")
                                            plik.write(time.asctime() + ',' + str(bld) + '\n')
                                    client.publish("domoticz/in", komenda)
                                # now read the thermostat (id_urzadzenia fixed at 0x14)
                                id_urzadzenia = 0x14
                                komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
                                if komendy is not None:
                                    idx = komendy['idx']
                                    IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
                                    tt1 = happroc.spr_temp(resp[10], resp[11])
                                    komenda = '{"idx": ' + str(idx) + ', "nvalue" : 0, "svalue" : "' + str(tt1) + '"}'
                                    #print("Komenda to ...", komenda)
                                    client.publish("domoticz/in", komenda)
                        if resp[2] == 0x70 or resp[2] == 0x71: # blinds frame
                            #print("Ramka rolety", procent)
                            # FLAGI[1] is set by on_message when Domoticz requested a move;
                            # FLAGI[2] holds the target {'nodes', 'procent'}.
                            if FLAGI[1]:
                                map_temp = FLAGI[2]
                                nodes = map_temp.get('nodes')
                                procent = map_temp.get('procent')
                                if id_urzadzenia == nodes[2]:
                                    if resp[9] == 0: # only act when the blind is not moving
                                        map_temp2 = MAPOWANIE_HAP.get(nodes)
                                        czas_rol = map_temp2.get('czas_rol')
                                        komunikat = 0x10A0
                                        dane = [0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF]
                                        dane[3] = 2 ** (nodes[2] - 1)
                                        dane[4] = nodes[0]
                                        dane[5] = nodes[1]
                                        #stan = stan / 255 * 100
                                        # travel time = full-travel time * (current% - target%) / 100
                                        ile_czasu = czas_rol*((stan/2.55) - procent)/100
                                        if ile_czasu < 0:
                                            dane[2]=0x04 # direction: one way
                                        else:
                                            dane[2]=0x03 # direction: the other way
                                        print("**********************************************", komunikat, "dane", dane,"; procent", procent, "; stan rolety", stan, "; ile czasu", ile_czasu)
                                        wyslij(komunikat,dane)
                                        # second frame: stop after the computed time
                                        dane[2]=0
                                        dane[6]=int(round(abs(ile_czasu),0))
                                        print("**********************************************", komunikat, "dane", dane)
                                        wyslij(komunikat, dane)
                                        FLAGI[1] = 0
                        if resp[2] == 0x60 or resp[2] == 0x61: # dimmer frame
                            #print("Ramka dimera nr kanału", resp[7], resp[8])
                            if resp[7] == 1:
                                proc_dimer = int((resp[8]/255)*100)
                                print("Żarówka na %", proc_dimer)
                                komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
                                if komendy is not None:
                                    idx = komendy['idx']
                                    nazwa = komendy['nazwa']
                                    print("Stan dimera", nazwa, " ", proc_dimer)
                                    komenda = '{"command": "switchlight", "idx": ' + str(idx) + ', "switchcmd": "Set Level", "level": '+ str(proc_dimer) + ' }'
                                    IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
                                    client.publish("domoticz/in", komenda)
                                else:
                                    print('Brak opisu dimera !!!')
                                # NOTE(review): nesting of this second publish (nvalue 2 = percentage
                                # device) was ambiguous in the dump — confirm against the original.
                                komendy = MAPOWANIE_HAP.get((modul, grupa, id_urzadzenia), None)
                                if komendy is not None:
                                    idx = komendy['idx']
                                    IGNOROWANIE[idx] = IGNOROWANIE.get(idx, 0) + 1
                                    procent = stan / 255 * 100
                                    komenda = '{"idx": ' + str(idx) + ', "nvalue" : 2, "svalue" : "' + str(procent) + '"}'
                                    #print("Komenda to ...", komenda)
                                    client.publish("domoticz/in", komenda)
            # further program logic will go here :)
    except socket.error as bld:
        print("Error?")
        plik = open("errory.log", mode="a+")
        plik.write(time.asctime()+ ',' + str(bld)+ '\n')
    except Exception as bld:
        plik = open("errory.log", mode="a+")
        plik.write(time.asctime() + ',' + str(bld) + '\n')
        #pass
    finally:
        # NOTE(review): `plik` is only bound inside the except branches — this
        # close raises NameError when the loop exits without an error.
        plik.close()
        sock.close()
        conn.close()
if __name__ == "__main__":
    # Entry point: connect to the MQTT broker, register callbacks, start the
    # background loop, kick off the periodic pollers, then block in czytaj().
    print("Start")
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    # 7. IP and port of the mosquitto broker (default settings)
    #client.connect("127.0.0.1", 1883, 60)
    # NOTE(review): empty host string passed to connect() — the localhost line
    # above is commented out; confirm which broker address is intended.
    client.connect("", 1883, 60)
    #http: // vps354642.ovh.net /
    client.loop_start()
    odczyt_mod() # start the procedure polling every module (every 2 s)
    pytanie_o_status() # start the procedure executed every 10 minutes
    czytaj()
| cana",OKRES_CZASU, "Ignoruj", IGNOROWANIE)
OKRES_CZASU[1]=1
def on_connect(client, u | identifier_body |
main.py | import argparse
import os
import json
import numpy as np
import random
from tqdm import tqdm
import torch
from torch.optim import SGD, Adam
import torch.utils.data as data
from torch.utils.data import DataLoader, Subset
import torch.nn.functional as F
from torch import nn
import torchnet as tnt
from torchnet.engine import Engine
from utils import cast, data_parallel, print_tensor_dict, x_u_split, calculate_accuracy
from torch.backends import cudnn
from resnet import resnet
from datasets import get_CIFAR10, get_SVHN, Joint, get_AwA2
from flows import Invertible1x1Conv, NormalizingFlowModel
from spline_flows import NSF_CL
from torch.distributions import MultivariateNormal
import itertools
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.categorical import Categorical
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.beta import Beta
cudnn.benchmark = True
parser = argparse.ArgumentParser()
# Model options
parser.add_argument('--depth', default=28, type=int)
parser.add_argument('--width', default=2, type=float)
parser.add_argument('--dataset', default='cifar10', type=str)
parser.add_argument('--dataroot', default='.', type=str)
parser.add_argument('--dtype', default='float', type=str)
parser.add_argument('--groups', default=1, type=int)
parser.add_argument('--n_workers', default=4, type=int)
parser.add_argument('--seed', default=1, type=int)
# Training options
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--eval_batch_size', default=512, type=int)
parser.add_argument('--lr', default=0.1, type=float)
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--weight_decay', default=0.0005, type=float)
parser.add_argument('--epoch_step', default='[60, 120, 160]', type=str,
help='json list with epochs to drop lr on')
parser.add_argument('--lr_decay_ratio', default=0.2, type=float)
parser.add_argument('--resume', default='', type=str)
parser.add_argument('--note', default='', type=str)
parser.add_argument("--no_augment", action="store_false",
dest="augment", help="Augment training data")
# Device options
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--save', default='.', type=str,
help='save parameters and logs in this folder')
parser.add_argument('--ngpu', default=1, type=int,
help='number of GPUs to use for training')
parser.add_argument('--gpu_id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument("--download", action="store_true",
help="downloads dataset")
# SSL options
parser.add_argument("--ssl", action="store_true",
help="Do semi-supervised learning")
parser.add_argument("--num_labelled", type=int, default=4000,
help="Number of labelled data points")
parser.add_argument("--min_entropy", action="store_true",
help="Add the minimum entropy loss") | parser.add_argument("--lp", action="store_true",
help="Add the learned prior (LP) loss")
parser.add_argument("--semantic_loss", action="store_true",
help="Add the semantic loss")
parser.add_argument("--unl_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--unl2_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--num_hidden", type=int, default=10,
help="Dim of the latent dimension used")
def one_hot_embedding(labels, num_classes, device="cuda:0"):
    """Turn integer class labels into one-hot rows.

    Args:
        labels: (LongTensor) class indices, shape [N].
        num_classes: (int) total number of classes.
        device: device on which the identity matrix is built.

    Returns:
        (tensor) one-hot encoding, shape [N, num_classes].
    """
    # Row i of the identity matrix is the one-hot vector for class i.
    return torch.eye(num_classes).to(device)[labels]
def log_normal(x, m, log_v):
    """Log-density of a diagonal Gaussian, summed over the last dimension.

    All leading dimensions are treated as batch dimensions.

    Args:
        x: tensor (batch, ..., dim): observation
        m: tensor (batch, ..., dim): mean
        log_v: tensor (batch, ..., dim): log-variance

    Returns:
        tensor (batch, ...): per-sample log probability; the last (feature)
        dimension is reduced away.
    """
    dim = x.size(-1)
    # log N(x; m, v) = -0.5 * [ D*log(2*pi) + sum(log v) + sum((x-m)^2 / v) ]
    normaliser = dim * torch.log(2 * torch.tensor(np.pi))
    quadratic = ((x - m) ** 2 / log_v.exp()).sum(dim=-1)
    return -0.5 * (normaliser + log_v.sum(dim=-1) + quadratic)
def gaussian_parameters(h, dim=-1):
    """Split a raw tensor into Gaussian mean and variance halves.

    Thanks: https://github.com/divymurli/VAEs/blob/master/codebase/utils.py

    Args:
        h: tensor (batch, ..., dim, ...): arbitrary tensor whose size along
            `dim` is even
        dim: int: dimension along which to split into mean / raw variance

    Returns:
        m: tensor (batch, ..., dim / 2, ...): mean
        v: tensor (batch, ..., dim / 2, ...): variance (softplus-positive,
            floored at 1e-8)
    """
    half = h.size(dim) // 2
    m, raw = torch.split(h, half, dim=dim)
    # softplus keeps the variance strictly positive; the epsilon avoids zeros
    v = F.softplus(raw) + 1e-8
    return m, v
def check_dataset(dataset, dataroot, augment, download):
    """Resolve a dataset name to its loader's output.

    Known names: "cifar10", "svhn", "awa2". Any other value is returned
    unchanged (matching the original fall-through behaviour).
    """
    if dataset == "cifar10":
        return get_CIFAR10(augment, dataroot, download)
    if dataset == "svhn":
        return get_SVHN(augment, dataroot, download)
    if dataset == "awa2":
        return get_AwA2(augment, dataroot)
    return dataset
def check_manual_seed(seed):
    # Seed every RNG in play (Python, hash, NumPy, torch CPU and all CUDA
    # devices) and force deterministic cuDNN so runs are reproducible.
    # The CUDA calls are safe no-ops when no GPU is available.
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # benchmark=False + deterministic=True trades speed for reproducibility
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def reparameterise(mu, logvar):
    """Sample z ~ N(mu, exp(logvar)) via the reparameterisation trick."""
    sigma = (0.5 * logvar).exp()
    return mu + torch.randn_like(sigma) * sigma
def init_weights(m):
    """Xavier-uniform initialisation for Linear layers; bias filled with 0.01.

    Intended to be passed to ``nn.Module.apply``. Matches exactly ``nn.Linear``
    (``type`` check, not ``isinstance``), as in the original; other modules are
    left untouched.
    """
    if type(m) == nn.Linear:
        # torch.nn.init.xavier_uniform is a deprecated alias of the in-place
        # xavier_uniform_; same behaviour, no deprecation warning.
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
class DecoderModel(nn.Module):
    # Small VAE-style head over class logits: two MLPs produce a Gaussian
    # (mu, logvar) over a latent of size num_classes, a sample is drawn via
    # reparameterise(), and a third MLP maps the sample back to predictions.
    # NOTE(review): z_dim is accepted but never used — the latent size is
    # num_classes throughout; confirm whether z_dim was meant to size it.
    def __init__(self, num_classes, z_dim=2):
        super().__init__()
        self.mu = nn.Sequential(nn.Linear(num_classes, 50), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
        self.logvar = nn.Sequential(nn.Linear(num_classes, 50), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
        # self.net = nn.Sequential(nn.Linear(num_classes, 100), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
        self.net = nn.Sequential(nn.Linear(num_classes, 50), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
        self.apply(init_weights)

    def forward(self, x):
        # Encode x to a Gaussian, sample, decode the sample to predictions.
        mu = self.mu(x)
        logvar = self.logvar(x)
        z = reparameterise(mu, logvar)
        predictions = self.net(z)
        # Returns the predictions plus the Gaussian parameters for KL terms.
        return predictions, (mu, logvar)
def main():
    """Train a wide ResNet on CIFAR10/SVHN/AwA2, optionally semi-supervised
    with a min-entropy, semantic-loss or learned-prior (LP) regulariser,
    driving the loop through torchnet's Engine.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump —
    verify nesting, especially inside compute_loss.
    """
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    # device = "cpu"
    args = parser.parse_args()
    print('parsed options:', vars(args))
    epoch_step = json.loads(args.epoch_step)
    check_manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    ds = check_dataset(args.dataset, args.dataroot, args.augment, args.download)
    # AwA2 additionally ships the full label matrix (multi-label attributes);
    # the single-label datasets use an identity matrix instead.
    if args.dataset == "awa2":
        image_shape, num_classes, train_dataset, test_dataset, all_labels = ds
        all_labels = all_labels.to(device)
    else:
        image_shape, num_classes, train_dataset, test_dataset = ds
        all_labels = torch.eye(num_classes).to(device)
    if args.ssl:
        # Split into labelled/unlabelled, then oversample the labelled subset
        # so both halves of the Joint dataset have num_unlabelled examples.
        num_labelled = args.num_labelled
        num_unlabelled = len(train_dataset) - num_labelled
        if args.dataset == "awa2":
            labelled_set, unlabelled_set = data.random_split(train_dataset, [num_labelled, num_unlabelled])
        else:
            td_targets = train_dataset.targets if args.dataset == "cifar10" else train_dataset.labels
            labelled_idxs, unlabelled_idxs = x_u_split(td_targets, num_labelled, num_classes)
            labelled_set, unlabelled_set = [Subset(train_dataset, labelled_idxs),
                                            Subset(train_dataset, unlabelled_idxs)]
        labelled_set = data.ConcatDataset([labelled_set for i in range(num_unlabelled // num_labelled + 1)])
        labelled_set, _ = data.random_split(labelled_set, [num_unlabelled, len(labelled_set) - num_unlabelled])
        train_dataset = Joint(labelled_set, unlabelled_set)

    def _init_fn(worker_id):
        # Seed each DataLoader worker for reproducible augmentation.
        np.random.seed(args.seed)

    train_loader = data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.n_workers,
        worker_init_fn=_init_fn
    )
    test_loader = data.DataLoader(
        test_dataset,
        batch_size=args.eval_batch_size,
        shuffle=False,
        num_workers=args.n_workers,
        worker_init_fn=_init_fn
    )
    z_dim = args.num_hidden
    model, params = resnet(args.depth, args.width, num_classes, image_shape[0])
    # NOTE(review): model_y is only created under --lp, yet create_optimizer
    # below references model_y unconditionally — without --lp this raises
    # NameError; confirm intended usage.
    if args.lp:
        model_y = DecoderModel(num_classes, z_dim)
        model_y.to(device)
        model_y.apply(init_weights)
    # optimizer_y = Adam(model_y.get_decoder_params(), lr=1e-3, weight_decay=1e-5)

    def create_optimizer(args, lr):
        # SGD over the ResNet functional params plus the LP head's params.
        print('creating optimizer with lr = ', lr)
        params_ = [v for v in params.values() if v.requires_grad]
        # params_ += model_y.get_encoder_params()
        params_ += list(model_y.parameters())
        return SGD(params_, lr, momentum=0.9, weight_decay=args.weight_decay)

    optimizer = create_optimizer(args, args.lr)
    epoch = 0
    print('\nParameters:')
    print_tensor_dict(params)
    n_parameters = sum(p.numel() for p in params.values() if p.requires_grad)
    print('\nTotal number of parameters:', n_parameters)
    meter_loss = tnt.meter.AverageValueMeter()
    # AwA2 is multi-label, so accuracy is averaged per batch rather than
    # computed by the single-label ClassErrorMeter.
    if args.dataset == "awa2":
        classacc = tnt.meter.AverageValueMeter()
    else:
        classacc = tnt.meter.ClassErrorMeter(accuracy=True)
    timer_train = tnt.meter.TimeMeter('s')
    timer_test = tnt.meter.TimeMeter('s')
    if not os.path.exists(args.save):
        os.mkdir(args.save)
    # counter tracks finished epochs; the regularisers ramp up based on it.
    global counter, aggressive
    counter = 0
    aggressive = False
    # (removed stale commented-out CUDA-diagnostics prints)

    def compute_loss(sample):
        # Training loss. Prior moments below follow the softmax-Gaussian
        # approximation of a symmetric Dirichlet(alpha) prior.
        alpha = 1./num_classes
        mu_prior = np.log(alpha) - 1/num_classes*num_classes*np.log(alpha)
        sigma_prior = (1. / alpha * (1 - 2. / num_classes) + 1 / (num_classes ** 2) * num_classes / alpha)
        log_det_sigma = num_classes * np.log(sigma_prior)
        model_y.train()
        if not args.ssl:
            # Fully supervised path.
            inputs = cast(sample[0], args.dtype)
            targets = cast(sample[1], 'long')
            y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
            if args.dataset == "awa2":
                return F.binary_cross_entropy_with_logits(y, targets.float()), y
            else:
                return F.cross_entropy(y, targets), y
        else:
            # Semi-supervised path: sample = (labelled batch, unlabelled batch).
            global counter
            l = sample[0]
            u = sample[1]
            inputs_l = cast(l[0], args.dtype)
            targets_l = cast(l[1], 'long')
            inputs_u = cast(u[0], args.dtype)
            y_l = data_parallel(model, inputs_l, params, sample[2], list(range(args.ngpu))).float()
            y_u = data_parallel(model, inputs_u, params, sample[2], list(range(args.ngpu))).float()
            if args.dataset == "awa2":
                loss = F.binary_cross_entropy_with_logits(y_l, targets_l.float())
            else:
                loss = F.cross_entropy(y_l, targets_l)
            if args.min_entropy:
                # Entropy minimisation on unlabelled predictions, enabled
                # only after a 10-epoch warm-up.
                if args.dataset == "awa2":
                    labels_pred = F.sigmoid(y_u)
                    entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
                else:
                    labels_pred = F.softmax(y_u, dim=1)
                    entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
                if counter >= 10:
                    loss_entropy = args.unl_weight * torch.mean(entropy)
                    loss += loss_entropy
            elif args.semantic_loss:
                # Semantic loss: negative log-probability that the predicted
                # bits match at least one valid label row in all_labels.
                if args.dataset == "awa2":
                    labels_pred = F.sigmoid(y_u)
                else:
                    labels_pred = F.softmax(y_u, dim=1)
                part1 = torch.stack([labels_pred ** all_labels[i] for i in range(all_labels.shape[0])])
                part2 = torch.stack([(1 - labels_pred) ** (1 - all_labels[i]) for i in range(all_labels.shape[0])])
                sem_loss = -torch.log(torch.sum(torch.prod(part1 * part2, dim=2), dim=0))
                if counter >= 10:
                    semantic_loss = args.unl_weight * torch.mean(sem_loss)
                    loss += semantic_loss
            elif args.lp:
                # Learned-prior head: averaged reconstruction loss over 10
                # latent samples plus a KL to the (mu_prior, sigma_prior)
                # Gaussian, with the KL weight annealed from epoch 20.
                weight = np.min([1., np.max([0, 0.05 * (counter - 20)])])
                recon_losses = []
                for i in range(10):
                    y_preds, latent = model_y(y_l)
                    recon_losses.append(F.cross_entropy(y_preds, targets_l, reduction="none"))
                # NOTE(review): this overwrites the supervised loss computed
                # above rather than adding to it — confirm that is intended.
                loss = torch.stack(recon_losses, dim=1).mean(dim=1).mean()
                mu1, lv1 = latent
                mu2, lv2 = mu_prior, np.log(sigma_prior)
                kld = weight * (0.5 * ((lv2 - lv1) + (lv1.exp() + (mu1 - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
                loss += kld
                # now do the unsup part
                if counter > 50:
                    y_preds_u, latent_u = model_y(y_u)
                    log_prob = torch.log_softmax(y_preds_u, dim=1)
                    # evaluate the kl for each cluster
                    mu1u, lv1u = latent_u
                    kldu = weight * (0.5 * ((lv2 - lv1u) + (lv1u.exp() + (mu1u - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
                    unsup_loss = (log_prob.exp()*(-log_prob)).sum(dim=1).mean() + kldu
                    loss += args.unl_weight * unsup_loss
                return loss, y_preds
            return loss, y_l

    def compute_loss_test(sample):
        # Evaluation loss; with --lp the LP head's reconstruction is scored.
        model_y.eval()
        inputs = cast(sample[0], args.dtype)
        targets = cast(sample[1], 'long')
        y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
        if args.lp:
            y_full, latent = model_y(y)
            # q_mu, q_logvar, log_alpha = latent
            # preds = (log_alpha.exp().unsqueeze(-1) * y_full).sum(dim=1)
            #
            # tgts = one_hot_embedding(targets, num_classes, device=device)
            # recon_loss = F.binary_cross_entropy_with_logits(y_full, tgts)\
            recon_loss = F.cross_entropy(y_full, targets)
            return recon_loss.mean(), y_full
        if args.dataset == "awa2":
            return F.binary_cross_entropy_with_logits(y, targets.float()), y
        else:
            return F.cross_entropy(y, targets), y

    def log(t, state):
        # Checkpoint params + optimizer and append a JSON stats line.
        torch.save(dict(params=params, epoch=t['epoch'], optimizer=state['optimizer'].state_dict()),
                   os.path.join(args.save, 'model.pt7'))
        z = {**vars(args), **t}
        with open(os.path.join(args.save, 'log.txt'), 'a') as flog:
            flog.write('json_stats: ' + json.dumps(z) + '\n')
        print(z)

    def on_sample(state):
        # Append the train/eval flag so compute_loss sees it as sample[2].
        state['sample'].append(state['train'])

    def on_forward(state):
        # Update loss/accuracy meters; in SSL training the labelled targets
        # live at sample[0][1] instead of sample[1].
        loss = float(state['loss'])
        if args.dataset == "awa2":
            if not args.ssl or not state['train']:
                acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][1])
            else:
                acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][0][1])
            classacc.add(acc)
        else:
            if not args.ssl or not state['train']:
                classacc.add(state['output'].data, state['sample'][1])
            else:
                classacc.add(state['output'].data, state['sample'][0][1])
        meter_loss.add(loss)
        if state['train']:
            state['iterator'].set_postfix(loss=loss)

    def on_start(state):
        state['epoch'] = epoch

    def on_start_epoch(state):
        # Reset meters, wrap the loader in tqdm, and decay the lr on schedule.
        classacc.reset()
        meter_loss.reset()
        timer_train.reset()
        state['iterator'] = tqdm(train_loader, dynamic_ncols=True)
        epoch = state['epoch'] + 1
        if epoch in epoch_step:
            lr = state['optimizer'].param_groups[0]['lr']
            state['optimizer'] = create_optimizer(args, lr * args.lr_decay_ratio)

    def on_end_epoch(state):
        # Evaluate on the test set, log stats, and advance the global ramp-up
        # counter used by the SSL regularisers.
        train_loss = meter_loss.value()
        train_acc = classacc.value()[0]
        train_time = timer_train.value()
        meter_loss.reset()
        classacc.reset()
        timer_test.reset()
        with torch.no_grad():
            engine.test(compute_loss_test, test_loader)
        test_acc = classacc.value()[0]
        # NOTE(review): log() returns None, so this prints "None" after the
        # stats line — presumably harmless.
        print(log({
            "train_loss": train_loss[0],
            "train_acc": train_acc,
            "test_loss": meter_loss.value()[0],
            "test_acc": test_acc,
            "epoch": state['epoch'],
            "num_classes": num_classes,
            "n_parameters": n_parameters,
            "train_time": train_time,
            "test_time": timer_test.value(),
        }, state))
        print('==> id: %s (%d/%d), test_acc: \33[91m%.2f\033[0m' %
              (args.save, state['epoch'], args.epochs, test_acc))
        global counter
        counter += 1

    engine = Engine()
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.hooks['on_start'] = on_start
    engine.train(compute_loss, train_loader, args.epochs, optimizer)
if __name__ == '__main__':
main() | random_line_split | |
main.py | import argparse
import os
import json
import numpy as np
import random
from tqdm import tqdm
import torch
from torch.optim import SGD, Adam
import torch.utils.data as data
from torch.utils.data import DataLoader, Subset
import torch.nn.functional as F
from torch import nn
import torchnet as tnt
from torchnet.engine import Engine
from utils import cast, data_parallel, print_tensor_dict, x_u_split, calculate_accuracy
from torch.backends import cudnn
from resnet import resnet
from datasets import get_CIFAR10, get_SVHN, Joint, get_AwA2
from flows import Invertible1x1Conv, NormalizingFlowModel
from spline_flows import NSF_CL
from torch.distributions import MultivariateNormal
import itertools
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.categorical import Categorical
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.beta import Beta
cudnn.benchmark = True
parser = argparse.ArgumentParser()
# Model options
parser.add_argument('--depth', default=28, type=int)
parser.add_argument('--width', default=2, type=float)
parser.add_argument('--dataset', default='cifar10', type=str)
parser.add_argument('--dataroot', default='.', type=str)
parser.add_argument('--dtype', default='float', type=str)
parser.add_argument('--groups', default=1, type=int)
parser.add_argument('--n_workers', default=4, type=int)
parser.add_argument('--seed', default=1, type=int)
# Training options
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--eval_batch_size', default=512, type=int)
parser.add_argument('--lr', default=0.1, type=float)
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--weight_decay', default=0.0005, type=float)
parser.add_argument('--epoch_step', default='[60, 120, 160]', type=str,
help='json list with epochs to drop lr on')
parser.add_argument('--lr_decay_ratio', default=0.2, type=float)
parser.add_argument('--resume', default='', type=str)
parser.add_argument('--note', default='', type=str)
parser.add_argument("--no_augment", action="store_false",
dest="augment", help="Augment training data")
# Device options
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--save', default='.', type=str,
help='save parameters and logs in this folder')
parser.add_argument('--ngpu', default=1, type=int,
help='number of GPUs to use for training')
parser.add_argument('--gpu_id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument("--download", action="store_true",
help="downloads dataset")
# SSL options
parser.add_argument("--ssl", action="store_true",
help="Do semi-supervised learning")
parser.add_argument("--num_labelled", type=int, default=4000,
help="Number of labelled data points")
parser.add_argument("--min_entropy", action="store_true",
help="Add the minimum entropy loss")
parser.add_argument("--lp", action="store_true",
help="Add the learned prior (LP) loss")
parser.add_argument("--semantic_loss", action="store_true",
help="Add the semantic loss")
parser.add_argument("--unl_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--unl2_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--num_hidden", type=int, default=10,
help="Dim of the latent dimension used")
def one_hot_embedding(labels, num_classes, device="cuda:0"):
"""Embedding labels to one-hot form.
Args:
labels: (LongTensor) class labels, sized [N,].
num_classes: (int) number of classes.
Returns:
(tensor) encoded labels, sized [N, #classes].
"""
y = torch.eye(num_classes).to(device)
return y[labels]
def log_normal(x, m, log_v):
"""
Computes the elem-wise log probability of a Gaussian and then sum over the
last dim. Basically we're assuming all dims are batch dims except for the
last dim.
Args:
x: tensor: (batch, ..., dim): Observation
m: tensor: (batch, ..., dim): Mean
v: tensor: (batch, ..., dim): Variance
Return:
kl: tensor: (batch1, batch2, ...): log probability of each sample. Note
that the summation dimension (dim=-1) is not kept
"""
################################################################################
# TODO: Modify/complete the code here
# Compute element-wise log probability of normal and remember to sum over
# the last dimension
################################################################################
# print("q_m", m.size())
# print("q_v", v.size())
const = -0.5 * x.size(-1) * torch.log(2 * torch.tensor(np.pi))
# print(const.size())
log_det = -0.5 * torch.sum(log_v, dim=-1)
# print("log_det", log_det.size())
log_exp = -0.5 * torch.sum((x - m) ** 2 / (log_v.exp()), dim=-1)
log_prob = const + log_det + log_exp
################################################################################
# End of code modification
################################################################################
return log_prob
def gaussian_parameters(h, dim=-1):
"""
Thanks: https://github.com/divymurli/VAEs/blob/master/codebase/utils.py
Converts generic real-valued representations into mean and variance
parameters of a Gaussian distribution
Args:
h: tensor: (batch, ..., dim, ...): Arbitrary tensor
dim: int: (): Dimension along which to split the tensor for mean and
variance
Returns:z
m: tensor: (batch, ..., dim / 2, ...): Mean
v: tensor: (batch, ..., dim / 2, ...): Variance
"""
m, h = torch.split(h, h.size(dim) // 2, dim=dim)
v = F.softplus(h) + 1e-8
return m, v
def check_dataset(dataset, dataroot, augment, download):
if dataset == "cifar10":
dataset = get_CIFAR10(augment, dataroot, download)
if dataset == "svhn":
dataset = get_SVHN(augment, dataroot, download)
if dataset == "awa2":
dataset = get_AwA2(augment, dataroot)
return dataset
def check_manual_seed(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def reparameterise(mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps*std
def init_weights(m):
    """Xavier-uniform initialisation for Linear layers; bias filled with 0.01.

    Intended to be passed to ``nn.Module.apply``. Matches exactly ``nn.Linear``
    (``type`` check, not ``isinstance``), as in the original; other modules are
    left untouched.
    """
    if type(m) == nn.Linear:
        # torch.nn.init.xavier_uniform is a deprecated alias of the in-place
        # xavier_uniform_; same behaviour, no deprecation warning.
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
class DecoderModel(nn.Module):
def __init__(self, num_classes, z_dim=2):
super().__init__()
self.mu = nn.Sequential(nn.Linear(num_classes, 50), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
self.logvar = nn.Sequential(nn.Linear(num_classes, 50), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
# self.net = nn.Sequential(nn.Linear(num_classes, 100), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
self.net = nn.Sequential(nn.Linear(num_classes, 50), nn.LeakyReLU(.2), nn.Linear(50, num_classes))
self.apply(init_weights)
def forward(self, x):
mu = self.mu(x)
logvar = self.logvar(x)
z = reparameterise(mu, logvar)
predictions = self.net(z)
return predictions, (mu, logvar)
def main():
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# device = "cpu"
args = parser.parse_args()
print('parsed options:', vars(args))
epoch_step = json.loads(args.epoch_step)
check_manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
ds = check_dataset(args.dataset, args.dataroot, args.augment, args.download)
if args.dataset == "awa2":
image_shape, num_classes, train_dataset, test_dataset, all_labels = ds
all_labels = all_labels.to(device)
else:
image_shape, num_classes, train_dataset, test_dataset = ds
all_labels = torch.eye(num_classes).to(device)
if args.ssl:
num_labelled = args.num_labelled
num_unlabelled = len(train_dataset) - num_labelled
if args.dataset == "awa2":
labelled_set, unlabelled_set = data.random_split(train_dataset, [num_labelled, num_unlabelled])
else:
td_targets = train_dataset.targets if args.dataset == "cifar10" else train_dataset.labels
labelled_idxs, unlabelled_idxs = x_u_split(td_targets, num_labelled, num_classes)
labelled_set, unlabelled_set = [Subset(train_dataset, labelled_idxs),
Subset(train_dataset, unlabelled_idxs)]
labelled_set = data.ConcatDataset([labelled_set for i in range(num_unlabelled // num_labelled + 1)])
labelled_set, _ = data.random_split(labelled_set, [num_unlabelled, len(labelled_set) - num_unlabelled])
train_dataset = Joint(labelled_set, unlabelled_set)
def _init_fn(worker_id):
np.random.seed(args.seed)
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.n_workers,
worker_init_fn=_init_fn
)
test_loader = data.DataLoader(
test_dataset,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.n_workers,
worker_init_fn=_init_fn
)
z_dim = args.num_hidden
model, params = resnet(args.depth, args.width, num_classes, image_shape[0])
if args.lp:
model_y = DecoderModel(num_classes, z_dim)
model_y.to(device)
model_y.apply(init_weights)
# optimizer_y = Adam(model_y.get_decoder_params(), lr=1e-3, weight_decay=1e-5)
def create_optimizer(args, lr):
print('creating optimizer with lr = ', lr)
params_ = [v for v in params.values() if v.requires_grad]
# params_ += model_y.get_encoder_params()
params_ += list(model_y.parameters())
return SGD(params_, lr, momentum=0.9, weight_decay=args.weight_decay)
optimizer = create_optimizer(args, args.lr)
epoch = 0
print('\nParameters:')
print_tensor_dict(params)
n_parameters = sum(p.numel() for p in params.values() if p.requires_grad)
print('\nTotal number of parameters:', n_parameters)
meter_loss = tnt.meter.AverageValueMeter()
if args.dataset == "awa2":
classacc = tnt.meter.AverageValueMeter()
else:
classacc = tnt.meter.ClassErrorMeter(accuracy=True)
timer_train = tnt.meter.TimeMeter('s')
timer_test = tnt.meter.TimeMeter('s')
if not os.path.exists(args.save):
os.mkdir(args.save)
global counter, aggressive
counter = 0
aggressive = False
# device = torch.cuda.current_device()
# print(f"On GPU: {device}")
#
# print(f"{torch.cuda.device(device)}")
#
# print(f"# devices: {torch.cuda.device_count()}")
#
# print(f"Device name: {torch.cuda.get_device_name(device)}")
#
# print(f"{torch.cuda.is_available()}")
def compute_loss(sample):
alpha = 1./num_classes
mu_prior = np.log(alpha) - 1/num_classes*num_classes*np.log(alpha)
sigma_prior = (1. / alpha * (1 - 2. / num_classes) + 1 / (num_classes ** 2) * num_classes / alpha)
log_det_sigma = num_classes * np.log(sigma_prior)
model_y.train()
if not args.ssl:
inputs = cast(sample[0], args.dtype)
targets = cast(sample[1], 'long')
y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
if args.dataset == "awa2":
|
else:
return F.cross_entropy(y, targets), y
else:
global counter
l = sample[0]
u = sample[1]
inputs_l = cast(l[0], args.dtype)
targets_l = cast(l[1], 'long')
inputs_u = cast(u[0], args.dtype)
y_l = data_parallel(model, inputs_l, params, sample[2], list(range(args.ngpu))).float()
y_u = data_parallel(model, inputs_u, params, sample[2], list(range(args.ngpu))).float()
if args.dataset == "awa2":
loss = F.binary_cross_entropy_with_logits(y_l, targets_l.float())
else:
loss = F.cross_entropy(y_l, targets_l)
if args.min_entropy:
if args.dataset == "awa2":
labels_pred = F.sigmoid(y_u)
entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
else:
labels_pred = F.softmax(y_u, dim=1)
entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
if counter >= 10:
loss_entropy = args.unl_weight * torch.mean(entropy)
loss += loss_entropy
elif args.semantic_loss:
if args.dataset == "awa2":
labels_pred = F.sigmoid(y_u)
else:
labels_pred = F.softmax(y_u, dim=1)
part1 = torch.stack([labels_pred ** all_labels[i] for i in range(all_labels.shape[0])])
part2 = torch.stack([(1 - labels_pred) ** (1 - all_labels[i]) for i in range(all_labels.shape[0])])
sem_loss = -torch.log(torch.sum(torch.prod(part1 * part2, dim=2), dim=0))
if counter >= 10:
semantic_loss = args.unl_weight * torch.mean(sem_loss)
loss += semantic_loss
elif args.lp:
weight = np.min([1., np.max([0, 0.05 * (counter - 20)])])
recon_losses = []
for i in range(10):
y_preds, latent = model_y(y_l)
recon_losses.append(F.cross_entropy(y_preds, targets_l, reduction="none"))
loss = torch.stack(recon_losses, dim=1).mean(dim=1).mean()
mu1, lv1 = latent
mu2, lv2 = mu_prior, np.log(sigma_prior)
kld = weight * (0.5 * ((lv2 - lv1) + (lv1.exp() + (mu1 - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
loss += kld
# now do the unsup part
if counter > 50:
y_preds_u, latent_u = model_y(y_u)
log_prob = torch.log_softmax(y_preds_u, dim=1)
# evaluate the kl for each cluster
mu1u, lv1u = latent_u
kldu = weight * (0.5 * ((lv2 - lv1u) + (lv1u.exp() + (mu1u - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
unsup_loss = (log_prob.exp()*(-log_prob)).sum(dim=1).mean() + kldu
loss += args.unl_weight * unsup_loss
return loss, y_preds
return loss, y_l
def compute_loss_test(sample):
model_y.eval()
inputs = cast(sample[0], args.dtype)
targets = cast(sample[1], 'long')
y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
if args.lp:
y_full, latent = model_y(y)
# q_mu, q_logvar, log_alpha = latent
# preds = (log_alpha.exp().unsqueeze(-1) * y_full).sum(dim=1)
#
# tgts = one_hot_embedding(targets, num_classes, device=device)
# recon_loss = F.binary_cross_entropy_with_logits(y_full, tgts)\
recon_loss = F.cross_entropy(y_full, targets)
return recon_loss.mean(), y_full
if args.dataset == "awa2":
return F.binary_cross_entropy_with_logits(y, targets.float()), y
else:
return F.cross_entropy(y, targets), y
def log(t, state):
torch.save(dict(params=params, epoch=t['epoch'], optimizer=state['optimizer'].state_dict()),
os.path.join(args.save, 'model.pt7'))
z = {**vars(args), **t}
with open(os.path.join(args.save, 'log.txt'), 'a') as flog:
flog.write('json_stats: ' + json.dumps(z) + '\n')
print(z)
def on_sample(state):
state['sample'].append(state['train'])
def on_forward(state):
loss = float(state['loss'])
if args.dataset == "awa2":
if not args.ssl or not state['train']:
acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][1])
else:
acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][0][1])
classacc.add(acc)
else:
if not args.ssl or not state['train']:
classacc.add(state['output'].data, state['sample'][1])
else:
classacc.add(state['output'].data, state['sample'][0][1])
meter_loss.add(loss)
if state['train']:
state['iterator'].set_postfix(loss=loss)
def on_start(state):
state['epoch'] = epoch
def on_start_epoch(state):
classacc.reset()
meter_loss.reset()
timer_train.reset()
state['iterator'] = tqdm(train_loader, dynamic_ncols=True)
epoch = state['epoch'] + 1
if epoch in epoch_step:
lr = state['optimizer'].param_groups[0]['lr']
state['optimizer'] = create_optimizer(args, lr * args.lr_decay_ratio)
def on_end_epoch(state):
train_loss = meter_loss.value()
train_acc = classacc.value()[0]
train_time = timer_train.value()
meter_loss.reset()
classacc.reset()
timer_test.reset()
with torch.no_grad():
engine.test(compute_loss_test, test_loader)
test_acc = classacc.value()[0]
print(log({
"train_loss": train_loss[0],
"train_acc": train_acc,
"test_loss": meter_loss.value()[0],
"test_acc": test_acc,
"epoch": state['epoch'],
"num_classes": num_classes,
"n_parameters": n_parameters,
"train_time": train_time,
"test_time": timer_test.value(),
}, state))
print('==> id: %s (%d/%d), test_acc: \33[91m%.2f\033[0m' %
(args.save, state['epoch'], args.epochs, test_acc))
global counter
counter += 1
engine = Engine()
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.hooks['on_start'] = on_start
engine.train(compute_loss, train_loader, args.epochs, optimizer)
if __name__ == '__main__':
main()
| return F.binary_cross_entropy_with_logits(y, targets.float()), y | conditional_block |
main.py | import argparse
import os
import json
import numpy as np
import random
from tqdm import tqdm
import torch
from torch.optim import SGD, Adam
import torch.utils.data as data
from torch.utils.data import DataLoader, Subset
import torch.nn.functional as F
from torch import nn
import torchnet as tnt
from torchnet.engine import Engine
from utils import cast, data_parallel, print_tensor_dict, x_u_split, calculate_accuracy
from torch.backends import cudnn
from resnet import resnet
from datasets import get_CIFAR10, get_SVHN, Joint, get_AwA2
from flows import Invertible1x1Conv, NormalizingFlowModel
from spline_flows import NSF_CL
from torch.distributions import MultivariateNormal
import itertools
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.categorical import Categorical
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.beta import Beta
cudnn.benchmark = True
parser = argparse.ArgumentParser()
# Model options
parser.add_argument('--depth', default=28, type=int)
parser.add_argument('--width', default=2, type=float)
parser.add_argument('--dataset', default='cifar10', type=str)
parser.add_argument('--dataroot', default='.', type=str)
parser.add_argument('--dtype', default='float', type=str)
parser.add_argument('--groups', default=1, type=int)
parser.add_argument('--n_workers', default=4, type=int)
parser.add_argument('--seed', default=1, type=int)
# Training options
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--eval_batch_size', default=512, type=int)
parser.add_argument('--lr', default=0.1, type=float)
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--weight_decay', default=0.0005, type=float)
parser.add_argument('--epoch_step', default='[60, 120, 160]', type=str,
help='json list with epochs to drop lr on')
parser.add_argument('--lr_decay_ratio', default=0.2, type=float)
parser.add_argument('--resume', default='', type=str)
parser.add_argument('--note', default='', type=str)
parser.add_argument("--no_augment", action="store_false",
dest="augment", help="Augment training data")
# Device options
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--save', default='.', type=str,
help='save parameters and logs in this folder')
parser.add_argument('--ngpu', default=1, type=int,
help='number of GPUs to use for training')
parser.add_argument('--gpu_id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument("--download", action="store_true",
help="downloads dataset")
# SSL options
parser.add_argument("--ssl", action="store_true",
help="Do semi-supervised learning")
parser.add_argument("--num_labelled", type=int, default=4000,
help="Number of labelled data points")
parser.add_argument("--min_entropy", action="store_true",
help="Add the minimum entropy loss")
parser.add_argument("--lp", action="store_true",
help="Add the learned prior (LP) loss")
parser.add_argument("--semantic_loss", action="store_true",
help="Add the semantic loss")
parser.add_argument("--unl_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--unl2_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--num_hidden", type=int, default=10,
help="Dim of the latent dimension used")
def one_hot_embedding(labels, num_classes, device="cuda:0"):
    """Convert integer class labels to one-hot vectors.

    Args:
        labels: (LongTensor) class labels, sized [N,].
        num_classes: (int) number of classes.
        device: device on which the encoding is allocated.

    Returns:
        (tensor) encoded labels, sized [N, #classes].
    """
    # Rows of the identity matrix are exactly the one-hot codes; indexing
    # with the label tensor gathers one row per label.
    identity = torch.eye(num_classes, device=device)
    return identity[labels]
def log_normal(x, m, log_v):
    """Element-wise log-density of a diagonal Gaussian, summed over the last dim.

    All leading dimensions are treated as batch dimensions; only the final
    (event) dimension is reduced.

    Args:
        x: tensor: (batch, ..., dim): observation.
        m: tensor: (batch, ..., dim): mean.
        log_v: tensor: (batch, ..., dim): log-variance (the original docstring
            called this `v`/variance, which did not match the signature).

    Returns:
        tensor: (batch, ...): log probability of each sample. The summation
        dimension (dim=-1) is not kept.
    """
    # Normalisation constant -(d/2)*log(2*pi), computed as a Python float
    # instead of building a fresh tensor on every call as the original did.
    const = -0.5 * x.size(-1) * np.log(2 * np.pi)
    # -(1/2) * sum(log sigma^2): log-determinant of the diagonal covariance.
    log_det = -0.5 * torch.sum(log_v, dim=-1)
    # Mahalanobis term for a diagonal covariance.
    log_exp = -0.5 * torch.sum((x - m) ** 2 / log_v.exp(), dim=-1)
    return const + log_det + log_exp
def gaussian_parameters(h, dim=-1):
    """Split a real-valued tensor into Gaussian mean and variance parameters.

    The tensor is cut in half along `dim`; the first half is the mean and the
    second half is squashed through softplus (plus a small epsilon for
    numerical stability) to give a strictly positive variance.

    Thanks: https://github.com/divymurli/VAEs/blob/master/codebase/utils.py

    Args:
        h: tensor: (batch, ..., dim, ...): arbitrary tensor.
        dim: int: dimension along which to split into mean and variance.

    Returns:
        m: tensor: (batch, ..., dim / 2, ...): mean.
        v: tensor: (batch, ..., dim / 2, ...): variance.
    """
    half = h.size(dim) // 2
    mean, raw = torch.split(h, half, dim=dim)
    variance = F.softplus(raw) + 1e-8
    return mean, variance
def check_dataset(dataset, dataroot, augment, download):
    """Resolve a dataset name to its loaded dataset tuple.

    Args:
        dataset: one of "cifar10", "svhn", "awa2".
        dataroot: root directory for the dataset files.
        augment: whether to apply training-time augmentation.
        download: whether to download the dataset if missing (not used by awa2).

    Returns:
        Whatever the matching get_* loader returns (image shape, class count,
        train/test datasets, and for awa2 the label matrix as well).

    Raises:
        ValueError: if the dataset name is not recognised.
    """
    # The original reassigned `dataset` to the loaded tuple and then kept
    # comparing it against strings (only correct by luck), and silently
    # returned the raw name for unknown datasets, which crashed later at
    # unpacking. An elif chain plus an explicit error fixes both.
    if dataset == "cifar10":
        return get_CIFAR10(augment, dataroot, download)
    elif dataset == "svhn":
        return get_SVHN(augment, dataroot, download)
    elif dataset == "awa2":
        return get_AwA2(augment, dataroot)
    raise ValueError("unknown dataset: %r" % (dataset,))
def check_manual_seed(seed):
    """Seed every RNG in sight so runs are reproducible.

    Covers Python's `random`, NumPy, and PyTorch (CPU plus all CUDA devices),
    and switches cuDNN into deterministic mode.

    NOTE(review): assigning PYTHONHASHSEED at runtime does not change hash
    randomisation of the *current* interpreter — it only affects child
    processes; confirm that is the intent.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; also disables the benchmark autotuner
    # (overriding the module-level `cudnn.benchmark = True`).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def reparameterise(mu, logvar):
    """Draw z ~ N(mu, exp(logvar)) via the reparameterisation trick."""
    # sigma = exp(logvar / 2); sampling noise separately keeps the draw
    # differentiable with respect to mu and logvar.
    sigma = torch.exp(logvar * 0.5)
    return mu + torch.randn_like(sigma) * sigma
def init_weights(m):
    """Weight-init hook for `nn.Module.apply`: Xavier-uniform weights and a
    constant 0.01 bias on every linear layer; other modules are left alone.

    Args:
        m: a submodule visited by `apply`.
    """
    if isinstance(m, nn.Linear):
        # `torch.nn.init.xavier_uniform` is deprecated; the trailing
        # underscore variant is the supported in-place equivalent.
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
class DecoderModel(nn.Module):
    """Stochastic decoder head for the learned-prior (--lp) objective.

    Encodes class logits into a Gaussian latent (mu, logvar), samples it with
    the reparameterisation trick, and decodes the sample back into class
    predictions.

    NOTE(review): `z_dim` is accepted but unused — every layer is sized from
    `num_classes`; confirm whether the latent was meant to be `z_dim`-wide.
    """

    def __init__(self, num_classes, z_dim=2):
        super().__init__()

        def _mlp():
            # Shared two-layer architecture used by all three heads.
            return nn.Sequential(
                nn.Linear(num_classes, 50),
                nn.LeakyReLU(.2),
                nn.Linear(50, num_classes),
            )

        self.mu = _mlp()
        self.logvar = _mlp()
        self.net = _mlp()
        self.apply(init_weights)

    def forward(self, x):
        """Return (predictions, (mu, logvar)) for input logits x."""
        mu, logvar = self.mu(x), self.logvar(x)
        sample = reparameterise(mu, logvar)
        return self.net(sample), (mu, logvar)
def main():
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# device = "cpu"
args = parser.parse_args()
print('parsed options:', vars(args))
epoch_step = json.loads(args.epoch_step)
check_manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
ds = check_dataset(args.dataset, args.dataroot, args.augment, args.download)
if args.dataset == "awa2":
image_shape, num_classes, train_dataset, test_dataset, all_labels = ds
all_labels = all_labels.to(device)
else:
image_shape, num_classes, train_dataset, test_dataset = ds
all_labels = torch.eye(num_classes).to(device)
if args.ssl:
num_labelled = args.num_labelled
num_unlabelled = len(train_dataset) - num_labelled
if args.dataset == "awa2":
labelled_set, unlabelled_set = data.random_split(train_dataset, [num_labelled, num_unlabelled])
else:
td_targets = train_dataset.targets if args.dataset == "cifar10" else train_dataset.labels
labelled_idxs, unlabelled_idxs = x_u_split(td_targets, num_labelled, num_classes)
labelled_set, unlabelled_set = [Subset(train_dataset, labelled_idxs),
Subset(train_dataset, unlabelled_idxs)]
labelled_set = data.ConcatDataset([labelled_set for i in range(num_unlabelled // num_labelled + 1)])
labelled_set, _ = data.random_split(labelled_set, [num_unlabelled, len(labelled_set) - num_unlabelled])
train_dataset = Joint(labelled_set, unlabelled_set)
def _init_fn(worker_id):
np.random.seed(args.seed)
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.n_workers,
worker_init_fn=_init_fn
)
test_loader = data.DataLoader(
test_dataset,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.n_workers,
worker_init_fn=_init_fn
)
z_dim = args.num_hidden
model, params = resnet(args.depth, args.width, num_classes, image_shape[0])
if args.lp:
model_y = DecoderModel(num_classes, z_dim)
model_y.to(device)
model_y.apply(init_weights)
# optimizer_y = Adam(model_y.get_decoder_params(), lr=1e-3, weight_decay=1e-5)
def create_optimizer(args, lr):
print('creating optimizer with lr = ', lr)
params_ = [v for v in params.values() if v.requires_grad]
# params_ += model_y.get_encoder_params()
params_ += list(model_y.parameters())
return SGD(params_, lr, momentum=0.9, weight_decay=args.weight_decay)
optimizer = create_optimizer(args, args.lr)
epoch = 0
print('\nParameters:')
print_tensor_dict(params)
n_parameters = sum(p.numel() for p in params.values() if p.requires_grad)
print('\nTotal number of parameters:', n_parameters)
meter_loss = tnt.meter.AverageValueMeter()
if args.dataset == "awa2":
classacc = tnt.meter.AverageValueMeter()
else:
classacc = tnt.meter.ClassErrorMeter(accuracy=True)
timer_train = tnt.meter.TimeMeter('s')
timer_test = tnt.meter.TimeMeter('s')
if not os.path.exists(args.save):
os.mkdir(args.save)
global counter, aggressive
counter = 0
aggressive = False
# device = torch.cuda.current_device()
# print(f"On GPU: {device}")
#
# print(f"{torch.cuda.device(device)}")
#
# print(f"# devices: {torch.cuda.device_count()}")
#
# print(f"Device name: {torch.cuda.get_device_name(device)}")
#
# print(f"{torch.cuda.is_available()}")
def | (sample):
alpha = 1./num_classes
mu_prior = np.log(alpha) - 1/num_classes*num_classes*np.log(alpha)
sigma_prior = (1. / alpha * (1 - 2. / num_classes) + 1 / (num_classes ** 2) * num_classes / alpha)
log_det_sigma = num_classes * np.log(sigma_prior)
model_y.train()
if not args.ssl:
inputs = cast(sample[0], args.dtype)
targets = cast(sample[1], 'long')
y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
if args.dataset == "awa2":
return F.binary_cross_entropy_with_logits(y, targets.float()), y
else:
return F.cross_entropy(y, targets), y
else:
global counter
l = sample[0]
u = sample[1]
inputs_l = cast(l[0], args.dtype)
targets_l = cast(l[1], 'long')
inputs_u = cast(u[0], args.dtype)
y_l = data_parallel(model, inputs_l, params, sample[2], list(range(args.ngpu))).float()
y_u = data_parallel(model, inputs_u, params, sample[2], list(range(args.ngpu))).float()
if args.dataset == "awa2":
loss = F.binary_cross_entropy_with_logits(y_l, targets_l.float())
else:
loss = F.cross_entropy(y_l, targets_l)
if args.min_entropy:
if args.dataset == "awa2":
labels_pred = F.sigmoid(y_u)
entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
else:
labels_pred = F.softmax(y_u, dim=1)
entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
if counter >= 10:
loss_entropy = args.unl_weight * torch.mean(entropy)
loss += loss_entropy
elif args.semantic_loss:
if args.dataset == "awa2":
labels_pred = F.sigmoid(y_u)
else:
labels_pred = F.softmax(y_u, dim=1)
part1 = torch.stack([labels_pred ** all_labels[i] for i in range(all_labels.shape[0])])
part2 = torch.stack([(1 - labels_pred) ** (1 - all_labels[i]) for i in range(all_labels.shape[0])])
sem_loss = -torch.log(torch.sum(torch.prod(part1 * part2, dim=2), dim=0))
if counter >= 10:
semantic_loss = args.unl_weight * torch.mean(sem_loss)
loss += semantic_loss
elif args.lp:
weight = np.min([1., np.max([0, 0.05 * (counter - 20)])])
recon_losses = []
for i in range(10):
y_preds, latent = model_y(y_l)
recon_losses.append(F.cross_entropy(y_preds, targets_l, reduction="none"))
loss = torch.stack(recon_losses, dim=1).mean(dim=1).mean()
mu1, lv1 = latent
mu2, lv2 = mu_prior, np.log(sigma_prior)
kld = weight * (0.5 * ((lv2 - lv1) + (lv1.exp() + (mu1 - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
loss += kld
# now do the unsup part
if counter > 50:
y_preds_u, latent_u = model_y(y_u)
log_prob = torch.log_softmax(y_preds_u, dim=1)
# evaluate the kl for each cluster
mu1u, lv1u = latent_u
kldu = weight * (0.5 * ((lv2 - lv1u) + (lv1u.exp() + (mu1u - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
unsup_loss = (log_prob.exp()*(-log_prob)).sum(dim=1).mean() + kldu
loss += args.unl_weight * unsup_loss
return loss, y_preds
return loss, y_l
def compute_loss_test(sample):
model_y.eval()
inputs = cast(sample[0], args.dtype)
targets = cast(sample[1], 'long')
y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
if args.lp:
y_full, latent = model_y(y)
# q_mu, q_logvar, log_alpha = latent
# preds = (log_alpha.exp().unsqueeze(-1) * y_full).sum(dim=1)
#
# tgts = one_hot_embedding(targets, num_classes, device=device)
# recon_loss = F.binary_cross_entropy_with_logits(y_full, tgts)\
recon_loss = F.cross_entropy(y_full, targets)
return recon_loss.mean(), y_full
if args.dataset == "awa2":
return F.binary_cross_entropy_with_logits(y, targets.float()), y
else:
return F.cross_entropy(y, targets), y
def log(t, state):
torch.save(dict(params=params, epoch=t['epoch'], optimizer=state['optimizer'].state_dict()),
os.path.join(args.save, 'model.pt7'))
z = {**vars(args), **t}
with open(os.path.join(args.save, 'log.txt'), 'a') as flog:
flog.write('json_stats: ' + json.dumps(z) + '\n')
print(z)
def on_sample(state):
state['sample'].append(state['train'])
def on_forward(state):
loss = float(state['loss'])
if args.dataset == "awa2":
if not args.ssl or not state['train']:
acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][1])
else:
acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][0][1])
classacc.add(acc)
else:
if not args.ssl or not state['train']:
classacc.add(state['output'].data, state['sample'][1])
else:
classacc.add(state['output'].data, state['sample'][0][1])
meter_loss.add(loss)
if state['train']:
state['iterator'].set_postfix(loss=loss)
def on_start(state):
state['epoch'] = epoch
def on_start_epoch(state):
classacc.reset()
meter_loss.reset()
timer_train.reset()
state['iterator'] = tqdm(train_loader, dynamic_ncols=True)
epoch = state['epoch'] + 1
if epoch in epoch_step:
lr = state['optimizer'].param_groups[0]['lr']
state['optimizer'] = create_optimizer(args, lr * args.lr_decay_ratio)
def on_end_epoch(state):
train_loss = meter_loss.value()
train_acc = classacc.value()[0]
train_time = timer_train.value()
meter_loss.reset()
classacc.reset()
timer_test.reset()
with torch.no_grad():
engine.test(compute_loss_test, test_loader)
test_acc = classacc.value()[0]
print(log({
"train_loss": train_loss[0],
"train_acc": train_acc,
"test_loss": meter_loss.value()[0],
"test_acc": test_acc,
"epoch": state['epoch'],
"num_classes": num_classes,
"n_parameters": n_parameters,
"train_time": train_time,
"test_time": timer_test.value(),
}, state))
print('==> id: %s (%d/%d), test_acc: \33[91m%.2f\033[0m' %
(args.save, state['epoch'], args.epochs, test_acc))
global counter
counter += 1
engine = Engine()
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.hooks['on_start'] = on_start
engine.train(compute_loss, train_loader, args.epochs, optimizer)
if __name__ == '__main__':
main()
| compute_loss | identifier_name |
main.py | import argparse
import os
import json
import numpy as np
import random
from tqdm import tqdm
import torch
from torch.optim import SGD, Adam
import torch.utils.data as data
from torch.utils.data import DataLoader, Subset
import torch.nn.functional as F
from torch import nn
import torchnet as tnt
from torchnet.engine import Engine
from utils import cast, data_parallel, print_tensor_dict, x_u_split, calculate_accuracy
from torch.backends import cudnn
from resnet import resnet
from datasets import get_CIFAR10, get_SVHN, Joint, get_AwA2
from flows import Invertible1x1Conv, NormalizingFlowModel
from spline_flows import NSF_CL
from torch.distributions import MultivariateNormal
import itertools
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.categorical import Categorical
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.beta import Beta
cudnn.benchmark = True
parser = argparse.ArgumentParser()
# Model options
parser.add_argument('--depth', default=28, type=int)
parser.add_argument('--width', default=2, type=float)
parser.add_argument('--dataset', default='cifar10', type=str)
parser.add_argument('--dataroot', default='.', type=str)
parser.add_argument('--dtype', default='float', type=str)
parser.add_argument('--groups', default=1, type=int)
parser.add_argument('--n_workers', default=4, type=int)
parser.add_argument('--seed', default=1, type=int)
# Training options
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--eval_batch_size', default=512, type=int)
parser.add_argument('--lr', default=0.1, type=float)
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--weight_decay', default=0.0005, type=float)
parser.add_argument('--epoch_step', default='[60, 120, 160]', type=str,
help='json list with epochs to drop lr on')
parser.add_argument('--lr_decay_ratio', default=0.2, type=float)
parser.add_argument('--resume', default='', type=str)
parser.add_argument('--note', default='', type=str)
parser.add_argument("--no_augment", action="store_false",
dest="augment", help="Augment training data")
# Device options
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--save', default='.', type=str,
help='save parameters and logs in this folder')
parser.add_argument('--ngpu', default=1, type=int,
help='number of GPUs to use for training')
parser.add_argument('--gpu_id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument("--download", action="store_true",
help="downloads dataset")
# SSL options
parser.add_argument("--ssl", action="store_true",
help="Do semi-supervised learning")
parser.add_argument("--num_labelled", type=int, default=4000,
help="Number of labelled data points")
parser.add_argument("--min_entropy", action="store_true",
help="Add the minimum entropy loss")
parser.add_argument("--lp", action="store_true",
help="Add the learned prior (LP) loss")
parser.add_argument("--semantic_loss", action="store_true",
help="Add the semantic loss")
parser.add_argument("--unl_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--unl2_weight", type=float, default=0.1,
help="Weight for unlabelled regularizer loss")
parser.add_argument("--num_hidden", type=int, default=10,
help="Dim of the latent dimension used")
def one_hot_embedding(labels, num_classes, device="cuda:0"):
    """Convert integer class labels to one-hot vectors.

    Args:
        labels: (LongTensor) class labels, sized [N,].
        num_classes: (int) number of classes.
        device: device on which the encoding is allocated.

    Returns:
        (tensor) encoded labels, sized [N, #classes].
    """
    # Rows of the identity matrix are exactly the one-hot codes; indexing
    # with the label tensor gathers one row per label.
    identity = torch.eye(num_classes, device=device)
    return identity[labels]
def log_normal(x, m, log_v):
    """Element-wise log-density of a diagonal Gaussian, summed over the last dim.

    All leading dimensions are treated as batch dimensions; only the final
    (event) dimension is reduced.

    Args:
        x: tensor: (batch, ..., dim): observation.
        m: tensor: (batch, ..., dim): mean.
        log_v: tensor: (batch, ..., dim): log-variance (the original docstring
            called this `v`/variance, which did not match the signature).

    Returns:
        tensor: (batch, ...): log probability of each sample. The summation
        dimension (dim=-1) is not kept.
    """
    # Normalisation constant -(d/2)*log(2*pi), computed as a Python float
    # instead of building a fresh tensor on every call as the original did.
    const = -0.5 * x.size(-1) * np.log(2 * np.pi)
    # -(1/2) * sum(log sigma^2): log-determinant of the diagonal covariance.
    log_det = -0.5 * torch.sum(log_v, dim=-1)
    # Mahalanobis term for a diagonal covariance.
    log_exp = -0.5 * torch.sum((x - m) ** 2 / log_v.exp(), dim=-1)
    return const + log_det + log_exp
def gaussian_parameters(h, dim=-1):
    """Split a real-valued tensor into Gaussian mean and variance parameters.

    The tensor is cut in half along `dim`; the first half is the mean and the
    second half is squashed through softplus (plus a small epsilon for
    numerical stability) to give a strictly positive variance.

    Thanks: https://github.com/divymurli/VAEs/blob/master/codebase/utils.py

    Args:
        h: tensor: (batch, ..., dim, ...): arbitrary tensor.
        dim: int: dimension along which to split into mean and variance.

    Returns:
        m: tensor: (batch, ..., dim / 2, ...): mean.
        v: tensor: (batch, ..., dim / 2, ...): variance.
    """
    half = h.size(dim) // 2
    mean, raw = torch.split(h, half, dim=dim)
    variance = F.softplus(raw) + 1e-8
    return mean, variance
def check_dataset(dataset, dataroot, augment, download):
    """Resolve a dataset name to its loaded dataset tuple.

    Args:
        dataset: one of "cifar10", "svhn", "awa2".
        dataroot: root directory for the dataset files.
        augment: whether to apply training-time augmentation.
        download: whether to download the dataset if missing (not used by awa2).

    Returns:
        Whatever the matching get_* loader returns (image shape, class count,
        train/test datasets, and for awa2 the label matrix as well).

    Raises:
        ValueError: if the dataset name is not recognised.
    """
    # The original reassigned `dataset` to the loaded tuple and then kept
    # comparing it against strings (only correct by luck), and silently
    # returned the raw name for unknown datasets, which crashed later at
    # unpacking. An elif chain plus an explicit error fixes both.
    if dataset == "cifar10":
        return get_CIFAR10(augment, dataroot, download)
    elif dataset == "svhn":
        return get_SVHN(augment, dataroot, download)
    elif dataset == "awa2":
        return get_AwA2(augment, dataroot)
    raise ValueError("unknown dataset: %r" % (dataset,))
def check_manual_seed(seed):
    """Seed every RNG in sight so runs are reproducible.

    Covers Python's `random`, NumPy, and PyTorch (CPU plus all CUDA devices),
    and switches cuDNN into deterministic mode.

    NOTE(review): assigning PYTHONHASHSEED at runtime does not change hash
    randomisation of the *current* interpreter — it only affects child
    processes; confirm that is the intent.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; also disables the benchmark autotuner
    # (overriding the module-level `cudnn.benchmark = True`).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def reparameterise(mu, logvar):
    """Draw z ~ N(mu, exp(logvar)) via the reparameterisation trick."""
    # sigma = exp(logvar / 2); sampling noise separately keeps the draw
    # differentiable with respect to mu and logvar.
    sigma = torch.exp(logvar * 0.5)
    return mu + torch.randn_like(sigma) * sigma
def init_weights(m):
|
class DecoderModel(nn.Module):
    """Stochastic decoder head for the learned-prior (--lp) objective.

    Encodes class logits into a Gaussian latent (mu, logvar), samples it with
    the reparameterisation trick, and decodes the sample back into class
    predictions.

    NOTE(review): `z_dim` is accepted but unused — every layer is sized from
    `num_classes`; confirm whether the latent was meant to be `z_dim`-wide.
    """

    def __init__(self, num_classes, z_dim=2):
        super().__init__()

        def _mlp():
            # Shared two-layer architecture used by all three heads.
            return nn.Sequential(
                nn.Linear(num_classes, 50),
                nn.LeakyReLU(.2),
                nn.Linear(50, num_classes),
            )

        self.mu = _mlp()
        self.logvar = _mlp()
        self.net = _mlp()
        self.apply(init_weights)

    def forward(self, x):
        """Return (predictions, (mu, logvar)) for input logits x."""
        mu, logvar = self.mu(x), self.logvar(x)
        sample = reparameterise(mu, logvar)
        return self.net(sample), (mu, logvar)
def main():
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# device = "cpu"
args = parser.parse_args()
print('parsed options:', vars(args))
epoch_step = json.loads(args.epoch_step)
check_manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
ds = check_dataset(args.dataset, args.dataroot, args.augment, args.download)
if args.dataset == "awa2":
image_shape, num_classes, train_dataset, test_dataset, all_labels = ds
all_labels = all_labels.to(device)
else:
image_shape, num_classes, train_dataset, test_dataset = ds
all_labels = torch.eye(num_classes).to(device)
if args.ssl:
num_labelled = args.num_labelled
num_unlabelled = len(train_dataset) - num_labelled
if args.dataset == "awa2":
labelled_set, unlabelled_set = data.random_split(train_dataset, [num_labelled, num_unlabelled])
else:
td_targets = train_dataset.targets if args.dataset == "cifar10" else train_dataset.labels
labelled_idxs, unlabelled_idxs = x_u_split(td_targets, num_labelled, num_classes)
labelled_set, unlabelled_set = [Subset(train_dataset, labelled_idxs),
Subset(train_dataset, unlabelled_idxs)]
labelled_set = data.ConcatDataset([labelled_set for i in range(num_unlabelled // num_labelled + 1)])
labelled_set, _ = data.random_split(labelled_set, [num_unlabelled, len(labelled_set) - num_unlabelled])
train_dataset = Joint(labelled_set, unlabelled_set)
def _init_fn(worker_id):
np.random.seed(args.seed)
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.n_workers,
worker_init_fn=_init_fn
)
test_loader = data.DataLoader(
test_dataset,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.n_workers,
worker_init_fn=_init_fn
)
z_dim = args.num_hidden
model, params = resnet(args.depth, args.width, num_classes, image_shape[0])
if args.lp:
model_y = DecoderModel(num_classes, z_dim)
model_y.to(device)
model_y.apply(init_weights)
# optimizer_y = Adam(model_y.get_decoder_params(), lr=1e-3, weight_decay=1e-5)
def create_optimizer(args, lr):
print('creating optimizer with lr = ', lr)
params_ = [v for v in params.values() if v.requires_grad]
# params_ += model_y.get_encoder_params()
params_ += list(model_y.parameters())
return SGD(params_, lr, momentum=0.9, weight_decay=args.weight_decay)
optimizer = create_optimizer(args, args.lr)
epoch = 0
print('\nParameters:')
print_tensor_dict(params)
n_parameters = sum(p.numel() for p in params.values() if p.requires_grad)
print('\nTotal number of parameters:', n_parameters)
meter_loss = tnt.meter.AverageValueMeter()
if args.dataset == "awa2":
classacc = tnt.meter.AverageValueMeter()
else:
classacc = tnt.meter.ClassErrorMeter(accuracy=True)
timer_train = tnt.meter.TimeMeter('s')
timer_test = tnt.meter.TimeMeter('s')
if not os.path.exists(args.save):
os.mkdir(args.save)
global counter, aggressive
counter = 0
aggressive = False
# device = torch.cuda.current_device()
# print(f"On GPU: {device}")
#
# print(f"{torch.cuda.device(device)}")
#
# print(f"# devices: {torch.cuda.device_count()}")
#
# print(f"Device name: {torch.cuda.get_device_name(device)}")
#
# print(f"{torch.cuda.is_available()}")
def compute_loss(sample):
alpha = 1./num_classes
mu_prior = np.log(alpha) - 1/num_classes*num_classes*np.log(alpha)
sigma_prior = (1. / alpha * (1 - 2. / num_classes) + 1 / (num_classes ** 2) * num_classes / alpha)
log_det_sigma = num_classes * np.log(sigma_prior)
model_y.train()
if not args.ssl:
inputs = cast(sample[0], args.dtype)
targets = cast(sample[1], 'long')
y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
if args.dataset == "awa2":
return F.binary_cross_entropy_with_logits(y, targets.float()), y
else:
return F.cross_entropy(y, targets), y
else:
global counter
l = sample[0]
u = sample[1]
inputs_l = cast(l[0], args.dtype)
targets_l = cast(l[1], 'long')
inputs_u = cast(u[0], args.dtype)
y_l = data_parallel(model, inputs_l, params, sample[2], list(range(args.ngpu))).float()
y_u = data_parallel(model, inputs_u, params, sample[2], list(range(args.ngpu))).float()
if args.dataset == "awa2":
loss = F.binary_cross_entropy_with_logits(y_l, targets_l.float())
else:
loss = F.cross_entropy(y_l, targets_l)
if args.min_entropy:
if args.dataset == "awa2":
labels_pred = F.sigmoid(y_u)
entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
else:
labels_pred = F.softmax(y_u, dim=1)
entropy = -torch.sum(labels_pred * torch.log(labels_pred), dim=1)
if counter >= 10:
loss_entropy = args.unl_weight * torch.mean(entropy)
loss += loss_entropy
elif args.semantic_loss:
if args.dataset == "awa2":
labels_pred = F.sigmoid(y_u)
else:
labels_pred = F.softmax(y_u, dim=1)
part1 = torch.stack([labels_pred ** all_labels[i] for i in range(all_labels.shape[0])])
part2 = torch.stack([(1 - labels_pred) ** (1 - all_labels[i]) for i in range(all_labels.shape[0])])
sem_loss = -torch.log(torch.sum(torch.prod(part1 * part2, dim=2), dim=0))
if counter >= 10:
semantic_loss = args.unl_weight * torch.mean(sem_loss)
loss += semantic_loss
elif args.lp:
weight = np.min([1., np.max([0, 0.05 * (counter - 20)])])
recon_losses = []
for i in range(10):
y_preds, latent = model_y(y_l)
recon_losses.append(F.cross_entropy(y_preds, targets_l, reduction="none"))
loss = torch.stack(recon_losses, dim=1).mean(dim=1).mean()
mu1, lv1 = latent
mu2, lv2 = mu_prior, np.log(sigma_prior)
kld = weight * (0.5 * ((lv2 - lv1) + (lv1.exp() + (mu1 - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
loss += kld
# now do the unsup part
if counter > 50:
y_preds_u, latent_u = model_y(y_u)
log_prob = torch.log_softmax(y_preds_u, dim=1)
# evaluate the kl for each cluster
mu1u, lv1u = latent_u
kldu = weight * (0.5 * ((lv2 - lv1u) + (lv1u.exp() + (mu1u - mu2).pow(2)) / (sigma_prior) - 1).sum(dim=1)).mean()
unsup_loss = (log_prob.exp()*(-log_prob)).sum(dim=1).mean() + kldu
loss += args.unl_weight * unsup_loss
return loss, y_preds
return loss, y_l
def compute_loss_test(sample):
model_y.eval()
inputs = cast(sample[0], args.dtype)
targets = cast(sample[1], 'long')
y = data_parallel(model, inputs, params, sample[2], list(range(args.ngpu))).float()
if args.lp:
y_full, latent = model_y(y)
# q_mu, q_logvar, log_alpha = latent
# preds = (log_alpha.exp().unsqueeze(-1) * y_full).sum(dim=1)
#
# tgts = one_hot_embedding(targets, num_classes, device=device)
# recon_loss = F.binary_cross_entropy_with_logits(y_full, tgts)\
recon_loss = F.cross_entropy(y_full, targets)
return recon_loss.mean(), y_full
if args.dataset == "awa2":
return F.binary_cross_entropy_with_logits(y, targets.float()), y
else:
return F.cross_entropy(y, targets), y
def log(t, state):
torch.save(dict(params=params, epoch=t['epoch'], optimizer=state['optimizer'].state_dict()),
os.path.join(args.save, 'model.pt7'))
z = {**vars(args), **t}
with open(os.path.join(args.save, 'log.txt'), 'a') as flog:
flog.write('json_stats: ' + json.dumps(z) + '\n')
print(z)
def on_sample(state):
state['sample'].append(state['train'])
def on_forward(state):
loss = float(state['loss'])
if args.dataset == "awa2":
if not args.ssl or not state['train']:
acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][1])
else:
acc = calculate_accuracy(F.sigmoid(state['output'].data), state['sample'][0][1])
classacc.add(acc)
else:
if not args.ssl or not state['train']:
classacc.add(state['output'].data, state['sample'][1])
else:
classacc.add(state['output'].data, state['sample'][0][1])
meter_loss.add(loss)
if state['train']:
state['iterator'].set_postfix(loss=loss)
def on_start(state):
state['epoch'] = epoch
def on_start_epoch(state):
classacc.reset()
meter_loss.reset()
timer_train.reset()
state['iterator'] = tqdm(train_loader, dynamic_ncols=True)
epoch = state['epoch'] + 1
if epoch in epoch_step:
lr = state['optimizer'].param_groups[0]['lr']
state['optimizer'] = create_optimizer(args, lr * args.lr_decay_ratio)
def on_end_epoch(state):
train_loss = meter_loss.value()
train_acc = classacc.value()[0]
train_time = timer_train.value()
meter_loss.reset()
classacc.reset()
timer_test.reset()
with torch.no_grad():
engine.test(compute_loss_test, test_loader)
test_acc = classacc.value()[0]
print(log({
"train_loss": train_loss[0],
"train_acc": train_acc,
"test_loss": meter_loss.value()[0],
"test_acc": test_acc,
"epoch": state['epoch'],
"num_classes": num_classes,
"n_parameters": n_parameters,
"train_time": train_time,
"test_time": timer_test.value(),
}, state))
print('==> id: %s (%d/%d), test_acc: \33[91m%.2f\033[0m' %
(args.save, state['epoch'], args.epochs, test_acc))
global counter
counter += 1
engine = Engine()
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.hooks['on_start'] = on_start
engine.train(compute_loss, train_loader, args.epochs, optimizer)
if __name__ == '__main__':
main()
| if type(m) == nn.Linear:
torch.nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0.01) | identifier_body |
bluepay.go | package bluepay
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"github.com/astaxie/beego"
"github.com/astaxie/beego/logs"
uuid "github.com/satori/go.uuid"
"micro-loan/common/dao"
"micro-loan/common/lib/device"
"micro-loan/common/lib/payment"
"micro-loan/common/models"
"micro-loan/common/pkg/event"
"micro-loan/common/pkg/event/evtypes"
"micro-loan/common/pkg/monitor"
"micro-loan/common/thirdparty"
"micro-loan/common/tools"
"micro-loan/common/types"
)
type BluepayCreateVAResponse struct {
Data BluepayCreateVADataDetailResponse `json:"data"`
Message string `json:"message"`
Status int `json:"status"`
VaFee int64 `json:"vaFee"`
IsStatic int `json:"isStatic"`
OtcFee int64 `json:"otcFee"`
PaymentCode string `json:"payment_code"`
}
type BluepayCreateVADataDetailResponse struct {
Msisdn string `json:"msisdn"`
Paymentcode string `json:"paymentCode"`
TransactionId string `json:transactionId`
}
type BluepayCreateDisburseResponse struct {
TransactionId string `json:"transactionId"`
TransferStatus string `json:"transferStatus"`
Code string `json:"code"`
}
type BluepayApi struct {
payment.PaymentApi
}
type NpwpResp struct {
Status int `json:"status"`
Message string `json:"message"`
Npwp string `json:"npwp"`
CustomerName string `json:"customerName"`
}
var bluePayBankNameCodeMap = map[string]string{
"Bank Rakyat Indonesia (BRI)": "BRI",
"Bank Mandiri": "MANDIRI",
"Bank Negara Indonesia (BNI)": "BNI",
"Bank Danamon": "DANAMON",
"Bank Permata": "PERMATA",
"Bank Central Asia (BCA)": "BCA",
"Bank Maybank": "BII",
"Bank Panin": "PANIN",
"Bank CIMB Niaga": "CIMB",
"Bank UOB Indonesia": "UOB",
"Bank Artha Graha International": "ARTA GRAHA",
"Bank BJB": "BANK BJB",
"Bank Jatim": "BANK JATIM",
"BPD Kalimantan Barat": "BPD NUSA TENGGARA BARAT",
"Bank Nusantara Parahyangan": "BANK NUSANTARA PARAHYANGAN",
"Bank Muamalat Indonesia": "BANK MUAMALAT INDONESIA",
"Sinarmas": "SINARMAS",
"Bank Tabungan Negara (BTN)": "BANK TABUNGAN NEGARA",
"Bank Mega": "MEGA",
"Bank Bukopin": "BUKOPIN",
"Bank Hana": "BANK HANA",
"Centratama Nasional Bank": "BANK CENTRATAMA NASIONAL",
"Bank Tabungan Pensiunan Nasional": "BANK TABUNGAN PENSIUNAN NASIONAL/BTPN",
}
func BluepayBankNameCodeMap() map[string]string {
return bluePayBankNameCodeMap
}
func BluepayBankName2Code(name string) (code string, err error) {
bankNameCodeMap := BluepayBankNameCodeMap()
if v, ok := bankNameCodeMap[name]; ok {
code = v
return
}
err = fmt.Errorf("bank code undefined")
return
}
func BankName2BluepaySupportCode(name string) (code string, err error) {
conf := map[string]bool{
"PERMATA": true,
"BNI": true,
}
code, err = BluepayBankName2Code(name)
if err != nil {
return
}
if !conf[code] {
code = "BNI"
}
return
}
func (c *BluepayApi) CreateVirtualAccount(datas map[string]interface{}) (res []byte, err error) {
//curl 'http://120.76.101.146:21921/indonesia/express/gather/mo?price=30000&productId=1483&payType=atm&transactionId=14615984398y&ui=none&promotionId=1000&bankType=permata'
productId, _ := beego.AppConfig.Int64("bluepay_product_id")
virtualAccountsUrl := beego.AppConfig.String("bluepay_create_va_url")
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
bankType := strings.ToLower(bankCode)
mobile := datas["mobile"].(string)
headerStr := tools.SubString(mobile, 0, 2)
if headerStr != "62" {
mobile = fmt.Sprintf("%s%s", "62", mobile)
}
price := datas["amount"].(int64)
orderId := datas["order_id"].(int64)
externalId := datas["account_id"].(int64)
virtualAccountsUrl = fmt.Sprintf("%s?msisdn=%s&price=%d&productId=%d&payType=atm&transactionId=%d&ui=none&promotionId=1000&bankType=%s", virtualAccountsUrl, mobile, price, productId, orderId, bankType)
client := &http.Client{}
req, err := http.NewRequest("GET", virtualAccountsUrl, nil)
if err != nil {
logs.Error("[CreateVirtualAccount] http.NewRequest url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.SetBasicAuth(secretKey, "")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[CreateVirtualAccount] client.Do url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[CreateVirtualAccount] ioutil.ReadAll url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(virtualAccountsUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, virtualAccountsUrl, externalId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: externalId,
OrderId: orderId,
ApiMd5: tools.Md5(virtualAccountsUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
}
func (c *BluepayApi) Disburse(datas map[string]interface{}) (res []byte, err error) {
orderId := datas["order_id"].(int64)
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
accountHolderName := datas["account_name"].(string)
accountNumber := datas["account_num"].(string)
amount := datas["amount"].(int64)
paramStr := fmt.Sprintf("transactionId=%d&promotionId=1000&payeeCountry=%s&payeeBankName=%s&payeeName=%s&payeeAccount=%s&payeeMsisdn=%d&payeeType=%s&amount=%d¤cy=%s",
orderId, types.PayeeCountryIDId, bankCode, accountHolderName, accountNumber, types.PayeeMsisdnID, types.PayeeTypePersonal, amount, types.PayeeTypeIDCurrency)
hash := OpenSSLEncrypt(paramStr)
keyStr := beego.AppConfig.String("bluepay_secret_key")
productId := beego.AppConfig.String("bluepay_product_id")
logs.Debug("[Disburse] hash:%s, keyStr:%s", hash, keyStr)
disburseUrl := beego.AppConfig.String("bluepay_disburse_url")
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
disburseUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", disburseUrl, productId, hash, md5val)
client := &http.Client{}
req, err := http.NewRequest("GET", disburseUrl, nil)
if err != nil {
logs.Error("[Disburse] http.NewRequest url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[Disburse] client.Do url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[Disburse] ioutil.ReadAll url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(disburseUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, disburseUrl, orderId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: 0,
OrderId: orderId,
ApiMd5: tools.Md5(disburseUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
}
func (c *BluepayApi) CheckVirtualAccount(datas map[string]interface{}) (res []byte, err error) {
return []byte{}, err
}
func (c *BluepayApi) CreateVirtualAccountResponse(jsonData []byte, datas map[string]interface{}) error {
var resp BluepayCreateVAResponse = BluepayCreateVAResponse{}
err := json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[CreateVirtualAccountResponse] json.Unmarshal err:%v, json:%s", jsonData, err)
return err
}
//if resJson.Status != 200 {
if resp.Status != 201 {
errStr := fmt.Sprintf("[CreateVirtualAccountResponse] response status is wrong retJson:%s", string(jsonData))
err := fmt.Errorf(errStr)
return err
}
userAccountId := datas["account_id"].(int64)
_, err = models.GetEAccount(userAccountId, types.Bluepay)
if err != nil {
//不存在则创建
eAccount := models.User_E_Account{}
eAccount.Id, _ = device.GenerateBizId(types.UserEAccountBiz)
eAccount.UserAccountId = userAccountId
eAccount.EAccountNumber = resp.PaymentCode
eAccount.VaCompanyCode = types.Bluepay
eAccount.Status = "pending"
eAccount.Ctime = tools.GetUnixMillis()
eAccount.Utime = tools.GetUnixMillis()
_, err = eAccount.AddEAccount(&eAccount)
}
return err
}
func (c *BluepayApi) DisburseResponse(jsonData []byte, datas map[string]interface{}) (err error) {
var resp BluepayCreateDisburseResponse = BluepayCreateDisburseResponse{}
err = json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[DisburseResponse] json.Unmarshal err:%s, json:%s", err, string(jsonData))
return err
}
orderId := datas["order_id"].(int64)
bankCode := datas["bank_code"].(string)
accountHolderName := datas["account_name"].(string)
transactionId, _ := tools.Str2Int64(resp.TransactionId)
if transactionId != orderId {
//response数据如果和请求的不一致,直接报警
errStr := fmt.Sprintf("[DisburseResponse] response error orderId:%d, restJson:%s", orderId, string(jsonData))
logs.Error(errStr)
err = fmt.Errorf(errStr)
return err
}
order, err := models.GetOrder(orderId)
if err != nil {
return err
}
o := models.Mobi_E_Trans{}
orderIdStr := tools.Int642Str(orderId)
o.UserAcccountId = order.UserAccountId
o.VaCompanyCode = types.Bluepay
o.Amount = order.Loan
//向上取整,百位取整
o.PayType = types.PayTypeMoneyOut
o.BankCode = bankCode
o.AccountHolderName = accountHolderName
o.DisbursementDescription = orderIdStr
o.DisbursementId = orderIdStr
o.Status = resp.TransferStatus
o.Utime = tools.GetUnixMillis()
o.Ctime = tools.GetUnixMillis()
_, err = o.AddMobiEtrans(&o)
return err
}
func OpenSSLEncrypt(x string) string {
keyStr := beego.AppConfig.String("bluepay_secret_key")
ivStr := beego.AppConfig.String("bluepay_secret_iv")
logs.Debug("ivStr is: ", ivStr)
key := []byte(keyStr)
iv := []byte(ivStr)
var plaintextblock []byte
// Turn struct into byte slice
plaintext := x
// Make sure the block size is a multiple of 16
length := len(plaintext)
extendBlock := 16 - (length % 16)
plaintextblock = make([]byte, length+extendBlock)
copy(plaintextblock[length:], bytes.Repeat([]byte{uint8(extendBlock)}, extendBlock))
copy(plaintextblock, plaintext)
cb, err := aes.NewCipher(key)
if err != nil {
log.Println("error NewCipher(): ", err)
}
ciphertext := make([]byte, len(plaintextblock))
mode := cipher.NewCBCEncrypter(cb, iv)
mode.CryptBlocks(ciphertext, plaintextblock)
text := hex.EncodeToString(ciphertext)
//二进制转换十六进制
str := tools.UrlEncode(base64.StdEncoding.EncodeToString([]byte(text)))
//urlencode
return str
}
func NameValidator(accountId int64) (bankNo string, err error) {
productId := beego.AppConfig.String("bluepay_product_id")
checkUrl := beego.AppConfig.String("bluepay_name_validator")
accountBase, err := models.OneAccountBaseByPkId(accountId)
if err != nil {
logs.Error("can not get account_base by accountId:", accountId)
return
}
profile, err := dao.CustomerProfile(accountBase.Id)
if err != nil {
logs.Error("can not get account_profile by account_id:", accountBase.Id)
return
}
name := accountBase.Realname
bankNumber := profile.BankNo
bankCode, err := BluepayBankName2Code(profile.BankName)
if err == nil {
//目前bluepay只支持放款银行列表的二要素检查
//所以只有支持的银行,再调用此列表,不然没有意义
uuidStr := uuid.Mu | beego.AppConfig.String("bluepay_secret_key")
encrypt := tools.Md5(fmt.Sprintf("productId=%s&npwp=%s%s", productId, npwp, keyStr))
reqParm := fmt.Sprintf("productId=%s&npwp=%s&encrypt=%s", productId, npwp, encrypt)
checkUrl := router + "/" + reqParm
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err := tools.SimpleHttpClient("GET", router, reqHeaders, "", tools.DefaultHttpTimeout())
if err != nil {
logs.Error(err)
return
}
logs.Debug("httpBody:%v , httpCode:%d ", string(httpBody), httpCode)
err = json.Unmarshal(httpBody, &resp)
if err != nil {
err = fmt.Errorf("[NpwpVerify] bluepay response json unmarshal failed, err is %s httpBody:%s", err.Error(), string(httpBody))
logs.Error(err)
return
}
responstType, fee := thirdparty.CalcFeeByApi(checkUrl, reqParm, httpBody)
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, checkUrl, accountId, reqParm, httpBody, responstType, fee, 200)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: accountId,
OrderId: 0,
ApiMd5: tools.Md5(router),
Fee: int64(fee),
Result: responstType,
})
if httpCode != 200 {
err = fmt.Errorf("[NpwpVerify] bluepay response httpCode is wrong [%d]", httpCode)
logs.Error(err)
return
}
logs.Warn("resp:%#v", resp)
return
}
| st(uuid.NewV4()).String()
paramStr := fmt.Sprintf("phoneNum=%s&customerName=%s&accountNo=%s&bankName=%s&transactionId=%s", tools.UrlEncode(accountBase.Mobile), tools.UrlEncode(name), tools.UrlEncode(bankNumber), tools.UrlEncode(bankCode), tools.UrlEncode(uuidStr))
logs.Debug(paramStr)
hash := OpenSSLEncrypt(paramStr)
keyStr := beego.AppConfig.String("bluepay_secret_key")
logs.Debug("[NameValidator] hash:%s, keyStr:%s", hash, keyStr)
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
checkUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", checkUrl, productId, hash, md5val)
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err1 := tools.SimpleHttpClient("GET", checkUrl, reqHeaders, "", tools.DefaultHttpTimeout())
//此处报错。。。err is shadowed during return
//只能新申请一个err1变量了
if err1 != nil {
logs.Error(err1)
err = err1
return
}
var nameValidatorResp struct {
Message string `json:"message"`
Status int `json:"status"`
}
err1 = json.Unmarshal(httpBody, &nameValidatorResp)
if err1 != nil {
err1 = fmt.Errorf("bluepay name validator response json unmarshal failed, err is %s", err.Error())
logs.Error(err1)
err = err1
return
}
logs.Debug(string(httpBody))
if httpCode != 200 {
err1 = fmt.Errorf("bluepay name validator httpCode is wrong [%d]", httpCode)
logs.Error(err1)
err = err1
return
}
if nameValidatorResp.Status != 200 {
//如果没匹配上,就返回银行账号,让客户去展示给用户,让用户可以修改
bankNo = bankNumber
}
}
return
}
func NpwpVerify(accountId int64, npwp string) (resp NpwpResp, err error) {
productId := "1493"
//checkUrl := "http://idtool.bluepay.asia//charge/express/npwpQuery"
router := "http://120.76.101.146:21811/charge/express/npwpQuery"
keyStr := | conditional_block |
bluepay.go | package bluepay
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"github.com/astaxie/beego"
"github.com/astaxie/beego/logs"
uuid "github.com/satori/go.uuid"
"micro-loan/common/dao"
"micro-loan/common/lib/device"
"micro-loan/common/lib/payment"
"micro-loan/common/models"
"micro-loan/common/pkg/event"
"micro-loan/common/pkg/event/evtypes"
"micro-loan/common/pkg/monitor"
"micro-loan/common/thirdparty"
"micro-loan/common/tools"
"micro-loan/common/types"
)
type BluepayCreateVAResponse struct {
Data BluepayCreateVADataDetailResponse `json:"data"`
Message string `json:"message"`
Status int `json:"status"`
VaFee int64 `json:"vaFee"`
IsStatic int `json:"isStatic"`
OtcFee int64 `json:"otcFee"`
PaymentCode string `json:"payment_code"`
}
type BluepayCreateVADataDetailResponse struct {
Msisdn string `json:"msisdn"`
Paymentcode string `json:"paymentCode"`
TransactionId string `json:transactionId`
}
type BluepayCreateDisburseResponse struct {
TransactionId string `json:"transactionId"`
TransferStatus string `json:"transferStatus"`
Code string `json:"code"`
}
type BluepayApi struct {
payment.PaymentApi
}
type NpwpResp struct {
Status int `json:"status"`
Message string `json:"message"`
Npwp string `json:"npwp"`
CustomerName string `json:"customerName"`
}
var bluePayBankNameCodeMap = map[string]string{
"Bank Rakyat Indonesia (BRI)": "BRI",
"Bank Mandiri": "MANDIRI",
"Bank Negara Indonesia (BNI)": "BNI",
"Bank Danamon": "DANAMON",
"Bank Permata": "PERMATA",
"Bank Central Asia (BCA)": "BCA",
"Bank Maybank": "BII",
"Bank Panin": "PANIN",
"Bank CIMB Niaga": "CIMB",
"Bank UOB Indonesia": "UOB",
"Bank Artha Graha International": "ARTA GRAHA",
"Bank BJB": "BANK BJB",
"Bank Jatim": "BANK JATIM",
"BPD Kalimantan Barat": "BPD NUSA TENGGARA BARAT",
"Bank Nusantara Parahyangan": "BANK NUSANTARA PARAHYANGAN",
"Bank Muamalat Indonesia": "BANK MUAMALAT INDONESIA",
"Sinarmas": "SINARMAS",
"Bank Tabungan Negara (BTN)": "BANK TABUNGAN NEGARA",
"Bank Mega": "MEGA",
"Bank Bukopin": "BUKOPIN",
"Bank Hana": "BANK HANA",
"Centratama Nasional Bank": "BANK CENTRATAMA NASIONAL",
"Bank Tabungan Pensiunan Nasional": "BANK TABUNGAN PENSIUNAN NASIONAL/BTPN",
}
func BluepayBankNameCodeMap() map[string]string {
return bluePayBankNameCodeMap
}
func BluepayBankName2Code(name string) (code string, err error) {
bankNameCodeMap := BluepayBankNameCodeMap()
if v, ok := bankNameCodeMap[name]; ok {
code = v
return
}
err = fmt.Errorf("bank code undefined")
return
}
func BankName2BluepaySupportCode(name string) (code string, err error) {
conf := map[string]bool{
"PERMATA": true,
"BNI": true,
}
code, err = BluepayBankName2Code(name)
if err != nil {
return
}
if !conf[code] {
code = "BNI"
}
return
}
func (c *BluepayApi) CreateVirtualAccount(datas map[string]interface{}) (res []byte, err error) |
func (c *BluepayApi) Disburse(datas map[string]interface{}) (res []byte, err error) {
orderId := datas["order_id"].(int64)
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
accountHolderName := datas["account_name"].(string)
accountNumber := datas["account_num"].(string)
amount := datas["amount"].(int64)
paramStr := fmt.Sprintf("transactionId=%d&promotionId=1000&payeeCountry=%s&payeeBankName=%s&payeeName=%s&payeeAccount=%s&payeeMsisdn=%d&payeeType=%s&amount=%d¤cy=%s",
orderId, types.PayeeCountryIDId, bankCode, accountHolderName, accountNumber, types.PayeeMsisdnID, types.PayeeTypePersonal, amount, types.PayeeTypeIDCurrency)
hash := OpenSSLEncrypt(paramStr)
keyStr := beego.AppConfig.String("bluepay_secret_key")
productId := beego.AppConfig.String("bluepay_product_id")
logs.Debug("[Disburse] hash:%s, keyStr:%s", hash, keyStr)
disburseUrl := beego.AppConfig.String("bluepay_disburse_url")
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
disburseUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", disburseUrl, productId, hash, md5val)
client := &http.Client{}
req, err := http.NewRequest("GET", disburseUrl, nil)
if err != nil {
logs.Error("[Disburse] http.NewRequest url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[Disburse] client.Do url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[Disburse] ioutil.ReadAll url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(disburseUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, disburseUrl, orderId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: 0,
OrderId: orderId,
ApiMd5: tools.Md5(disburseUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
}
func (c *BluepayApi) CheckVirtualAccount(datas map[string]interface{}) (res []byte, err error) {
return []byte{}, err
}
func (c *BluepayApi) CreateVirtualAccountResponse(jsonData []byte, datas map[string]interface{}) error {
var resp BluepayCreateVAResponse = BluepayCreateVAResponse{}
err := json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[CreateVirtualAccountResponse] json.Unmarshal err:%v, json:%s", jsonData, err)
return err
}
//if resJson.Status != 200 {
if resp.Status != 201 {
errStr := fmt.Sprintf("[CreateVirtualAccountResponse] response status is wrong retJson:%s", string(jsonData))
err := fmt.Errorf(errStr)
return err
}
userAccountId := datas["account_id"].(int64)
_, err = models.GetEAccount(userAccountId, types.Bluepay)
if err != nil {
//不存在则创建
eAccount := models.User_E_Account{}
eAccount.Id, _ = device.GenerateBizId(types.UserEAccountBiz)
eAccount.UserAccountId = userAccountId
eAccount.EAccountNumber = resp.PaymentCode
eAccount.VaCompanyCode = types.Bluepay
eAccount.Status = "pending"
eAccount.Ctime = tools.GetUnixMillis()
eAccount.Utime = tools.GetUnixMillis()
_, err = eAccount.AddEAccount(&eAccount)
}
return err
}
func (c *BluepayApi) DisburseResponse(jsonData []byte, datas map[string]interface{}) (err error) {
var resp BluepayCreateDisburseResponse = BluepayCreateDisburseResponse{}
err = json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[DisburseResponse] json.Unmarshal err:%s, json:%s", err, string(jsonData))
return err
}
orderId := datas["order_id"].(int64)
bankCode := datas["bank_code"].(string)
accountHolderName := datas["account_name"].(string)
transactionId, _ := tools.Str2Int64(resp.TransactionId)
if transactionId != orderId {
//response数据如果和请求的不一致,直接报警
errStr := fmt.Sprintf("[DisburseResponse] response error orderId:%d, restJson:%s", orderId, string(jsonData))
logs.Error(errStr)
err = fmt.Errorf(errStr)
return err
}
order, err := models.GetOrder(orderId)
if err != nil {
return err
}
o := models.Mobi_E_Trans{}
orderIdStr := tools.Int642Str(orderId)
o.UserAcccountId = order.UserAccountId
o.VaCompanyCode = types.Bluepay
o.Amount = order.Loan
//向上取整,百位取整
o.PayType = types.PayTypeMoneyOut
o.BankCode = bankCode
o.AccountHolderName = accountHolderName
o.DisbursementDescription = orderIdStr
o.DisbursementId = orderIdStr
o.Status = resp.TransferStatus
o.Utime = tools.GetUnixMillis()
o.Ctime = tools.GetUnixMillis()
_, err = o.AddMobiEtrans(&o)
return err
}
func OpenSSLEncrypt(x string) string {
keyStr := beego.AppConfig.String("bluepay_secret_key")
ivStr := beego.AppConfig.String("bluepay_secret_iv")
logs.Debug("ivStr is: ", ivStr)
key := []byte(keyStr)
iv := []byte(ivStr)
var plaintextblock []byte
// Turn struct into byte slice
plaintext := x
// Make sure the block size is a multiple of 16
length := len(plaintext)
extendBlock := 16 - (length % 16)
plaintextblock = make([]byte, length+extendBlock)
copy(plaintextblock[length:], bytes.Repeat([]byte{uint8(extendBlock)}, extendBlock))
copy(plaintextblock, plaintext)
cb, err := aes.NewCipher(key)
if err != nil {
log.Println("error NewCipher(): ", err)
}
ciphertext := make([]byte, len(plaintextblock))
mode := cipher.NewCBCEncrypter(cb, iv)
mode.CryptBlocks(ciphertext, plaintextblock)
text := hex.EncodeToString(ciphertext)
//二进制转换十六进制
str := tools.UrlEncode(base64.StdEncoding.EncodeToString([]byte(text)))
//urlencode
return str
}
func NameValidator(accountId int64) (bankNo string, err error) {
productId := beego.AppConfig.String("bluepay_product_id")
checkUrl := beego.AppConfig.String("bluepay_name_validator")
accountBase, err := models.OneAccountBaseByPkId(accountId)
if err != nil {
logs.Error("can not get account_base by accountId:", accountId)
return
}
profile, err := dao.CustomerProfile(accountBase.Id)
if err != nil {
logs.Error("can not get account_profile by account_id:", accountBase.Id)
return
}
name := accountBase.Realname
bankNumber := profile.BankNo
bankCode, err := BluepayBankName2Code(profile.BankName)
if err == nil {
//目前bluepay只支持放款银行列表的二要素检查
//所以只有支持的银行,再调用此列表,不然没有意义
uuidStr := uuid.Must(uuid.NewV4()).String()
paramStr := fmt.Sprintf("phoneNum=%s&customerName=%s&accountNo=%s&bankName=%s&transactionId=%s", tools.UrlEncode(accountBase.Mobile), tools.UrlEncode(name), tools.UrlEncode(bankNumber), tools.UrlEncode(bankCode), tools.UrlEncode(uuidStr))
logs.Debug(paramStr)
hash := OpenSSLEncrypt(paramStr)
keyStr := beego.AppConfig.String("bluepay_secret_key")
logs.Debug("[NameValidator] hash:%s, keyStr:%s", hash, keyStr)
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
checkUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", checkUrl, productId, hash, md5val)
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err1 := tools.SimpleHttpClient("GET", checkUrl, reqHeaders, "", tools.DefaultHttpTimeout())
//此处报错。。。err is shadowed during return
//只能新申请一个err1变量了
if err1 != nil {
logs.Error(err1)
err = err1
return
}
var nameValidatorResp struct {
Message string `json:"message"`
Status int `json:"status"`
}
err1 = json.Unmarshal(httpBody, &nameValidatorResp)
if err1 != nil {
err1 = fmt.Errorf("bluepay name validator response json unmarshal failed, err is %s", err.Error())
logs.Error(err1)
err = err1
return
}
logs.Debug(string(httpBody))
if httpCode != 200 {
err1 = fmt.Errorf("bluepay name validator httpCode is wrong [%d]", httpCode)
logs.Error(err1)
err = err1
return
}
if nameValidatorResp.Status != 200 {
//如果没匹配上,就返回银行账号,让客户去展示给用户,让用户可以修改
bankNo = bankNumber
}
}
return
}
func NpwpVerify(accountId int64, npwp string) (resp NpwpResp, err error) {
productId := "1493"
//checkUrl := "http://idtool.bluepay.asia//charge/express/npwpQuery"
router := "http://120.76.101.146:21811/charge/express/npwpQuery"
keyStr := beego.AppConfig.String("bluepay_secret_key")
encrypt := tools.Md5(fmt.Sprintf("productId=%s&npwp=%s%s", productId, npwp, keyStr))
reqParm := fmt.Sprintf("productId=%s&npwp=%s&encrypt=%s", productId, npwp, encrypt)
checkUrl := router + "/" + reqParm
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err := tools.SimpleHttpClient("GET", router, reqHeaders, "", tools.DefaultHttpTimeout())
if err != nil {
logs.Error(err)
return
}
logs.Debug("httpBody:%v , httpCode:%d ", string(httpBody), httpCode)
err = json.Unmarshal(httpBody, &resp)
if err != nil {
err = fmt.Errorf("[NpwpVerify] bluepay response json unmarshal failed, err is %s httpBody:%s", err.Error(), string(httpBody))
logs.Error(err)
return
}
responstType, fee := thirdparty.CalcFeeByApi(checkUrl, reqParm, httpBody)
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, checkUrl, accountId, reqParm, httpBody, responstType, fee, 200)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: accountId,
OrderId: 0,
ApiMd5: tools.Md5(router),
Fee: int64(fee),
Result: responstType,
})
if httpCode != 200 {
err = fmt.Errorf("[NpwpVerify] bluepay response httpCode is wrong [%d]", httpCode)
logs.Error(err)
return
}
logs.Warn("resp:%#v", resp)
return
}
| {
//curl 'http://120.76.101.146:21921/indonesia/express/gather/mo?price=30000&productId=1483&payType=atm&transactionId=14615984398y&ui=none&promotionId=1000&bankType=permata'
productId, _ := beego.AppConfig.Int64("bluepay_product_id")
virtualAccountsUrl := beego.AppConfig.String("bluepay_create_va_url")
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
bankType := strings.ToLower(bankCode)
mobile := datas["mobile"].(string)
headerStr := tools.SubString(mobile, 0, 2)
if headerStr != "62" {
mobile = fmt.Sprintf("%s%s", "62", mobile)
}
price := datas["amount"].(int64)
orderId := datas["order_id"].(int64)
externalId := datas["account_id"].(int64)
virtualAccountsUrl = fmt.Sprintf("%s?msisdn=%s&price=%d&productId=%d&payType=atm&transactionId=%d&ui=none&promotionId=1000&bankType=%s", virtualAccountsUrl, mobile, price, productId, orderId, bankType)
client := &http.Client{}
req, err := http.NewRequest("GET", virtualAccountsUrl, nil)
if err != nil {
logs.Error("[CreateVirtualAccount] http.NewRequest url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.SetBasicAuth(secretKey, "")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[CreateVirtualAccount] client.Do url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[CreateVirtualAccount] ioutil.ReadAll url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(virtualAccountsUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, virtualAccountsUrl, externalId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: externalId,
OrderId: orderId,
ApiMd5: tools.Md5(virtualAccountsUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
} | identifier_body |
bluepay.go | package bluepay
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"github.com/astaxie/beego"
"github.com/astaxie/beego/logs"
uuid "github.com/satori/go.uuid"
"micro-loan/common/dao"
"micro-loan/common/lib/device"
"micro-loan/common/lib/payment"
"micro-loan/common/models"
"micro-loan/common/pkg/event"
"micro-loan/common/pkg/event/evtypes"
"micro-loan/common/pkg/monitor"
"micro-loan/common/thirdparty"
"micro-loan/common/tools"
"micro-loan/common/types"
)
type BluepayCreateVAResponse struct {
Data BluepayCreateVADataDetailResponse `json:"data"`
Message string `json:"message"`
Status int `json:"status"`
VaFee int64 `json:"vaFee"`
IsStatic int `json:"isStatic"`
OtcFee int64 `json:"otcFee"`
PaymentCode string `json:"payment_code"`
}
type BluepayCreateVADataDetailResponse struct {
Msisdn string `json:"msisdn"`
Paymentcode string `json:"paymentCode"`
TransactionId string `json:transactionId`
}
type BluepayCreateDisburseResponse struct {
TransactionId string `json:"transactionId"`
TransferStatus string `json:"transferStatus"`
Code string `json:"code"`
}
type BluepayApi struct {
payment.PaymentApi
}
type NpwpResp struct {
Status int `json:"status"`
Message string `json:"message"`
Npwp string `json:"npwp"`
CustomerName string `json:"customerName"`
}
var bluePayBankNameCodeMap = map[string]string{
"Bank Rakyat Indonesia (BRI)": "BRI",
"Bank Mandiri": "MANDIRI",
"Bank Negara Indonesia (BNI)": "BNI",
"Bank Danamon": "DANAMON",
"Bank Permata": "PERMATA",
"Bank Central Asia (BCA)": "BCA",
"Bank Maybank": "BII",
"Bank Panin": "PANIN",
"Bank CIMB Niaga": "CIMB",
"Bank UOB Indonesia": "UOB",
"Bank Artha Graha International": "ARTA GRAHA",
"Bank BJB": "BANK BJB",
"Bank Jatim": "BANK JATIM",
"BPD Kalimantan Barat": "BPD NUSA TENGGARA BARAT",
"Bank Nusantara Parahyangan": "BANK NUSANTARA PARAHYANGAN",
"Bank Muamalat Indonesia": "BANK MUAMALAT INDONESIA",
"Sinarmas": "SINARMAS",
"Bank Tabungan Negara (BTN)": "BANK TABUNGAN NEGARA",
"Bank Mega": "MEGA",
"Bank Bukopin": "BUKOPIN",
"Bank Hana": "BANK HANA",
"Centratama Nasional Bank": "BANK CENTRATAMA NASIONAL",
"Bank Tabungan Pensiunan Nasional": "BANK TABUNGAN PENSIUNAN NASIONAL/BTPN",
}
func BluepayBankNameCodeMap() map[string]string {
return bluePayBankNameCodeMap
}
func BluepayBankName2Code(name string) (code string, err error) {
bankNameCodeMap := BluepayBankNameCodeMap()
if v, ok := bankNameCodeMap[name]; ok {
code = v
return
}
err = fmt.Errorf("bank code undefined")
return
}
func BankName2BluepaySupportCode(name string) (code string, err error) {
conf := map[string]bool{
"PERMATA": true,
"BNI": true,
}
code, err = BluepayBankName2Code(name)
if err != nil {
return
}
if !conf[code] {
code = "BNI"
}
return
}
func (c *BluepayApi) CreateVirtualAccount(datas map[string]interface{}) (res []byte, err error) {
//curl 'http://120.76.101.146:21921/indonesia/express/gather/mo?price=30000&productId=1483&payType=atm&transactionId=14615984398y&ui=none&promotionId=1000&bankType=permata'
productId, _ := beego.AppConfig.Int64("bluepay_product_id")
virtualAccountsUrl := beego.AppConfig.String("bluepay_create_va_url")
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
bankType := strings.ToLower(bankCode)
mobile := datas["mobile"].(string)
headerStr := tools.SubString(mobile, 0, 2)
if headerStr != "62" {
mobile = fmt.Sprintf("%s%s", "62", mobile)
}
price := datas["amount"].(int64)
orderId := datas["order_id"].(int64)
externalId := datas["account_id"].(int64)
virtualAccountsUrl = fmt.Sprintf("%s?msisdn=%s&price=%d&productId=%d&payType=atm&transactionId=%d&ui=none&promotionId=1000&bankType=%s", virtualAccountsUrl, mobile, price, productId, orderId, bankType)
client := &http.Client{}
req, err := http.NewRequest("GET", virtualAccountsUrl, nil)
if err != nil {
logs.Error("[CreateVirtualAccount] http.NewRequest url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.SetBasicAuth(secretKey, "")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[CreateVirtualAccount] client.Do url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[CreateVirtualAccount] ioutil.ReadAll url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(virtualAccountsUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, virtualAccountsUrl, externalId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: externalId,
OrderId: orderId,
ApiMd5: tools.Md5(virtualAccountsUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
}
func (c *BluepayApi) Disburse(datas map[string]interface{}) (res []byte, err error) {
orderId := datas["order_id"].(int64)
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
accountHolderName := datas["account_name"].(string)
accountNumber := datas["account_num"].(string)
amount := datas["amount"].(int64)
paramStr := fmt.Sprintf("transactionId=%d&promotionId=1000&payeeCountry=%s&payeeBankName=%s&payeeName=%s&payeeAccount=%s&payeeMsisdn=%d&payeeType=%s&amount=%d¤cy=%s",
orderId, types.PayeeCountryIDId, bankCode, accountHolderName, accountNumber, types.PayeeMsisdnID, types.PayeeTypePersonal, amount, types.PayeeTypeIDCurrency)
hash := OpenSSLEncrypt(paramStr)
keyStr := beego.AppConfig.String("bluepay_secret_key")
productId := beego.AppConfig.String("bluepay_product_id")
logs.Debug("[Disburse] hash:%s, keyStr:%s", hash, keyStr)
disburseUrl := beego.AppConfig.String("bluepay_disburse_url")
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
disburseUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", disburseUrl, productId, hash, md5val)
client := &http.Client{}
req, err := http.NewRequest("GET", disburseUrl, nil)
if err != nil {
logs.Error("[Disburse] http.NewRequest url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[Disburse] client.Do url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[Disburse] ioutil.ReadAll url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(disburseUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, disburseUrl, orderId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: 0,
OrderId: orderId,
ApiMd5: tools.Md5(disburseUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
}
func (c *BluepayApi) CheckVirtualAccount(datas map[string]interface{}) (res []byte, err error) {
return []byte{}, err
}
func (c *BluepayApi) CreateVirtualAccountResponse(jsonData []byte, datas map[string]interface{}) error {
var resp BluepayCreateVAResponse = BluepayCreateVAResponse{}
err := json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[CreateVirtualAccountResponse] json.Unmarshal err:%v, json:%s", jsonData, err)
return err
}
//if resJson.Status != 200 {
if resp.Status != 201 {
errStr := fmt.Sprintf("[CreateVirtualAccountResponse] response status is wrong retJson:%s", string(jsonData))
err := fmt.Errorf(errStr)
return err
}
userAccountId := datas["account_id"].(int64)
_, err = models.GetEAccount(userAccountId, types.Bluepay)
if err != nil {
//不存在则创建
eAccount := models.User_E_Account{}
eAccount.Id, _ = device.GenerateBizId(types.UserEAccountBiz)
eAccount.UserAccountId = userAccountId
eAccount.EAccountNumber = resp.PaymentCode
eAccount.VaCompanyCode = types.Bluepay
eAccount.Status = "pending"
eAccount.Ctime = tools.GetUnixMillis()
eAccount.Utime = tools.GetUnixMillis()
_, err = eAccount.AddEAccount(&eAccount)
}
return err
}
func (c *BluepayApi) DisburseResponse(jsonData []byte, datas map[string]interface{}) (err error) {
var resp BluepayCreateDisburseResponse = BluepayCreateDisburseResponse{}
err = json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[DisburseResponse] json.Unmarshal err:%s, json:%s", err, string(jsonData))
return err
}
orderId := datas["order_id"].(int64)
bankCode := datas["bank_code"].(string)
accountHolderName := datas["account_name"].(string)
transactionId, _ := tools.Str2Int64(resp.TransactionId)
if transactionId != orderId {
//response数据如果和请求的不一致,直接报警
errStr := fmt.Sprintf("[DisburseResponse] response error orderId:%d, restJson:%s", orderId, string(jsonData))
logs.Error(errStr)
err = fmt.Errorf(errStr)
return err
}
order, err := models.GetOrder(orderId)
if err != nil {
return err
}
o := models.Mobi_E_Trans{}
orderIdStr := tools.Int642Str(orderId)
o.UserAcccountId = order.UserAccountId
o.VaCompanyCode = types.Bluepay
o.Amount = order.Loan
//向上取整,百位取整
o.PayType = types.PayTypeMoneyOut
o.BankCode = bankCode
o.AccountHolderName = accountHolderName
o.DisbursementDescription = orderIdStr
o.DisbursementId = orderIdStr
o.Status = resp.TransferStatus
o.Utime = tools.GetUnixMillis()
o.Ctime = tools.GetUnixMillis()
_, err = o.AddMobiEtrans(&o)
return err
}
func OpenSSLEncrypt(x string) string {
keyStr := beego.AppConfig.String("bluepay_secret_key")
ivStr := beego.AppConfig.String("bluepay_secret_iv")
logs.Debug("ivStr is: ", ivStr)
key := []byte(keyStr)
iv := []byte(ivStr)
var plaintextblock []byte
// Turn struct into byte slice
plaintext := x
// Make sure the block size is a multiple of 16
length := len(plaintext)
extendBlock := 16 - (length % 16)
plaintextblock = make([]byte, length+extendBlock)
copy(plaintextblock[length:], bytes.Repeat([]byte{uint8(extendBlock)}, extendBlock))
copy(plaintextblock, plaintext)
cb, err := aes.NewCipher(key)
if err != nil {
log.Println("error NewCipher(): ", err)
}
ciphertext := make([]byte, len(plaintextblock))
mode := cipher.NewCBCEncrypter(cb, iv)
mode.CryptBlocks(ciphertext, plaintextblock)
text := hex.EncodeToString(ciphertext)
//二进制转换十六进制
str := tools.UrlEncode(base64.StdEncoding.EncodeToString([]byte(text)))
//urlencode
return str
}
func NameValidator(accountId int64) (bankNo string, err error) {
productId := beego.AppConfig.String("bluepay_product_id")
checkUrl := beego.AppConfig.String("bluepay_name_validator")
accountBase, err := models.OneAccountBaseByPkId(accountId)
if err != nil {
logs.Error("can not get account_base by accountId:", accountId)
return
}
profile, err := dao.CustomerProfile(accountBase.Id)
if err != nil {
logs.Error("can not get account_profile by account_id:", accountBase.Id)
return
}
name := accountBase.Realname
bankNumber := profile.BankNo
bankCode, err := BluepayBankName2Code(profile.BankName)
if err == nil {
//目前bluepay只支持放款银行列表的二要素检查
//所以只有支持的银行,再调用此列表,不然没有意义
uuidStr := uuid.Must(uuid.NewV4()).String()
paramStr := fmt.Sprintf("phoneNum=%s&customerName=%s&accountNo=%s&bankName=%s&transactionId=%s", tools.UrlEncode(accountBase.Mobile), tools.UrlEncode(name), tools.UrlEncode(bankNumber), tools.UrlEncode(bankCode), tools.UrlEncode(uuidStr))
logs.Debug(paramStr)
| keyStr := beego.AppConfig.String("bluepay_secret_key")
logs.Debug("[NameValidator] hash:%s, keyStr:%s", hash, keyStr)
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
checkUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", checkUrl, productId, hash, md5val)
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err1 := tools.SimpleHttpClient("GET", checkUrl, reqHeaders, "", tools.DefaultHttpTimeout())
//此处报错。。。err is shadowed during return
//只能新申请一个err1变量了
if err1 != nil {
logs.Error(err1)
err = err1
return
}
var nameValidatorResp struct {
Message string `json:"message"`
Status int `json:"status"`
}
err1 = json.Unmarshal(httpBody, &nameValidatorResp)
if err1 != nil {
err1 = fmt.Errorf("bluepay name validator response json unmarshal failed, err is %s", err.Error())
logs.Error(err1)
err = err1
return
}
logs.Debug(string(httpBody))
if httpCode != 200 {
err1 = fmt.Errorf("bluepay name validator httpCode is wrong [%d]", httpCode)
logs.Error(err1)
err = err1
return
}
if nameValidatorResp.Status != 200 {
//如果没匹配上,就返回银行账号,让客户去展示给用户,让用户可以修改
bankNo = bankNumber
}
}
return
}
func NpwpVerify(accountId int64, npwp string) (resp NpwpResp, err error) {
productId := "1493"
//checkUrl := "http://idtool.bluepay.asia//charge/express/npwpQuery"
router := "http://120.76.101.146:21811/charge/express/npwpQuery"
keyStr := beego.AppConfig.String("bluepay_secret_key")
encrypt := tools.Md5(fmt.Sprintf("productId=%s&npwp=%s%s", productId, npwp, keyStr))
reqParm := fmt.Sprintf("productId=%s&npwp=%s&encrypt=%s", productId, npwp, encrypt)
checkUrl := router + "/" + reqParm
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err := tools.SimpleHttpClient("GET", router, reqHeaders, "", tools.DefaultHttpTimeout())
if err != nil {
logs.Error(err)
return
}
logs.Debug("httpBody:%v , httpCode:%d ", string(httpBody), httpCode)
err = json.Unmarshal(httpBody, &resp)
if err != nil {
err = fmt.Errorf("[NpwpVerify] bluepay response json unmarshal failed, err is %s httpBody:%s", err.Error(), string(httpBody))
logs.Error(err)
return
}
responstType, fee := thirdparty.CalcFeeByApi(checkUrl, reqParm, httpBody)
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, checkUrl, accountId, reqParm, httpBody, responstType, fee, 200)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: accountId,
OrderId: 0,
ApiMd5: tools.Md5(router),
Fee: int64(fee),
Result: responstType,
})
if httpCode != 200 {
err = fmt.Errorf("[NpwpVerify] bluepay response httpCode is wrong [%d]", httpCode)
logs.Error(err)
return
}
logs.Warn("resp:%#v", resp)
return
} | hash := OpenSSLEncrypt(paramStr) | random_line_split |
bluepay.go | package bluepay
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"github.com/astaxie/beego"
"github.com/astaxie/beego/logs"
uuid "github.com/satori/go.uuid"
"micro-loan/common/dao"
"micro-loan/common/lib/device"
"micro-loan/common/lib/payment"
"micro-loan/common/models"
"micro-loan/common/pkg/event"
"micro-loan/common/pkg/event/evtypes"
"micro-loan/common/pkg/monitor"
"micro-loan/common/thirdparty"
"micro-loan/common/tools"
"micro-loan/common/types"
)
type BluepayCreateVAResponse struct {
Data BluepayCreateVADataDetailResponse `json:"data"`
Message string `json:"message"`
Status int `json:"status"`
VaFee int64 `json:"vaFee"`
IsStatic int `json:"isStatic"`
OtcFee int64 `json:"otcFee"`
PaymentCode string `json:"payment_code"`
}
type BluepayCreateVADataDetailResponse struct {
Msisdn string `json:"msisdn"`
Paymentcode string `json:"paymentCode"`
TransactionId string `json:transactionId`
}
type BluepayCreateDisburseResponse struct {
TransactionId string `json:"transactionId"`
TransferStatus string `json:"transferStatus"`
Code string `json:"code"`
}
type BluepayApi struct {
payment.PaymentApi
}
type NpwpResp struct {
Status int `json:"status"`
Message string `json:"message"`
Npwp string `json:"npwp"`
CustomerName string `json:"customerName"`
}
var bluePayBankNameCodeMap = map[string]string{
"Bank Rakyat Indonesia (BRI)": "BRI",
"Bank Mandiri": "MANDIRI",
"Bank Negara Indonesia (BNI)": "BNI",
"Bank Danamon": "DANAMON",
"Bank Permata": "PERMATA",
"Bank Central Asia (BCA)": "BCA",
"Bank Maybank": "BII",
"Bank Panin": "PANIN",
"Bank CIMB Niaga": "CIMB",
"Bank UOB Indonesia": "UOB",
"Bank Artha Graha International": "ARTA GRAHA",
"Bank BJB": "BANK BJB",
"Bank Jatim": "BANK JATIM",
"BPD Kalimantan Barat": "BPD NUSA TENGGARA BARAT",
"Bank Nusantara Parahyangan": "BANK NUSANTARA PARAHYANGAN",
"Bank Muamalat Indonesia": "BANK MUAMALAT INDONESIA",
"Sinarmas": "SINARMAS",
"Bank Tabungan Negara (BTN)": "BANK TABUNGAN NEGARA",
"Bank Mega": "MEGA",
"Bank Bukopin": "BUKOPIN",
"Bank Hana": "BANK HANA",
"Centratama Nasional Bank": "BANK CENTRATAMA NASIONAL",
"Bank Tabungan Pensiunan Nasional": "BANK TABUNGAN PENSIUNAN NASIONAL/BTPN",
}
func | () map[string]string {
return bluePayBankNameCodeMap
}
func BluepayBankName2Code(name string) (code string, err error) {
bankNameCodeMap := BluepayBankNameCodeMap()
if v, ok := bankNameCodeMap[name]; ok {
code = v
return
}
err = fmt.Errorf("bank code undefined")
return
}
func BankName2BluepaySupportCode(name string) (code string, err error) {
conf := map[string]bool{
"PERMATA": true,
"BNI": true,
}
code, err = BluepayBankName2Code(name)
if err != nil {
return
}
if !conf[code] {
code = "BNI"
}
return
}
func (c *BluepayApi) CreateVirtualAccount(datas map[string]interface{}) (res []byte, err error) {
//curl 'http://120.76.101.146:21921/indonesia/express/gather/mo?price=30000&productId=1483&payType=atm&transactionId=14615984398y&ui=none&promotionId=1000&bankType=permata'
productId, _ := beego.AppConfig.Int64("bluepay_product_id")
virtualAccountsUrl := beego.AppConfig.String("bluepay_create_va_url")
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
bankType := strings.ToLower(bankCode)
mobile := datas["mobile"].(string)
headerStr := tools.SubString(mobile, 0, 2)
if headerStr != "62" {
mobile = fmt.Sprintf("%s%s", "62", mobile)
}
price := datas["amount"].(int64)
orderId := datas["order_id"].(int64)
externalId := datas["account_id"].(int64)
virtualAccountsUrl = fmt.Sprintf("%s?msisdn=%s&price=%d&productId=%d&payType=atm&transactionId=%d&ui=none&promotionId=1000&bankType=%s", virtualAccountsUrl, mobile, price, productId, orderId, bankType)
client := &http.Client{}
req, err := http.NewRequest("GET", virtualAccountsUrl, nil)
if err != nil {
logs.Error("[CreateVirtualAccount] http.NewRequest url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.SetBasicAuth(secretKey, "")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[CreateVirtualAccount] client.Do url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[CreateVirtualAccount] ioutil.ReadAll url:%s, err:%s", virtualAccountsUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(virtualAccountsUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, virtualAccountsUrl, externalId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: externalId,
OrderId: orderId,
ApiMd5: tools.Md5(virtualAccountsUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
}
func (c *BluepayApi) Disburse(datas map[string]interface{}) (res []byte, err error) {
orderId := datas["order_id"].(int64)
bankName := datas["bank_name"].(string)
bankCode, err := BankName2BluepaySupportCode(bankName)
if err != nil {
return []byte{}, err
}
accountHolderName := datas["account_name"].(string)
accountNumber := datas["account_num"].(string)
amount := datas["amount"].(int64)
paramStr := fmt.Sprintf("transactionId=%d&promotionId=1000&payeeCountry=%s&payeeBankName=%s&payeeName=%s&payeeAccount=%s&payeeMsisdn=%d&payeeType=%s&amount=%d¤cy=%s",
orderId, types.PayeeCountryIDId, bankCode, accountHolderName, accountNumber, types.PayeeMsisdnID, types.PayeeTypePersonal, amount, types.PayeeTypeIDCurrency)
hash := OpenSSLEncrypt(paramStr)
keyStr := beego.AppConfig.String("bluepay_secret_key")
productId := beego.AppConfig.String("bluepay_product_id")
logs.Debug("[Disburse] hash:%s, keyStr:%s", hash, keyStr)
disburseUrl := beego.AppConfig.String("bluepay_disburse_url")
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
disburseUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", disburseUrl, productId, hash, md5val)
client := &http.Client{}
req, err := http.NewRequest("GET", disburseUrl, nil)
if err != nil {
logs.Error("[Disburse] http.NewRequest url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
monitor.IncrThirdpartyCount(models.ThirdpartyBluepay, resp.StatusCode)
if err != nil {
logs.Error("[Disburse] client.Do url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logs.Error("[Disburse] ioutil.ReadAll url:%s, err:%s", disburseUrl, err.Error())
return []byte{}, err
}
responstType, fee := thirdparty.CalcFeeByApi(disburseUrl, "", string(body))
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, disburseUrl, orderId, "", string(body), responstType, fee, resp.StatusCode)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: 0,
OrderId: orderId,
ApiMd5: tools.Md5(disburseUrl),
Fee: int64(fee),
Result: responstType,
})
return body, err
}
func (c *BluepayApi) CheckVirtualAccount(datas map[string]interface{}) (res []byte, err error) {
return []byte{}, err
}
func (c *BluepayApi) CreateVirtualAccountResponse(jsonData []byte, datas map[string]interface{}) error {
var resp BluepayCreateVAResponse = BluepayCreateVAResponse{}
err := json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[CreateVirtualAccountResponse] json.Unmarshal err:%v, json:%s", jsonData, err)
return err
}
//if resJson.Status != 200 {
if resp.Status != 201 {
errStr := fmt.Sprintf("[CreateVirtualAccountResponse] response status is wrong retJson:%s", string(jsonData))
err := fmt.Errorf(errStr)
return err
}
userAccountId := datas["account_id"].(int64)
_, err = models.GetEAccount(userAccountId, types.Bluepay)
if err != nil {
//不存在则创建
eAccount := models.User_E_Account{}
eAccount.Id, _ = device.GenerateBizId(types.UserEAccountBiz)
eAccount.UserAccountId = userAccountId
eAccount.EAccountNumber = resp.PaymentCode
eAccount.VaCompanyCode = types.Bluepay
eAccount.Status = "pending"
eAccount.Ctime = tools.GetUnixMillis()
eAccount.Utime = tools.GetUnixMillis()
_, err = eAccount.AddEAccount(&eAccount)
}
return err
}
func (c *BluepayApi) DisburseResponse(jsonData []byte, datas map[string]interface{}) (err error) {
var resp BluepayCreateDisburseResponse = BluepayCreateDisburseResponse{}
err = json.Unmarshal(jsonData, &resp)
if err != nil {
logs.Error("[DisburseResponse] json.Unmarshal err:%s, json:%s", err, string(jsonData))
return err
}
orderId := datas["order_id"].(int64)
bankCode := datas["bank_code"].(string)
accountHolderName := datas["account_name"].(string)
transactionId, _ := tools.Str2Int64(resp.TransactionId)
if transactionId != orderId {
//response数据如果和请求的不一致,直接报警
errStr := fmt.Sprintf("[DisburseResponse] response error orderId:%d, restJson:%s", orderId, string(jsonData))
logs.Error(errStr)
err = fmt.Errorf(errStr)
return err
}
order, err := models.GetOrder(orderId)
if err != nil {
return err
}
o := models.Mobi_E_Trans{}
orderIdStr := tools.Int642Str(orderId)
o.UserAcccountId = order.UserAccountId
o.VaCompanyCode = types.Bluepay
o.Amount = order.Loan
//向上取整,百位取整
o.PayType = types.PayTypeMoneyOut
o.BankCode = bankCode
o.AccountHolderName = accountHolderName
o.DisbursementDescription = orderIdStr
o.DisbursementId = orderIdStr
o.Status = resp.TransferStatus
o.Utime = tools.GetUnixMillis()
o.Ctime = tools.GetUnixMillis()
_, err = o.AddMobiEtrans(&o)
return err
}
func OpenSSLEncrypt(x string) string {
keyStr := beego.AppConfig.String("bluepay_secret_key")
ivStr := beego.AppConfig.String("bluepay_secret_iv")
logs.Debug("ivStr is: ", ivStr)
key := []byte(keyStr)
iv := []byte(ivStr)
var plaintextblock []byte
// Turn struct into byte slice
plaintext := x
// Make sure the block size is a multiple of 16
length := len(plaintext)
extendBlock := 16 - (length % 16)
plaintextblock = make([]byte, length+extendBlock)
copy(plaintextblock[length:], bytes.Repeat([]byte{uint8(extendBlock)}, extendBlock))
copy(plaintextblock, plaintext)
cb, err := aes.NewCipher(key)
if err != nil {
log.Println("error NewCipher(): ", err)
}
ciphertext := make([]byte, len(plaintextblock))
mode := cipher.NewCBCEncrypter(cb, iv)
mode.CryptBlocks(ciphertext, plaintextblock)
text := hex.EncodeToString(ciphertext)
//二进制转换十六进制
str := tools.UrlEncode(base64.StdEncoding.EncodeToString([]byte(text)))
//urlencode
return str
}
func NameValidator(accountId int64) (bankNo string, err error) {
productId := beego.AppConfig.String("bluepay_product_id")
checkUrl := beego.AppConfig.String("bluepay_name_validator")
accountBase, err := models.OneAccountBaseByPkId(accountId)
if err != nil {
logs.Error("can not get account_base by accountId:", accountId)
return
}
profile, err := dao.CustomerProfile(accountBase.Id)
if err != nil {
logs.Error("can not get account_profile by account_id:", accountBase.Id)
return
}
name := accountBase.Realname
bankNumber := profile.BankNo
bankCode, err := BluepayBankName2Code(profile.BankName)
if err == nil {
//目前bluepay只支持放款银行列表的二要素检查
//所以只有支持的银行,再调用此列表,不然没有意义
uuidStr := uuid.Must(uuid.NewV4()).String()
paramStr := fmt.Sprintf("phoneNum=%s&customerName=%s&accountNo=%s&bankName=%s&transactionId=%s", tools.UrlEncode(accountBase.Mobile), tools.UrlEncode(name), tools.UrlEncode(bankNumber), tools.UrlEncode(bankCode), tools.UrlEncode(uuidStr))
logs.Debug(paramStr)
hash := OpenSSLEncrypt(paramStr)
keyStr := beego.AppConfig.String("bluepay_secret_key")
logs.Debug("[NameValidator] hash:%s, keyStr:%s", hash, keyStr)
md5val := tools.Md5(fmt.Sprintf("productId=%s&data=%s%s", productId, hash, keyStr))
checkUrl = fmt.Sprintf("%s?productId=%s&data=%s&encrypt=%s", checkUrl, productId, hash, md5val)
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err1 := tools.SimpleHttpClient("GET", checkUrl, reqHeaders, "", tools.DefaultHttpTimeout())
//此处报错。。。err is shadowed during return
//只能新申请一个err1变量了
if err1 != nil {
logs.Error(err1)
err = err1
return
}
var nameValidatorResp struct {
Message string `json:"message"`
Status int `json:"status"`
}
err1 = json.Unmarshal(httpBody, &nameValidatorResp)
if err1 != nil {
err1 = fmt.Errorf("bluepay name validator response json unmarshal failed, err is %s", err.Error())
logs.Error(err1)
err = err1
return
}
logs.Debug(string(httpBody))
if httpCode != 200 {
err1 = fmt.Errorf("bluepay name validator httpCode is wrong [%d]", httpCode)
logs.Error(err1)
err = err1
return
}
if nameValidatorResp.Status != 200 {
//如果没匹配上,就返回银行账号,让客户去展示给用户,让用户可以修改
bankNo = bankNumber
}
}
return
}
func NpwpVerify(accountId int64, npwp string) (resp NpwpResp, err error) {
productId := "1493"
//checkUrl := "http://idtool.bluepay.asia//charge/express/npwpQuery"
router := "http://120.76.101.146:21811/charge/express/npwpQuery"
keyStr := beego.AppConfig.String("bluepay_secret_key")
encrypt := tools.Md5(fmt.Sprintf("productId=%s&npwp=%s%s", productId, npwp, keyStr))
reqParm := fmt.Sprintf("productId=%s&npwp=%s&encrypt=%s", productId, npwp, encrypt)
checkUrl := router + "/" + reqParm
logs.Debug(checkUrl)
reqHeaders := map[string]string{}
httpBody, httpCode, err := tools.SimpleHttpClient("GET", router, reqHeaders, "", tools.DefaultHttpTimeout())
if err != nil {
logs.Error(err)
return
}
logs.Debug("httpBody:%v , httpCode:%d ", string(httpBody), httpCode)
err = json.Unmarshal(httpBody, &resp)
if err != nil {
err = fmt.Errorf("[NpwpVerify] bluepay response json unmarshal failed, err is %s httpBody:%s", err.Error(), string(httpBody))
logs.Error(err)
return
}
responstType, fee := thirdparty.CalcFeeByApi(checkUrl, reqParm, httpBody)
models.AddOneThirdpartyRecord(models.ThirdpartyBluepay, checkUrl, accountId, reqParm, httpBody, responstType, fee, 200)
event.Trigger(&evtypes.CustomerStatisticEv{
UserAccountId: accountId,
OrderId: 0,
ApiMd5: tools.Md5(router),
Fee: int64(fee),
Result: responstType,
})
if httpCode != 200 {
err = fmt.Errorf("[NpwpVerify] bluepay response httpCode is wrong [%d]", httpCode)
logs.Error(err)
return
}
logs.Warn("resp:%#v", resp)
return
}
| BluepayBankNameCodeMap | identifier_name |
Analytics.js | "use strict";
/*
* Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
var core_1 = require("@aws-amplify/core");
var AWSPinpointProvider_1 = require("./Providers/AWSPinpointProvider");
var trackers_1 = require("./trackers");
var logger = new core_1.ConsoleLogger('AnalyticsClass');
var AMPLIFY_SYMBOL = (typeof Symbol !== 'undefined' &&
typeof Symbol.for === 'function'
? Symbol.for('amplify_default')
: '@@amplify_default');
var dispatchAnalyticsEvent = function (event, data, message) {
core_1.Hub.dispatch('analytics', { event: event, data: data, message: message }, 'Analytics', AMPLIFY_SYMBOL);
};
var trackers = {
pageView: trackers_1.PageViewTracker,
event: trackers_1.EventTracker,
session: trackers_1.SessionTracker,
};
var _instance = null;
/**
* Provide mobile analytics client functions
*/
var AnalyticsClass = /** @class */ (function () {
/**
* Initialize Analtyics
* @param config - Configuration of the Analytics
*/
function AnalyticsClass() {
this._config = {};
this._pluggables = [];
this._disabled = false;
this._trackers = {};
_instance = this;
this.record = this.record.bind(this);
core_1.Hub.listen('auth', listener);
core_1.Hub.listen('storage', listener);
core_1.Hub.listen('analytics', listener);
}
AnalyticsClass.prototype.getModuleName = function () {
return 'Analytics';
};
/**
* configure Analytics
* @param {Object} config - Configuration of the Analytics
*/
AnalyticsClass.prototype.configure = function (config) {
var _this = this;
if (!config)
return this._config;
logger.debug('configure Analytics', config);
var amplifyConfig = core_1.Parser.parseMobilehubConfig(config);
this._config = Object.assign({}, this._config, amplifyConfig.Analytics, config);
if (this._config['disabled']) {
this._disabled = true;
}
// turn on the autoSessionRecord if not specified
if (this._config['autoSessionRecord'] === undefined) {
this._config['autoSessionRecord'] = true;
}
this._pluggables.forEach(function (pluggable) {
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!_this._config['AWSPinpoint']
? _this._config
: _this._config[pluggable.getProviderName()];
pluggable.configure(__assign({ disabled: _this._config['disabled'], autoSessionRecord: _this._config['autoSessionRecord'] }, providerConfig));
});
if (this._pluggables.length === 0) {
this.addPluggable(new AWSPinpointProvider_1.AWSPinpointProvider());
}
dispatchAnalyticsEvent('configured', null, "The Analytics category has been configured successfully");
logger.debug('current configuration', this._config);
return this._config;
};
/**
* add plugin into Analytics category
* @param {Object} pluggable - an instance of the plugin
*/
AnalyticsClass.prototype.addPluggable = function (pluggable) {
if (pluggable && pluggable.getCategory() === 'Analytics') {
this._pluggables.push(pluggable);
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!this._config['AWSPinpoint']
? this._config
: this._config[pluggable.getProviderName()];
var config = __assign({ disabled: this._config['disabled'] }, providerConfig);
pluggable.configure(config);
return config;
}
};
/**
* Get the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.getPluggable = function (providerName) {
for (var i = 0; i < this._pluggables.length; i += 1) {
var pluggable = this._pluggables[i];
if (pluggable.getProviderName() === providerName) {
return pluggable;
}
}
logger.debug('No plugin found with providerName', providerName);
return null;
};
/**
* Remove the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.removePluggable = function (providerName) {
var idx = 0;
while (idx < this._pluggables.length) {
if (this._pluggables[idx].getProviderName() === providerName) {
break;
}
idx += 1;
}
if (idx === this._pluggables.length) |
else {
this._pluggables.splice(idx, idx + 1);
return;
}
};
/**
* stop sending events
*/
AnalyticsClass.prototype.disable = function () {
this._disabled = true;
};
/**
* start sending events
*/
AnalyticsClass.prototype.enable = function () {
this._disabled = false;
};
/**
* Record Session start
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.startSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.start' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record Session stop
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.stopSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.stop' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record one analytic event and send it to Pinpoint
* @param {String} name - The name of the event
* @param {Object} [attributes] - Attributes of the event
* @param {Object} [metrics] - Event metrics
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.record = function (event, provider, metrics) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = null;
// this is just for compatibility, going to be deprecated
if (typeof event === 'string') {
params = {
event: {
name: event,
attributes: provider,
metrics: metrics,
},
provider: 'AWSPinpoint',
};
}
else {
params = { event: event, provider: provider };
}
return [2 /*return*/, this._sendEvent(params)];
});
});
};
AnalyticsClass.prototype.updateEndpoint = function (attrs, provider) {
return __awaiter(this, void 0, void 0, function () {
var event;
return __generator(this, function (_a) {
event = __assign(__assign({}, attrs), { name: '_update_endpoint' });
return [2 /*return*/, this.record(event, provider)];
});
});
};
AnalyticsClass.prototype._sendEvent = function (params) {
var _this = this;
if (this._disabled) {
logger.debug('Analytics has been disabled');
return Promise.resolve();
}
var provider = params.provider ? params.provider : 'AWSPinpoint';
return new Promise(function (resolve, reject) {
_this._pluggables.forEach(function (pluggable) {
if (pluggable.getProviderName() === provider) {
pluggable.record(params, { resolve: resolve, reject: reject });
}
});
});
};
AnalyticsClass.prototype.autoTrack = function (trackerType, opts) {
if (!trackers[trackerType]) {
logger.debug('invalid tracker type');
return;
}
// to sync up two different configuration ways of auto session tracking
if (trackerType === 'session') {
this._config['autoSessionRecord'] = opts['enable'];
}
var tracker = this._trackers[trackerType];
if (!tracker) {
this._trackers[trackerType] = new trackers[trackerType](this.record, opts);
}
else {
tracker.configure(opts);
}
};
return AnalyticsClass;
}());
exports.AnalyticsClass = AnalyticsClass;
var endpointUpdated = false;
var authConfigured = false;
var analyticsConfigured = false;
var listener = function (capsule) {
var channel = capsule.channel, payload = capsule.payload;
logger.debug('on hub capsule ' + channel, payload);
switch (channel) {
case 'auth':
authEvent(payload);
break;
case 'storage':
storageEvent(payload);
break;
case 'analytics':
analyticsEvent(payload);
break;
default:
break;
}
};
var storageEvent = function (payload) {
var _a = payload.data, attrs = _a.attrs, metrics = _a.metrics;
if (!attrs)
return;
if (analyticsConfigured) {
_instance
.record({
name: 'Storage',
attributes: attrs,
metrics: metrics,
})
.catch(function (e) {
logger.debug('Failed to send the storage event automatically', e);
});
}
};
var authEvent = function (payload) {
var event = payload.event;
if (!event) {
return;
}
var recordAuthEvent = function (eventName) { return __awaiter(void 0, void 0, void 0, function () {
var err_1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(authConfigured && analyticsConfigured)) return [3 /*break*/, 4];
_a.label = 1;
case 1:
_a.trys.push([1, 3, , 4]);
return [4 /*yield*/, _instance.record({ name: "_userauth." + eventName })];
case 2: return [2 /*return*/, _a.sent()];
case 3:
err_1 = _a.sent();
logger.debug("Failed to send the " + eventName + " event automatically", err_1);
return [3 /*break*/, 4];
case 4: return [2 /*return*/];
}
});
}); };
switch (event) {
case 'signIn':
return recordAuthEvent('sign_in');
case 'signUp':
return recordAuthEvent('sign_up');
case 'signOut':
return recordAuthEvent('sign_out');
case 'signIn_failure':
return recordAuthEvent('auth_fail');
case 'configured':
authConfigured = true;
if (authConfigured && analyticsConfigured) {
sendEvents();
}
break;
}
};
var analyticsEvent = function (payload) {
var event = payload.event;
if (!event)
return;
switch (event) {
case 'pinpointProvider_configured':
analyticsConfigured = true;
if (authConfigured && analyticsConfigured) {
sendEvents();
}
break;
}
};
var sendEvents = function () {
var config = _instance.configure();
if (!endpointUpdated && config['autoSessionRecord']) {
_instance.updateEndpoint({ immediate: true }).catch(function (e) {
logger.debug('Failed to update the endpoint', e);
});
endpointUpdated = true;
}
_instance.autoTrack('session', {
enable: config['autoSessionRecord'],
});
};
exports.Analytics = new AnalyticsClass();
core_1.Amplify.register(exports.Analytics);
//# sourceMappingURL=Analytics.js.map | {
logger.debug('No plugin found with providerName', providerName);
return;
} | conditional_block |
Analytics.js | "use strict";
/*
* Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) |
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
var core_1 = require("@aws-amplify/core");
var AWSPinpointProvider_1 = require("./Providers/AWSPinpointProvider");
var trackers_1 = require("./trackers");
var logger = new core_1.ConsoleLogger('AnalyticsClass');
var AMPLIFY_SYMBOL = (typeof Symbol !== 'undefined' &&
typeof Symbol.for === 'function'
? Symbol.for('amplify_default')
: '@@amplify_default');
var dispatchAnalyticsEvent = function (event, data, message) {
core_1.Hub.dispatch('analytics', { event: event, data: data, message: message }, 'Analytics', AMPLIFY_SYMBOL);
};
var trackers = {
pageView: trackers_1.PageViewTracker,
event: trackers_1.EventTracker,
session: trackers_1.SessionTracker,
};
var _instance = null;
/**
* Provide mobile analytics client functions
*/
var AnalyticsClass = /** @class */ (function () {
/**
* Initialize Analtyics
* @param config - Configuration of the Analytics
*/
function AnalyticsClass() {
this._config = {};
this._pluggables = [];
this._disabled = false;
this._trackers = {};
_instance = this;
this.record = this.record.bind(this);
core_1.Hub.listen('auth', listener);
core_1.Hub.listen('storage', listener);
core_1.Hub.listen('analytics', listener);
}
AnalyticsClass.prototype.getModuleName = function () {
return 'Analytics';
};
/**
* configure Analytics
* @param {Object} config - Configuration of the Analytics
*/
AnalyticsClass.prototype.configure = function (config) {
var _this = this;
if (!config)
return this._config;
logger.debug('configure Analytics', config);
var amplifyConfig = core_1.Parser.parseMobilehubConfig(config);
this._config = Object.assign({}, this._config, amplifyConfig.Analytics, config);
if (this._config['disabled']) {
this._disabled = true;
}
// turn on the autoSessionRecord if not specified
if (this._config['autoSessionRecord'] === undefined) {
this._config['autoSessionRecord'] = true;
}
this._pluggables.forEach(function (pluggable) {
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!_this._config['AWSPinpoint']
? _this._config
: _this._config[pluggable.getProviderName()];
pluggable.configure(__assign({ disabled: _this._config['disabled'], autoSessionRecord: _this._config['autoSessionRecord'] }, providerConfig));
});
if (this._pluggables.length === 0) {
this.addPluggable(new AWSPinpointProvider_1.AWSPinpointProvider());
}
dispatchAnalyticsEvent('configured', null, "The Analytics category has been configured successfully");
logger.debug('current configuration', this._config);
return this._config;
};
/**
* add plugin into Analytics category
* @param {Object} pluggable - an instance of the plugin
*/
AnalyticsClass.prototype.addPluggable = function (pluggable) {
if (pluggable && pluggable.getCategory() === 'Analytics') {
this._pluggables.push(pluggable);
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!this._config['AWSPinpoint']
? this._config
: this._config[pluggable.getProviderName()];
var config = __assign({ disabled: this._config['disabled'] }, providerConfig);
pluggable.configure(config);
return config;
}
};
/**
* Get the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.getPluggable = function (providerName) {
for (var i = 0; i < this._pluggables.length; i += 1) {
var pluggable = this._pluggables[i];
if (pluggable.getProviderName() === providerName) {
return pluggable;
}
}
logger.debug('No plugin found with providerName', providerName);
return null;
};
/**
* Remove the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.removePluggable = function (providerName) {
var idx = 0;
while (idx < this._pluggables.length) {
if (this._pluggables[idx].getProviderName() === providerName) {
break;
}
idx += 1;
}
if (idx === this._pluggables.length) {
logger.debug('No plugin found with providerName', providerName);
return;
}
else {
this._pluggables.splice(idx, idx + 1);
return;
}
};
/**
* stop sending events
*/
AnalyticsClass.prototype.disable = function () {
this._disabled = true;
};
/**
* start sending events
*/
AnalyticsClass.prototype.enable = function () {
this._disabled = false;
};
/**
* Record Session start
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.startSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.start' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record Session stop
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.stopSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.stop' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record one analytic event and send it to Pinpoint
* @param {String} name - The name of the event
* @param {Object} [attributes] - Attributes of the event
* @param {Object} [metrics] - Event metrics
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.record = function (event, provider, metrics) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = null;
// this is just for compatibility, going to be deprecated
if (typeof event === 'string') {
params = {
event: {
name: event,
attributes: provider,
metrics: metrics,
},
provider: 'AWSPinpoint',
};
}
else {
params = { event: event, provider: provider };
}
return [2 /*return*/, this._sendEvent(params)];
});
});
};
AnalyticsClass.prototype.updateEndpoint = function (attrs, provider) {
return __awaiter(this, void 0, void 0, function () {
var event;
return __generator(this, function (_a) {
event = __assign(__assign({}, attrs), { name: '_update_endpoint' });
return [2 /*return*/, this.record(event, provider)];
});
});
};
AnalyticsClass.prototype._sendEvent = function (params) {
var _this = this;
if (this._disabled) {
logger.debug('Analytics has been disabled');
return Promise.resolve();
}
var provider = params.provider ? params.provider : 'AWSPinpoint';
return new Promise(function (resolve, reject) {
_this._pluggables.forEach(function (pluggable) {
if (pluggable.getProviderName() === provider) {
pluggable.record(params, { resolve: resolve, reject: reject });
}
});
});
};
AnalyticsClass.prototype.autoTrack = function (trackerType, opts) {
if (!trackers[trackerType]) {
logger.debug('invalid tracker type');
return;
}
// to sync up two different configuration ways of auto session tracking
if (trackerType === 'session') {
this._config['autoSessionRecord'] = opts['enable'];
}
var tracker = this._trackers[trackerType];
if (!tracker) {
this._trackers[trackerType] = new trackers[trackerType](this.record, opts);
}
else {
tracker.configure(opts);
}
};
return AnalyticsClass;
}());
exports.AnalyticsClass = AnalyticsClass;
var endpointUpdated = false;
var authConfigured = false;
var analyticsConfigured = false;
var listener = function (capsule) {
var channel = capsule.channel, payload = capsule.payload;
logger.debug('on hub capsule ' + channel, payload);
switch (channel) {
case 'auth':
authEvent(payload);
break;
case 'storage':
storageEvent(payload);
break;
case 'analytics':
analyticsEvent(payload);
break;
default:
break;
}
};
var storageEvent = function (payload) {
var _a = payload.data, attrs = _a.attrs, metrics = _a.metrics;
if (!attrs)
return;
if (analyticsConfigured) {
_instance
.record({
name: 'Storage',
attributes: attrs,
metrics: metrics,
})
.catch(function (e) {
logger.debug('Failed to send the storage event automatically', e);
});
}
};
var authEvent = function (payload) {
var event = payload.event;
if (!event) {
return;
}
var recordAuthEvent = function (eventName) { return __awaiter(void 0, void 0, void 0, function () {
var err_1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(authConfigured && analyticsConfigured)) return [3 /*break*/, 4];
_a.label = 1;
case 1:
_a.trys.push([1, 3, , 4]);
return [4 /*yield*/, _instance.record({ name: "_userauth." + eventName })];
case 2: return [2 /*return*/, _a.sent()];
case 3:
err_1 = _a.sent();
logger.debug("Failed to send the " + eventName + " event automatically", err_1);
return [3 /*break*/, 4];
case 4: return [2 /*return*/];
}
});
}); };
switch (event) {
case 'signIn':
return recordAuthEvent('sign_in');
case 'signUp':
return recordAuthEvent('sign_up');
case 'signOut':
return recordAuthEvent('sign_out');
case 'signIn_failure':
return recordAuthEvent('auth_fail');
case 'configured':
authConfigured = true;
if (authConfigured && analyticsConfigured) {
sendEvents();
}
break;
}
};
var analyticsEvent = function (payload) {
var event = payload.event;
if (!event)
return;
switch (event) {
case 'pinpointProvider_configured':
analyticsConfigured = true;
if (authConfigured && analyticsConfigured) {
sendEvents();
}
break;
}
};
var sendEvents = function () {
var config = _instance.configure();
if (!endpointUpdated && config['autoSessionRecord']) {
_instance.updateEndpoint({ immediate: true }).catch(function (e) {
logger.debug('Failed to update the endpoint', e);
});
endpointUpdated = true;
}
_instance.autoTrack('session', {
enable: config['autoSessionRecord'],
});
};
exports.Analytics = new AnalyticsClass();
core_1.Amplify.register(exports.Analytics);
//# sourceMappingURL=Analytics.js.map | { try { step(generator.next(value)); } catch (e) { reject(e); } } | identifier_body |
Analytics.js | "use strict";
/*
* Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function | (result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
var core_1 = require("@aws-amplify/core");
var AWSPinpointProvider_1 = require("./Providers/AWSPinpointProvider");
var trackers_1 = require("./trackers");
var logger = new core_1.ConsoleLogger('AnalyticsClass');
var AMPLIFY_SYMBOL = (typeof Symbol !== 'undefined' &&
typeof Symbol.for === 'function'
? Symbol.for('amplify_default')
: '@@amplify_default');
var dispatchAnalyticsEvent = function (event, data, message) {
core_1.Hub.dispatch('analytics', { event: event, data: data, message: message }, 'Analytics', AMPLIFY_SYMBOL);
};
var trackers = {
pageView: trackers_1.PageViewTracker,
event: trackers_1.EventTracker,
session: trackers_1.SessionTracker,
};
var _instance = null;
/**
* Provide mobile analytics client functions
*/
var AnalyticsClass = /** @class */ (function () {
/**
* Initialize Analtyics
* @param config - Configuration of the Analytics
*/
function AnalyticsClass() {
this._config = {};
this._pluggables = [];
this._disabled = false;
this._trackers = {};
_instance = this;
this.record = this.record.bind(this);
core_1.Hub.listen('auth', listener);
core_1.Hub.listen('storage', listener);
core_1.Hub.listen('analytics', listener);
}
AnalyticsClass.prototype.getModuleName = function () {
return 'Analytics';
};
/**
* configure Analytics
* @param {Object} config - Configuration of the Analytics
*/
AnalyticsClass.prototype.configure = function (config) {
var _this = this;
if (!config)
return this._config;
logger.debug('configure Analytics', config);
var amplifyConfig = core_1.Parser.parseMobilehubConfig(config);
this._config = Object.assign({}, this._config, amplifyConfig.Analytics, config);
if (this._config['disabled']) {
this._disabled = true;
}
// turn on the autoSessionRecord if not specified
if (this._config['autoSessionRecord'] === undefined) {
this._config['autoSessionRecord'] = true;
}
this._pluggables.forEach(function (pluggable) {
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!_this._config['AWSPinpoint']
? _this._config
: _this._config[pluggable.getProviderName()];
pluggable.configure(__assign({ disabled: _this._config['disabled'], autoSessionRecord: _this._config['autoSessionRecord'] }, providerConfig));
});
if (this._pluggables.length === 0) {
this.addPluggable(new AWSPinpointProvider_1.AWSPinpointProvider());
}
dispatchAnalyticsEvent('configured', null, "The Analytics category has been configured successfully");
logger.debug('current configuration', this._config);
return this._config;
};
/**
* add plugin into Analytics category
* @param {Object} pluggable - an instance of the plugin
*/
AnalyticsClass.prototype.addPluggable = function (pluggable) {
if (pluggable && pluggable.getCategory() === 'Analytics') {
this._pluggables.push(pluggable);
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!this._config['AWSPinpoint']
? this._config
: this._config[pluggable.getProviderName()];
var config = __assign({ disabled: this._config['disabled'] }, providerConfig);
pluggable.configure(config);
return config;
}
};
/**
* Get the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.getPluggable = function (providerName) {
for (var i = 0; i < this._pluggables.length; i += 1) {
var pluggable = this._pluggables[i];
if (pluggable.getProviderName() === providerName) {
return pluggable;
}
}
logger.debug('No plugin found with providerName', providerName);
return null;
};
/**
* Remove the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.removePluggable = function (providerName) {
var idx = 0;
while (idx < this._pluggables.length) {
if (this._pluggables[idx].getProviderName() === providerName) {
break;
}
idx += 1;
}
if (idx === this._pluggables.length) {
logger.debug('No plugin found with providerName', providerName);
return;
}
else {
this._pluggables.splice(idx, idx + 1);
return;
}
};
/**
* stop sending events
*/
AnalyticsClass.prototype.disable = function () {
this._disabled = true;
};
/**
* start sending events
*/
AnalyticsClass.prototype.enable = function () {
this._disabled = false;
};
/**
* Record Session start
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.startSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.start' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record Session stop
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.stopSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.stop' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record one analytic event and send it to Pinpoint
* @param {String} name - The name of the event
* @param {Object} [attributes] - Attributes of the event
* @param {Object} [metrics] - Event metrics
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.record = function (event, provider, metrics) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = null;
// this is just for compatibility, going to be deprecated
if (typeof event === 'string') {
params = {
event: {
name: event,
attributes: provider,
metrics: metrics,
},
provider: 'AWSPinpoint',
};
}
else {
params = { event: event, provider: provider };
}
return [2 /*return*/, this._sendEvent(params)];
});
});
};
AnalyticsClass.prototype.updateEndpoint = function (attrs, provider) {
return __awaiter(this, void 0, void 0, function () {
var event;
return __generator(this, function (_a) {
event = __assign(__assign({}, attrs), { name: '_update_endpoint' });
return [2 /*return*/, this.record(event, provider)];
});
});
};
AnalyticsClass.prototype._sendEvent = function (params) {
var _this = this;
if (this._disabled) {
logger.debug('Analytics has been disabled');
return Promise.resolve();
}
var provider = params.provider ? params.provider : 'AWSPinpoint';
return new Promise(function (resolve, reject) {
_this._pluggables.forEach(function (pluggable) {
if (pluggable.getProviderName() === provider) {
pluggable.record(params, { resolve: resolve, reject: reject });
}
});
});
};
AnalyticsClass.prototype.autoTrack = function (trackerType, opts) {
if (!trackers[trackerType]) {
logger.debug('invalid tracker type');
return;
}
// to sync up two different configuration ways of auto session tracking
if (trackerType === 'session') {
this._config['autoSessionRecord'] = opts['enable'];
}
var tracker = this._trackers[trackerType];
if (!tracker) {
this._trackers[trackerType] = new trackers[trackerType](this.record, opts);
}
else {
tracker.configure(opts);
}
};
return AnalyticsClass;
}());
exports.AnalyticsClass = AnalyticsClass;
var endpointUpdated = false;
var authConfigured = false;
var analyticsConfigured = false;
var listener = function (capsule) {
var channel = capsule.channel, payload = capsule.payload;
logger.debug('on hub capsule ' + channel, payload);
switch (channel) {
case 'auth':
authEvent(payload);
break;
case 'storage':
storageEvent(payload);
break;
case 'analytics':
analyticsEvent(payload);
break;
default:
break;
}
};
var storageEvent = function (payload) {
var _a = payload.data, attrs = _a.attrs, metrics = _a.metrics;
if (!attrs)
return;
if (analyticsConfigured) {
_instance
.record({
name: 'Storage',
attributes: attrs,
metrics: metrics,
})
.catch(function (e) {
logger.debug('Failed to send the storage event automatically', e);
});
}
};
var authEvent = function (payload) {
var event = payload.event;
if (!event) {
return;
}
var recordAuthEvent = function (eventName) { return __awaiter(void 0, void 0, void 0, function () {
var err_1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(authConfigured && analyticsConfigured)) return [3 /*break*/, 4];
_a.label = 1;
case 1:
_a.trys.push([1, 3, , 4]);
return [4 /*yield*/, _instance.record({ name: "_userauth." + eventName })];
case 2: return [2 /*return*/, _a.sent()];
case 3:
err_1 = _a.sent();
logger.debug("Failed to send the " + eventName + " event automatically", err_1);
return [3 /*break*/, 4];
case 4: return [2 /*return*/];
}
});
}); };
switch (event) {
case 'signIn':
return recordAuthEvent('sign_in');
case 'signUp':
return recordAuthEvent('sign_up');
case 'signOut':
return recordAuthEvent('sign_out');
case 'signIn_failure':
return recordAuthEvent('auth_fail');
case 'configured':
authConfigured = true;
if (authConfigured && analyticsConfigured) {
sendEvents();
}
break;
}
};
var analyticsEvent = function (payload) {
var event = payload.event;
if (!event)
return;
switch (event) {
case 'pinpointProvider_configured':
analyticsConfigured = true;
if (authConfigured && analyticsConfigured) {
sendEvents();
}
break;
}
};
var sendEvents = function () {
var config = _instance.configure();
if (!endpointUpdated && config['autoSessionRecord']) {
_instance.updateEndpoint({ immediate: true }).catch(function (e) {
logger.debug('Failed to update the endpoint', e);
});
endpointUpdated = true;
}
_instance.autoTrack('session', {
enable: config['autoSessionRecord'],
});
};
exports.Analytics = new AnalyticsClass();
core_1.Amplify.register(exports.Analytics);
//# sourceMappingURL=Analytics.js.map | step | identifier_name |
Analytics.js | "use strict";
/*
* Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
var core_1 = require("@aws-amplify/core");
var AWSPinpointProvider_1 = require("./Providers/AWSPinpointProvider");
var trackers_1 = require("./trackers");
var logger = new core_1.ConsoleLogger('AnalyticsClass');
var AMPLIFY_SYMBOL = (typeof Symbol !== 'undefined' &&
typeof Symbol.for === 'function'
? Symbol.for('amplify_default')
: '@@amplify_default');
var dispatchAnalyticsEvent = function (event, data, message) {
core_1.Hub.dispatch('analytics', { event: event, data: data, message: message }, 'Analytics', AMPLIFY_SYMBOL);
};
var trackers = {
pageView: trackers_1.PageViewTracker,
event: trackers_1.EventTracker,
session: trackers_1.SessionTracker,
};
var _instance = null;
/**
* Provide mobile analytics client functions
*/
var AnalyticsClass = /** @class */ (function () {
/**
* Initialize Analtyics
* @param config - Configuration of the Analytics
*/
function AnalyticsClass() {
this._config = {};
this._pluggables = [];
this._disabled = false;
this._trackers = {};
_instance = this;
this.record = this.record.bind(this);
core_1.Hub.listen('auth', listener);
core_1.Hub.listen('storage', listener);
core_1.Hub.listen('analytics', listener);
}
AnalyticsClass.prototype.getModuleName = function () {
return 'Analytics';
};
/**
* configure Analytics
* @param {Object} config - Configuration of the Analytics
*/
AnalyticsClass.prototype.configure = function (config) {
var _this = this;
if (!config)
return this._config;
logger.debug('configure Analytics', config);
var amplifyConfig = core_1.Parser.parseMobilehubConfig(config);
this._config = Object.assign({}, this._config, amplifyConfig.Analytics, config);
if (this._config['disabled']) {
this._disabled = true;
}
// turn on the autoSessionRecord if not specified
if (this._config['autoSessionRecord'] === undefined) {
this._config['autoSessionRecord'] = true;
}
this._pluggables.forEach(function (pluggable) {
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!_this._config['AWSPinpoint']
? _this._config
: _this._config[pluggable.getProviderName()];
pluggable.configure(__assign({ disabled: _this._config['disabled'], autoSessionRecord: _this._config['autoSessionRecord'] }, providerConfig));
});
if (this._pluggables.length === 0) {
this.addPluggable(new AWSPinpointProvider_1.AWSPinpointProvider());
}
dispatchAnalyticsEvent('configured', null, "The Analytics category has been configured successfully");
logger.debug('current configuration', this._config);
return this._config;
};
/**
* add plugin into Analytics category
* @param {Object} pluggable - an instance of the plugin
*/
AnalyticsClass.prototype.addPluggable = function (pluggable) {
if (pluggable && pluggable.getCategory() === 'Analytics') {
this._pluggables.push(pluggable);
// for backward compatibility
var providerConfig = pluggable.getProviderName() === 'AWSPinpoint' &&
!this._config['AWSPinpoint']
? this._config
: this._config[pluggable.getProviderName()];
var config = __assign({ disabled: this._config['disabled'] }, providerConfig);
pluggable.configure(config);
return config;
}
};
/**
* Get the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.getPluggable = function (providerName) {
for (var i = 0; i < this._pluggables.length; i += 1) {
var pluggable = this._pluggables[i];
if (pluggable.getProviderName() === providerName) {
return pluggable;
}
}
logger.debug('No plugin found with providerName', providerName);
return null;
};
/**
* Remove the plugin object
* @param providerName - the name of the plugin
*/
AnalyticsClass.prototype.removePluggable = function (providerName) {
var idx = 0;
while (idx < this._pluggables.length) {
if (this._pluggables[idx].getProviderName() === providerName) {
break;
}
idx += 1;
}
if (idx === this._pluggables.length) {
logger.debug('No plugin found with providerName', providerName);
return;
}
else {
this._pluggables.splice(idx, idx + 1);
return;
}
};
/**
* stop sending events
*/
AnalyticsClass.prototype.disable = function () {
this._disabled = true;
};
/**
* start sending events
*/
AnalyticsClass.prototype.enable = function () {
this._disabled = false;
};
/**
* Record Session start
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.startSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.start' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record Session stop
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.stopSession = function (provider) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = { event: { name: '_session.stop' }, provider: provider };
return [2 /*return*/, this._sendEvent(params)];
});
});
};
/**
* Record one analytic event and send it to Pinpoint
* @param {String} name - The name of the event
* @param {Object} [attributes] - Attributes of the event
* @param {Object} [metrics] - Event metrics
* @return - A promise which resolves if buffer doesn't overflow
*/
AnalyticsClass.prototype.record = function (event, provider, metrics) {
return __awaiter(this, void 0, void 0, function () {
var params;
return __generator(this, function (_a) {
params = null;
// this is just for compatibility, going to be deprecated
if (typeof event === 'string') {
params = {
event: {
name: event,
attributes: provider,
metrics: metrics,
},
provider: 'AWSPinpoint',
};
}
else {
params = { event: event, provider: provider };
}
return [2 /*return*/, this._sendEvent(params)];
});
});
};
AnalyticsClass.prototype.updateEndpoint = function (attrs, provider) {
return __awaiter(this, void 0, void 0, function () {
var event;
return __generator(this, function (_a) {
event = __assign(__assign({}, attrs), { name: '_update_endpoint' });
return [2 /*return*/, this.record(event, provider)];
});
});
};
AnalyticsClass.prototype._sendEvent = function (params) {
var _this = this;
if (this._disabled) {
logger.debug('Analytics has been disabled');
return Promise.resolve();
}
var provider = params.provider ? params.provider : 'AWSPinpoint';
return new Promise(function (resolve, reject) {
_this._pluggables.forEach(function (pluggable) {
if (pluggable.getProviderName() === provider) {
pluggable.record(params, { resolve: resolve, reject: reject });
}
});
});
};
AnalyticsClass.prototype.autoTrack = function (trackerType, opts) {
if (!trackers[trackerType]) {
logger.debug('invalid tracker type');
return;
}
// to sync up two different configuration ways of auto session tracking
if (trackerType === 'session') {
this._config['autoSessionRecord'] = opts['enable'];
}
var tracker = this._trackers[trackerType];
if (!tracker) {
this._trackers[trackerType] = new trackers[trackerType](this.record, opts);
}
else {
tracker.configure(opts);
}
};
return AnalyticsClass;
}());
exports.AnalyticsClass = AnalyticsClass;
var endpointUpdated = false;
var authConfigured = false;
var analyticsConfigured = false;
var listener = function (capsule) {
var channel = capsule.channel, payload = capsule.payload;
logger.debug('on hub capsule ' + channel, payload);
switch (channel) {
case 'auth':
authEvent(payload);
break;
case 'storage':
storageEvent(payload);
break;
case 'analytics':
analyticsEvent(payload);
break;
default:
break;
}
};
var storageEvent = function (payload) {
var _a = payload.data, attrs = _a.attrs, metrics = _a.metrics;
if (!attrs)
return;
if (analyticsConfigured) {
_instance
.record({
name: 'Storage',
attributes: attrs,
metrics: metrics,
})
.catch(function (e) {
logger.debug('Failed to send the storage event automatically', e);
});
}
};
    var authEvent = function (payload) {
        var event = payload.event;
        if (!event) {
            return;
        }
        // Records a '_userauth.<eventName>' analytics event, but only once
        // both Auth and Analytics have finished configuring; failures are
        // logged and swallowed so auth flows are never disrupted.
        // (Compiled async/await state machine — do not reorder cases.)
        var recordAuthEvent = function (eventName) { return __awaiter(void 0, void 0, void 0, function () {
            var err_1;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        if (!(authConfigured && analyticsConfigured)) return [3 /*break*/, 4];
                        _a.label = 1;
                    case 1:
                        _a.trys.push([1, 3, , 4]);
                        return [4 /*yield*/, _instance.record({ name: "_userauth." + eventName })];
                    case 2: return [2 /*return*/, _a.sent()];
                    case 3:
                        err_1 = _a.sent();
                        logger.debug("Failed to send the " + eventName + " event automatically", err_1);
                        return [3 /*break*/, 4];
                    case 4: return [2 /*return*/];
                }
            });
        }); };
        // Map Hub auth events onto analytics event names.
        switch (event) {
            case 'signIn':
                return recordAuthEvent('sign_in');
            case 'signUp':
                return recordAuthEvent('sign_up');
            case 'signOut':
                return recordAuthEvent('sign_out');
            case 'signIn_failure':
                return recordAuthEvent('auth_fail');
            case 'configured':
                // Once both categories are ready, flush the automatic events.
                authConfigured = true;
                if (authConfigured && analyticsConfigured) {
                    sendEvents();
                }
                break;
        }
    };
var analyticsEvent = function (payload) {
var event = payload.event;
if (!event)
return;
switch (event) {
case 'pinpointProvider_configured':
analyticsConfigured = true;
if (authConfigured && analyticsConfigured) {
sendEvents();
}
break;
}
};
var sendEvents = function () {
var config = _instance.configure();
if (!endpointUpdated && config['autoSessionRecord']) {
_instance.updateEndpoint({ immediate: true }).catch(function (e) {
logger.debug('Failed to update the endpoint', e);
});
endpointUpdated = true;
}
_instance.autoTrack('session', {
enable: config['autoSessionRecord'],
});
};
exports.Analytics = new AnalyticsClass();
core_1.Amplify.register(exports.Analytics);
//# sourceMappingURL=Analytics.js.map | if (authConfigured && analyticsConfigured) {
sendEvents(); | random_line_split |
miner_corrections.go | package nv4
import (
"bytes"
"context"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/util/adt"
)
func (m *minerMigrator) | (ctx context.Context, store cbor.IpldStore, head cid.Cid,
priorEpoch abi.ChainEpoch, a addr.Address) (*StateMigrationResult, error) {
result := &StateMigrationResult{
NewHead: head,
Transfer: big.Zero(),
}
epoch := priorEpoch + 1
var st miner.State
if err := store.Get(ctx, head, &st); err != nil {
return nil, err
}
// If the miner's proving period hasn't started yet, it's a new v0
// miner.
//
// 1. There's no need to fix any state.
// 2. We definitely don't want to reschedule the proving period
// start/deadlines.
if st.ProvingPeriodStart > epoch {
return result, nil
}
adtStore := adt.WrapStore(ctx, store)
sectors, err := miner.LoadSectors(adtStore, st.Sectors)
if err != nil {
return nil, err
}
info, err := st.GetInfo(adtStore)
if err != nil {
return nil, err
}
powerClaimSuspect, err := m.correctForCCUpgradeThenFaultIssue(ctx, store, &st, sectors, epoch, info.SectorSize)
if err != nil {
return nil, err
}
if powerClaimSuspect {
claimUpdate, err := m.computeClaim(ctx, adtStore, &st, a)
if err != nil {
return nil, err
}
if claimUpdate != nil {
result.PowerUpdates = append(result.PowerUpdates, claimUpdate)
}
cronUpdate, err := m.computeProvingPeriodCron(a, &st, epoch, adtStore)
if err != nil {
return nil, err
}
if cronUpdate != nil {
result.PowerUpdates = append(result.PowerUpdates, cronUpdate)
}
}
newHead, err := store.Put(ctx, &st)
result.NewHead = newHead
return result, err
}
// correctForCCUpgradeThenFaultIssue walks every deadline and partition of the
// miner, repairing expiration-queue entries corrupted by the CC-upgrade-then-
// fault bug, and fast-forwards proving-period bookkeeping past any missed
// proving-period cron runs. It returns true when the miner's power claim and
// cron registration may now be stale and must be recomputed by the caller.
func (m *minerMigrator) correctForCCUpgradeThenFaultIssue(
	ctx context.Context, store cbor.IpldStore, st *miner.State, sectors miner.Sectors, epoch abi.ChainEpoch,
	sectorSize abi.SectorSize,
) (bool, error) {
	quantSpec := st.QuantSpecEveryDeadline()
	deadlines, err := st.LoadDeadlines(adt.WrapStore(ctx, store))
	if err != nil {
		return false, err
	}
	// Advance ProvingPeriodStart to the current period; having to advance it
	// means the proving-period cron did not run when it should have.
	missedProvingPeriodCron := false
	for st.ProvingPeriodStart+miner.WPoStProvingPeriod <= epoch {
		st.ProvingPeriodStart += miner.WPoStProvingPeriod
		missedProvingPeriodCron = true
	}
	// Re-derive the current deadline index from the updated period start.
	expectedDeadlline := uint64((epoch - st.ProvingPeriodStart) / miner.WPoStChallengeWindow)
	if expectedDeadlline != st.CurrentDeadline {
		st.CurrentDeadline = expectedDeadlline
		missedProvingPeriodCron = true
	}
	deadlinesModified := false
	err = deadlines.ForEach(adt.WrapStore(ctx, store), func(dlIdx uint64, deadline *miner.Deadline) error {
		partitions, err := adt.AsArray(adt.WrapStore(ctx, store), deadline.Partitions)
		if err != nil {
			return err
		}
		// Partitions changed in this deadline, keyed by partition index.
		alteredPartitions := make(map[uint64]miner.Partition)
		allFaultyPower := miner.NewPowerPairZero()
		var part miner.Partition
		err = partitions.ForEach(&part, func(partIdx int64) error {
			exq, err := miner.LoadExpirationQueue(adt.WrapStore(ctx, store), part.ExpirationsEpochs, quantSpec)
			if err != nil {
				return err
			}
			exqRoot, stats, err := m.correctExpirationQueue(exq, sectors, part.Terminated, part.Faults, sectorSize)
			if err != nil {
				return err
			}
			// if unmodified, we're done (cid.Undef signals "no changes needed").
			// NOTE(review): this early return also excludes unmodified
			// partitions from allFaultyPower below — confirm deadline.FaultyPower
			// is intended to count only corrected partitions.
			if exqRoot.Equals(cid.Undef) {
				return nil
			}
			if !part.ExpirationsEpochs.Equals(exqRoot) {
				part.ExpirationsEpochs = exqRoot
				alteredPartitions[uint64(partIdx)] = part
			}
			// Live power must equal active + faulty from the corrected queue.
			if !part.LivePower.Equals(stats.totalActivePower.Add(stats.totalFaultyPower)) {
				part.LivePower = stats.totalActivePower.Add(stats.totalFaultyPower)
				alteredPartitions[uint64(partIdx)] = part
			}
			if !part.FaultyPower.Equals(stats.totalFaultyPower) {
				part.FaultyPower = stats.totalFaultyPower
				alteredPartitions[uint64(partIdx)] = part
			}
			// A missed proving-period cron means pending recoveries were never
			// processed; clear them rather than leave stale state behind.
			if missedProvingPeriodCron {
				part.Recoveries = bitfield.New()
				part.RecoveringPower = miner.NewPowerPairZero()
				alteredPartitions[uint64(partIdx)] = part
			}
			allFaultyPower = allFaultyPower.Add(part.FaultyPower)
			return nil
		})
		if err != nil {
			return err
		}
		// if we've failed to update at last proving period, expect post submissions to contain bits it shouldn't
		if missedProvingPeriodCron {
			deadline.PostSubmissions = bitfield.New()
			if err := deadlines.UpdateDeadline(adt.WrapStore(ctx, store), dlIdx, deadline); err != nil {
				return err
			}
			deadlinesModified = true
		}
		// if partitions have been updates, record that in deadline
		if len(alteredPartitions) > 0 {
			for partIdx, part := range alteredPartitions { // nolint:nomaprange
				if err := partitions.Set(partIdx, &part); err != nil {
					return err
				}
			}
			deadline.Partitions, err = partitions.Root()
			if err != nil {
				return err
			}
			deadline.FaultyPower = allFaultyPower
			if err := deadlines.UpdateDeadline(adt.WrapStore(ctx, store), dlIdx, deadline); err != nil {
				return err
			}
			deadlinesModified = true
		}
		return nil
	})
	if err != nil {
		return false, err
	}
	// Nothing changed anywhere: the caller need not recompute claims.
	if !deadlinesModified {
		return false, nil
	}
	if err = st.SaveDeadlines(adt.WrapStore(ctx, store), deadlines); err != nil {
		return false, err
	}
	return true, err
}
// computeProvingPeriodCron builds the power-actor cron registration that
// re-schedules this miner's proving-deadline callback at the final epoch of
// its current deadline window.
func (m *minerMigrator) computeProvingPeriodCron(a addr.Address, st *miner.State, epoch abi.ChainEpoch, store adt.Store) (*cronUpdate, error) {
	// Serialize the proving-deadline cron payload to CBOR.
	payload := miner.CronEventPayload{EventType: miner.CronEventProvingDeadline}
	var encoded bytes.Buffer
	if err := payload.MarshalCBOR(&encoded); err != nil {
		return nil, err
	}
	// The callback must fire at the last epoch of the current deadline.
	fireAt := st.DeadlineInfo(epoch).Last()
	event := power0.CronEvent{
		MinerAddr:       a,
		CallbackPayload: encoded.Bytes(),
	}
	return &cronUpdate{epoch: fireAt, event: event}, nil
}
// computeClaim recomputes the miner's power claim by summing active power
// (live minus faulty) across every partition in every deadline.
func (m *minerMigrator) computeClaim(ctx context.Context, store adt.Store, st *miner.State, a addr.Address) (*claimUpdate, error) {
	deadlines, err := st.LoadDeadlines(store)
	if err != nil {
		return nil, err
	}
	active := miner.NewPowerPairZero()
	err = deadlines.ForEach(store, func(dlIdx uint64, dl *miner.Deadline) error {
		parts, err := dl.PartitionsArray(store)
		if err != nil {
			return err
		}
		var p miner.Partition
		return parts.ForEach(&p, func(i int64) error {
			// Active power is everything live that is not faulty.
			active = active.Add(p.LivePower.Sub(p.FaultyPower))
			return nil
		})
	})
	if err != nil {
		return nil, err
	}
	claim := power0.Claim{
		RawBytePower:    active.Raw,
		QualityAdjPower: active.QA,
	}
	return &claimUpdate{addr: a, claim: claim}, nil
}
// expirationQueueStats aggregates the power totals recomputed while walking
// a partition's expiration queue.
type expirationQueueStats struct {
	// total of all active power in the expiration queue
	totalActivePower miner.PowerPair
	// total of all faulty power in the expiration queue
	totalFaultyPower miner.PowerPair
}
// Updates the expiration queue by correcting any duplicate entries and their fallout.
// If no changes need to be made cid.Undef will be returned.
// Returns the new root of the expiration queue
func (m *minerMigrator) correctExpirationQueue(exq miner.ExpirationQueue, sectors miner.Sectors,
	allTerminated bitfield.BitField, allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (cid.Cid, expirationQueueStats, error) {
	// processed expired sectors includes all terminated and all sectors seen in earlier expiration sets
	processedExpiredSectors := allTerminated
	expirationSetPowerSuspect := false
	var exs miner.ExpirationSet
	// Check for faults that need to be erased.
	// Erased faults will be removed from bitfields and the power will be recomputed
	// in the subsequent loop.
	err := exq.ForEach(&exs, func(epoch int64) error { //nolint:nomaprange
		// Detect sectors that are present in this expiration set as "early", but that
		// have already terminated or duplicate a prior entry in the queue, and thus will
		// be terminated before this entry is processed. The sector was rescheduled here
		// upon fault, but the entry is stale and should not exist.
		modified := false
		earlyDuplicates, err := bitfield.IntersectBitField(exs.EarlySectors, processedExpiredSectors)
		if err != nil {
			return err
		} else if empty, err := earlyDuplicates.IsEmpty(); err != nil {
			return err
		} else if !empty {
			modified = true
			exs.EarlySectors, err = bitfield.SubtractBitField(exs.EarlySectors, earlyDuplicates)
			if err != nil {
				return err
			}
		}
		// Detect sectors that are terminating on time, but have either already terminated or duplicate
		// an entry in the queue. The sector might be faulty, but were expiring here anyway so not
		// rescheduled as "early".
		onTimeDuplicates, err := bitfield.IntersectBitField(exs.OnTimeSectors, processedExpiredSectors)
		if err != nil {
			return err
		} else if empty, err := onTimeDuplicates.IsEmpty(); err != nil {
			return err
		} else if !empty {
			modified = true
			exs.OnTimeSectors, err = bitfield.SubtractBitField(exs.OnTimeSectors, onTimeDuplicates)
			if err != nil {
				return err
			}
		}
		if modified {
			expirationSetPowerSuspect = true
			// Deep-copy before Set: exs is the reused iteration buffer, so
			// storing it directly would alias later iterations.
			exs2, err := copyES(exs)
			if err != nil {
				return err
			}
			if err := exq.Set(uint64(epoch), &exs2); err != nil {
				return err
			}
		}
		// Record all sectors that would be terminated after this queue entry is processed.
		if processedExpiredSectors, err = bitfield.MultiMerge(processedExpiredSectors, exs.EarlySectors, exs.OnTimeSectors); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return cid.Undef, expirationQueueStats{}, err
	}
	// If we didn't find any duplicate sectors, we're done.
	if !expirationSetPowerSuspect {
		return cid.Undef, expirationQueueStats{}, nil
	}
	// Second pass: recompute power and pledge for every entry now that the
	// duplicate bitfield entries have been removed.
	partitionActivePower := miner.NewPowerPairZero()
	partitionFaultyPower := miner.NewPowerPairZero()
	err = exq.ForEach(&exs, func(epoch int64) error {
		modified, activePower, faultyPower, err := correctExpirationSetPowerAndPledge(&exs, sectors, allFaults, sectorSize)
		if err != nil {
			return err
		}
		partitionActivePower = partitionActivePower.Add(activePower)
		partitionFaultyPower = partitionFaultyPower.Add(faultyPower)
		if modified {
			exs2, err := copyES(exs)
			if err != nil {
				return err
			}
			if err := exq.Set(uint64(epoch), &exs2); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return cid.Undef, expirationQueueStats{}, err
	}
	expirationQueueRoot, err := exq.Root()
	if err != nil {
		return cid.Undef, expirationQueueStats{}, err
	}
	return expirationQueueRoot, expirationQueueStats{
		partitionActivePower,
		partitionFaultyPower,
	}, nil
}
// Recompute active and faulty power for an expiration set.
// The active power for an expiration set should be the sum of the power of all its active sectors,
// where active means all sectors not labeled as a fault in the partition. Similarly, faulty power
// is the sum of faulty sectors.
// If a sector has been rescheduled from ES3 to both ES1 as active and ES2
// as a fault, we expect it to be labeled as a fault in the partition. We have already
// removed the sector from ES2, so this correction should move its active power to faulty power in ES1
// because it is labeled as a fault, remove its power altogether from ES2 because its been removed from
// ES2's bitfields, and correct the double subtraction of power from ES3.
func correctExpirationSetPowerAndPledge(exs *miner.ExpirationSet, sectors miner.Sectors,
	allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (bool, miner.PowerPair, miner.PowerPair, error) {
	// Returns (modified, activePower, faultyPower, err); the power totals are
	// returned even when the set needed no correction, so callers can sum them.
	modified := false
	allSectors, err := bitfield.MergeBitFields(exs.OnTimeSectors, exs.EarlySectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	// correct errors in active power: active = (on-time + early) minus faults
	activeSectors, err := bitfield.SubtractBitField(allSectors, allFaults)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	as, err := sectors.Load(activeSectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	activePower := miner.PowerForSectors(sectorSize, as)
	if !activePower.Equals(exs.ActivePower) {
		exs.ActivePower = activePower
		modified = true
	}
	// correct errors in faulty power: faulty = (on-time + early) that are faults
	faultySectors, err := bitfield.IntersectBitField(allSectors, allFaults)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	fs, err := sectors.Load(faultySectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	faultyPower := miner.PowerForSectors(sectorSize, fs)
	if !faultyPower.Equals(exs.FaultyPower) {
		exs.FaultyPower = faultyPower
		modified = true
	}
	// correct errors in pledge: only on-time sectors contribute pledge here.
	expectedPledge := big.Zero()
	ots, err := sectors.Load(exs.OnTimeSectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	for _, sector := range ots {
		expectedPledge = big.Add(expectedPledge, sector.InitialPledge)
	}
	if !expectedPledge.Equals(exs.OnTimePledge) {
		exs.OnTimePledge = expectedPledge
		modified = true
	}
	return modified, activePower, faultyPower, nil
}
// copyES returns a deep copy of an ExpirationSet: the two sector bitfields
// are duplicated, while the pledge and power values are copied by value.
func copyES(in miner.ExpirationSet) (miner.ExpirationSet, error) {
	onTime, err := in.OnTimeSectors.Copy()
	if err != nil {
		return miner.ExpirationSet{}, err
	}
	early, err := in.EarlySectors.Copy()
	if err != nil {
		return miner.ExpirationSet{}, err
	}
	out := miner.ExpirationSet{
		OnTimeSectors: onTime,
		EarlySectors:  early,
		OnTimePledge:  in.OnTimePledge,
		ActivePower:   in.ActivePower,
		FaultyPower:   in.FaultyPower,
	}
	return out, nil
}
| CorrectState | identifier_name |
miner_corrections.go | package nv4
import (
"bytes"
"context"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/util/adt"
)
// CorrectState repairs a miner actor's state as part of the nv4 migration:
// it fixes expiration-queue corruption from the CC-upgrade-then-fault bug
// and reports any power-claim / cron updates the power actor must apply.
// priorEpoch is the last epoch before the migration takes effect; the
// returned result carries the (possibly unchanged) new state head.
func (m *minerMigrator) CorrectState(ctx context.Context, store cbor.IpldStore, head cid.Cid,
	priorEpoch abi.ChainEpoch, a addr.Address) (*StateMigrationResult, error) {
	result := &StateMigrationResult{
		NewHead: head,
		Transfer: big.Zero(),
	}
	epoch := priorEpoch + 1
	var st miner.State
	if err := store.Get(ctx, head, &st); err != nil {
		return nil, err
	}
	// If the miner's proving period hasn't started yet, it's a new v0
	// miner.
	//
	// 1. There's no need to fix any state.
	// 2. We definitely don't want to reschedule the proving period
	// start/deadlines.
	if st.ProvingPeriodStart > epoch {
		return result, nil
	}
	adtStore := adt.WrapStore(ctx, store)
	sectors, err := miner.LoadSectors(adtStore, st.Sectors)
	if err != nil {
		return nil, err
	}
	info, err := st.GetInfo(adtStore)
	if err != nil {
		return nil, err
	}
	// Repair the deadlines/partitions; true means the power claim and cron
	// registration may now be stale and must be recomputed below.
	powerClaimSuspect, err := m.correctForCCUpgradeThenFaultIssue(ctx, store, &st, sectors, epoch, info.SectorSize)
	if err != nil {
		return nil, err
	}
	if powerClaimSuspect {
		claimUpdate, err := m.computeClaim(ctx, adtStore, &st, a)
		if err != nil {
			return nil, err
		}
		if claimUpdate != nil {
			result.PowerUpdates = append(result.PowerUpdates, claimUpdate)
		}
		cronUpdate, err := m.computeProvingPeriodCron(a, &st, epoch, adtStore)
		if err != nil {
			return nil, err
		}
		if cronUpdate != nil {
			result.PowerUpdates = append(result.PowerUpdates, cronUpdate)
		}
	}
	// Persist the (possibly modified) state and return its new head.
	newHead, err := store.Put(ctx, &st)
	result.NewHead = newHead
	return result, err
}
// correctForCCUpgradeThenFaultIssue walks every deadline and partition of the
// miner state, repairing expiration queues corrupted by the
// CC-upgrade-then-fault issue along with the partition power totals derived
// from them. It also fast-forwards the proving period start and current
// deadline index when proving-period cron events were missed, resetting
// recoveries and PoSt submissions that would otherwise be inconsistent.
//
// Returns true when any deadline state was modified, signalling the caller to
// recompute the miner's power claim and reschedule proving-period cron.
func (m *minerMigrator) correctForCCUpgradeThenFaultIssue(
	ctx context.Context, store cbor.IpldStore, st *miner.State, sectors miner.Sectors, epoch abi.ChainEpoch,
	sectorSize abi.SectorSize,
) (bool, error) {
	// Wrap the store once; the original recreated this wrapper at every use.
	adtStore := adt.WrapStore(ctx, store)
	quantSpec := st.QuantSpecEveryDeadline()
	deadlines, err := st.LoadDeadlines(adtStore)
	if err != nil {
		return false, err
	}
	// Fast-forward the proving period to the one containing the correction
	// epoch. Any skipped period means proving-period cron was missed.
	missedProvingPeriodCron := false
	for st.ProvingPeriodStart+miner.WPoStProvingPeriod <= epoch {
		st.ProvingPeriodStart += miner.WPoStProvingPeriod
		missedProvingPeriodCron = true
	}
	// Realign the current deadline index with the (possibly updated) proving
	// period start.
	expectedDeadline := uint64((epoch - st.ProvingPeriodStart) / miner.WPoStChallengeWindow)
	if expectedDeadline != st.CurrentDeadline {
		st.CurrentDeadline = expectedDeadline
		missedProvingPeriodCron = true
	}
	deadlinesModified := false
	err = deadlines.ForEach(adtStore, func(dlIdx uint64, deadline *miner.Deadline) error {
		partitions, err := adt.AsArray(adtStore, deadline.Partitions)
		if err != nil {
			return err
		}
		alteredPartitions := make(map[uint64]miner.Partition)
		allFaultyPower := miner.NewPowerPairZero()
		var part miner.Partition
		err = partitions.ForEach(&part, func(partIdx int64) error {
			exq, err := miner.LoadExpirationQueue(adtStore, part.ExpirationsEpochs, quantSpec)
			if err != nil {
				return err
			}
			exqRoot, stats, err := m.correctExpirationQueue(exq, sectors, part.Terminated, part.Faults, sectorSize)
			if err != nil {
				return err
			}
			// cid.Undef signals the queue needed no correction; leave the
			// partition untouched.
			// NOTE(review): partitions that return early here are excluded
			// from allFaultyPower below — confirm deadlines mixing corrected
			// and untouched partitions end up with the intended FaultyPower.
			if exqRoot.Equals(cid.Undef) {
				return nil
			}
			if !part.ExpirationsEpochs.Equals(exqRoot) {
				part.ExpirationsEpochs = exqRoot
				alteredPartitions[uint64(partIdx)] = part
			}
			if !part.LivePower.Equals(stats.totalActivePower.Add(stats.totalFaultyPower)) {
				part.LivePower = stats.totalActivePower.Add(stats.totalFaultyPower)
				alteredPartitions[uint64(partIdx)] = part
			}
			if !part.FaultyPower.Equals(stats.totalFaultyPower) {
				part.FaultyPower = stats.totalFaultyPower
				alteredPartitions[uint64(partIdx)] = part
			}
			// Missed proving-period cron means recoveries were never
			// processed; reset them.
			if missedProvingPeriodCron {
				part.Recoveries = bitfield.New()
				part.RecoveringPower = miner.NewPowerPairZero()
				alteredPartitions[uint64(partIdx)] = part
			}
			allFaultyPower = allFaultyPower.Add(part.FaultyPower)
			return nil
		})
		if err != nil {
			return err
		}
		// If we've failed to update at last proving period, expect post submissions to contain bits it shouldn't
		if missedProvingPeriodCron {
			deadline.PostSubmissions = bitfield.New()
			if err := deadlines.UpdateDeadline(adtStore, dlIdx, deadline); err != nil {
				return err
			}
			deadlinesModified = true
		}
		// If partitions have been updated, record that in the deadline.
		if len(alteredPartitions) > 0 {
			for partIdx, part := range alteredPartitions { // nolint:nomaprange
				if err := partitions.Set(partIdx, &part); err != nil {
					return err
				}
			}
			deadline.Partitions, err = partitions.Root()
			if err != nil {
				return err
			}
			deadline.FaultyPower = allFaultyPower
			if err := deadlines.UpdateDeadline(adtStore, dlIdx, deadline); err != nil {
				return err
			}
			deadlinesModified = true
		}
		return nil
	})
	if err != nil {
		return false, err
	}
	if !deadlinesModified {
		return false, nil
	}
	if err = st.SaveDeadlines(adtStore, deadlines); err != nil {
		return false, err
	}
	// err is necessarily nil here; say so explicitly (the original returned
	// the stale err variable).
	return true, nil
}
// computeProvingPeriodCron builds the power-actor cron event that re-registers
// the miner's proving-deadline callback, scheduled for the last epoch of the
// miner's current deadline window.
// The store parameter is unused here but retained for signature compatibility.
func (m *minerMigrator) computeProvingPeriodCron(a addr.Address, st *miner.State, epoch abi.ChainEpoch, store adt.Store) (*cronUpdate, error) {
	payload := miner.CronEventPayload{
		EventType: miner.CronEventProvingDeadline,
	}
	var encoded bytes.Buffer
	if err := payload.MarshalCBOR(&encoded); err != nil {
		return nil, err
	}
	return &cronUpdate{
		epoch: st.DeadlineInfo(epoch).Last(),
		event: power0.CronEvent{
			MinerAddr:       a,
			CallbackPayload: encoded.Bytes(),
		},
	}, nil
}
// computeClaim recomputes the miner's power claim by summing active power
// (live minus faulty) across every partition of every deadline.
func (m *minerMigrator) computeClaim(ctx context.Context, store adt.Store, st *miner.State, a addr.Address) (*claimUpdate, error) {
	dls, err := st.LoadDeadlines(store)
	if err != nil {
		return nil, err
	}
	total := miner.NewPowerPairZero()
	if err := dls.ForEach(store, func(idx uint64, dl *miner.Deadline) error {
		parts, err := dl.PartitionsArray(store)
		if err != nil {
			return err
		}
		var p miner.Partition
		return parts.ForEach(&p, func(i int64) error {
			total = total.Add(p.LivePower.Sub(p.FaultyPower))
			return nil
		})
	}); err != nil {
		return nil, err
	}
	return &claimUpdate{
		addr: a,
		claim: power0.Claim{
			RawBytePower:    total.Raw,
			QualityAdjPower: total.QA,
		},
	}, nil
}
// expirationQueueStats aggregates the power totals recomputed while walking a
// partition's expiration queue in correctExpirationQueue.
type expirationQueueStats struct {
	// total of all active power in the expiration queue
	totalActivePower miner.PowerPair
	// total of all faulty power in the expiration queue
	totalFaultyPower miner.PowerPair
}
// Updates the expiration queue by correcting any duplicate entries and their fallout.
// If no changes need to be made cid.Undef will be returned.
// Returns the new root of the expiration queue
//
// Operates in two passes over the queue in epoch order:
//   1. Subtract from each expiration set any sector already terminated
//      (allTerminated) or already seen in an earlier set.
//   2. Only if pass 1 changed anything, recompute power and pledge for every
//      set and accumulate partition-wide active/faulty power totals.
func (m *minerMigrator) correctExpirationQueue(exq miner.ExpirationQueue, sectors miner.Sectors,
	allTerminated bitfield.BitField, allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (cid.Cid, expirationQueueStats, error) {
	// processed expired sectors includes all terminated and all sectors seen in earlier expiration sets
	processedExpiredSectors := allTerminated
	expirationSetPowerSuspect := false
	var exs miner.ExpirationSet
	// Check for faults that need to be erased.
	// Erased faults will be removed from bitfields and the power will be recomputed
	// in the subsequent loop.
	err := exq.ForEach(&exs, func(epoch int64) error { //nolint:nomaprange
		// Detect sectors that are present in this expiration set as "early", but that
		// have already terminated or duplicate a prior entry in the queue, and thus will
		// be terminated before this entry is processed. The sector was rescheduled here
		// upon fault, but the entry is stale and should not exist.
		modified := false
		earlyDuplicates, err := bitfield.IntersectBitField(exs.EarlySectors, processedExpiredSectors)
		if err != nil {
			return err
		} else if empty, err := earlyDuplicates.IsEmpty(); err != nil {
			return err
		} else if !empty {
			modified = true
			exs.EarlySectors, err = bitfield.SubtractBitField(exs.EarlySectors, earlyDuplicates)
			if err != nil {
				return err
			}
		}
		// Detect sectors that are terminating on time, but have either already terminated or duplicate
		// an entry in the queue. The sector might be faulty, but were expiring here anyway so not
		// rescheduled as "early".
		onTimeDuplicates, err := bitfield.IntersectBitField(exs.OnTimeSectors, processedExpiredSectors)
		if err != nil {
			return err
		} else if empty, err := onTimeDuplicates.IsEmpty(); err != nil {
			return err
		} else if !empty {
			modified = true
			exs.OnTimeSectors, err = bitfield.SubtractBitField(exs.OnTimeSectors, onTimeDuplicates)
			if err != nil {
				return err
			}
		}
		if modified {
			expirationSetPowerSuspect = true
			// exs is reused by ForEach for every entry; store a copy so the
			// queue entry doesn't alias the shared loop value (see copyES).
			exs2, err := copyES(exs)
			if err != nil {
				return err
			}
			if err := exq.Set(uint64(epoch), &exs2); err != nil {
				return err
			}
		}
		// Record all sectors that would be terminated after this queue entry is processed.
		if processedExpiredSectors, err = bitfield.MultiMerge(processedExpiredSectors, exs.EarlySectors, exs.OnTimeSectors); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return cid.Undef, expirationQueueStats{}, err
	}
	// If we didn't find any duplicate sectors, we're done.
	if !expirationSetPowerSuspect {
		return cid.Undef, expirationQueueStats{}, nil
	}
	// Pass 2: recompute power and pledge for every expiration set, now that
	// stale bitfield entries have been removed.
	partitionActivePower := miner.NewPowerPairZero()
	partitionFaultyPower := miner.NewPowerPairZero()
	err = exq.ForEach(&exs, func(epoch int64) error {
		modified, activePower, faultyPower, err := correctExpirationSetPowerAndPledge(&exs, sectors, allFaults, sectorSize)
		if err != nil {
			return err
		}
		partitionActivePower = partitionActivePower.Add(activePower)
		partitionFaultyPower = partitionFaultyPower.Add(faultyPower)
		if modified {
			exs2, err := copyES(exs)
			if err != nil {
				return err
			}
			if err := exq.Set(uint64(epoch), &exs2); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return cid.Undef, expirationQueueStats{}, err
	}
	expirationQueueRoot, err := exq.Root()
	if err != nil {
		return cid.Undef, expirationQueueStats{}, err
	}
	return expirationQueueRoot, expirationQueueStats{
		partitionActivePower,
		partitionFaultyPower,
	}, nil
}
| // Recompute active and faulty power for an expiration set.
// The active power for an expiration set should be the sum of the power of all its active sectors,
// where active means all sectors not labeled as a fault in the partition. Similarly, faulty power
// is the sum of faulty sectors.
// If a sector has been rescheduled from ES3 to both ES1 as active and ES2
// as a fault, we expect it to be labeled as a fault in the partition. We have already
// removed the sector from ES2, so this correction should move its active power to faulty power in ES1
// because it is labeled as a fault, remove its power altogether from ES2 because its been removed from
// ES2's bitfields, and correct the double subtraction of power from ES3.
func correctExpirationSetPowerAndPledge(exs *miner.ExpirationSet, sectors miner.Sectors,
	allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (bool, miner.PowerPair, miner.PowerPair, error) {
	// modified tracks whether any field of exs was changed in place; the
	// caller persists the set only when true.
	modified := false
	// All sectors in this set, whether expiring on time or early.
	allSectors, err := bitfield.MergeBitFields(exs.OnTimeSectors, exs.EarlySectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	// correct errors in active power: active = all sectors in the set minus
	// those the partition labels as faulty.
	activeSectors, err := bitfield.SubtractBitField(allSectors, allFaults)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	as, err := sectors.Load(activeSectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	activePower := miner.PowerForSectors(sectorSize, as)
	if !activePower.Equals(exs.ActivePower) {
		exs.ActivePower = activePower
		modified = true
	}
	// correct errors in faulty power: faulty = sectors of the set that the
	// partition labels as faulty.
	faultySectors, err := bitfield.IntersectBitField(allSectors, allFaults)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	fs, err := sectors.Load(faultySectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	faultyPower := miner.PowerForSectors(sectorSize, fs)
	if !faultyPower.Equals(exs.FaultyPower) {
		exs.FaultyPower = faultyPower
		modified = true
	}
	// correct errors in pledge: on-time pledge is the sum of initial pledges
	// of the on-time sectors only (early sectors carry no pledge here).
	expectedPledge := big.Zero()
	ots, err := sectors.Load(exs.OnTimeSectors)
	if err != nil {
		return false, miner.PowerPair{}, miner.PowerPair{}, err
	}
	for _, sector := range ots {
		expectedPledge = big.Add(expectedPledge, sector.InitialPledge)
	}
	if !expectedPledge.Equals(exs.OnTimePledge) {
		exs.OnTimePledge = expectedPledge
		modified = true
	}
	return modified, activePower, faultyPower, nil
}
// copyES returns an independent copy of an ExpirationSet: the two sector
// bitfields are duplicated via Copy, while the power and pledge fields (plain
// values) are copied by assignment.
func copyES(in miner.ExpirationSet) (miner.ExpirationSet, error) {
	ots, err := in.OnTimeSectors.Copy()
	if err != nil {
		return miner.ExpirationSet{}, err
	}
	es, err := in.EarlySectors.Copy()
	if err != nil {
		return miner.ExpirationSet{}, err
	}
	return miner.ExpirationSet{
		OnTimeSectors: ots,
		EarlySectors: es,
		OnTimePledge: in.OnTimePledge,
		ActivePower: in.ActivePower,
		FaultyPower: in.FaultyPower,
	}, nil
} | random_line_split |
miner_corrections.go | package nv4
import (
"bytes"
"context"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/util/adt"
)
func (m *minerMigrator) CorrectState(ctx context.Context, store cbor.IpldStore, head cid.Cid,
priorEpoch abi.ChainEpoch, a addr.Address) (*StateMigrationResult, error) {
result := &StateMigrationResult{
NewHead: head,
Transfer: big.Zero(),
}
epoch := priorEpoch + 1
var st miner.State
if err := store.Get(ctx, head, &st); err != nil {
return nil, err
}
// If the miner's proving period hasn't started yet, it's a new v0
// miner.
//
// 1. There's no need to fix any state.
// 2. We definitely don't want to reschedule the proving period
// start/deadlines.
if st.ProvingPeriodStart > epoch {
return result, nil
}
adtStore := adt.WrapStore(ctx, store)
sectors, err := miner.LoadSectors(adtStore, st.Sectors)
if err != nil {
return nil, err
}
info, err := st.GetInfo(adtStore)
if err != nil {
return nil, err
}
powerClaimSuspect, err := m.correctForCCUpgradeThenFaultIssue(ctx, store, &st, sectors, epoch, info.SectorSize)
if err != nil {
return nil, err
}
if powerClaimSuspect {
claimUpdate, err := m.computeClaim(ctx, adtStore, &st, a)
if err != nil {
return nil, err
}
if claimUpdate != nil {
result.PowerUpdates = append(result.PowerUpdates, claimUpdate)
}
cronUpdate, err := m.computeProvingPeriodCron(a, &st, epoch, adtStore)
if err != nil {
return nil, err
}
if cronUpdate != nil {
result.PowerUpdates = append(result.PowerUpdates, cronUpdate)
}
}
newHead, err := store.Put(ctx, &st)
result.NewHead = newHead
return result, err
}
func (m *minerMigrator) correctForCCUpgradeThenFaultIssue(
ctx context.Context, store cbor.IpldStore, st *miner.State, sectors miner.Sectors, epoch abi.ChainEpoch,
sectorSize abi.SectorSize,
) (bool, error) {
quantSpec := st.QuantSpecEveryDeadline()
deadlines, err := st.LoadDeadlines(adt.WrapStore(ctx, store))
if err != nil {
return false, err
}
missedProvingPeriodCron := false
for st.ProvingPeriodStart+miner.WPoStProvingPeriod <= epoch {
st.ProvingPeriodStart += miner.WPoStProvingPeriod
missedProvingPeriodCron = true
}
expectedDeadlline := uint64((epoch - st.ProvingPeriodStart) / miner.WPoStChallengeWindow)
if expectedDeadlline != st.CurrentDeadline {
st.CurrentDeadline = expectedDeadlline
missedProvingPeriodCron = true
}
deadlinesModified := false
err = deadlines.ForEach(adt.WrapStore(ctx, store), func(dlIdx uint64, deadline *miner.Deadline) error {
partitions, err := adt.AsArray(adt.WrapStore(ctx, store), deadline.Partitions)
if err != nil {
return err
}
alteredPartitions := make(map[uint64]miner.Partition)
allFaultyPower := miner.NewPowerPairZero()
var part miner.Partition
err = partitions.ForEach(&part, func(partIdx int64) error {
exq, err := miner.LoadExpirationQueue(adt.WrapStore(ctx, store), part.ExpirationsEpochs, quantSpec)
if err != nil {
return err
}
exqRoot, stats, err := m.correctExpirationQueue(exq, sectors, part.Terminated, part.Faults, sectorSize)
if err != nil {
return err
}
// if unmodified, we're done
if exqRoot.Equals(cid.Undef) {
return nil
}
if !part.ExpirationsEpochs.Equals(exqRoot) {
part.ExpirationsEpochs = exqRoot
alteredPartitions[uint64(partIdx)] = part
}
if !part.LivePower.Equals(stats.totalActivePower.Add(stats.totalFaultyPower)) {
part.LivePower = stats.totalActivePower.Add(stats.totalFaultyPower)
alteredPartitions[uint64(partIdx)] = part
}
if !part.FaultyPower.Equals(stats.totalFaultyPower) {
part.FaultyPower = stats.totalFaultyPower
alteredPartitions[uint64(partIdx)] = part
}
if missedProvingPeriodCron {
part.Recoveries = bitfield.New()
part.RecoveringPower = miner.NewPowerPairZero()
alteredPartitions[uint64(partIdx)] = part
}
allFaultyPower = allFaultyPower.Add(part.FaultyPower)
return nil
})
if err != nil {
return err
}
// if we've failed to update at last proving period, expect post submissions to contain bits it shouldn't
if missedProvingPeriodCron {
deadline.PostSubmissions = bitfield.New()
if err := deadlines.UpdateDeadline(adt.WrapStore(ctx, store), dlIdx, deadline); err != nil {
return err
}
deadlinesModified = true
}
// if partitions have been updates, record that in deadline
if len(alteredPartitions) > 0 {
for partIdx, part := range alteredPartitions { // nolint:nomaprange
if err := partitions.Set(partIdx, &part); err != nil {
return err
}
}
deadline.Partitions, err = partitions.Root()
if err != nil {
return err
}
deadline.FaultyPower = allFaultyPower
if err := deadlines.UpdateDeadline(adt.WrapStore(ctx, store), dlIdx, deadline); err != nil {
return err
}
deadlinesModified = true
}
return nil
})
if err != nil {
return false, err
}
if !deadlinesModified {
return false, nil
}
if err = st.SaveDeadlines(adt.WrapStore(ctx, store), deadlines); err != nil {
return false, err
}
return true, err
}
func (m *minerMigrator) computeProvingPeriodCron(a addr.Address, st *miner.State, epoch abi.ChainEpoch, store adt.Store) (*cronUpdate, error) {
var buf bytes.Buffer
payload := &miner.CronEventPayload{
EventType: miner.CronEventProvingDeadline,
}
err := payload.MarshalCBOR(&buf)
if err != nil {
return nil, err
}
dlInfo := st.DeadlineInfo(epoch)
return &cronUpdate{
epoch: dlInfo.Last(),
event: power0.CronEvent{
MinerAddr: a,
CallbackPayload: buf.Bytes(),
},
}, nil
}
func (m *minerMigrator) computeClaim(ctx context.Context, store adt.Store, st *miner.State, a addr.Address) (*claimUpdate, error) {
deadlines, err := st.LoadDeadlines(store)
if err != nil {
return nil, err
}
activePower := miner.NewPowerPairZero()
err = deadlines.ForEach(store, func(dlIdx uint64, dl *miner.Deadline) error {
partitions, err := dl.PartitionsArray(store)
if err != nil {
return err
}
var part miner.Partition
return partitions.ForEach(&part, func(pIdx int64) error {
activePower = activePower.Add(part.LivePower.Sub(part.FaultyPower))
return nil
})
})
if err != nil {
return nil, err
}
return &claimUpdate{
addr: a,
claim: power0.Claim{
RawBytePower: activePower.Raw,
QualityAdjPower: activePower.QA,
},
}, nil
}
type expirationQueueStats struct {
// total of all active power in the expiration queue
totalActivePower miner.PowerPair
// total of all faulty power in the expiration queue
totalFaultyPower miner.PowerPair
}
// Updates the expiration queue by correcting any duplicate entries and their fallout.
// If no changes need to be made cid.Undef will be returned.
// Returns the new root of the expiration queue
func (m *minerMigrator) correctExpirationQueue(exq miner.ExpirationQueue, sectors miner.Sectors,
allTerminated bitfield.BitField, allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (cid.Cid, expirationQueueStats, error) {
// processed expired sectors includes all terminated and all sectors seen in earlier expiration sets
processedExpiredSectors := allTerminated
expirationSetPowerSuspect := false
var exs miner.ExpirationSet
// Check for faults that need to be erased.
// Erased faults will be removed from bitfields and the power will be recomputed
// in the subsequent loop.
err := exq.ForEach(&exs, func(epoch int64) error { //nolint:nomaprange
// Detect sectors that are present in this expiration set as "early", but that
// have already terminated or duplicate a prior entry in the queue, and thus will
// be terminated before this entry is processed. The sector was rescheduled here
// upon fault, but the entry is stale and should not exist.
modified := false
earlyDuplicates, err := bitfield.IntersectBitField(exs.EarlySectors, processedExpiredSectors)
if err != nil {
return err
} else if empty, err := earlyDuplicates.IsEmpty(); err != nil {
return err
} else if !empty {
modified = true
exs.EarlySectors, err = bitfield.SubtractBitField(exs.EarlySectors, earlyDuplicates)
if err != nil {
return err
}
}
// Detect sectors that are terminating on time, but have either already terminated or duplicate
// an entry in the queue. The sector might be faulty, but were expiring here anyway so not
// rescheduled as "early".
onTimeDuplicates, err := bitfield.IntersectBitField(exs.OnTimeSectors, processedExpiredSectors)
if err != nil {
return err
} else if empty, err := onTimeDuplicates.IsEmpty(); err != nil {
return err
} else if !empty {
modified = true
exs.OnTimeSectors, err = bitfield.SubtractBitField(exs.OnTimeSectors, onTimeDuplicates)
if err != nil {
return err
}
}
if modified {
expirationSetPowerSuspect = true
exs2, err := copyES(exs)
if err != nil {
return err
}
if err := exq.Set(uint64(epoch), &exs2); err != nil {
return err
}
}
// Record all sectors that would be terminated after this queue entry is processed.
if processedExpiredSectors, err = bitfield.MultiMerge(processedExpiredSectors, exs.EarlySectors, exs.OnTimeSectors); err != nil {
return err
}
return nil
})
if err != nil {
return cid.Undef, expirationQueueStats{}, err
}
// If we didn't find any duplicate sectors, we're done.
if !expirationSetPowerSuspect {
return cid.Undef, expirationQueueStats{}, nil
}
partitionActivePower := miner.NewPowerPairZero()
partitionFaultyPower := miner.NewPowerPairZero()
err = exq.ForEach(&exs, func(epoch int64) error {
modified, activePower, faultyPower, err := correctExpirationSetPowerAndPledge(&exs, sectors, allFaults, sectorSize)
if err != nil {
return err
}
partitionActivePower = partitionActivePower.Add(activePower)
partitionFaultyPower = partitionFaultyPower.Add(faultyPower)
if modified {
exs2, err := copyES(exs)
if err != nil {
return err
}
if err := exq.Set(uint64(epoch), &exs2); err != nil |
}
return nil
})
if err != nil {
return cid.Undef, expirationQueueStats{}, err
}
expirationQueueRoot, err := exq.Root()
if err != nil {
return cid.Undef, expirationQueueStats{}, err
}
return expirationQueueRoot, expirationQueueStats{
partitionActivePower,
partitionFaultyPower,
}, nil
}
// Recompute active and faulty power for an expiration set.
// The active power for an expiration set should be the sum of the power of all its active sectors,
// where active means all sectors not labeled as a fault in the partition. Similarly, faulty power
// is the sum of faulty sectors.
// If a sector has been rescheduled from ES3 to both ES1 as active and ES2
// as a fault, we expect it to be labeled as a fault in the partition. We have already
// removed the sector from ES2, so this correction should move its active power to faulty power in ES1
// because it is labeled as a fault, remove its power altogether from ES2 because its been removed from
// ES2's bitfields, and correct the double subtraction of power from ES3.
func correctExpirationSetPowerAndPledge(exs *miner.ExpirationSet, sectors miner.Sectors,
allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (bool, miner.PowerPair, miner.PowerPair, error) {
modified := false
allSectors, err := bitfield.MergeBitFields(exs.OnTimeSectors, exs.EarlySectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
// correct errors in active power
activeSectors, err := bitfield.SubtractBitField(allSectors, allFaults)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
as, err := sectors.Load(activeSectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
activePower := miner.PowerForSectors(sectorSize, as)
if !activePower.Equals(exs.ActivePower) {
exs.ActivePower = activePower
modified = true
}
// correct errors in faulty power
faultySectors, err := bitfield.IntersectBitField(allSectors, allFaults)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
fs, err := sectors.Load(faultySectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
faultyPower := miner.PowerForSectors(sectorSize, fs)
if !faultyPower.Equals(exs.FaultyPower) {
exs.FaultyPower = faultyPower
modified = true
}
// correct errors in pledge
expectedPledge := big.Zero()
ots, err := sectors.Load(exs.OnTimeSectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
for _, sector := range ots {
expectedPledge = big.Add(expectedPledge, sector.InitialPledge)
}
if !expectedPledge.Equals(exs.OnTimePledge) {
exs.OnTimePledge = expectedPledge
modified = true
}
return modified, activePower, faultyPower, nil
}
func copyES(in miner.ExpirationSet) (miner.ExpirationSet, error) {
ots, err := in.OnTimeSectors.Copy()
if err != nil {
return miner.ExpirationSet{}, err
}
es, err := in.EarlySectors.Copy()
if err != nil {
return miner.ExpirationSet{}, err
}
return miner.ExpirationSet{
OnTimeSectors: ots,
EarlySectors: es,
OnTimePledge: in.OnTimePledge,
ActivePower: in.ActivePower,
FaultyPower: in.FaultyPower,
}, nil
}
| {
return err
} | conditional_block |
miner_corrections.go | package nv4
import (
"bytes"
"context"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
miner "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/util/adt"
)
func (m *minerMigrator) CorrectState(ctx context.Context, store cbor.IpldStore, head cid.Cid,
priorEpoch abi.ChainEpoch, a addr.Address) (*StateMigrationResult, error) {
result := &StateMigrationResult{
NewHead: head,
Transfer: big.Zero(),
}
epoch := priorEpoch + 1
var st miner.State
if err := store.Get(ctx, head, &st); err != nil {
return nil, err
}
// If the miner's proving period hasn't started yet, it's a new v0
// miner.
//
// 1. There's no need to fix any state.
// 2. We definitely don't want to reschedule the proving period
// start/deadlines.
if st.ProvingPeriodStart > epoch {
return result, nil
}
adtStore := adt.WrapStore(ctx, store)
sectors, err := miner.LoadSectors(adtStore, st.Sectors)
if err != nil {
return nil, err
}
info, err := st.GetInfo(adtStore)
if err != nil {
return nil, err
}
powerClaimSuspect, err := m.correctForCCUpgradeThenFaultIssue(ctx, store, &st, sectors, epoch, info.SectorSize)
if err != nil {
return nil, err
}
if powerClaimSuspect {
claimUpdate, err := m.computeClaim(ctx, adtStore, &st, a)
if err != nil {
return nil, err
}
if claimUpdate != nil {
result.PowerUpdates = append(result.PowerUpdates, claimUpdate)
}
cronUpdate, err := m.computeProvingPeriodCron(a, &st, epoch, adtStore)
if err != nil {
return nil, err
}
if cronUpdate != nil {
result.PowerUpdates = append(result.PowerUpdates, cronUpdate)
}
}
newHead, err := store.Put(ctx, &st)
result.NewHead = newHead
return result, err
}
func (m *minerMigrator) correctForCCUpgradeThenFaultIssue(
ctx context.Context, store cbor.IpldStore, st *miner.State, sectors miner.Sectors, epoch abi.ChainEpoch,
sectorSize abi.SectorSize,
) (bool, error) {
quantSpec := st.QuantSpecEveryDeadline()
deadlines, err := st.LoadDeadlines(adt.WrapStore(ctx, store))
if err != nil {
return false, err
}
missedProvingPeriodCron := false
for st.ProvingPeriodStart+miner.WPoStProvingPeriod <= epoch {
st.ProvingPeriodStart += miner.WPoStProvingPeriod
missedProvingPeriodCron = true
}
expectedDeadlline := uint64((epoch - st.ProvingPeriodStart) / miner.WPoStChallengeWindow)
if expectedDeadlline != st.CurrentDeadline {
st.CurrentDeadline = expectedDeadlline
missedProvingPeriodCron = true
}
deadlinesModified := false
err = deadlines.ForEach(adt.WrapStore(ctx, store), func(dlIdx uint64, deadline *miner.Deadline) error {
partitions, err := adt.AsArray(adt.WrapStore(ctx, store), deadline.Partitions)
if err != nil {
return err
}
alteredPartitions := make(map[uint64]miner.Partition)
allFaultyPower := miner.NewPowerPairZero()
var part miner.Partition
err = partitions.ForEach(&part, func(partIdx int64) error {
exq, err := miner.LoadExpirationQueue(adt.WrapStore(ctx, store), part.ExpirationsEpochs, quantSpec)
if err != nil {
return err
}
exqRoot, stats, err := m.correctExpirationQueue(exq, sectors, part.Terminated, part.Faults, sectorSize)
if err != nil {
return err
}
// if unmodified, we're done
if exqRoot.Equals(cid.Undef) {
return nil
}
if !part.ExpirationsEpochs.Equals(exqRoot) {
part.ExpirationsEpochs = exqRoot
alteredPartitions[uint64(partIdx)] = part
}
if !part.LivePower.Equals(stats.totalActivePower.Add(stats.totalFaultyPower)) {
part.LivePower = stats.totalActivePower.Add(stats.totalFaultyPower)
alteredPartitions[uint64(partIdx)] = part
}
if !part.FaultyPower.Equals(stats.totalFaultyPower) {
part.FaultyPower = stats.totalFaultyPower
alteredPartitions[uint64(partIdx)] = part
}
if missedProvingPeriodCron {
part.Recoveries = bitfield.New()
part.RecoveringPower = miner.NewPowerPairZero()
alteredPartitions[uint64(partIdx)] = part
}
allFaultyPower = allFaultyPower.Add(part.FaultyPower)
return nil
})
if err != nil {
return err
}
// if we've failed to update at last proving period, expect post submissions to contain bits it shouldn't
if missedProvingPeriodCron {
deadline.PostSubmissions = bitfield.New()
if err := deadlines.UpdateDeadline(adt.WrapStore(ctx, store), dlIdx, deadline); err != nil {
return err
}
deadlinesModified = true
}
// if partitions have been updates, record that in deadline
if len(alteredPartitions) > 0 {
for partIdx, part := range alteredPartitions { // nolint:nomaprange
if err := partitions.Set(partIdx, &part); err != nil {
return err
}
}
deadline.Partitions, err = partitions.Root()
if err != nil {
return err
}
deadline.FaultyPower = allFaultyPower
if err := deadlines.UpdateDeadline(adt.WrapStore(ctx, store), dlIdx, deadline); err != nil {
return err
}
deadlinesModified = true
}
return nil
})
if err != nil {
return false, err
}
if !deadlinesModified {
return false, nil
}
if err = st.SaveDeadlines(adt.WrapStore(ctx, store), deadlines); err != nil {
return false, err
}
return true, err
}
func (m *minerMigrator) computeProvingPeriodCron(a addr.Address, st *miner.State, epoch abi.ChainEpoch, store adt.Store) (*cronUpdate, error) {
var buf bytes.Buffer
payload := &miner.CronEventPayload{
EventType: miner.CronEventProvingDeadline,
}
err := payload.MarshalCBOR(&buf)
if err != nil {
return nil, err
}
dlInfo := st.DeadlineInfo(epoch)
return &cronUpdate{
epoch: dlInfo.Last(),
event: power0.CronEvent{
MinerAddr: a,
CallbackPayload: buf.Bytes(),
},
}, nil
}
func (m *minerMigrator) computeClaim(ctx context.Context, store adt.Store, st *miner.State, a addr.Address) (*claimUpdate, error) {
deadlines, err := st.LoadDeadlines(store)
if err != nil {
return nil, err
}
activePower := miner.NewPowerPairZero()
err = deadlines.ForEach(store, func(dlIdx uint64, dl *miner.Deadline) error {
partitions, err := dl.PartitionsArray(store)
if err != nil {
return err
}
var part miner.Partition
return partitions.ForEach(&part, func(pIdx int64) error {
activePower = activePower.Add(part.LivePower.Sub(part.FaultyPower))
return nil
})
})
if err != nil {
return nil, err
}
return &claimUpdate{
addr: a,
claim: power0.Claim{
RawBytePower: activePower.Raw,
QualityAdjPower: activePower.QA,
},
}, nil
}
type expirationQueueStats struct {
// total of all active power in the expiration queue
totalActivePower miner.PowerPair
// total of all faulty power in the expiration queue
totalFaultyPower miner.PowerPair
}
// Updates the expiration queue by correcting any duplicate entries and their fallout.
// If no changes need to be made cid.Undef will be returned.
// Returns the new root of the expiration queue
func (m *minerMigrator) correctExpirationQueue(exq miner.ExpirationQueue, sectors miner.Sectors,
allTerminated bitfield.BitField, allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (cid.Cid, expirationQueueStats, error) |
// Recompute active and faulty power for an expiration set.
// The active power for an expiration set should be the sum of the power of all its active sectors,
// where active means all sectors not labeled as a fault in the partition. Similarly, faulty power
// is the sum of faulty sectors.
// If a sector has been rescheduled from ES3 to both ES1 as active and ES2
// as a fault, we expect it to be labeled as a fault in the partition. We have already
// removed the sector from ES2, so this correction should move its active power to faulty power in ES1
// because it is labeled as a fault, remove its power altogether from ES2 because its been removed from
// ES2's bitfields, and correct the double subtraction of power from ES3.
func correctExpirationSetPowerAndPledge(exs *miner.ExpirationSet, sectors miner.Sectors,
allFaults bitfield.BitField, sectorSize abi.SectorSize,
) (bool, miner.PowerPair, miner.PowerPair, error) {
modified := false
allSectors, err := bitfield.MergeBitFields(exs.OnTimeSectors, exs.EarlySectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
// correct errors in active power
activeSectors, err := bitfield.SubtractBitField(allSectors, allFaults)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
as, err := sectors.Load(activeSectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
activePower := miner.PowerForSectors(sectorSize, as)
if !activePower.Equals(exs.ActivePower) {
exs.ActivePower = activePower
modified = true
}
// correct errors in faulty power
faultySectors, err := bitfield.IntersectBitField(allSectors, allFaults)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
fs, err := sectors.Load(faultySectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
faultyPower := miner.PowerForSectors(sectorSize, fs)
if !faultyPower.Equals(exs.FaultyPower) {
exs.FaultyPower = faultyPower
modified = true
}
// correct errors in pledge
expectedPledge := big.Zero()
ots, err := sectors.Load(exs.OnTimeSectors)
if err != nil {
return false, miner.PowerPair{}, miner.PowerPair{}, err
}
for _, sector := range ots {
expectedPledge = big.Add(expectedPledge, sector.InitialPledge)
}
if !expectedPledge.Equals(exs.OnTimePledge) {
exs.OnTimePledge = expectedPledge
modified = true
}
return modified, activePower, faultyPower, nil
}
func copyES(in miner.ExpirationSet) (miner.ExpirationSet, error) {
ots, err := in.OnTimeSectors.Copy()
if err != nil {
return miner.ExpirationSet{}, err
}
es, err := in.EarlySectors.Copy()
if err != nil {
return miner.ExpirationSet{}, err
}
return miner.ExpirationSet{
OnTimeSectors: ots,
EarlySectors: es,
OnTimePledge: in.OnTimePledge,
ActivePower: in.ActivePower,
FaultyPower: in.FaultyPower,
}, nil
}
| {
// processed expired sectors includes all terminated and all sectors seen in earlier expiration sets
processedExpiredSectors := allTerminated
expirationSetPowerSuspect := false
var exs miner.ExpirationSet
// Check for faults that need to be erased.
// Erased faults will be removed from bitfields and the power will be recomputed
// in the subsequent loop.
err := exq.ForEach(&exs, func(epoch int64) error { //nolint:nomaprange
// Detect sectors that are present in this expiration set as "early", but that
// have already terminated or duplicate a prior entry in the queue, and thus will
// be terminated before this entry is processed. The sector was rescheduled here
// upon fault, but the entry is stale and should not exist.
modified := false
earlyDuplicates, err := bitfield.IntersectBitField(exs.EarlySectors, processedExpiredSectors)
if err != nil {
return err
} else if empty, err := earlyDuplicates.IsEmpty(); err != nil {
return err
} else if !empty {
modified = true
exs.EarlySectors, err = bitfield.SubtractBitField(exs.EarlySectors, earlyDuplicates)
if err != nil {
return err
}
}
// Detect sectors that are terminating on time, but have either already terminated or duplicate
// an entry in the queue. The sector might be faulty, but were expiring here anyway so not
// rescheduled as "early".
onTimeDuplicates, err := bitfield.IntersectBitField(exs.OnTimeSectors, processedExpiredSectors)
if err != nil {
return err
} else if empty, err := onTimeDuplicates.IsEmpty(); err != nil {
return err
} else if !empty {
modified = true
exs.OnTimeSectors, err = bitfield.SubtractBitField(exs.OnTimeSectors, onTimeDuplicates)
if err != nil {
return err
}
}
if modified {
expirationSetPowerSuspect = true
exs2, err := copyES(exs)
if err != nil {
return err
}
if err := exq.Set(uint64(epoch), &exs2); err != nil {
return err
}
}
// Record all sectors that would be terminated after this queue entry is processed.
if processedExpiredSectors, err = bitfield.MultiMerge(processedExpiredSectors, exs.EarlySectors, exs.OnTimeSectors); err != nil {
return err
}
return nil
})
if err != nil {
return cid.Undef, expirationQueueStats{}, err
}
// If we didn't find any duplicate sectors, we're done.
if !expirationSetPowerSuspect {
return cid.Undef, expirationQueueStats{}, nil
}
partitionActivePower := miner.NewPowerPairZero()
partitionFaultyPower := miner.NewPowerPairZero()
err = exq.ForEach(&exs, func(epoch int64) error {
modified, activePower, faultyPower, err := correctExpirationSetPowerAndPledge(&exs, sectors, allFaults, sectorSize)
if err != nil {
return err
}
partitionActivePower = partitionActivePower.Add(activePower)
partitionFaultyPower = partitionFaultyPower.Add(faultyPower)
if modified {
exs2, err := copyES(exs)
if err != nil {
return err
}
if err := exq.Set(uint64(epoch), &exs2); err != nil {
return err
}
}
return nil
})
if err != nil {
return cid.Undef, expirationQueueStats{}, err
}
expirationQueueRoot, err := exq.Root()
if err != nil {
return cid.Undef, expirationQueueStats{}, err
}
return expirationQueueRoot, expirationQueueStats{
partitionActivePower,
partitionFaultyPower,
}, nil
} | identifier_body |
logging_service_v2_api.js | /*
* Copyright 2016 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* EDITING INSTRUCTIONS
* This file was generated from the file
* https://github.com/googleapis/googleapis/blob/master/google/logging/v2/logging.proto,
* and updates to that file get reflected here through a refresh process.
* For the short term, the refresh process will only be runnable by Google
* engineers.
*
* The only allowed edits are to method and file documentation. A 3-way
* merge preserves those additions if the generated source changes.
*/
/* TODO: introduce line-wrapping so that it never exceeds the limit. */
/* jscs: disable maximumLineLength */
'use strict';
var configData = require('./logging_service_v2_client_config');
var extend = require('extend');
var gax = require('google-gax');
var SERVICE_ADDRESS = 'logging.googleapis.com';
var DEFAULT_SERVICE_PORT = 443;
var CODE_GEN_NAME_VERSION = 'gapic/0.1.0';
var PAGE_DESCRIPTORS = {
listLogEntries: new gax.PageDescriptor(
'pageToken',
'nextPageToken',
'entries'),
listMonitoredResourceDescriptors: new gax.PageDescriptor(
'pageToken',
'nextPageToken',
'resourceDescriptors')
};
/**
* The scopes needed to make gRPC calls to all of the methods defined in
* this service.
*/
var ALL_SCOPES = [
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'https://www.googleapis.com/auth/logging.admin',
'https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write'
];
/**
* Service for ingesting and querying logs.
*
* This will be created through a builder function which can be obtained by the module.
* See the following example of how to initialize the module and how to access to the builder.
* @see {@link loggingServiceV2Api}
*
* @example
* var loggingV2 = require('@google-cloud/logging').v2({
* // optional auth parameters.
* });
* var api = loggingV2.loggingServiceV2Api();
*
* @class
*/
function LoggingServiceV2Api(gaxGrpc, grpcClients, opts) |
// Path templates
var PARENT_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}');
var LOG_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}/logs/{log}');
/**
* Returns a fully-qualified parent resource name string.
* @param {String} project
* @returns {String}
*/
LoggingServiceV2Api.prototype.parentPath = function parentPath(project) {
return PARENT_PATH_TEMPLATE.render({
project: project
});
};
/**
* Parses the parentName from a parent resource.
* @param {String} parentName
* A fully-qualified path representing a parent resources.
* @returns {String} - A string representing the project.
*/
LoggingServiceV2Api.prototype.matchProjectFromParentName =
function matchProjectFromParentName(parentName) {
return PARENT_PATH_TEMPLATE.match(parentName).project;
};
/**
* Returns a fully-qualified log resource name string.
* @param {String} project
* @param {String} log
* @returns {String}
*/
LoggingServiceV2Api.prototype.logPath = function logPath(project, log) {
return LOG_PATH_TEMPLATE.render({
project: project,
log: log
});
};
/**
* Parses the logName from a log resource.
* @param {String} logName
* A fully-qualified path representing a log resources.
* @returns {String} - A string representing the project.
*/
LoggingServiceV2Api.prototype.matchProjectFromLogName =
function matchProjectFromLogName(logName) {
return LOG_PATH_TEMPLATE.match(logName).project;
};
/**
* Parses the logName from a log resource.
* @param {String} logName
* A fully-qualified path representing a log resources.
* @returns {String} - A string representing the log.
*/
LoggingServiceV2Api.prototype.matchLogFromLogName =
function matchLogFromLogName(logName) {
return LOG_PATH_TEMPLATE.match(logName).log;
};
// Service calls
/**
* Deletes a log and all its log entries.
* The log will reappear if it receives new entries.
*
* @param {string} logName
* Required. The resource name of the log to delete. Example:
* `"projects/my-project/logs/syslog"`.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
* @param {function(?Error)=} callback
* The function which will be called with the result of the API call.
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var formattedLogName = api.logPath("[PROJECT]", "[LOG]");
* api.deleteLog(formattedLogName, function(err) {
* if (err) {
* console.error(err);
* }
* });
*/
LoggingServiceV2Api.prototype.deleteLog = function deleteLog(
logName,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
logName: logName
};
return this._deleteLog(req, options, callback);
};
/**
* Writes log entries to Stackdriver Logging. All log entries are
* written by this method.
*
* @param {Object[]} entries
* Required. The log entries to write. Values supplied for the fields
* `log_name`, `resource`, and `labels` in this `entries.write` request are
* added to those log entries that do not provide their own values for the
* fields.
*
* To improve throughput and to avoid exceeding the
* [quota limit](https://cloud.google.com/logging/quota-policy) for calls to `entries.write`,
* you should write multiple log entries at once rather than
* calling this method for each individual log entry.
*
* This object should have the same structure as [LogEntry]{@link LogEntry}
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string=} options.logName
* Optional. A default log resource name that is assigned to all log entries
* in `entries` that do not specify a value for `log_name`. Example:
* `"projects/my-project/logs/syslog"`. See
* {@link LogEntry}.
* @param {Object=} options.resource
* Optional. A default monitored resource object that is assigned to all log
* entries in `entries` that do not specify a value for `resource`. Example:
*
* { "type": "gce_instance",
* "labels": {
* "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
*
* See {@link LogEntry}.
*
* This object should have the same structure as [google.api.MonitoredResource]{@link external:"google.api.MonitoredResource"}
* @param {Object.<string, string>=} options.labels
* Optional. Default labels that are added to the `labels` field of all log
* entries in `entries`. If a log entry already has a label with the same key
* as a label in this parameter, then the log entry's label is not changed.
* See {@link LogEntry}.
* @param {boolean=} options.partialSuccess
* Optional. Whether valid entries should be written even if some other
* entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
* entry is not written, the response status will be the error associated
* with one of the failed entries and include error details in the form of
* WriteLogEntriesPartialErrors.
*
* @param {function(?Error, ?Object)=} callback
* The function which will be called with the result of the API call.
*
* The second parameter to the callback is an object representing [WriteLogEntriesResponse]{@link WriteLogEntriesResponse}
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var entries = [];
* api.writeLogEntries(entries, function(err, response) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* });
*/
LoggingServiceV2Api.prototype.writeLogEntries = function writeLogEntries(
entries,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
entries: entries
};
if ('logName' in options) {
req.logName = options.logName;
}
if ('resource' in options) {
req.resource = options.resource;
}
if ('labels' in options) {
req.labels = options.labels;
}
if ('partialSuccess' in options) {
req.partialSuccess = options.partialSuccess;
}
return this._writeLogEntries(req, options, callback);
};
/**
* Lists log entries. Use this method to retrieve log entries from Cloud
* Logging. For ways to export log entries, see
* [Exporting Logs](https://cloud.google.com/logging/docs/export).
*
* @param {string[]} projectIds
* Deprecated. One or more project identifiers or project numbers from which
* to retrieve log entries. Examples: `"my-project-1A"`, `"1234567890"`. If
* present, these project identifiers are converted to resource format and
* added to the list of resources in `resourceNames`. Callers should use
* `resourceNames` rather than this parameter.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string[]=} options.resourceNames
* Optional. One or more cloud resources from which to retrieve log entries.
* Example: `"projects/my-project-1A"`, `"projects/1234567890"`. Projects
* listed in `projectIds` are added to this list.
* @param {string=} options.filter
* Optional. A filter that chooses which log entries to return. See [Advanced
* Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). Only log entries that
* match the filter are returned. An empty filter matches all log entries.
* @param {string=} options.orderBy
* Optional. How the results should be sorted. Presently, the only permitted
* values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
* option returns entries in order of increasing values of
* `LogEntry.timestamp` (oldest first), and the second option returns entries
* in order of decreasing timestamps (newest first). Entries with equal
* timestamps are returned in order of `LogEntry.insertId`.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListLogEntriesResponse]{@link ListLogEntriesResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter}
* An object stream which emits an object representing
* [LogEntry]{@link LogEntry} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var projectIds = [];
* // Iterate over all elements.
* api.listLogEntries(projectIds).on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listLogEntries(projectIds, {pageToken: nextPageToken}, callback);
* }
* }
* api.listLogEntries(projectIds, {flattenPages: false}, callback);
*/
LoggingServiceV2Api.prototype.listLogEntries = function listLogEntries(
projectIds,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
projectIds: projectIds
};
if ('resourceNames' in options) {
req.resourceNames = options.resourceNames;
}
if ('filter' in options) {
req.filter = options.filter;
}
if ('orderBy' in options) {
req.orderBy = options.orderBy;
}
if ('pageSize' in options) {
req.pageSize = options.pageSize;
}
return this._listLogEntries(req, options, callback);
};
/**
* Lists the monitored resource descriptors used by Stackdriver Logging.
*
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListMonitoredResourceDescriptorsResponse]{@link ListMonitoredResourceDescriptorsResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter}
* An object stream which emits an object representing
* [google.api.MonitoredResourceDescriptor]{@link external:"google.api.MonitoredResourceDescriptor"} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
*
* // Iterate over all elements.
* api.listMonitoredResourceDescriptors().on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listMonitoredResourceDescriptors({pageToken: nextPageToken}, callback);
* }
* }
* api.listMonitoredResourceDescriptors({flattenPages: false}, callback);
* api.listMonitoredResourceDescriptors(function(err, response) {
*/
LoggingServiceV2Api.prototype.listMonitoredResourceDescriptors = function listMonitoredResourceDescriptors(
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
};
if ('pageSize' in options) {
req.pageSize = options.pageSize;
}
return this._listMonitoredResourceDescriptors(req, options, callback);
};
function LoggingServiceV2ApiBuilder(gaxGrpc) {
if (!(this instanceof LoggingServiceV2ApiBuilder)) {
return new LoggingServiceV2ApiBuilder(gaxGrpc);
}
var loggingServiceV2Client = gaxGrpc.load([{
root: require('google-proto-files')('..'),
file: 'google/logging/v2/logging.proto'
}]);
extend(this, loggingServiceV2Client.google.logging.v2);
var grpcClients = {
loggingServiceV2Client: loggingServiceV2Client
};
/**
* Build a new instance of {@link LoggingServiceV2Api}.
*
* @param {Object=} opts - The optional parameters.
* @param {String=} opts.servicePath
* The domain name of the API remote host.
* @param {number=} opts.port
* The port on which to connect to the remote host.
* @param {grpc.ClientCredentials=} opts.sslCreds
* A ClientCredentials for use with an SSL-enabled channel.
* @param {Object=} opts.clientConfig
* The customized config to build the call settings. See
* {@link gax.constructSettings} for the format.
* @param {number=} opts.appName
* The codename of the calling service.
* @param {String=} opts.appVersion
* The version of the calling service.
*/
this.loggingServiceV2Api = function(opts) {
return new LoggingServiceV2Api(gaxGrpc, grpcClients, opts);
};
extend(this.loggingServiceV2Api, LoggingServiceV2Api);
}
module.exports = LoggingServiceV2ApiBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
module.exports.ALL_SCOPES = ALL_SCOPES; | {
opts = opts || {};
var servicePath = opts.servicePath || SERVICE_ADDRESS;
var port = opts.port || DEFAULT_SERVICE_PORT;
var sslCreds = opts.sslCreds || null;
var clientConfig = opts.clientConfig || {};
var appName = opts.appName || 'gax';
var appVersion = opts.appVersion || gax.version;
var googleApiClient = [
appName + '/' + appVersion,
CODE_GEN_NAME_VERSION,
'gax/' + gax.version,
'nodejs/' + process.version].join(' ');
var defaults = gaxGrpc.constructSettings(
'google.logging.v2.LoggingServiceV2',
configData,
clientConfig,
PAGE_DESCRIPTORS,
null,
{'x-goog-api-client': googleApiClient});
var loggingServiceV2Stub = gaxGrpc.createStub(
servicePath,
port,
grpcClients.loggingServiceV2Client.google.logging.v2.LoggingServiceV2,
{sslCreds: sslCreds});
var loggingServiceV2StubMethods = [
'deleteLog',
'writeLogEntries',
'listLogEntries',
'listMonitoredResourceDescriptors'
];
loggingServiceV2StubMethods.forEach(function(methodName) {
this['_' + methodName] = gax.createApiCall(
loggingServiceV2Stub.then(function(loggingServiceV2Stub) {
return loggingServiceV2Stub[methodName].bind(loggingServiceV2Stub);
}),
defaults[methodName]);
}.bind(this));
} | identifier_body |
logging_service_v2_api.js | /*
* Copyright 2016 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* EDITING INSTRUCTIONS
* This file was generated from the file
* https://github.com/googleapis/googleapis/blob/master/google/logging/v2/logging.proto,
* and updates to that file get reflected here through a refresh process.
* For the short term, the refresh process will only be runnable by Google
* engineers.
*
* The only allowed edits are to method and file documentation. A 3-way
* merge preserves those additions if the generated source changes.
*/
/* TODO: introduce line-wrapping so that it never exceeds the limit. */
/* jscs: disable maximumLineLength */
'use strict';
var configData = require('./logging_service_v2_client_config');
var extend = require('extend');
var gax = require('google-gax');
var SERVICE_ADDRESS = 'logging.googleapis.com';
var DEFAULT_SERVICE_PORT = 443;
var CODE_GEN_NAME_VERSION = 'gapic/0.1.0';
var PAGE_DESCRIPTORS = {
listLogEntries: new gax.PageDescriptor(
'pageToken',
'nextPageToken',
'entries'),
listMonitoredResourceDescriptors: new gax.PageDescriptor(
'pageToken',
'nextPageToken',
'resourceDescriptors')
};
/**
* The scopes needed to make gRPC calls to all of the methods defined in
* this service.
*/
var ALL_SCOPES = [
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'https://www.googleapis.com/auth/logging.admin',
'https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write'
];
/**
* Service for ingesting and querying logs.
*
* This will be created through a builder function which can be obtained by the module.
* See the following example of how to initialize the module and how to access to the builder.
* @see {@link loggingServiceV2Api}
*
* @example
* var loggingV2 = require('@google-cloud/logging').v2({
* // optional auth parameters.
* });
* var api = loggingV2.loggingServiceV2Api();
*
* @class
*/
function LoggingServiceV2Api(gaxGrpc, grpcClients, opts) {
opts = opts || {};
var servicePath = opts.servicePath || SERVICE_ADDRESS;
var port = opts.port || DEFAULT_SERVICE_PORT;
var sslCreds = opts.sslCreds || null;
var clientConfig = opts.clientConfig || {};
var appName = opts.appName || 'gax';
var appVersion = opts.appVersion || gax.version;
var googleApiClient = [
appName + '/' + appVersion,
CODE_GEN_NAME_VERSION,
'gax/' + gax.version,
'nodejs/' + process.version].join(' ');
var defaults = gaxGrpc.constructSettings(
'google.logging.v2.LoggingServiceV2',
configData,
clientConfig,
PAGE_DESCRIPTORS,
null,
{'x-goog-api-client': googleApiClient});
var loggingServiceV2Stub = gaxGrpc.createStub(
servicePath,
port,
grpcClients.loggingServiceV2Client.google.logging.v2.LoggingServiceV2,
{sslCreds: sslCreds});
var loggingServiceV2StubMethods = [
'deleteLog',
'writeLogEntries',
'listLogEntries',
'listMonitoredResourceDescriptors'
];
loggingServiceV2StubMethods.forEach(function(methodName) {
this['_' + methodName] = gax.createApiCall(
loggingServiceV2Stub.then(function(loggingServiceV2Stub) {
return loggingServiceV2Stub[methodName].bind(loggingServiceV2Stub);
}),
defaults[methodName]);
}.bind(this));
}
// Path templates
var PARENT_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}');
var LOG_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}/logs/{log}');
/**
* Returns a fully-qualified parent resource name string.
* @param {String} project
* @returns {String}
*/
LoggingServiceV2Api.prototype.parentPath = function parentPath(project) {
return PARENT_PATH_TEMPLATE.render({
project: project
});
};
/**
* Parses the parentName from a parent resource.
* @param {String} parentName
* A fully-qualified path representing a parent resources.
* @returns {String} - A string representing the project.
*/
LoggingServiceV2Api.prototype.matchProjectFromParentName =
function matchProjectFromParentName(parentName) {
return PARENT_PATH_TEMPLATE.match(parentName).project;
};
/**
* Returns a fully-qualified log resource name string.
* @param {String} project
* @param {String} log
* @returns {String}
*/
LoggingServiceV2Api.prototype.logPath = function logPath(project, log) {
return LOG_PATH_TEMPLATE.render({
project: project,
log: log
});
};
/**
* Parses the logName from a log resource.
* @param {String} logName
* A fully-qualified path representing a log resources.
* @returns {String} - A string representing the project.
*/
LoggingServiceV2Api.prototype.matchProjectFromLogName =
function matchProjectFromLogName(logName) {
return LOG_PATH_TEMPLATE.match(logName).project;
};
/**
* Parses the logName from a log resource.
* @param {String} logName
* A fully-qualified path representing a log resources.
* @returns {String} - A string representing the log.
*/
LoggingServiceV2Api.prototype.matchLogFromLogName =
function matchLogFromLogName(logName) {
return LOG_PATH_TEMPLATE.match(logName).log;
};
// Service calls
/**
* Deletes a log and all its log entries.
* The log will reappear if it receives new entries.
*
* @param {string} logName
* Required. The resource name of the log to delete. Example:
* `"projects/my-project/logs/syslog"`.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
* @param {function(?Error)=} callback
* The function which will be called with the result of the API call.
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var formattedLogName = api.logPath("[PROJECT]", "[LOG]");
* api.deleteLog(formattedLogName, function(err) {
* if (err) {
* console.error(err);
* }
* });
*/
LoggingServiceV2Api.prototype.deleteLog = function deleteLog(
logName,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
logName: logName
};
return this._deleteLog(req, options, callback);
};
/**
* Writes log entries to Stackdriver Logging. All log entries are
* written by this method.
*
* @param {Object[]} entries
* Required. The log entries to write. Values supplied for the fields
* `log_name`, `resource`, and `labels` in this `entries.write` request are
* added to those log entries that do not provide their own values for the
* fields.
*
* To improve throughput and to avoid exceeding the
* [quota limit](https://cloud.google.com/logging/quota-policy) for calls to `entries.write`,
* you should write multiple log entries at once rather than
* calling this method for each individual log entry.
*
* This object should have the same structure as [LogEntry]{@link LogEntry}
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string=} options.logName
* Optional. A default log resource name that is assigned to all log entries
* in `entries` that do not specify a value for `log_name`. Example:
* `"projects/my-project/logs/syslog"`. See
* {@link LogEntry}.
* @param {Object=} options.resource
* Optional. A default monitored resource object that is assigned to all log
* entries in `entries` that do not specify a value for `resource`. Example:
*
* { "type": "gce_instance",
* "labels": {
* "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
*
* See {@link LogEntry}.
*
* This object should have the same structure as [google.api.MonitoredResource]{@link external:"google.api.MonitoredResource"}
* @param {Object.<string, string>=} options.labels
* Optional. Default labels that are added to the `labels` field of all log
* entries in `entries`. If a log entry already has a label with the same key
* as a label in this parameter, then the log entry's label is not changed.
* See {@link LogEntry}.
* @param {boolean=} options.partialSuccess
* Optional. Whether valid entries should be written even if some other
* entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
* entry is not written, the response status will be the error associated
* with one of the failed entries and include error details in the form of
* WriteLogEntriesPartialErrors.
*
* @param {function(?Error, ?Object)=} callback
* The function which will be called with the result of the API call.
*
* The second parameter to the callback is an object representing [WriteLogEntriesResponse]{@link WriteLogEntriesResponse}
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var entries = [];
* api.writeLogEntries(entries, function(err, response) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* });
*/
LoggingServiceV2Api.prototype.writeLogEntries = function writeLogEntries(
entries,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
entries: entries
};
if ('logName' in options) {
req.logName = options.logName;
}
if ('resource' in options) {
req.resource = options.resource;
}
if ('labels' in options) {
req.labels = options.labels;
}
if ('partialSuccess' in options) {
req.partialSuccess = options.partialSuccess;
}
return this._writeLogEntries(req, options, callback);
};
/**
* Lists log entries. Use this method to retrieve log entries from Cloud
* Logging. For ways to export log entries, see
* [Exporting Logs](https://cloud.google.com/logging/docs/export).
*
* @param {string[]} projectIds
* Deprecated. One or more project identifiers or project numbers from which
* to retrieve log entries. Examples: `"my-project-1A"`, `"1234567890"`. If
* present, these project identifiers are converted to resource format and
* added to the list of resources in `resourceNames`. Callers should use
* `resourceNames` rather than this parameter.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string[]=} options.resourceNames
* Optional. One or more cloud resources from which to retrieve log entries.
* Example: `"projects/my-project-1A"`, `"projects/1234567890"`. Projects
* listed in `projectIds` are added to this list.
* @param {string=} options.filter
* Optional. A filter that chooses which log entries to return. See [Advanced
* Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). Only log entries that
* match the filter are returned. An empty filter matches all log entries.
* @param {string=} options.orderBy
* Optional. How the results should be sorted. Presently, the only permitted
* values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
* option returns entries in order of increasing values of
* `LogEntry.timestamp` (oldest first), and the second option returns entries
* in order of decreasing timestamps (newest first). Entries with equal
* timestamps are returned in order of `LogEntry.insertId`.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListLogEntriesResponse]{@link ListLogEntriesResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter}
* An object stream which emits an object representing
* [LogEntry]{@link LogEntry} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var projectIds = [];
* // Iterate over all elements.
* api.listLogEntries(projectIds).on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listLogEntries(projectIds, {pageToken: nextPageToken}, callback);
* }
* }
* api.listLogEntries(projectIds, {flattenPages: false}, callback);
*/
LoggingServiceV2Api.prototype.listLogEntries = function listLogEntries(
projectIds,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
projectIds: projectIds
};
if ('resourceNames' in options) {
req.resourceNames = options.resourceNames;
}
if ('filter' in options) {
req.filter = options.filter;
}
if ('orderBy' in options) {
req.orderBy = options.orderBy;
}
if ('pageSize' in options) {
req.pageSize = options.pageSize;
}
return this._listLogEntries(req, options, callback);
};
/**
* Lists the monitored resource descriptors used by Stackdriver Logging.
*
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListMonitoredResourceDescriptorsResponse]{@link ListMonitoredResourceDescriptorsResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter} | * [google.api.MonitoredResourceDescriptor]{@link external:"google.api.MonitoredResourceDescriptor"} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
*
* // Iterate over all elements.
* api.listMonitoredResourceDescriptors().on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listMonitoredResourceDescriptors({pageToken: nextPageToken}, callback);
* }
* }
* api.listMonitoredResourceDescriptors({flattenPages: false}, callback);
* api.listMonitoredResourceDescriptors(function(err, response) {
*/
LoggingServiceV2Api.prototype.listMonitoredResourceDescriptors = function listMonitoredResourceDescriptors(
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
};
if ('pageSize' in options) {
req.pageSize = options.pageSize;
}
return this._listMonitoredResourceDescriptors(req, options, callback);
};
function LoggingServiceV2ApiBuilder(gaxGrpc) {
if (!(this instanceof LoggingServiceV2ApiBuilder)) {
return new LoggingServiceV2ApiBuilder(gaxGrpc);
}
var loggingServiceV2Client = gaxGrpc.load([{
root: require('google-proto-files')('..'),
file: 'google/logging/v2/logging.proto'
}]);
extend(this, loggingServiceV2Client.google.logging.v2);
var grpcClients = {
loggingServiceV2Client: loggingServiceV2Client
};
/**
* Build a new instance of {@link LoggingServiceV2Api}.
*
* @param {Object=} opts - The optional parameters.
* @param {String=} opts.servicePath
* The domain name of the API remote host.
* @param {number=} opts.port
* The port on which to connect to the remote host.
* @param {grpc.ClientCredentials=} opts.sslCreds
* A ClientCredentials for use with an SSL-enabled channel.
* @param {Object=} opts.clientConfig
* The customized config to build the call settings. See
* {@link gax.constructSettings} for the format.
* @param {number=} opts.appName
* The codename of the calling service.
* @param {String=} opts.appVersion
* The version of the calling service.
*/
this.loggingServiceV2Api = function(opts) {
return new LoggingServiceV2Api(gaxGrpc, grpcClients, opts);
};
extend(this.loggingServiceV2Api, LoggingServiceV2Api);
}
module.exports = LoggingServiceV2ApiBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
module.exports.ALL_SCOPES = ALL_SCOPES; | * An object stream which emits an object representing | random_line_split |
logging_service_v2_api.js | /*
* Copyright 2016 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* EDITING INSTRUCTIONS
* This file was generated from the file
* https://github.com/googleapis/googleapis/blob/master/google/logging/v2/logging.proto,
* and updates to that file get reflected here through a refresh process.
* For the short term, the refresh process will only be runnable by Google
* engineers.
*
* The only allowed edits are to method and file documentation. A 3-way
* merge preserves those additions if the generated source changes.
*/
/* TODO: introduce line-wrapping so that it never exceeds the limit. */
/* jscs: disable maximumLineLength */
'use strict';
var configData = require('./logging_service_v2_client_config');
var extend = require('extend');
var gax = require('google-gax');
var SERVICE_ADDRESS = 'logging.googleapis.com';
var DEFAULT_SERVICE_PORT = 443;
var CODE_GEN_NAME_VERSION = 'gapic/0.1.0';
var PAGE_DESCRIPTORS = {
listLogEntries: new gax.PageDescriptor(
'pageToken',
'nextPageToken',
'entries'),
listMonitoredResourceDescriptors: new gax.PageDescriptor(
'pageToken',
'nextPageToken',
'resourceDescriptors')
};
/**
* The scopes needed to make gRPC calls to all of the methods defined in
* this service.
*/
var ALL_SCOPES = [
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'https://www.googleapis.com/auth/logging.admin',
'https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write'
];
/**
* Service for ingesting and querying logs.
*
* This will be created through a builder function which can be obtained by the module.
* See the following example of how to initialize the module and how to access to the builder.
* @see {@link loggingServiceV2Api}
*
* @example
* var loggingV2 = require('@google-cloud/logging').v2({
* // optional auth parameters.
* });
* var api = loggingV2.loggingServiceV2Api();
*
* @class
*/
function LoggingServiceV2Api(gaxGrpc, grpcClients, opts) {
opts = opts || {};
var servicePath = opts.servicePath || SERVICE_ADDRESS;
var port = opts.port || DEFAULT_SERVICE_PORT;
var sslCreds = opts.sslCreds || null;
var clientConfig = opts.clientConfig || {};
var appName = opts.appName || 'gax';
var appVersion = opts.appVersion || gax.version;
var googleApiClient = [
appName + '/' + appVersion,
CODE_GEN_NAME_VERSION,
'gax/' + gax.version,
'nodejs/' + process.version].join(' ');
var defaults = gaxGrpc.constructSettings(
'google.logging.v2.LoggingServiceV2',
configData,
clientConfig,
PAGE_DESCRIPTORS,
null,
{'x-goog-api-client': googleApiClient});
var loggingServiceV2Stub = gaxGrpc.createStub(
servicePath,
port,
grpcClients.loggingServiceV2Client.google.logging.v2.LoggingServiceV2,
{sslCreds: sslCreds});
var loggingServiceV2StubMethods = [
'deleteLog',
'writeLogEntries',
'listLogEntries',
'listMonitoredResourceDescriptors'
];
loggingServiceV2StubMethods.forEach(function(methodName) {
this['_' + methodName] = gax.createApiCall(
loggingServiceV2Stub.then(function(loggingServiceV2Stub) {
return loggingServiceV2Stub[methodName].bind(loggingServiceV2Stub);
}),
defaults[methodName]);
}.bind(this));
}
// Path templates
var PARENT_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}');
var LOG_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}/logs/{log}');
/**
* Returns a fully-qualified parent resource name string.
* @param {String} project
* @returns {String}
*/
LoggingServiceV2Api.prototype.parentPath = function parentPath(project) {
return PARENT_PATH_TEMPLATE.render({
project: project
});
};
/**
* Parses the parentName from a parent resource.
* @param {String} parentName
* A fully-qualified path representing a parent resources.
* @returns {String} - A string representing the project.
*/
LoggingServiceV2Api.prototype.matchProjectFromParentName =
function matchProjectFromParentName(parentName) {
return PARENT_PATH_TEMPLATE.match(parentName).project;
};
/**
* Returns a fully-qualified log resource name string.
* @param {String} project
* @param {String} log
* @returns {String}
*/
LoggingServiceV2Api.prototype.logPath = function logPath(project, log) {
return LOG_PATH_TEMPLATE.render({
project: project,
log: log
});
};
/**
* Parses the logName from a log resource.
* @param {String} logName
* A fully-qualified path representing a log resources.
* @returns {String} - A string representing the project.
*/
LoggingServiceV2Api.prototype.matchProjectFromLogName =
function matchProjectFromLogName(logName) {
return LOG_PATH_TEMPLATE.match(logName).project;
};
/**
* Parses the logName from a log resource.
* @param {String} logName
* A fully-qualified path representing a log resources.
* @returns {String} - A string representing the log.
*/
LoggingServiceV2Api.prototype.matchLogFromLogName =
function matchLogFromLogName(logName) {
return LOG_PATH_TEMPLATE.match(logName).log;
};
// Service calls
/**
* Deletes a log and all its log entries.
* The log will reappear if it receives new entries.
*
* @param {string} logName
* Required. The resource name of the log to delete. Example:
* `"projects/my-project/logs/syslog"`.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
* @param {function(?Error)=} callback
* The function which will be called with the result of the API call.
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var formattedLogName = api.logPath("[PROJECT]", "[LOG]");
* api.deleteLog(formattedLogName, function(err) {
* if (err) {
* console.error(err);
* }
* });
*/
LoggingServiceV2Api.prototype.deleteLog = function deleteLog(
logName,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
logName: logName
};
return this._deleteLog(req, options, callback);
};
/**
* Writes log entries to Stackdriver Logging. All log entries are
* written by this method.
*
* @param {Object[]} entries
* Required. The log entries to write. Values supplied for the fields
* `log_name`, `resource`, and `labels` in this `entries.write` request are
* added to those log entries that do not provide their own values for the
* fields.
*
* To improve throughput and to avoid exceeding the
* [quota limit](https://cloud.google.com/logging/quota-policy) for calls to `entries.write`,
* you should write multiple log entries at once rather than
* calling this method for each individual log entry.
*
* This object should have the same structure as [LogEntry]{@link LogEntry}
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string=} options.logName
* Optional. A default log resource name that is assigned to all log entries
* in `entries` that do not specify a value for `log_name`. Example:
* `"projects/my-project/logs/syslog"`. See
* {@link LogEntry}.
* @param {Object=} options.resource
* Optional. A default monitored resource object that is assigned to all log
* entries in `entries` that do not specify a value for `resource`. Example:
*
* { "type": "gce_instance",
* "labels": {
* "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
*
* See {@link LogEntry}.
*
* This object should have the same structure as [google.api.MonitoredResource]{@link external:"google.api.MonitoredResource"}
* @param {Object.<string, string>=} options.labels
* Optional. Default labels that are added to the `labels` field of all log
* entries in `entries`. If a log entry already has a label with the same key
* as a label in this parameter, then the log entry's label is not changed.
* See {@link LogEntry}.
* @param {boolean=} options.partialSuccess
* Optional. Whether valid entries should be written even if some other
* entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
* entry is not written, the response status will be the error associated
* with one of the failed entries and include error details in the form of
* WriteLogEntriesPartialErrors.
*
* @param {function(?Error, ?Object)=} callback
* The function which will be called with the result of the API call.
*
* The second parameter to the callback is an object representing [WriteLogEntriesResponse]{@link WriteLogEntriesResponse}
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var entries = [];
* api.writeLogEntries(entries, function(err, response) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* });
*/
LoggingServiceV2Api.prototype.writeLogEntries = function writeLogEntries(
entries,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
entries: entries
};
if ('logName' in options) {
req.logName = options.logName;
}
if ('resource' in options) |
if ('labels' in options) {
req.labels = options.labels;
}
if ('partialSuccess' in options) {
req.partialSuccess = options.partialSuccess;
}
return this._writeLogEntries(req, options, callback);
};
/**
* Lists log entries. Use this method to retrieve log entries from Cloud
* Logging. For ways to export log entries, see
* [Exporting Logs](https://cloud.google.com/logging/docs/export).
*
* @param {string[]} projectIds
* Deprecated. One or more project identifiers or project numbers from which
* to retrieve log entries. Examples: `"my-project-1A"`, `"1234567890"`. If
* present, these project identifiers are converted to resource format and
* added to the list of resources in `resourceNames`. Callers should use
* `resourceNames` rather than this parameter.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string[]=} options.resourceNames
* Optional. One or more cloud resources from which to retrieve log entries.
* Example: `"projects/my-project-1A"`, `"projects/1234567890"`. Projects
* listed in `projectIds` are added to this list.
* @param {string=} options.filter
* Optional. A filter that chooses which log entries to return. See [Advanced
* Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). Only log entries that
* match the filter are returned. An empty filter matches all log entries.
* @param {string=} options.orderBy
* Optional. How the results should be sorted. Presently, the only permitted
* values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
* option returns entries in order of increasing values of
* `LogEntry.timestamp` (oldest first), and the second option returns entries
* in order of decreasing timestamps (newest first). Entries with equal
* timestamps are returned in order of `LogEntry.insertId`.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListLogEntriesResponse]{@link ListLogEntriesResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter}
* An object stream which emits an object representing
* [LogEntry]{@link LogEntry} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var projectIds = [];
* // Iterate over all elements.
* api.listLogEntries(projectIds).on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listLogEntries(projectIds, {pageToken: nextPageToken}, callback);
* }
* }
* api.listLogEntries(projectIds, {flattenPages: false}, callback);
*/
LoggingServiceV2Api.prototype.listLogEntries = function listLogEntries(
projectIds,
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
projectIds: projectIds
};
if ('resourceNames' in options) {
req.resourceNames = options.resourceNames;
}
if ('filter' in options) {
req.filter = options.filter;
}
if ('orderBy' in options) {
req.orderBy = options.orderBy;
}
if ('pageSize' in options) {
req.pageSize = options.pageSize;
}
return this._listLogEntries(req, options, callback);
};
/**
* Lists the monitored resource descriptors used by Stackdriver Logging.
*
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListMonitoredResourceDescriptorsResponse]{@link ListMonitoredResourceDescriptorsResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter}
* An object stream which emits an object representing
* [google.api.MonitoredResourceDescriptor]{@link external:"google.api.MonitoredResourceDescriptor"} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
*
* // Iterate over all elements.
* api.listMonitoredResourceDescriptors().on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listMonitoredResourceDescriptors({pageToken: nextPageToken}, callback);
* }
* }
* api.listMonitoredResourceDescriptors({flattenPages: false}, callback);
* api.listMonitoredResourceDescriptors(function(err, response) {
*/
LoggingServiceV2Api.prototype.listMonitoredResourceDescriptors = function listMonitoredResourceDescriptors(
options,
callback) {
if (options instanceof Function && callback === undefined) {
callback = options;
options = {};
}
if (options === undefined) {
options = {};
}
var req = {
};
if ('pageSize' in options) {
req.pageSize = options.pageSize;
}
return this._listMonitoredResourceDescriptors(req, options, callback);
};
function LoggingServiceV2ApiBuilder(gaxGrpc) {
if (!(this instanceof LoggingServiceV2ApiBuilder)) {
return new LoggingServiceV2ApiBuilder(gaxGrpc);
}
var loggingServiceV2Client = gaxGrpc.load([{
root: require('google-proto-files')('..'),
file: 'google/logging/v2/logging.proto'
}]);
extend(this, loggingServiceV2Client.google.logging.v2);
var grpcClients = {
loggingServiceV2Client: loggingServiceV2Client
};
/**
* Build a new instance of {@link LoggingServiceV2Api}.
*
* @param {Object=} opts - The optional parameters.
* @param {String=} opts.servicePath
* The domain name of the API remote host.
* @param {number=} opts.port
* The port on which to connect to the remote host.
* @param {grpc.ClientCredentials=} opts.sslCreds
* A ClientCredentials for use with an SSL-enabled channel.
* @param {Object=} opts.clientConfig
* The customized config to build the call settings. See
* {@link gax.constructSettings} for the format.
* @param {number=} opts.appName
* The codename of the calling service.
* @param {String=} opts.appVersion
* The version of the calling service.
*/
this.loggingServiceV2Api = function(opts) {
return new LoggingServiceV2Api(gaxGrpc, grpcClients, opts);
};
extend(this.loggingServiceV2Api, LoggingServiceV2Api);
}
module.exports = LoggingServiceV2ApiBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
module.exports.ALL_SCOPES = ALL_SCOPES; | {
req.resource = options.resource;
} | conditional_block |
logging_service_v2_api.js | /*
* Copyright 2016 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* EDITING INSTRUCTIONS
* This file was generated from the file
* https://github.com/googleapis/googleapis/blob/master/google/logging/v2/logging.proto,
* and updates to that file get reflected here through a refresh process.
* For the short term, the refresh process will only be runnable by Google
* engineers.
*
* The only allowed edits are to method and file documentation. A 3-way
* merge preserves those additions if the generated source changes.
*/
/* TODO: introduce line-wrapping so that it never exceeds the limit. */
/* jscs: disable maximumLineLength */
'use strict';
var configData = require('./logging_service_v2_client_config');
var extend = require('extend');
var gax = require('google-gax');
/** Default host for the logging service. */
var SERVICE_ADDRESS = 'logging.googleapis.com';

/** Default port for the logging service. */
var DEFAULT_SERVICE_PORT = 443;

/** Generated-code version stamp sent in the x-goog-api-client header. */
var CODE_GEN_NAME_VERSION = 'gapic/0.1.0';

/**
 * Page descriptors teach gax how to auto-paginate the list methods: which
 * request field carries the page token, which response field returns the
 * next token, and which response field holds the page's resources.
 */
var PAGE_DESCRIPTORS = {
  listLogEntries: new gax.PageDescriptor(
      'pageToken',
      'nextPageToken',
      'entries'),
  listMonitoredResourceDescriptors: new gax.PageDescriptor(
      'pageToken',
      'nextPageToken',
      'resourceDescriptors')
};

/**
 * The scopes needed to make gRPC calls to all of the methods defined in
 * this service.
 */
var ALL_SCOPES = [
  'https://www.googleapis.com/auth/cloud-platform',
  'https://www.googleapis.com/auth/cloud-platform.read-only',
  'https://www.googleapis.com/auth/logging.admin',
  'https://www.googleapis.com/auth/logging.read',
  'https://www.googleapis.com/auth/logging.write'
];
/**
* Service for ingesting and querying logs.
*
* This will be created through a builder function which can be obtained by the module.
* See the following example of how to initialize the module and how to access to the builder.
* @see {@link loggingServiceV2Api}
*
* @example
* var loggingV2 = require('@google-cloud/logging').v2({
* // optional auth parameters.
* });
* var api = loggingV2.loggingServiceV2Api();
*
* @class
*/
function LoggingServiceV2Api(gaxGrpc, grpcClients, opts) {
  opts = opts || {};
  var servicePath = opts.servicePath || SERVICE_ADDRESS;
  var port = opts.port || DEFAULT_SERVICE_PORT;
  var sslCreds = opts.sslCreds || null;
  var clientConfig = opts.clientConfig || {};
  var appName = opts.appName || 'gax';
  var appVersion = opts.appVersion || gax.version;

  // The x-goog-api-client header identifies the calling app and toolchain.
  var googleApiClientParts = [
    appName + '/' + appVersion,
    CODE_GEN_NAME_VERSION,
    'gax/' + gax.version,
    'nodejs/' + process.version
  ];

  // Per-method call settings (timeouts, retries, pagination) come from the
  // bundled client config merged with any caller-provided overrides.
  var defaults = gaxGrpc.constructSettings(
      'google.logging.v2.LoggingServiceV2',
      configData,
      clientConfig,
      PAGE_DESCRIPTORS,
      null,
      {'x-goog-api-client': googleApiClientParts.join(' ')});

  var loggingServiceV2Stub = gaxGrpc.createStub(
      servicePath,
      port,
      grpcClients.loggingServiceV2Client.google.logging.v2.LoggingServiceV2,
      {sslCreds: sslCreds});

  // Returns a promise-callback that binds the named method to the resolved
  // stub; a helper function is used so each loop iteration captures its own
  // method name.
  function makeBinder(name) {
    return function(stub) {
      return stub[name].bind(stub);
    };
  }

  // Wrap every stub method in a gax API call using its configured defaults;
  // the wrapped versions are stored as this._<methodName>.
  var methodNames = [
    'deleteLog',
    'writeLogEntries',
    'listLogEntries',
    'listMonitoredResourceDescriptors'
  ];
  for (var i = 0; i < methodNames.length; i++) {
    this['_' + methodNames[i]] = gax.createApiCall(
        loggingServiceV2Stub.then(makeBinder(methodNames[i])),
        defaults[methodNames[i]]);
  }
}
// Path templates
var PARENT_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}');
var LOG_PATH_TEMPLATE = new gax.PathTemplate(
'projects/{project}/logs/{log}');
/**
 * Returns a fully-qualified parent resource name string,
 * e.g. "projects/my-project".
 * @param {String} project
 * @returns {String}
 */
LoggingServiceV2Api.prototype.parentPath = function parentPath(project) {
  var bindings = {
    project: project
  };
  return PARENT_PATH_TEMPLATE.render(bindings);
};
/**
 * Extracts the project component from a fully-qualified parent resource
 * name such as "projects/my-project".
 * @param {String} parentName - a fully-qualified parent resource name.
 * @returns {String} - the project component.
 */
LoggingServiceV2Api.prototype.matchProjectFromParentName =
    function matchProjectFromParentName(parentName) {
  var parsed = PARENT_PATH_TEMPLATE.match(parentName);
  return parsed.project;
};
/**
 * Returns a fully-qualified log resource name string,
 * e.g. "projects/my-project/logs/syslog".
 * @param {String} project
 * @param {String} log
 * @returns {String}
 */
LoggingServiceV2Api.prototype.logPath = function logPath(project, log) {
  var bindings = {
    project: project,
    log: log
  };
  return LOG_PATH_TEMPLATE.render(bindings);
};
/**
 * Extracts the project component from a fully-qualified log resource name
 * such as "projects/my-project/logs/syslog".
 * @param {String} logName - a fully-qualified log resource name.
 * @returns {String} - the project component.
 */
LoggingServiceV2Api.prototype.matchProjectFromLogName =
    function matchProjectFromLogName(logName) {
  var parsed = LOG_PATH_TEMPLATE.match(logName);
  return parsed.project;
};
/**
 * Extracts the log component from a fully-qualified log resource name
 * such as "projects/my-project/logs/syslog".
 * @param {String} logName - a fully-qualified log resource name.
 * @returns {String} - the log component.
 */
LoggingServiceV2Api.prototype.matchLogFromLogName =
    function matchLogFromLogName(logName) {
  var parsed = LOG_PATH_TEMPLATE.match(logName);
  return parsed.log;
};
// Service calls
/**
* Deletes a log and all its log entries.
* The log will reappear if it receives new entries.
*
* @param {string} logName
* Required. The resource name of the log to delete. Example:
* `"projects/my-project/logs/syslog"`.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
* @param {function(?Error)=} callback
* The function which will be called with the result of the API call.
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var formattedLogName = api.logPath("[PROJECT]", "[LOG]");
* api.deleteLog(formattedLogName, function(err) {
* if (err) {
* console.error(err);
* }
* });
*/
LoggingServiceV2Api.prototype.deleteLog = function deleteLog(
    logName,
    options,
    callback) {
  // Support the (logName, callback) calling convention.
  if (options instanceof Function && callback === undefined) {
    callback = options;
    options = {};
  } else if (options === undefined) {
    options = {};
  }
  return this._deleteLog({logName: logName}, options, callback);
};
/**
* Writes log entries to Stackdriver Logging. All log entries are
* written by this method.
*
* @param {Object[]} entries
* Required. The log entries to write. Values supplied for the fields
* `log_name`, `resource`, and `labels` in this `entries.write` request are
* added to those log entries that do not provide their own values for the
* fields.
*
* To improve throughput and to avoid exceeding the
* [quota limit](https://cloud.google.com/logging/quota-policy) for calls to `entries.write`,
* you should write multiple log entries at once rather than
* calling this method for each individual log entry.
*
* This object should have the same structure as [LogEntry]{@link LogEntry}
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string=} options.logName
* Optional. A default log resource name that is assigned to all log entries
* in `entries` that do not specify a value for `log_name`. Example:
* `"projects/my-project/logs/syslog"`. See
* {@link LogEntry}.
* @param {Object=} options.resource
* Optional. A default monitored resource object that is assigned to all log
* entries in `entries` that do not specify a value for `resource`. Example:
*
* { "type": "gce_instance",
* "labels": {
* "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
*
* See {@link LogEntry}.
*
* This object should have the same structure as [google.api.MonitoredResource]{@link external:"google.api.MonitoredResource"}
* @param {Object.<string, string>=} options.labels
* Optional. Default labels that are added to the `labels` field of all log
* entries in `entries`. If a log entry already has a label with the same key
* as a label in this parameter, then the log entry's label is not changed.
* See {@link LogEntry}.
* @param {boolean=} options.partialSuccess
* Optional. Whether valid entries should be written even if some other
* entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
* entry is not written, the response status will be the error associated
* with one of the failed entries and include error details in the form of
* WriteLogEntriesPartialErrors.
*
* @param {function(?Error, ?Object)=} callback
* The function which will be called with the result of the API call.
*
* The second parameter to the callback is an object representing [WriteLogEntriesResponse]{@link WriteLogEntriesResponse}
* @returns {gax.EventEmitter} - the event emitter to handle the call
* status.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var entries = [];
* api.writeLogEntries(entries, function(err, response) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* });
*/
LoggingServiceV2Api.prototype.writeLogEntries = function writeLogEntries(
    entries,
    options,
    callback) {
  // Support the (entries, callback) calling convention.
  if (options instanceof Function && callback === undefined) {
    callback = options;
    options = {};
  } else if (options === undefined) {
    options = {};
  }
  var req = {
    entries: entries
  };
  // Forward only the optional request fields the caller actually supplied;
  // absent fields are left off the request entirely.
  var optionalFields = ['logName', 'resource', 'labels', 'partialSuccess'];
  for (var i = 0; i < optionalFields.length; i++) {
    var field = optionalFields[i];
    if (field in options) {
      req[field] = options[field];
    }
  }
  return this._writeLogEntries(req, options, callback);
};
/**
* Lists log entries. Use this method to retrieve log entries from Cloud
* Logging. For ways to export log entries, see
* [Exporting Logs](https://cloud.google.com/logging/docs/export).
*
* @param {string[]} projectIds
* Deprecated. One or more project identifiers or project numbers from which
* to retrieve log entries. Examples: `"my-project-1A"`, `"1234567890"`. If
* present, these project identifiers are converted to resource format and
* added to the list of resources in `resourceNames`. Callers should use
* `resourceNames` rather than this parameter.
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {string[]=} options.resourceNames
* Optional. One or more cloud resources from which to retrieve log entries.
* Example: `"projects/my-project-1A"`, `"projects/1234567890"`. Projects
* listed in `projectIds` are added to this list.
* @param {string=} options.filter
* Optional. A filter that chooses which log entries to return. See [Advanced
* Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). Only log entries that
* match the filter are returned. An empty filter matches all log entries.
* @param {string=} options.orderBy
* Optional. How the results should be sorted. Presently, the only permitted
* values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
* option returns entries in order of increasing values of
* `LogEntry.timestamp` (oldest first), and the second option returns entries
* in order of decreasing timestamps (newest first). Entries with equal
* timestamps are returned in order of `LogEntry.insertId`.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListLogEntriesResponse]{@link ListLogEntriesResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter}
* An object stream which emits an object representing
* [LogEntry]{@link LogEntry} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
* var projectIds = [];
* // Iterate over all elements.
* api.listLogEntries(projectIds).on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listLogEntries(projectIds, {pageToken: nextPageToken}, callback);
* }
* }
* api.listLogEntries(projectIds, {flattenPages: false}, callback);
*/
LoggingServiceV2Api.prototype.listLogEntries = function listLogEntries(
    projectIds,
    options,
    callback) {
  // Support the (projectIds, callback) calling convention.
  if (options instanceof Function && callback === undefined) {
    callback = options;
    options = {};
  } else if (options === undefined) {
    options = {};
  }
  var req = {
    projectIds: projectIds
  };
  // Forward only the optional request fields the caller actually supplied.
  var optionalFields = ['resourceNames', 'filter', 'orderBy', 'pageSize'];
  for (var i = 0; i < optionalFields.length; i++) {
    var field = optionalFields[i];
    if (field in options) {
      req[field] = options[field];
    }
  }
  return this._listLogEntries(req, options, callback);
};
/**
* Lists the monitored resource descriptors used by Stackdriver Logging.
*
* @param {Object=} options
* Optional parameters. You can override the default settings for this call, e.g, timeout,
* retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
*
* In addition, options may contain the following optional parameters.
* @param {number=} options.pageSize
* The maximum number of resources contained in the underlying API
* response. If page streaming is performed per-resource, this
* parameter does not affect the return value. If page streaming is
* performed per-page, this determines the maximum number of
* resources in a page.
*
* @param {function(?Error, ?Object, ?string)=} callback
* When specified, the results are not streamed but this callback
* will be called with the response object representing [ListMonitoredResourceDescriptorsResponse]{@link ListMonitoredResourceDescriptorsResponse}.
* The third item will be set if the response contains the token for the further results
* and can be reused to `pageToken` field in the options in the next request.
* @returns {Stream|gax.EventEmitter}
* An object stream which emits an object representing
* [google.api.MonitoredResourceDescriptor]{@link external:"google.api.MonitoredResourceDescriptor"} on 'data' event.
* When the callback is specified or streaming is suppressed through options,
* it will return an event emitter to handle the call status and the callback
* will be called with the response object.
*
* @example
*
* var api = loggingV2.loggingServiceV2Api();
*
* // Iterate over all elements.
* api.listMonitoredResourceDescriptors().on('data', function(element) {
* // doThingsWith(element)
* });
*
* // Or obtain the paged response through the callback.
* function callback(err, response, nextPageToken) {
* if (err) {
* console.error(err);
* return;
* }
* // doThingsWith(response)
* if (nextPageToken) {
* // fetch the next page.
* api.listMonitoredResourceDescriptors({pageToken: nextPageToken}, callback);
* }
* }
 * api.listMonitoredResourceDescriptors({flattenPages: false}, callback);
 *
 * // Or use the plain callback form:
 * api.listMonitoredResourceDescriptors(function(err, response) {
 *   // doThingsWith(response)
 * });
 */
LoggingServiceV2Api.prototype.listMonitoredResourceDescriptors =
    function listMonitoredResourceDescriptors(options, callback) {
  // Support the (callback) calling convention.
  if (options instanceof Function && callback === undefined) {
    callback = options;
    options = {};
  } else if (options === undefined) {
    options = {};
  }
  // The request carries only the optional page size.
  var req = {};
  if ('pageSize' in options) {
    req.pageSize = options.pageSize;
  }
  return this._listMonitoredResourceDescriptors(req, options, callback);
};
/**
 * Builds the module-level namespace for the LoggingServiceV2 service.
 *
 * NOTE: the function's identifier was missing in the damaged source; it is
 * restored as LoggingServiceV2ApiBuilder, which is what the instanceof guard
 * below and the module.exports assignment at the bottom of the file expect.
 *
 * @param {Object} gaxGrpc - the gax-grpc helper used to load proto files.
 * @constructor
 */
function LoggingServiceV2ApiBuilder(gaxGrpc) {
  // Allow calling without `new`.
  if (!(this instanceof LoggingServiceV2ApiBuilder)) {
    return new LoggingServiceV2ApiBuilder(gaxGrpc);
  }

  var loggingServiceV2Client = gaxGrpc.load([{
    root: require('google-proto-files')('..'),
    file: 'google/logging/v2/logging.proto'
  }]);
  // Expose the generated message types on the builder itself.
  extend(this, loggingServiceV2Client.google.logging.v2);

  var grpcClients = {
    loggingServiceV2Client: loggingServiceV2Client
  };

  /**
   * Build a new instance of {@link LoggingServiceV2Api}.
   *
   * @param {Object=} opts - The optional parameters.
   * @param {String=} opts.servicePath
   *   The domain name of the API remote host.
   * @param {number=} opts.port
   *   The port on which to connect to the remote host.
   * @param {grpc.ClientCredentials=} opts.sslCreds
   *   A ClientCredentials for use with an SSL-enabled channel.
   * @param {Object=} opts.clientConfig
   *   The customized config to build the call settings. See
   *   {@link gax.constructSettings} for the format.
   * @param {number=} opts.appName
   *   The codename of the calling service.
   * @param {String=} opts.appVersion
   *   The version of the calling service.
   */
  this.loggingServiceV2Api = function(opts) {
    return new LoggingServiceV2Api(gaxGrpc, grpcClients, opts);
  };
  extend(this.loggingServiceV2Api, LoggingServiceV2Api);
}
// The builder is the module's entry point; the service address and scopes
// are exposed for callers that configure their own channels. (Trailing
// dataset residue on the last line has been removed.)
module.exports = LoggingServiceV2ApiBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
module.exports.ALL_SCOPES = ALL_SCOPES;
mutableBag.go | // Copyright 2016 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"github.com/golang/glog"
me "github.com/hashicorp/go-multierror"
mixerpb "istio.io/api/mixer/v1"
"istio.io/mixer/pkg/pool"
)
// MutableBag is a generic mechanism to read and write a set of attributes.
//
// Bags can be chained together in a parent/child relationship. A child bag
// represents a delta over a parent. By default a child looks identical to
// the parent. But as mutations occur to the child, the two start to diverge.
// Resetting a child makes it look identical to its parent again.
type MutableBag struct {
	parent Bag                    // read-through fallback for attributes not set locally
	values map[string]interface{} // local attribute overrides (the delta over parent)
	id     int64                  // strictly for use in diagnostic messages
}
// id is a process-wide counter used to stamp each pooled bag with a unique
// identifier for diagnostic messages.
var id int64

// mutableBags recycles MutableBag instances to reduce allocation churn;
// bags are handed out by GetMutableBag and returned via Done.
var mutableBags = sync.Pool{
	New: func() interface{} {
		return &MutableBag{
			values: make(map[string]interface{}),
			id:     atomic.AddInt64(&id, 1),
		}
	},
}
// GetMutableBag returns an initialized bag from the recycling pool.
//
// Bags can be chained in a parent/child relationship; pass nil when the bag
// has no parent and an empty root bag is substituted so lookups never have
// to nil-check. Call Done when finished to recycle the bag.
func GetMutableBag(parent Bag) *MutableBag {
	if parent == nil {
		parent = empty
	}
	bag := mutableBags.Get().(*MutableBag)
	bag.parent = parent
	return bag
}
// CopyBag makes a deep copy of the given bag's visible attributes into a
// fresh parentless MutableBag.
func CopyBag(b Bag) *MutableBag {
	dst := GetMutableBag(nil)
	for _, name := range b.Names() {
		value, _ := b.Get(name)
		dst.Set(name, copyValue(value))
	}
	return dst
}
// Given an attribute value, create a deep copy of it.
//
// Only the mutable attribute types ([]byte and map[string]string) need real
// copies; every other attribute value is immutable and is returned as-is.
// (The function body was missing in the damaged source and is restored here.)
func copyValue(v interface{}) interface{} {
	switch t := v.(type) {
	case []byte:
		c := make([]byte, len(t))
		copy(c, t)
		return c

	case map[string]string:
		c := make(map[string]string, len(t))
		for k2, v2 := range t {
			c[k2] = v2
		}
		return c
	}

	return v
}
// Done releases the bag back to the shared pool. The bag must not be used
// after this call.
func (b *MutableBag) Done() {
	b.parent = nil
	b.Reset()
	mutableBags.Put(b)
}
// Get returns an attribute value, consulting local overrides first and
// falling back to the parent chain when the name is not set locally.
func (b *MutableBag) Get(name string) (interface{}, bool) {
	if v, ok := b.values[name]; ok {
		return v, ok
	}
	return b.parent.Get(name)
}
// Names returns the names of all the attributes known to this bag: local
// names first, then the parent chain's. A name may appear twice when a
// local value overrides a parent value.
func (b *MutableBag) Names() []string {
	names := make([]string, 0, len(b.values))
	for name := range b.values {
		names = append(names, name)
	}
	return append(names, b.parent.Names()...)
}
// Set creates a local override for the named attribute.
func (b *MutableBag) Set(name string, value interface{}) {
	b.values[name] = value
}
// Reset removes all local state, making the bag look identical to its
// parent again.
func (b *MutableBag) Reset() {
	// Deleting while ranging is well-defined in Go; this clears the map
	// without discarding its allocated capacity.
	for name := range b.values {
		delete(b.values, name)
	}
}
// Merge combines the given bags' local values into the current bag.
//
// The bags may not carry conflicting definitions for the same attribute;
// if they do, the merge fails and the current bag is left untouched.
func (b *MutableBag) Merge(bags ...*MutableBag) error {
	// Pass 1: detect duplicate definitions before touching any state.
	seen := make(map[string]bool)
	for _, other := range bags {
		if other == nil {
			continue
		}
		for name := range other.values {
			if seen[name] {
				return fmt.Errorf("conflicting value for attribute %s", name)
			}
			seen[name] = true
		}
	}

	// Pass 2: no conflicts, so it is safe to deep-copy everything in.
	for _, other := range bags {
		if other == nil {
			continue
		}
		for name, value := range other.values {
			b.values[name] = copyValue(value)
		}
	}
	return nil
}
// Child allocates a derived mutable bag whose parent is this bag; all
// mutations to the child are deltas and never affect this parent.
func (b *MutableBag) Child() *MutableBag {
	return GetMutableBag(b)
}
// Ensure that all dictionary indices are valid and that all values
// are in range.
//
// Note that since we don't have the attribute schema, this doesn't validate
// that a given attribute is being treated as the right type. That is, an
// attribute called 'source.ip' which is of type IP_ADDRESS could be listed as
// a string or an int, and we wouldn't catch it here.
func checkPreconditions(dictionary dictionary, attrs *mixerpb.Attributes) error {
	// The loops below are intentionally identical: the per-type attribute
	// maps have different value types, so they cannot share one loop without
	// reflection. Errors accumulate so the caller sees every bad index.
	var e *me.Error
	for k := range attrs.StringAttributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
	}
	for k := range attrs.Int64Attributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
	}
	for k := range attrs.DoubleAttributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
	}
	for k := range attrs.BoolAttributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
	}
	for k := range attrs.TimestampAttributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
	}
	for k := range attrs.DurationAttributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
	}
	for k := range attrs.BytesAttributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
	}
	for k, v := range attrs.StringMapAttributes {
		if _, present := dictionary[k]; !present {
			e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
		}
		// String-map values carry dictionary-indexed keys too; check each one.
		for k2 := range v.Map {
			if _, present := dictionary[k2]; !present {
				e = me.Append(e, fmt.Errorf("string map index %d is not defined in the current dictionary", k2))
			}
		}
	}
	// TODO: we should catch the case where the same attribute is being repeated in different types
	// (that is, an attribute called FOO which is both an int and a string for example)
	return e.ErrorOrNil()
}
// Update the state of the bag based on the content of an Attributes struct,
// the mixer wire form. Preconditions are validated first so that a bad
// proto never partially mutates the bag.
func (mb *MutableBag) update(dictionary dictionary, attrs *mixerpb.Attributes) error {
	// check preconditions up front and bail if there are any
	// errors without mutating the bag.
	if err := checkPreconditions(dictionary, attrs); err != nil {
		return err
	}

	// Detailed mutation logging is only assembled when -v=2 is enabled.
	// NOTE(review): the buffer obtained from pool.GetBuffer is never returned
	// to the pool anywhere in this function — confirm whether a
	// pool.PutBuffer call is missing.
	var log *bytes.Buffer
	if glog.V(2) {
		log = pool.GetBuffer()
	}

	if attrs.ResetContext {
		if log != nil {
			log.WriteString(" resetting bag to empty state\n")
		}
		mb.Reset()
	}

	// delete requested attributes
	for _, d := range attrs.DeletedAttributes {
		if name, present := dictionary[d]; present {
			if log != nil {
				log.WriteString(fmt.Sprintf(" attempting to delete attribute %s\n", name))
			}
			delete(mb.values, name)
		}
	}

	// apply all attributes, one typed map at a time; later writes win over
	// earlier ones for the same resolved name
	for k, v := range attrs.StringAttributes {
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating string attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
		}
		mb.values[dictionary[k]] = v
	}
	for k, v := range attrs.Int64Attributes {
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating int64 attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
		}
		mb.values[dictionary[k]] = v
	}
	for k, v := range attrs.DoubleAttributes {
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating double attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
		}
		mb.values[dictionary[k]] = v
	}
	for k, v := range attrs.BoolAttributes {
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating bool attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
		}
		mb.values[dictionary[k]] = v
	}
	for k, v := range attrs.TimestampAttributes {
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating time attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
		}
		mb.values[dictionary[k]] = v
	}
	for k, v := range attrs.DurationAttributes {
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating duration attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
		}
		mb.values[dictionary[k]] = v
	}
	for k, v := range attrs.BytesAttributes {
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating bytes attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
		}
		mb.values[dictionary[k]] = v
	}
	for k, v := range attrs.StringMapAttributes {
		// Merge into any existing map for this attribute rather than
		// replacing it wholesale.
		m, ok := mb.values[dictionary[k]].(map[string]string)
		if !ok {
			m = make(map[string]string)
			mb.values[dictionary[k]] = m
		}
		if log != nil {
			log.WriteString(fmt.Sprintf(" updating stringmap attribute %s from\n", dictionary[k]))
			if len(m) > 0 {
				for k2, v2 := range m {
					log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
				}
			} else {
				log.WriteString(" <empty>\n")
			}
			log.WriteString(" to\n")
		}
		// Map keys arrive as dictionary indices and are resolved here.
		for k2, v2 := range v.Map {
			m[dictionary[k2]] = v2
		}
		if log != nil {
			if len(m) > 0 {
				for k2, v2 := range m {
					log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
				}
			} else {
				log.WriteString(" <empty>\n")
			}
		}
	}

	if log != nil {
		if log.Len() > 0 {
			glog.Infof("Updating attribute bag %d:\n%s", mb.id, log.String())
		}
	}

	return nil
}
| {
switch t := v.(type) {
case []byte:
c := make([]byte, len(t))
copy(c, t)
return c
case map[string]string:
c := make(map[string]string, len(t))
for k2, v2 := range t {
c[k2] = v2
}
return c
}
return v
} | identifier_body |
mutableBag.go | // Copyright 2016 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"github.com/golang/glog"
me "github.com/hashicorp/go-multierror"
mixerpb "istio.io/api/mixer/v1"
"istio.io/mixer/pkg/pool"
)
// MutableBag is a generic mechanism to read and write a set of attributes.
//
// Bags can be chained together in a parent/child relationship. A child bag
// represents a delta over a parent. By default a child looks identical to
// the parent. But as mutations occur to the child, the two start to diverge.
// Resetting a child makes it look identical to its parent again.
type MutableBag struct {
	parent Bag                    // read-through fallback for attributes not set locally
	values map[string]interface{} // local attribute overrides (the delta over parent)
	id     int64                  // strictly for use in diagnostic messages
}
// id is a process-wide counter used to stamp each pooled bag with a unique
// identifier for diagnostic messages.
var id int64

// mutableBags recycles MutableBag instances to reduce allocation churn;
// bags are handed out by GetMutableBag and returned via Done.
var mutableBags = sync.Pool{
	New: func() interface{} {
		return &MutableBag{
			values: make(map[string]interface{}),
			id:     atomic.AddInt64(&id, 1),
		}
	},
}
// GetMutableBag returns an initialized bag from the recycling pool.
//
// Bags can be chained in a parent/child relationship; pass nil when the bag
// has no parent and an empty root bag is substituted so lookups never have
// to nil-check. Call Done when finished to recycle the bag.
func GetMutableBag(parent Bag) *MutableBag {
	if parent == nil {
		parent = empty
	}
	bag := mutableBags.Get().(*MutableBag)
	bag.parent = parent
	return bag
}
// CopyBag makes a deep copy of the given bag's visible attributes into a
// fresh parentless MutableBag.
func CopyBag(b Bag) *MutableBag {
	dst := GetMutableBag(nil)
	for _, name := range b.Names() {
		value, _ := b.Get(name)
		dst.Set(name, copyValue(value))
	}
	return dst
}
// copyValue returns a value that is safe to store independently of its
// source: byte slices and string maps are duplicated, while every other
// attribute value is immutable and is returned unchanged.
func copyValue(v interface{}) interface{} {
	if b, ok := v.([]byte); ok {
		dup := make([]byte, len(b))
		copy(dup, b)
		return dup
	}
	if m, ok := v.(map[string]string); ok {
		dup := make(map[string]string, len(m))
		for key, val := range m {
			dup[key] = val
		}
		return dup
	}
	return v
}
// Done releases the bag back to the shared pool. The bag must not be used
// after this call.
func (b *MutableBag) Done() {
	b.parent = nil
	b.Reset()
	mutableBags.Put(b)
}
// Get returns an attribute value, consulting local overrides first and
// falling back to the parent chain when the name is not set locally.
func (b *MutableBag) Get(name string) (interface{}, bool) {
	if v, ok := b.values[name]; ok {
		return v, ok
	}
	return b.parent.Get(name)
}
// Names returns the names of all the attributes known to this bag: local
// names first, then the parent chain's. A name may appear twice when a
// local value overrides a parent value.
func (b *MutableBag) Names() []string {
	names := make([]string, 0, len(b.values))
	for name := range b.values {
		names = append(names, name)
	}
	return append(names, b.parent.Names()...)
}
// Set creates a local override for the named attribute.
func (b *MutableBag) Set(name string, value interface{}) {
	b.values[name] = value
}
// Reset removes all local state, making the bag look identical to its
// parent again.
func (b *MutableBag) Reset() {
	// Deleting while ranging is well-defined in Go; this clears the map
	// without discarding its allocated capacity.
	for name := range b.values {
		delete(b.values, name)
	}
}
// Merge combines the given bags' local values into the current bag.
//
// The bags may not carry conflicting definitions for the same attribute;
// if they do, the merge fails and the current bag is left untouched.
func (b *MutableBag) Merge(bags ...*MutableBag) error {
	// Pass 1: detect duplicate definitions before touching any state.
	seen := make(map[string]bool)
	for _, other := range bags {
		if other == nil {
			continue
		}
		for name := range other.values {
			if seen[name] {
				return fmt.Errorf("conflicting value for attribute %s", name)
			}
			seen[name] = true
		}
	}

	// Pass 2: no conflicts, so it is safe to deep-copy everything in.
	for _, other := range bags {
		if other == nil {
			continue
		}
		for name, value := range other.values {
			b.values[name] = copyValue(value)
		}
	}
	return nil
}
// Child allocates a derived mutable bag whose parent is this bag; all
// mutations to the child are deltas and never affect this parent.
func (b *MutableBag) Child() *MutableBag {
	return GetMutableBag(b)
}
// Ensure that all dictionary indices are valid and that all values
// are in range.
//
// Note that since we don't have the attribute schema, this doesn't validate
// that a given attribute is being treated as the right type. That is, an
// attribute called 'source.ip' which is of type IP_ADDRESS could be listed as
// a string or an int, and we wouldn't catch it here.
func checkPreconditions(dictionary dictionary, attrs *mixerpb.Attributes) error {
var e *me.Error
for k := range attrs.StringAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.Int64Attributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.DoubleAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.BoolAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.TimestampAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.DurationAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.BytesAttributes {
if _, present := dictionary[k]; !present |
}
for k, v := range attrs.StringMapAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
for k2 := range v.Map {
if _, present := dictionary[k2]; !present {
e = me.Append(e, fmt.Errorf("string map index %d is not defined in the current dictionary", k2))
}
}
}
// TODO: we should catch the case where the same attribute is being repeated in different types
// (that is, an attribute called FOO which is both an int and a string for example)
return e.ErrorOrNil()
}
// Update the state of the bag based on the content of an Attributes struct
func (mb *MutableBag) update(dictionary dictionary, attrs *mixerpb.Attributes) error {
// check preconditions up front and bail if there are any
// errors without mutating the bag.
if err := checkPreconditions(dictionary, attrs); err != nil {
return err
}
var log *bytes.Buffer
if glog.V(2) {
log = pool.GetBuffer()
}
if attrs.ResetContext {
if log != nil {
log.WriteString(" resetting bag to empty state\n")
}
mb.Reset()
}
// delete requested attributes
for _, d := range attrs.DeletedAttributes {
if name, present := dictionary[d]; present {
if log != nil {
log.WriteString(fmt.Sprintf(" attempting to delete attribute %s\n", name))
}
delete(mb.values, name)
}
}
// apply all attributes
for k, v := range attrs.StringAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating string attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.Int64Attributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating int64 attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.DoubleAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating double attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.BoolAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating bool attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.TimestampAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating time attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.DurationAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating duration attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.BytesAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating bytes attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.StringMapAttributes {
m, ok := mb.values[dictionary[k]].(map[string]string)
if !ok {
m = make(map[string]string)
mb.values[dictionary[k]] = m
}
if log != nil {
log.WriteString(fmt.Sprintf(" updating stringmap attribute %s from\n", dictionary[k]))
if len(m) > 0 {
for k2, v2 := range m {
log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
}
} else {
log.WriteString(" <empty>\n")
}
log.WriteString(" to\n")
}
for k2, v2 := range v.Map {
m[dictionary[k2]] = v2
}
if log != nil {
if len(m) > 0 {
for k2, v2 := range m {
log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
}
} else {
log.WriteString(" <empty>\n")
}
}
}
if log != nil {
if log.Len() > 0 {
glog.Infof("Updating attribute bag %d:\n%s", mb.id, log.String())
}
}
return nil
}
| {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
} | conditional_block |
mutableBag.go | // Copyright 2016 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"github.com/golang/glog"
me "github.com/hashicorp/go-multierror"
mixerpb "istio.io/api/mixer/v1"
"istio.io/mixer/pkg/pool"
)
// MutableBag is a generic mechanism to read and write a set of attributes.
//
// Bags can be chained together in a parent/child relationship. A child bag
// represents a delta over a parent. By default a child looks identical to
// the parent. But as mutations occur to the child, the two start to diverge.
// Resetting a child makes it look identical to its parent again.
type MutableBag struct {
parent Bag
values map[string]interface{}
id int64 // strictly for use in diagnostic messages
}
var id int64
var mutableBags = sync.Pool{
New: func() interface{} {
return &MutableBag{
values: make(map[string]interface{}),
id: atomic.AddInt64(&id, 1),
}
},
}
// GetMutableBag returns an initialized bag.
//
// Bags can be chained in a parent/child relationship. You can pass nil if the
// bag has no parent.
//
// When you are done using the mutable bag, call the Done method to recycle it.
func GetMutableBag(parent Bag) *MutableBag {
mb := mutableBags.Get().(*MutableBag)
mb.parent = parent
if parent == nil {
mb.parent = empty
}
return mb
}
// CopyBag makes a deep copy of MutableBag.
func CopyBag(b Bag) *MutableBag {
mb := GetMutableBag(nil)
for _, k := range b.Names() {
v, _ := b.Get(k)
mb.Set(k, copyValue(v))
}
return mb
}
// Given an attribute value, create a deep copy of it
func | (v interface{}) interface{} {
switch t := v.(type) {
case []byte:
c := make([]byte, len(t))
copy(c, t)
return c
case map[string]string:
c := make(map[string]string, len(t))
for k2, v2 := range t {
c[k2] = v2
}
return c
}
return v
}
// Done indicates the bag can be reclaimed.
func (mb *MutableBag) Done() {
mb.Reset()
mb.parent = nil
mutableBags.Put(mb)
}
// Get returns an attribute value.
func (mb *MutableBag) Get(name string) (interface{}, bool) {
var r interface{}
var b bool
if r, b = mb.values[name]; !b {
r, b = mb.parent.Get(name)
}
return r, b
}
// Names return the names of all the attributes known to this bag.
func (mb *MutableBag) Names() []string {
i := 0
keys := make([]string, len(mb.values))
for k := range mb.values {
keys[i] = k
i++
}
return append(keys, mb.parent.Names()...)
}
// Set creates an override for a named attribute.
func (mb *MutableBag) Set(name string, value interface{}) {
mb.values[name] = value
}
// Reset removes all local state.
func (mb *MutableBag) Reset() {
// my kingdom for a clear method on maps!
for k := range mb.values {
delete(mb.values, k)
}
}
// Merge combines an array of bags into the current bag.
//
// The individual bags may not contain any conflicting attribute
// values. If that happens, then the merge fails and no mutation
// will have occurred to the current bag.
func (mb *MutableBag) Merge(bags ...*MutableBag) error {
// first step is to make sure there are no redundant definitions of the same attribute
keys := make(map[string]bool)
for _, bag := range bags {
if bag == nil {
continue
}
for k := range bag.values {
if keys[k] {
return fmt.Errorf("conflicting value for attribute %s", k)
}
keys[k] = true
}
}
// now that we know there are no conflicting definitions, do the actual merging...
for _, bag := range bags {
if bag == nil {
continue
}
for k, v := range bag.values {
mb.values[k] = copyValue(v)
}
}
return nil
}
// Child allocates a derived mutable bag.
//
// Mutating a child doesn't affect the parent's state, all mutations are deltas.
func (mb *MutableBag) Child() *MutableBag {
return GetMutableBag(mb)
}
// Ensure that all dictionary indices are valid and that all values
// are in range.
//
// Note that since we don't have the attribute schema, this doesn't validate
// that a given attribute is being treated as the right type. That is, an
// attribute called 'source.ip' which is of type IP_ADDRESS could be listed as
// a string or an int, and we wouldn't catch it here.
func checkPreconditions(dictionary dictionary, attrs *mixerpb.Attributes) error {
var e *me.Error
for k := range attrs.StringAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.Int64Attributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.DoubleAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.BoolAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.TimestampAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.DurationAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.BytesAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k, v := range attrs.StringMapAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
for k2 := range v.Map {
if _, present := dictionary[k2]; !present {
e = me.Append(e, fmt.Errorf("string map index %d is not defined in the current dictionary", k2))
}
}
}
// TODO: we should catch the case where the same attribute is being repeated in different types
// (that is, an attribute called FOO which is both an int and a string for example)
return e.ErrorOrNil()
}
// Update the state of the bag based on the content of an Attributes struct
func (mb *MutableBag) update(dictionary dictionary, attrs *mixerpb.Attributes) error {
// check preconditions up front and bail if there are any
// errors without mutating the bag.
if err := checkPreconditions(dictionary, attrs); err != nil {
return err
}
var log *bytes.Buffer
if glog.V(2) {
log = pool.GetBuffer()
}
if attrs.ResetContext {
if log != nil {
log.WriteString(" resetting bag to empty state\n")
}
mb.Reset()
}
// delete requested attributes
for _, d := range attrs.DeletedAttributes {
if name, present := dictionary[d]; present {
if log != nil {
log.WriteString(fmt.Sprintf(" attempting to delete attribute %s\n", name))
}
delete(mb.values, name)
}
}
// apply all attributes
for k, v := range attrs.StringAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating string attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.Int64Attributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating int64 attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.DoubleAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating double attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.BoolAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating bool attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.TimestampAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating time attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.DurationAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating duration attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.BytesAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating bytes attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.StringMapAttributes {
m, ok := mb.values[dictionary[k]].(map[string]string)
if !ok {
m = make(map[string]string)
mb.values[dictionary[k]] = m
}
if log != nil {
log.WriteString(fmt.Sprintf(" updating stringmap attribute %s from\n", dictionary[k]))
if len(m) > 0 {
for k2, v2 := range m {
log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
}
} else {
log.WriteString(" <empty>\n")
}
log.WriteString(" to\n")
}
for k2, v2 := range v.Map {
m[dictionary[k2]] = v2
}
if log != nil {
if len(m) > 0 {
for k2, v2 := range m {
log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
}
} else {
log.WriteString(" <empty>\n")
}
}
}
if log != nil {
if log.Len() > 0 {
glog.Infof("Updating attribute bag %d:\n%s", mb.id, log.String())
}
}
return nil
}
| copyValue | identifier_name |
mutableBag.go | // Copyright 2016 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"github.com/golang/glog"
me "github.com/hashicorp/go-multierror"
mixerpb "istio.io/api/mixer/v1"
"istio.io/mixer/pkg/pool"
)
// MutableBag is a generic mechanism to read and write a set of attributes.
//
// Bags can be chained together in a parent/child relationship. A child bag
// represents a delta over a parent. By default a child looks identical to
// the parent. But as mutations occur to the child, the two start to diverge.
// Resetting a child makes it look identical to its parent again.
type MutableBag struct {
parent Bag
values map[string]interface{}
id int64 // strictly for use in diagnostic messages
}
var id int64
var mutableBags = sync.Pool{
New: func() interface{} {
return &MutableBag{
values: make(map[string]interface{}),
id: atomic.AddInt64(&id, 1),
}
},
}
// GetMutableBag returns an initialized bag.
//
// Bags can be chained in a parent/child relationship. You can pass nil if the
// bag has no parent.
//
// When you are done using the mutable bag, call the Done method to recycle it.
func GetMutableBag(parent Bag) *MutableBag {
mb := mutableBags.Get().(*MutableBag)
mb.parent = parent
if parent == nil {
mb.parent = empty
}
return mb
}
// CopyBag makes a deep copy of MutableBag.
func CopyBag(b Bag) *MutableBag {
mb := GetMutableBag(nil)
for _, k := range b.Names() {
v, _ := b.Get(k)
mb.Set(k, copyValue(v))
}
return mb
}
// Given an attribute value, create a deep copy of it
func copyValue(v interface{}) interface{} {
switch t := v.(type) {
case []byte:
c := make([]byte, len(t))
copy(c, t)
return c
case map[string]string:
c := make(map[string]string, len(t))
for k2, v2 := range t {
c[k2] = v2
}
return c
}
return v
}
// Done indicates the bag can be reclaimed.
func (mb *MutableBag) Done() {
mb.Reset()
mb.parent = nil
mutableBags.Put(mb)
}
// Get returns an attribute value.
func (mb *MutableBag) Get(name string) (interface{}, bool) {
var r interface{}
var b bool
if r, b = mb.values[name]; !b {
r, b = mb.parent.Get(name)
}
return r, b
}
// Names return the names of all the attributes known to this bag.
func (mb *MutableBag) Names() []string {
i := 0
keys := make([]string, len(mb.values))
for k := range mb.values {
keys[i] = k
i++
}
return append(keys, mb.parent.Names()...)
}
// Set creates an override for a named attribute.
func (mb *MutableBag) Set(name string, value interface{}) {
mb.values[name] = value
}
// Reset removes all local state.
func (mb *MutableBag) Reset() {
// my kingdom for a clear method on maps!
for k := range mb.values {
delete(mb.values, k)
}
}
// Merge combines an array of bags into the current bag.
//
// The individual bags may not contain any conflicting attribute
// values. If that happens, then the merge fails and no mutation
// will have occurred to the current bag.
func (mb *MutableBag) Merge(bags ...*MutableBag) error {
// first step is to make sure there are no redundant definitions of the same attribute
keys := make(map[string]bool)
for _, bag := range bags {
if bag == nil {
continue
}
for k := range bag.values {
if keys[k] {
return fmt.Errorf("conflicting value for attribute %s", k)
}
keys[k] = true
}
}
// now that we know there are no conflicting definitions, do the actual merging...
for _, bag := range bags {
if bag == nil {
continue
}
for k, v := range bag.values {
mb.values[k] = copyValue(v)
}
}
return nil
}
// Child allocates a derived mutable bag.
//
// Mutating a child doesn't affect the parent's state, all mutations are deltas.
func (mb *MutableBag) Child() *MutableBag {
return GetMutableBag(mb)
}
// Ensure that all dictionary indices are valid and that all values
// are in range.
//
// Note that since we don't have the attribute schema, this doesn't validate
// that a given attribute is being treated as the right type. That is, an
// attribute called 'source.ip' which is of type IP_ADDRESS could be listed as
// a string or an int, and we wouldn't catch it here.
func checkPreconditions(dictionary dictionary, attrs *mixerpb.Attributes) error {
var e *me.Error
for k := range attrs.StringAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.Int64Attributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.DoubleAttributes {
if _, present := dictionary[k]; !present { | e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.BoolAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.TimestampAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.DurationAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k := range attrs.BytesAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
}
for k, v := range attrs.StringMapAttributes {
if _, present := dictionary[k]; !present {
e = me.Append(e, fmt.Errorf("attribute index %d is not defined in the current dictionary", k))
}
for k2 := range v.Map {
if _, present := dictionary[k2]; !present {
e = me.Append(e, fmt.Errorf("string map index %d is not defined in the current dictionary", k2))
}
}
}
// TODO: we should catch the case where the same attribute is being repeated in different types
// (that is, an attribute called FOO which is both an int and a string for example)
return e.ErrorOrNil()
}
// Update the state of the bag based on the content of an Attributes struct
func (mb *MutableBag) update(dictionary dictionary, attrs *mixerpb.Attributes) error {
// check preconditions up front and bail if there are any
// errors without mutating the bag.
if err := checkPreconditions(dictionary, attrs); err != nil {
return err
}
var log *bytes.Buffer
if glog.V(2) {
log = pool.GetBuffer()
}
if attrs.ResetContext {
if log != nil {
log.WriteString(" resetting bag to empty state\n")
}
mb.Reset()
}
// delete requested attributes
for _, d := range attrs.DeletedAttributes {
if name, present := dictionary[d]; present {
if log != nil {
log.WriteString(fmt.Sprintf(" attempting to delete attribute %s\n", name))
}
delete(mb.values, name)
}
}
// apply all attributes
for k, v := range attrs.StringAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating string attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.Int64Attributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating int64 attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.DoubleAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating double attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.BoolAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating bool attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.TimestampAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating time attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.DurationAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating duration attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.BytesAttributes {
if log != nil {
log.WriteString(fmt.Sprintf(" updating bytes attribute %s from '%v' to '%v'\n", dictionary[k], mb.values[dictionary[k]], v))
}
mb.values[dictionary[k]] = v
}
for k, v := range attrs.StringMapAttributes {
m, ok := mb.values[dictionary[k]].(map[string]string)
if !ok {
m = make(map[string]string)
mb.values[dictionary[k]] = m
}
if log != nil {
log.WriteString(fmt.Sprintf(" updating stringmap attribute %s from\n", dictionary[k]))
if len(m) > 0 {
for k2, v2 := range m {
log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
}
} else {
log.WriteString(" <empty>\n")
}
log.WriteString(" to\n")
}
for k2, v2 := range v.Map {
m[dictionary[k2]] = v2
}
if log != nil {
if len(m) > 0 {
for k2, v2 := range m {
log.WriteString(fmt.Sprintf(" %s:%s\n", k2, v2))
}
} else {
log.WriteString(" <empty>\n")
}
}
}
if log != nil {
if log.Len() > 0 {
glog.Infof("Updating attribute bag %d:\n%s", mb.id, log.String())
}
}
return nil
} | random_line_split | |
parse_rfc.py | #! /usr/bin/env python3
# Copyright (c) 2021, The University of Southern California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import pathlib
import sys
from tabulate import tabulate
from termcolor import colored
from Parser import Parser, preprocess_sent, tokenize, string_to_predicate, is_quote_word
from dictionary import RAW_LEXICON
CUR_DIR = pathlib.Path(__file__).parent.absolute()
LFC_DIR = CUR_DIR / '..' / 'logic_form_checker'
sys.path.insert(0, str(LFC_DIR))
import check_logic_forms as clf
MDS_DIR = CUR_DIR / '..' / 'metadata_system'
sys.path.insert(0, str(MDS_DIR))
import sentence_record
def rfc_lex_parse(cli_args: argparse.Namespace):
""" All-in-one functon to parse and process a sentence
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
sent_tokenized = tokenize(new_sent)
if cli_args.debug:
print(sent)
print(new_sent)
print(sent_tokenized)
parser = Parser()
results = []
denylist = ('\\', 'None')
for tokenized in sent_tokenized:
parses, names, lex_dict, child_dict, bp_exception = parser.parse(tokenized)
if cli_args.debug:
print(parses)
if bp_exception:
print(f'beam_parse: {bp_exception.__class__}: {bp_exception}')
for parse, name in zip(parses, names):
if not any(s in parse for s in denylist):
results.append(parse)
if cli_args.debug:
format_dict = {'parse': parse,
'name': name,
'lex': lex_dict[name],
'child': child_dict[name]}
print("{parse} --> \n\t{name}: {lex}\n{child}\n".format(**format_dict))
num_results = len(results)
if num_results == 0:
return '', None
ir_results = list(set(results))
print(f"IR numbers: {len(ir_results)}")
if cli_args.debug:
print(ir_results)
if num_results > 1:
print(colored("multiple logical forms", 'red'))
lf_graphs = []
if cli_args.check and num_results > 1:
print('Find equivalent logical forms:')
lf_graphs = clf.check_all(ir_results, checks=cli_args.checks, verbose=True)
else:
lf_graphs = clf.convert_all(ir_results)
print(colored('Final logical forms:', 'green'))
clf.print_all(lf_graphs)
# file/db IO
if cli_args.wrtdot:
clf.export_all(lf_graphs, out_dir='/tmp', out_format='pdf')
if not cli_args.norecord:
if cli_args.debug:
print('Recording logical forms to metada system')
record_logical_form_graphs(sent, lf_graphs, cli_args.env,
cli_args.msg_type, cli_args.field_name)
try:
lf = lf_graphs[0]['graph'].logic_form
except IndexError:
lf = ''
return lf, retrieve_sentence_and_id(sent)
def retrieve_sentence_and_id(label_sent: str) -> tuple:
""" Retrieve sentence and id by labelled sentence
Parameter:
label_sent (str): labelled sentence
"""
sentence_db = sentence_record.SentenceDB()
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0]
sentence_id = mapping[0][1] |
def record_logical_form_graphs(label_sent: str, logical_form_graphs: list, env: str,
msg_type: str, field: str):
""" Write logical forms and logical form graphs to metadata system.
Parameter:
label_sent (str): labelled sentence
logical_form_graphs (list): dicts of id (int) and a graph (LogicalFormGraph)
"""
label_sent = label_sent.lstrip(' ')
sentence_db = sentence_record.SentenceDB()
for lf_graph in logical_form_graphs:
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0].lstrip(' ')
sentence_id = mapping[0][1]
msg = msg_type
lf = lf_graph['graph'].logic_form
sent_record = sentence_record.SentenceRecord(sentence=sentence,
sent_id=sentence_id,
msg_type=msg,
field=field,
label=label_sent,
lf=lf,
env=env)
sentence_db.replace_value(sent_record)
sentence_db.update_lf_graph(sent_record, lf_graph['graph'].graph)
except IndexError:
txt = (f'Error in recording logical form: '
f'no entry for label "{label_sent}".')
print(txt)
def display_debug_information(cli_args: argparse.Namespace):
""" Debug and analyze how a sentence is parsed
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
print(f'Parsed sentence: {sent}')
print(f'Split the sentence: {new_sent}')
raw_lexicon = RAW_LEXICON.split("\n")
raw_lexicon = [x.lstrip() for x in raw_lexicon]
lexicon_mapping = {}
for lexicon in raw_lexicon:
try:
lexicon_split = lexicon.split(" => ")
pred, lex = lexicon_split[0], lexicon_split[1]
if pred not in lexicon_mapping:
lexicon_mapping[pred] = [lex]
else:
lexicon_mapping[pred].append(lex)
except:
continue
result = []
for token in new_sent:
predicates = string_to_predicate(token)
if not predicates:
print('Bad token:')
print(f'\t\"{token}\" has no predicate mapping')
new_sent.remove(token)
else:
for predicate in predicates:
if is_quote_word(predicate):
lexicon_mapping[predicate] = 'NP'
mapped_lex = lexicon_mapping[predicate]
for mapping in mapped_lex:
result.append([token, predicate, mapping])
print('\n')
print(tabulate(result, headers=["Token", "Predicate", "Lexicon"]))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument(
'--str', '-s',
help='Specify target filename',
default="The 'checksum' is zero",
)
argparser.add_argument(
'--check', '-c',
help='Check equivalent logic forms',
action="store_true",
)
argparser.add_argument(
'--checks', '-C',
help='Checks to execute',
choices=clf.CHECKS,
nargs='+',
)
argparser.add_argument(
'--pushir', '-pi',
help='Push IR to MDS for assign/associate',
action="store_true",
)
argparser.add_argument(
'--irstr', '-is',
help='Pushed IR string (only one)',
type=str,
default='',
)
argparser.add_argument(
'--norecord', '-nr',
help='Do not store LF graphs in Metadata System',
action="store_true",
)
argparser.add_argument(
'--debug', '-d',
help='Enable debug mode',
action="store_true",
)
argparser.add_argument(
'--wrtdot', '-w',
help='Write logical form graphs to pdf to /tmp/lfg-*.',
action="store_true",
)
argparser.add_argument(
'--env', '-e',
help='Environment to aid logic form processing',
type=str,
default='',
)
argparser.add_argument(
'--msg_type', '-m',
help='msg_type to aid register to MDS',
default=''
)
argparser.add_argument(
'--field_name', '-n',
help='field to aid register to MDS',
default=''
)
argparser.add_argument(
'--display_debug', '-dd',
help='Display debug message without filtering non-complete sentence parsing',
action="store_true",
)
args = argparser.parse_args()
if args.display_debug:
display_debug_information(args)
else:
parsed, recv = rfc_lex_parse(args)
if not None in (parsed, recv):
send_back_data = f"{parsed}~{recv[0]}~{str(recv[1])}~"
ccg_result_file = CUR_DIR / 'CCGresult.txt'
with open(ccg_result_file, "w") as f:
f.write(send_back_data) | except IndexError:
print('Failed to retrieve sentence and sentence_id')
sentence = ''
sentence_id = -1
return (sentence, sentence_id) | random_line_split |
parse_rfc.py | #! /usr/bin/env python3
# Copyright (c) 2021, The University of Southern California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import pathlib
import sys
from tabulate import tabulate
from termcolor import colored
from Parser import Parser, preprocess_sent, tokenize, string_to_predicate, is_quote_word
from dictionary import RAW_LEXICON
CUR_DIR = pathlib.Path(__file__).parent.absolute()
LFC_DIR = CUR_DIR / '..' / 'logic_form_checker'
sys.path.insert(0, str(LFC_DIR))
import check_logic_forms as clf
MDS_DIR = CUR_DIR / '..' / 'metadata_system'
sys.path.insert(0, str(MDS_DIR))
import sentence_record
def rfc_lex_parse(cli_args: argparse.Namespace):
""" All-in-one functon to parse and process a sentence
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
sent_tokenized = tokenize(new_sent)
if cli_args.debug:
print(sent)
print(new_sent)
print(sent_tokenized)
parser = Parser()
results = []
denylist = ('\\', 'None')
for tokenized in sent_tokenized:
parses, names, lex_dict, child_dict, bp_exception = parser.parse(tokenized)
if cli_args.debug:
print(parses)
if bp_exception:
print(f'beam_parse: {bp_exception.__class__}: {bp_exception}')
for parse, name in zip(parses, names):
if not any(s in parse for s in denylist):
results.append(parse)
if cli_args.debug:
format_dict = {'parse': parse,
'name': name,
'lex': lex_dict[name],
'child': child_dict[name]}
print("{parse} --> \n\t{name}: {lex}\n{child}\n".format(**format_dict))
num_results = len(results)
if num_results == 0:
return '', None
ir_results = list(set(results))
print(f"IR numbers: {len(ir_results)}")
if cli_args.debug:
print(ir_results)
if num_results > 1:
print(colored("multiple logical forms", 'red'))
lf_graphs = []
if cli_args.check and num_results > 1:
print('Find equivalent logical forms:')
lf_graphs = clf.check_all(ir_results, checks=cli_args.checks, verbose=True)
else:
lf_graphs = clf.convert_all(ir_results)
print(colored('Final logical forms:', 'green'))
clf.print_all(lf_graphs)
# file/db IO
if cli_args.wrtdot:
clf.export_all(lf_graphs, out_dir='/tmp', out_format='pdf')
if not cli_args.norecord:
if cli_args.debug:
print('Recording logical forms to metada system')
record_logical_form_graphs(sent, lf_graphs, cli_args.env,
cli_args.msg_type, cli_args.field_name)
try:
lf = lf_graphs[0]['graph'].logic_form
except IndexError:
lf = ''
return lf, retrieve_sentence_and_id(sent)
def retrieve_sentence_and_id(label_sent: str) -> tuple:
""" Retrieve sentence and id by labelled sentence
Parameter:
label_sent (str): labelled sentence
"""
sentence_db = sentence_record.SentenceDB()
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0]
sentence_id = mapping[0][1]
except IndexError:
print('Failed to retrieve sentence and sentence_id')
sentence = ''
sentence_id = -1
return (sentence, sentence_id)
def record_logical_form_graphs(label_sent: str, logical_form_graphs: list, env: str,
msg_type: str, field: str):
""" Write logical forms and logical form graphs to metadata system.
Parameter:
label_sent (str): labelled sentence
logical_form_graphs (list): dicts of id (int) and a graph (LogicalFormGraph)
"""
label_sent = label_sent.lstrip(' ')
sentence_db = sentence_record.SentenceDB()
for lf_graph in logical_form_graphs:
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0].lstrip(' ')
sentence_id = mapping[0][1]
msg = msg_type
lf = lf_graph['graph'].logic_form
sent_record = sentence_record.SentenceRecord(sentence=sentence,
sent_id=sentence_id,
msg_type=msg,
field=field,
label=label_sent,
lf=lf,
env=env)
sentence_db.replace_value(sent_record)
sentence_db.update_lf_graph(sent_record, lf_graph['graph'].graph)
except IndexError:
txt = (f'Error in recording logical form: '
f'no entry for label "{label_sent}".')
print(txt)
def display_debug_information(cli_args: argparse.Namespace):
""" Debug and analyze how a sentence is parsed
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
print(f'Parsed sentence: {sent}')
print(f'Split the sentence: {new_sent}')
raw_lexicon = RAW_LEXICON.split("\n")
raw_lexicon = [x.lstrip() for x in raw_lexicon]
lexicon_mapping = {}
for lexicon in raw_lexicon:
try:
lexicon_split = lexicon.split(" => ")
pred, lex = lexicon_split[0], lexicon_split[1]
if pred not in lexicon_mapping:
lexicon_mapping[pred] = [lex]
else:
lexicon_mapping[pred].append(lex)
except:
continue
result = []
for token in new_sent:
predicates = string_to_predicate(token)
if not predicates:
print('Bad token:')
print(f'\t\"{token}\" has no predicate mapping')
new_sent.remove(token)
else:
for predicate in predicates:
|
print('\n')
print(tabulate(result, headers=["Token", "Predicate", "Lexicon"]))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument(
'--str', '-s',
help='Specify target filename',
default="The 'checksum' is zero",
)
argparser.add_argument(
'--check', '-c',
help='Check equivalent logic forms',
action="store_true",
)
argparser.add_argument(
'--checks', '-C',
help='Checks to execute',
choices=clf.CHECKS,
nargs='+',
)
argparser.add_argument(
'--pushir', '-pi',
help='Push IR to MDS for assign/associate',
action="store_true",
)
argparser.add_argument(
'--irstr', '-is',
help='Pushed IR string (only one)',
type=str,
default='',
)
argparser.add_argument(
'--norecord', '-nr',
help='Do not store LF graphs in Metadata System',
action="store_true",
)
argparser.add_argument(
'--debug', '-d',
help='Enable debug mode',
action="store_true",
)
argparser.add_argument(
'--wrtdot', '-w',
help='Write logical form graphs to pdf to /tmp/lfg-*.',
action="store_true",
)
argparser.add_argument(
'--env', '-e',
help='Environment to aid logic form processing',
type=str,
default='',
)
argparser.add_argument(
'--msg_type', '-m',
help='msg_type to aid register to MDS',
default=''
)
argparser.add_argument(
'--field_name', '-n',
help='field to aid register to MDS',
default=''
)
argparser.add_argument(
'--display_debug', '-dd',
help='Display debug message without filtering non-complete sentence parsing',
action="store_true",
)
args = argparser.parse_args()
if args.display_debug:
display_debug_information(args)
else:
parsed, recv = rfc_lex_parse(args)
if not None in (parsed, recv):
send_back_data = f"{parsed}~{recv[0]}~{str(recv[1])}~"
ccg_result_file = CUR_DIR / 'CCGresult.txt'
with open(ccg_result_file, "w") as f:
f.write(send_back_data)
| if is_quote_word(predicate):
lexicon_mapping[predicate] = 'NP'
mapped_lex = lexicon_mapping[predicate]
for mapping in mapped_lex:
result.append([token, predicate, mapping]) | conditional_block |
parse_rfc.py | #! /usr/bin/env python3
# Copyright (c) 2021, The University of Southern California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import pathlib
import sys
from tabulate import tabulate
from termcolor import colored
from Parser import Parser, preprocess_sent, tokenize, string_to_predicate, is_quote_word
from dictionary import RAW_LEXICON
CUR_DIR = pathlib.Path(__file__).parent.absolute()
LFC_DIR = CUR_DIR / '..' / 'logic_form_checker'
sys.path.insert(0, str(LFC_DIR))
import check_logic_forms as clf
MDS_DIR = CUR_DIR / '..' / 'metadata_system'
sys.path.insert(0, str(MDS_DIR))
import sentence_record
def rfc_lex_parse(cli_args: argparse.Namespace):
""" All-in-one functon to parse and process a sentence
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
sent_tokenized = tokenize(new_sent)
if cli_args.debug:
print(sent)
print(new_sent)
print(sent_tokenized)
parser = Parser()
results = []
denylist = ('\\', 'None')
for tokenized in sent_tokenized:
parses, names, lex_dict, child_dict, bp_exception = parser.parse(tokenized)
if cli_args.debug:
print(parses)
if bp_exception:
print(f'beam_parse: {bp_exception.__class__}: {bp_exception}')
for parse, name in zip(parses, names):
if not any(s in parse for s in denylist):
results.append(parse)
if cli_args.debug:
format_dict = {'parse': parse,
'name': name,
'lex': lex_dict[name],
'child': child_dict[name]}
print("{parse} --> \n\t{name}: {lex}\n{child}\n".format(**format_dict))
num_results = len(results)
if num_results == 0:
return '', None
ir_results = list(set(results))
print(f"IR numbers: {len(ir_results)}")
if cli_args.debug:
print(ir_results)
if num_results > 1:
print(colored("multiple logical forms", 'red'))
lf_graphs = []
if cli_args.check and num_results > 1:
print('Find equivalent logical forms:')
lf_graphs = clf.check_all(ir_results, checks=cli_args.checks, verbose=True)
else:
lf_graphs = clf.convert_all(ir_results)
print(colored('Final logical forms:', 'green'))
clf.print_all(lf_graphs)
# file/db IO
if cli_args.wrtdot:
clf.export_all(lf_graphs, out_dir='/tmp', out_format='pdf')
if not cli_args.norecord:
if cli_args.debug:
print('Recording logical forms to metada system')
record_logical_form_graphs(sent, lf_graphs, cli_args.env,
cli_args.msg_type, cli_args.field_name)
try:
lf = lf_graphs[0]['graph'].logic_form
except IndexError:
lf = ''
return lf, retrieve_sentence_and_id(sent)
def retrieve_sentence_and_id(label_sent: str) -> tuple:
""" Retrieve sentence and id by labelled sentence
Parameter:
label_sent (str): labelled sentence
"""
sentence_db = sentence_record.SentenceDB()
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0]
sentence_id = mapping[0][1]
except IndexError:
print('Failed to retrieve sentence and sentence_id')
sentence = ''
sentence_id = -1
return (sentence, sentence_id)
def record_logical_form_graphs(label_sent: str, logical_form_graphs: list, env: str,
msg_type: str, field: str):
""" Write logical forms and logical form graphs to metadata system.
Parameter:
label_sent (str): labelled sentence
logical_form_graphs (list): dicts of id (int) and a graph (LogicalFormGraph)
"""
label_sent = label_sent.lstrip(' ')
sentence_db = sentence_record.SentenceDB()
for lf_graph in logical_form_graphs:
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0].lstrip(' ')
sentence_id = mapping[0][1]
msg = msg_type
lf = lf_graph['graph'].logic_form
sent_record = sentence_record.SentenceRecord(sentence=sentence,
sent_id=sentence_id,
msg_type=msg,
field=field,
label=label_sent,
lf=lf,
env=env)
sentence_db.replace_value(sent_record)
sentence_db.update_lf_graph(sent_record, lf_graph['graph'].graph)
except IndexError:
txt = (f'Error in recording logical form: '
f'no entry for label "{label_sent}".')
print(txt)
def display_debug_information(cli_args: argparse.Namespace):
|
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument(
'--str', '-s',
help='Specify target filename',
default="The 'checksum' is zero",
)
argparser.add_argument(
'--check', '-c',
help='Check equivalent logic forms',
action="store_true",
)
argparser.add_argument(
'--checks', '-C',
help='Checks to execute',
choices=clf.CHECKS,
nargs='+',
)
argparser.add_argument(
'--pushir', '-pi',
help='Push IR to MDS for assign/associate',
action="store_true",
)
argparser.add_argument(
'--irstr', '-is',
help='Pushed IR string (only one)',
type=str,
default='',
)
argparser.add_argument(
'--norecord', '-nr',
help='Do not store LF graphs in Metadata System',
action="store_true",
)
argparser.add_argument(
'--debug', '-d',
help='Enable debug mode',
action="store_true",
)
argparser.add_argument(
'--wrtdot', '-w',
help='Write logical form graphs to pdf to /tmp/lfg-*.',
action="store_true",
)
argparser.add_argument(
'--env', '-e',
help='Environment to aid logic form processing',
type=str,
default='',
)
argparser.add_argument(
'--msg_type', '-m',
help='msg_type to aid register to MDS',
default=''
)
argparser.add_argument(
'--field_name', '-n',
help='field to aid register to MDS',
default=''
)
argparser.add_argument(
'--display_debug', '-dd',
help='Display debug message without filtering non-complete sentence parsing',
action="store_true",
)
args = argparser.parse_args()
if args.display_debug:
display_debug_information(args)
else:
parsed, recv = rfc_lex_parse(args)
if not None in (parsed, recv):
send_back_data = f"{parsed}~{recv[0]}~{str(recv[1])}~"
ccg_result_file = CUR_DIR / 'CCGresult.txt'
with open(ccg_result_file, "w") as f:
f.write(send_back_data)
| """ Debug and analyze how a sentence is parsed
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
print(f'Parsed sentence: {sent}')
print(f'Split the sentence: {new_sent}')
raw_lexicon = RAW_LEXICON.split("\n")
raw_lexicon = [x.lstrip() for x in raw_lexicon]
lexicon_mapping = {}
for lexicon in raw_lexicon:
try:
lexicon_split = lexicon.split(" => ")
pred, lex = lexicon_split[0], lexicon_split[1]
if pred not in lexicon_mapping:
lexicon_mapping[pred] = [lex]
else:
lexicon_mapping[pred].append(lex)
except:
continue
result = []
for token in new_sent:
predicates = string_to_predicate(token)
if not predicates:
print('Bad token:')
print(f'\t\"{token}\" has no predicate mapping')
new_sent.remove(token)
else:
for predicate in predicates:
if is_quote_word(predicate):
lexicon_mapping[predicate] = 'NP'
mapped_lex = lexicon_mapping[predicate]
for mapping in mapped_lex:
result.append([token, predicate, mapping])
print('\n')
print(tabulate(result, headers=["Token", "Predicate", "Lexicon"])) | identifier_body |
parse_rfc.py | #! /usr/bin/env python3
# Copyright (c) 2021, The University of Southern California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import pathlib
import sys
from tabulate import tabulate
from termcolor import colored
from Parser import Parser, preprocess_sent, tokenize, string_to_predicate, is_quote_word
from dictionary import RAW_LEXICON
CUR_DIR = pathlib.Path(__file__).parent.absolute()
LFC_DIR = CUR_DIR / '..' / 'logic_form_checker'
sys.path.insert(0, str(LFC_DIR))
import check_logic_forms as clf
MDS_DIR = CUR_DIR / '..' / 'metadata_system'
sys.path.insert(0, str(MDS_DIR))
import sentence_record
def rfc_lex_parse(cli_args: argparse.Namespace):
""" All-in-one functon to parse and process a sentence
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
sent_tokenized = tokenize(new_sent)
if cli_args.debug:
print(sent)
print(new_sent)
print(sent_tokenized)
parser = Parser()
results = []
denylist = ('\\', 'None')
for tokenized in sent_tokenized:
parses, names, lex_dict, child_dict, bp_exception = parser.parse(tokenized)
if cli_args.debug:
print(parses)
if bp_exception:
print(f'beam_parse: {bp_exception.__class__}: {bp_exception}')
for parse, name in zip(parses, names):
if not any(s in parse for s in denylist):
results.append(parse)
if cli_args.debug:
format_dict = {'parse': parse,
'name': name,
'lex': lex_dict[name],
'child': child_dict[name]}
print("{parse} --> \n\t{name}: {lex}\n{child}\n".format(**format_dict))
num_results = len(results)
if num_results == 0:
return '', None
ir_results = list(set(results))
print(f"IR numbers: {len(ir_results)}")
if cli_args.debug:
print(ir_results)
if num_results > 1:
print(colored("multiple logical forms", 'red'))
lf_graphs = []
if cli_args.check and num_results > 1:
print('Find equivalent logical forms:')
lf_graphs = clf.check_all(ir_results, checks=cli_args.checks, verbose=True)
else:
lf_graphs = clf.convert_all(ir_results)
print(colored('Final logical forms:', 'green'))
clf.print_all(lf_graphs)
# file/db IO
if cli_args.wrtdot:
clf.export_all(lf_graphs, out_dir='/tmp', out_format='pdf')
if not cli_args.norecord:
if cli_args.debug:
print('Recording logical forms to metada system')
record_logical_form_graphs(sent, lf_graphs, cli_args.env,
cli_args.msg_type, cli_args.field_name)
try:
lf = lf_graphs[0]['graph'].logic_form
except IndexError:
lf = ''
return lf, retrieve_sentence_and_id(sent)
def | (label_sent: str) -> tuple:
""" Retrieve sentence and id by labelled sentence
Parameter:
label_sent (str): labelled sentence
"""
sentence_db = sentence_record.SentenceDB()
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0]
sentence_id = mapping[0][1]
except IndexError:
print('Failed to retrieve sentence and sentence_id')
sentence = ''
sentence_id = -1
return (sentence, sentence_id)
def record_logical_form_graphs(label_sent: str, logical_form_graphs: list, env: str,
msg_type: str, field: str):
""" Write logical forms and logical form graphs to metadata system.
Parameter:
label_sent (str): labelled sentence
logical_form_graphs (list): dicts of id (int) and a graph (LogicalFormGraph)
"""
label_sent = label_sent.lstrip(' ')
sentence_db = sentence_record.SentenceDB()
for lf_graph in logical_form_graphs:
mapping = sentence_db.get_mapping_by_label(label_sent)
try:
sentence = mapping[0][0].lstrip(' ')
sentence_id = mapping[0][1]
msg = msg_type
lf = lf_graph['graph'].logic_form
sent_record = sentence_record.SentenceRecord(sentence=sentence,
sent_id=sentence_id,
msg_type=msg,
field=field,
label=label_sent,
lf=lf,
env=env)
sentence_db.replace_value(sent_record)
sentence_db.update_lf_graph(sent_record, lf_graph['graph'].graph)
except IndexError:
txt = (f'Error in recording logical form: '
f'no entry for label "{label_sent}".')
print(txt)
def display_debug_information(cli_args: argparse.Namespace):
""" Debug and analyze how a sentence is parsed
Parameter:
cli_args (argparse.Namespace): parsed CLI args
"""
sent = cli_args.str
new_sent = preprocess_sent(sent)
print(f'Parsed sentence: {sent}')
print(f'Split the sentence: {new_sent}')
raw_lexicon = RAW_LEXICON.split("\n")
raw_lexicon = [x.lstrip() for x in raw_lexicon]
lexicon_mapping = {}
for lexicon in raw_lexicon:
try:
lexicon_split = lexicon.split(" => ")
pred, lex = lexicon_split[0], lexicon_split[1]
if pred not in lexicon_mapping:
lexicon_mapping[pred] = [lex]
else:
lexicon_mapping[pred].append(lex)
except:
continue
result = []
for token in new_sent:
predicates = string_to_predicate(token)
if not predicates:
print('Bad token:')
print(f'\t\"{token}\" has no predicate mapping')
new_sent.remove(token)
else:
for predicate in predicates:
if is_quote_word(predicate):
lexicon_mapping[predicate] = 'NP'
mapped_lex = lexicon_mapping[predicate]
for mapping in mapped_lex:
result.append([token, predicate, mapping])
print('\n')
print(tabulate(result, headers=["Token", "Predicate", "Lexicon"]))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument(
'--str', '-s',
help='Specify target filename',
default="The 'checksum' is zero",
)
argparser.add_argument(
'--check', '-c',
help='Check equivalent logic forms',
action="store_true",
)
argparser.add_argument(
'--checks', '-C',
help='Checks to execute',
choices=clf.CHECKS,
nargs='+',
)
argparser.add_argument(
'--pushir', '-pi',
help='Push IR to MDS for assign/associate',
action="store_true",
)
argparser.add_argument(
'--irstr', '-is',
help='Pushed IR string (only one)',
type=str,
default='',
)
argparser.add_argument(
'--norecord', '-nr',
help='Do not store LF graphs in Metadata System',
action="store_true",
)
argparser.add_argument(
'--debug', '-d',
help='Enable debug mode',
action="store_true",
)
argparser.add_argument(
'--wrtdot', '-w',
help='Write logical form graphs to pdf to /tmp/lfg-*.',
action="store_true",
)
argparser.add_argument(
'--env', '-e',
help='Environment to aid logic form processing',
type=str,
default='',
)
argparser.add_argument(
'--msg_type', '-m',
help='msg_type to aid register to MDS',
default=''
)
argparser.add_argument(
'--field_name', '-n',
help='field to aid register to MDS',
default=''
)
argparser.add_argument(
'--display_debug', '-dd',
help='Display debug message without filtering non-complete sentence parsing',
action="store_true",
)
args = argparser.parse_args()
if args.display_debug:
display_debug_information(args)
else:
parsed, recv = rfc_lex_parse(args)
if not None in (parsed, recv):
send_back_data = f"{parsed}~{recv[0]}~{str(recv[1])}~"
ccg_result_file = CUR_DIR / 'CCGresult.txt'
with open(ccg_result_file, "w") as f:
f.write(send_back_data)
| retrieve_sentence_and_id | identifier_name |
validation_test.go | package test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"testing"
"text/template"
argocd "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8sYaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
)
const (
manifestDir = "../"
)
var (
excludeDirs = []string{
filepath.Join(manifestDir, "bin"),
filepath.Join(manifestDir, "docs"),
filepath.Join(manifestDir, "test"),
filepath.Join(manifestDir, "vendor"),
}
)
func isKustomizationFile(name string) bool {
if name == "kustomization.yaml" || name == "kustomization.yml" || name == "Kustomization" {
return true
}
return false
}
func kustomizeBuild(dir string) ([]byte, []byte, error) {
outBuf := new(bytes.Buffer)
errBuf := new(bytes.Buffer)
cmd := exec.Command("kustomize", "build", dir)
cmd.Stdout = outBuf
cmd.Stderr = errBuf
err := cmd.Run()
return outBuf.Bytes(), errBuf.Bytes(), err
}
func | (t *testing.T) {
targetRevisions := map[string]string{
"gcp": "release",
"neco-dev": "release",
"osaka0": "release",
"stage0": "stage",
"tokyo0": "release",
}
syncWaves := map[string]string{
"namespaces": "1",
"argocd": "2",
"local-pv-provisioner": "3",
"secrets": "3",
"cert-manager": "4",
"external-dns": "4",
"metallb": "4",
"ingress": "5",
"topolvm": "5",
"unbound": "5",
"elastic": "6",
"rook": "6",
"monitoring": "7",
"sandbox": "7",
"teleport": "7",
"network-policy": "8",
"argocd-ingress": "9",
"bmc-reverse-proxy": "9",
"metrics-server": "9",
"neco-admission": "9",
"team-management": "9",
"maneki-apps": "10",
}
// Getting overlays list
overlayDirs := map[string]string{}
err := filepath.Walk(filepath.Join(manifestDir, "argocd-config", "overlays"), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && info.Name() != "overlays" {
overlayDirs[info.Name()] = path
}
return nil
})
if err != nil {
t.Error(err)
}
t.Parallel()
for overlay, targetDir := range overlayDirs {
t.Run(overlay, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(targetDir)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", targetDir, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
var app argocd.Application
err = yaml.Unmarshal(data, &app)
if err != nil {
t.Error(err)
}
// Check the tergetRevision
if targetRevisions[overlay] == "" {
t.Error(fmt.Errorf("targetRevision should exist. overlay: %s", overlay))
}
if app.Spec.Source.TargetRevision != targetRevisions[overlay] {
t.Error(fmt.Errorf("invalid targetRevision. application: %s, targetRevision: %s (should be %s)", app.Name, app.Spec.Source.TargetRevision, targetRevisions[overlay]))
}
// Check the sync wave
if syncWaves[app.Name] == "" {
t.Error(fmt.Errorf("sync-wave should exist. application: %s", app.Name))
}
if app.GetAnnotations()["argocd.argoproj.io/sync-wave"] != syncWaves[app.Name] {
t.Error(fmt.Errorf("invalid sync-wave. application: %s, sync-wave: %s (should be %s)", app.Name, app.GetAnnotations()["argocd.argoproj.io/sync-wave"], syncWaves[app.Name]))
}
}
})
}
}
// Use to check the existence of the status field in manifest files for CRDs.
// `apiextensionsv1beta1.CustomResourceDefinition` cannot be used because the status field always exists in the struct.
type crdValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Status *apiextensionsv1beta1.CustomResourceDefinitionStatus `json:"status"`
}
func testCRDStatus(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var crd crdValidation
err := yaml.Unmarshal(data, &crd)
if err != nil {
// Skip because this YAML might not be custom resource definition
return
}
if crd.Kind != "CustomResourceDefinition" {
// Skip because this YAML is not custom resource definition
return
}
if crd.Status != nil {
t.Error(errors.New(".status(Status) exists in " + crd.Metadata.Name + ", remove it to prevent occurring OutOfSync by Argo CD"))
}
})
}
type certificateValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Spec struct {
IsCA bool `json:"isCA"`
Usages []string `json:"usages"`
} `json:"spec"`
}
func testCertificateUsages(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var cert certificateValidation
err := yaml.Unmarshal(data, &cert)
if err != nil {
// Skip because this YAML might not be certificate
return
}
if cert.Kind != "Certificate" {
// Skip because this YAML is not certificate
return
}
var expected []string
if cert.Spec.IsCA {
expected = []string{"digital signature", "key encipherment", "cert sign"}
} else {
expected = []string{"digital signature", "key encipherment", "server auth", "client auth"}
}
if !reflect.DeepEqual(cert.Spec.Usages, expected) {
t.Error(errors.New(".spec.usages has incorrect list in " + cert.Metadata.Name))
}
})
}
func doCheckKustomizedYaml(t *testing.T, checkFunc func(*testing.T, []byte)) {
targets := []string{}
err := filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if !isKustomizationFile(info.Name()) {
return nil
}
targets = append(targets, filepath.Dir(path))
// Skip other files in the directory
return filepath.SkipDir
})
if err != nil {
t.Error(err)
}
for _, path := range targets {
t.Run(path, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(path)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", path, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
checkFunc(t, data)
}
})
}
}
func readSecret(path string) ([]corev1.Secret, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var secrets []corev1.Secret
y := k8sYaml.NewYAMLReader(bufio.NewReader(f))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
var s corev1.Secret
err = yaml.Unmarshal(data, &s)
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
return secrets, nil
}
func testGeneratedSecretName(t *testing.T) {
const currentSecretFile = "./current-secret.yaml"
expectedSecretFiles := []string{
"./expected-secret-osaka0.yaml",
"./expected-secret-stage0.yaml",
"./expected-secret-tokyo0.yaml",
}
t.Parallel()
defer func() {
for _, f := range expectedSecretFiles {
os.Remove(f)
}
os.Remove(currentSecretFile)
}()
dummySecrets, err := readSecret(currentSecretFile)
if err != nil {
t.Fatal(err)
}
for _, f := range expectedSecretFiles {
expected, err := readSecret(f)
if err != nil {
t.Fatal(err)
}
OUTER:
for _, es := range expected {
var appeared bool
err = filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if info.IsDir() || !strings.HasSuffix(path, ".yaml") {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
// These lines test all secrets to be used.
// grafana-admin-credentials is skipped because it is used internally in Grafana Operator.
if es.Name == "grafana-admin-credentials" {
appeared = true
}
if strings.Contains(string(str), "secretName: "+es.Name) {
appeared = true
}
// These lines test secrets to be used as references, such like:
// - secretRef:
// name: <key>
strCondensed := strings.Join(strings.Fields(string(str)), "")
if strings.Contains(strCondensed, "secretRef:name:"+es.Name) {
appeared = true
}
return nil
})
if err != nil {
t.Fatal("failed to walk manifest directories")
}
if !appeared {
t.Error("secret:", es.Name, "was not found in any manifests")
}
for _, cs := range dummySecrets {
if cs.Name == es.Name && cs.Namespace == es.Namespace {
continue OUTER
}
}
t.Error("secret:", es.Namespace+"/"+es.Name, "was not found in dummy secrets")
}
}
}
// These struct types are copied from the following link:
// https://github.com/prometheus/prometheus/blob/master/pkg/rulefmt/rulefmt.go
type alertRuleGroups struct {
Groups []alertRuleGroup `json:"groups"`
}
type alertRuleGroup struct {
Name string `json:"name"`
Alerts []alertRule `json:"rules"`
}
type alertRule struct {
Record string `json:"record,omitempty"`
Alert string `json:"alert,omitempty"`
Expr string `json:"expr"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations"`
}
type recordRuleGroups struct {
Groups []recordRuleGroup `json:"groups"`
}
type recordRuleGroup struct {
Name string `json:"name"`
Records []recordRule `json:"rules"`
}
type recordRule struct {
Record string `json:"record,omitempty"`
}
func testAlertRules(t *testing.T) {
var groups alertRuleGroups
str, err := ioutil.ReadFile("../monitoring/base/alertmanager/neco.template")
if err != nil {
t.Fatal(err)
}
tmpl := template.Must(template.New("alert").Parse(string(str))).Option("missingkey=error")
err = filepath.Walk("../monitoring/base/prometheus/alert_rules", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
err = yaml.Unmarshal(str, &groups)
if err != nil {
return fmt.Errorf("failed to unmarshal %s, err: %v", path, err)
}
for _, g := range groups.Groups {
t.Run(g.Name, func(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
err := tmpl.ExecuteTemplate(&buf, "slack.neco.text", g)
if err != nil {
t.Error(err)
}
})
}
return nil
})
if err != nil {
t.Error(err)
}
}
func TestValidation(t *testing.T) {
if os.Getenv("SSH_PRIVKEY") != "" {
t.Skip("SSH_PRIVKEY envvar is defined as running e2e test")
}
t.Run("ApplicationTargetRevision", testApplicationResources)
t.Run("CRDStatus", testCRDStatus)
t.Run("CertificateUsages", testCertificateUsages)
t.Run("GeneratedSecretName", testGeneratedSecretName)
t.Run("AlertRules", testAlertRules)
}
| testApplicationResources | identifier_name |
validation_test.go | package test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"testing"
"text/template"
argocd "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8sYaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
)
const (
manifestDir = "../"
)
var (
excludeDirs = []string{
filepath.Join(manifestDir, "bin"),
filepath.Join(manifestDir, "docs"),
filepath.Join(manifestDir, "test"),
filepath.Join(manifestDir, "vendor"),
}
)
func isKustomizationFile(name string) bool {
if name == "kustomization.yaml" || name == "kustomization.yml" || name == "Kustomization" {
return true
}
return false
}
func kustomizeBuild(dir string) ([]byte, []byte, error) {
outBuf := new(bytes.Buffer)
errBuf := new(bytes.Buffer)
cmd := exec.Command("kustomize", "build", dir)
cmd.Stdout = outBuf
cmd.Stderr = errBuf
err := cmd.Run()
return outBuf.Bytes(), errBuf.Bytes(), err
}
func testApplicationResources(t *testing.T) {
targetRevisions := map[string]string{
"gcp": "release",
"neco-dev": "release",
"osaka0": "release",
"stage0": "stage",
"tokyo0": "release",
}
syncWaves := map[string]string{
"namespaces": "1",
"argocd": "2",
"local-pv-provisioner": "3",
"secrets": "3",
"cert-manager": "4",
"external-dns": "4",
"metallb": "4",
"ingress": "5",
"topolvm": "5",
"unbound": "5",
"elastic": "6",
"rook": "6",
"monitoring": "7",
"sandbox": "7",
"teleport": "7",
"network-policy": "8",
"argocd-ingress": "9",
"bmc-reverse-proxy": "9",
"metrics-server": "9",
"neco-admission": "9",
"team-management": "9",
"maneki-apps": "10",
}
// Getting overlays list
overlayDirs := map[string]string{}
err := filepath.Walk(filepath.Join(manifestDir, "argocd-config", "overlays"), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && info.Name() != "overlays" {
overlayDirs[info.Name()] = path
}
return nil
})
if err != nil {
t.Error(err)
}
t.Parallel()
for overlay, targetDir := range overlayDirs {
t.Run(overlay, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(targetDir)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", targetDir, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
var app argocd.Application
err = yaml.Unmarshal(data, &app)
if err != nil {
t.Error(err)
}
// Check the tergetRevision
if targetRevisions[overlay] == "" {
t.Error(fmt.Errorf("targetRevision should exist. overlay: %s", overlay))
}
if app.Spec.Source.TargetRevision != targetRevisions[overlay] {
t.Error(fmt.Errorf("invalid targetRevision. application: %s, targetRevision: %s (should be %s)", app.Name, app.Spec.Source.TargetRevision, targetRevisions[overlay]))
}
// Check the sync wave
if syncWaves[app.Name] == "" {
t.Error(fmt.Errorf("sync-wave should exist. application: %s", app.Name))
}
if app.GetAnnotations()["argocd.argoproj.io/sync-wave"] != syncWaves[app.Name] {
t.Error(fmt.Errorf("invalid sync-wave. application: %s, sync-wave: %s (should be %s)", app.Name, app.GetAnnotations()["argocd.argoproj.io/sync-wave"], syncWaves[app.Name]))
}
}
})
}
}
// Use to check the existence of the status field in manifest files for CRDs.
// `apiextensionsv1beta1.CustomResourceDefinition` cannot be used because the status field always exists in the struct.
type crdValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Status *apiextensionsv1beta1.CustomResourceDefinitionStatus `json:"status"`
}
func testCRDStatus(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var crd crdValidation
err := yaml.Unmarshal(data, &crd)
if err != nil {
// Skip because this YAML might not be custom resource definition
return
}
if crd.Kind != "CustomResourceDefinition" {
// Skip because this YAML is not custom resource definition
return
}
if crd.Status != nil {
t.Error(errors.New(".status(Status) exists in " + crd.Metadata.Name + ", remove it to prevent occurring OutOfSync by Argo CD"))
}
})
}
type certificateValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Spec struct {
IsCA bool `json:"isCA"`
Usages []string `json:"usages"`
} `json:"spec"`
}
func testCertificateUsages(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var cert certificateValidation
err := yaml.Unmarshal(data, &cert)
if err != nil {
// Skip because this YAML might not be certificate
return
}
if cert.Kind != "Certificate" {
// Skip because this YAML is not certificate
return
}
var expected []string
if cert.Spec.IsCA {
expected = []string{"digital signature", "key encipherment", "cert sign"}
} else {
expected = []string{"digital signature", "key encipherment", "server auth", "client auth"}
}
if !reflect.DeepEqual(cert.Spec.Usages, expected) {
t.Error(errors.New(".spec.usages has incorrect list in " + cert.Metadata.Name))
}
})
}
func doCheckKustomizedYaml(t *testing.T, checkFunc func(*testing.T, []byte)) {
targets := []string{}
err := filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if !isKustomizationFile(info.Name()) {
return nil
}
targets = append(targets, filepath.Dir(path))
// Skip other files in the directory
return filepath.SkipDir
})
if err != nil {
t.Error(err)
}
for _, path := range targets {
t.Run(path, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(path)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", path, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
checkFunc(t, data)
}
})
}
}
func readSecret(path string) ([]corev1.Secret, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var secrets []corev1.Secret
y := k8sYaml.NewYAMLReader(bufio.NewReader(f))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
var s corev1.Secret
err = yaml.Unmarshal(data, &s)
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
return secrets, nil
}
func testGeneratedSecretName(t *testing.T) {
const currentSecretFile = "./current-secret.yaml"
expectedSecretFiles := []string{
"./expected-secret-osaka0.yaml",
"./expected-secret-stage0.yaml",
"./expected-secret-tokyo0.yaml",
}
t.Parallel()
defer func() {
for _, f := range expectedSecretFiles {
os.Remove(f)
}
os.Remove(currentSecretFile)
}()
dummySecrets, err := readSecret(currentSecretFile)
if err != nil {
t.Fatal(err)
}
| expected, err := readSecret(f)
if err != nil {
t.Fatal(err)
}
OUTER:
for _, es := range expected {
var appeared bool
err = filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if info.IsDir() || !strings.HasSuffix(path, ".yaml") {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
// These lines test all secrets to be used.
// grafana-admin-credentials is skipped because it is used internally in Grafana Operator.
if es.Name == "grafana-admin-credentials" {
appeared = true
}
if strings.Contains(string(str), "secretName: "+es.Name) {
appeared = true
}
// These lines test secrets to be used as references, such like:
// - secretRef:
// name: <key>
strCondensed := strings.Join(strings.Fields(string(str)), "")
if strings.Contains(strCondensed, "secretRef:name:"+es.Name) {
appeared = true
}
return nil
})
if err != nil {
t.Fatal("failed to walk manifest directories")
}
if !appeared {
t.Error("secret:", es.Name, "was not found in any manifests")
}
for _, cs := range dummySecrets {
if cs.Name == es.Name && cs.Namespace == es.Namespace {
continue OUTER
}
}
t.Error("secret:", es.Namespace+"/"+es.Name, "was not found in dummy secrets")
}
}
}
// These struct types are copied from the following link:
// https://github.com/prometheus/prometheus/blob/master/pkg/rulefmt/rulefmt.go
type alertRuleGroups struct {
Groups []alertRuleGroup `json:"groups"`
}
type alertRuleGroup struct {
Name string `json:"name"`
Alerts []alertRule `json:"rules"`
}
type alertRule struct {
Record string `json:"record,omitempty"`
Alert string `json:"alert,omitempty"`
Expr string `json:"expr"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations"`
}
type recordRuleGroups struct {
Groups []recordRuleGroup `json:"groups"`
}
type recordRuleGroup struct {
Name string `json:"name"`
Records []recordRule `json:"rules"`
}
type recordRule struct {
Record string `json:"record,omitempty"`
}
func testAlertRules(t *testing.T) {
var groups alertRuleGroups
str, err := ioutil.ReadFile("../monitoring/base/alertmanager/neco.template")
if err != nil {
t.Fatal(err)
}
tmpl := template.Must(template.New("alert").Parse(string(str))).Option("missingkey=error")
err = filepath.Walk("../monitoring/base/prometheus/alert_rules", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
err = yaml.Unmarshal(str, &groups)
if err != nil {
return fmt.Errorf("failed to unmarshal %s, err: %v", path, err)
}
for _, g := range groups.Groups {
t.Run(g.Name, func(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
err := tmpl.ExecuteTemplate(&buf, "slack.neco.text", g)
if err != nil {
t.Error(err)
}
})
}
return nil
})
if err != nil {
t.Error(err)
}
}
func TestValidation(t *testing.T) {
if os.Getenv("SSH_PRIVKEY") != "" {
t.Skip("SSH_PRIVKEY envvar is defined as running e2e test")
}
t.Run("ApplicationTargetRevision", testApplicationResources)
t.Run("CRDStatus", testCRDStatus)
t.Run("CertificateUsages", testCertificateUsages)
t.Run("GeneratedSecretName", testGeneratedSecretName)
t.Run("AlertRules", testAlertRules)
} | for _, f := range expectedSecretFiles { | random_line_split |
validation_test.go | package test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"testing"
"text/template"
argocd "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8sYaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
)
const (
manifestDir = "../"
)
var (
excludeDirs = []string{
filepath.Join(manifestDir, "bin"),
filepath.Join(manifestDir, "docs"),
filepath.Join(manifestDir, "test"),
filepath.Join(manifestDir, "vendor"),
}
)
func isKustomizationFile(name string) bool {
if name == "kustomization.yaml" || name == "kustomization.yml" || name == "Kustomization" {
return true
}
return false
}
func kustomizeBuild(dir string) ([]byte, []byte, error) {
outBuf := new(bytes.Buffer)
errBuf := new(bytes.Buffer)
cmd := exec.Command("kustomize", "build", dir)
cmd.Stdout = outBuf
cmd.Stderr = errBuf
err := cmd.Run()
return outBuf.Bytes(), errBuf.Bytes(), err
}
func testApplicationResources(t *testing.T) {
targetRevisions := map[string]string{
"gcp": "release",
"neco-dev": "release",
"osaka0": "release",
"stage0": "stage",
"tokyo0": "release",
}
syncWaves := map[string]string{
"namespaces": "1",
"argocd": "2",
"local-pv-provisioner": "3",
"secrets": "3",
"cert-manager": "4",
"external-dns": "4",
"metallb": "4",
"ingress": "5",
"topolvm": "5",
"unbound": "5",
"elastic": "6",
"rook": "6",
"monitoring": "7",
"sandbox": "7",
"teleport": "7",
"network-policy": "8",
"argocd-ingress": "9",
"bmc-reverse-proxy": "9",
"metrics-server": "9",
"neco-admission": "9",
"team-management": "9",
"maneki-apps": "10",
}
// Getting overlays list
overlayDirs := map[string]string{}
err := filepath.Walk(filepath.Join(manifestDir, "argocd-config", "overlays"), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && info.Name() != "overlays" |
return nil
})
if err != nil {
t.Error(err)
}
t.Parallel()
for overlay, targetDir := range overlayDirs {
t.Run(overlay, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(targetDir)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", targetDir, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
var app argocd.Application
err = yaml.Unmarshal(data, &app)
if err != nil {
t.Error(err)
}
// Check the tergetRevision
if targetRevisions[overlay] == "" {
t.Error(fmt.Errorf("targetRevision should exist. overlay: %s", overlay))
}
if app.Spec.Source.TargetRevision != targetRevisions[overlay] {
t.Error(fmt.Errorf("invalid targetRevision. application: %s, targetRevision: %s (should be %s)", app.Name, app.Spec.Source.TargetRevision, targetRevisions[overlay]))
}
// Check the sync wave
if syncWaves[app.Name] == "" {
t.Error(fmt.Errorf("sync-wave should exist. application: %s", app.Name))
}
if app.GetAnnotations()["argocd.argoproj.io/sync-wave"] != syncWaves[app.Name] {
t.Error(fmt.Errorf("invalid sync-wave. application: %s, sync-wave: %s (should be %s)", app.Name, app.GetAnnotations()["argocd.argoproj.io/sync-wave"], syncWaves[app.Name]))
}
}
})
}
}
// Use to check the existence of the status field in manifest files for CRDs.
// `apiextensionsv1beta1.CustomResourceDefinition` cannot be used because the status field always exists in the struct.
type crdValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Status *apiextensionsv1beta1.CustomResourceDefinitionStatus `json:"status"`
}
func testCRDStatus(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var crd crdValidation
err := yaml.Unmarshal(data, &crd)
if err != nil {
// Skip because this YAML might not be custom resource definition
return
}
if crd.Kind != "CustomResourceDefinition" {
// Skip because this YAML is not custom resource definition
return
}
if crd.Status != nil {
t.Error(errors.New(".status(Status) exists in " + crd.Metadata.Name + ", remove it to prevent occurring OutOfSync by Argo CD"))
}
})
}
type certificateValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Spec struct {
IsCA bool `json:"isCA"`
Usages []string `json:"usages"`
} `json:"spec"`
}
func testCertificateUsages(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var cert certificateValidation
err := yaml.Unmarshal(data, &cert)
if err != nil {
// Skip because this YAML might not be certificate
return
}
if cert.Kind != "Certificate" {
// Skip because this YAML is not certificate
return
}
var expected []string
if cert.Spec.IsCA {
expected = []string{"digital signature", "key encipherment", "cert sign"}
} else {
expected = []string{"digital signature", "key encipherment", "server auth", "client auth"}
}
if !reflect.DeepEqual(cert.Spec.Usages, expected) {
t.Error(errors.New(".spec.usages has incorrect list in " + cert.Metadata.Name))
}
})
}
func doCheckKustomizedYaml(t *testing.T, checkFunc func(*testing.T, []byte)) {
targets := []string{}
err := filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if !isKustomizationFile(info.Name()) {
return nil
}
targets = append(targets, filepath.Dir(path))
// Skip other files in the directory
return filepath.SkipDir
})
if err != nil {
t.Error(err)
}
for _, path := range targets {
t.Run(path, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(path)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", path, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
checkFunc(t, data)
}
})
}
}
func readSecret(path string) ([]corev1.Secret, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var secrets []corev1.Secret
y := k8sYaml.NewYAMLReader(bufio.NewReader(f))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
var s corev1.Secret
err = yaml.Unmarshal(data, &s)
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
return secrets, nil
}
func testGeneratedSecretName(t *testing.T) {
const currentSecretFile = "./current-secret.yaml"
expectedSecretFiles := []string{
"./expected-secret-osaka0.yaml",
"./expected-secret-stage0.yaml",
"./expected-secret-tokyo0.yaml",
}
t.Parallel()
defer func() {
for _, f := range expectedSecretFiles {
os.Remove(f)
}
os.Remove(currentSecretFile)
}()
dummySecrets, err := readSecret(currentSecretFile)
if err != nil {
t.Fatal(err)
}
for _, f := range expectedSecretFiles {
expected, err := readSecret(f)
if err != nil {
t.Fatal(err)
}
OUTER:
for _, es := range expected {
var appeared bool
err = filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if info.IsDir() || !strings.HasSuffix(path, ".yaml") {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
// These lines test all secrets to be used.
// grafana-admin-credentials is skipped because it is used internally in Grafana Operator.
if es.Name == "grafana-admin-credentials" {
appeared = true
}
if strings.Contains(string(str), "secretName: "+es.Name) {
appeared = true
}
// These lines test secrets to be used as references, such like:
// - secretRef:
// name: <key>
strCondensed := strings.Join(strings.Fields(string(str)), "")
if strings.Contains(strCondensed, "secretRef:name:"+es.Name) {
appeared = true
}
return nil
})
if err != nil {
t.Fatal("failed to walk manifest directories")
}
if !appeared {
t.Error("secret:", es.Name, "was not found in any manifests")
}
for _, cs := range dummySecrets {
if cs.Name == es.Name && cs.Namespace == es.Namespace {
continue OUTER
}
}
t.Error("secret:", es.Namespace+"/"+es.Name, "was not found in dummy secrets")
}
}
}
// These struct types are copied from the following link:
// https://github.com/prometheus/prometheus/blob/master/pkg/rulefmt/rulefmt.go
type alertRuleGroups struct {
Groups []alertRuleGroup `json:"groups"`
}
type alertRuleGroup struct {
Name string `json:"name"`
Alerts []alertRule `json:"rules"`
}
type alertRule struct {
Record string `json:"record,omitempty"`
Alert string `json:"alert,omitempty"`
Expr string `json:"expr"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations"`
}
type recordRuleGroups struct {
Groups []recordRuleGroup `json:"groups"`
}
type recordRuleGroup struct {
Name string `json:"name"`
Records []recordRule `json:"rules"`
}
type recordRule struct {
Record string `json:"record,omitempty"`
}
func testAlertRules(t *testing.T) {
var groups alertRuleGroups
str, err := ioutil.ReadFile("../monitoring/base/alertmanager/neco.template")
if err != nil {
t.Fatal(err)
}
tmpl := template.Must(template.New("alert").Parse(string(str))).Option("missingkey=error")
err = filepath.Walk("../monitoring/base/prometheus/alert_rules", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
err = yaml.Unmarshal(str, &groups)
if err != nil {
return fmt.Errorf("failed to unmarshal %s, err: %v", path, err)
}
for _, g := range groups.Groups {
t.Run(g.Name, func(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
err := tmpl.ExecuteTemplate(&buf, "slack.neco.text", g)
if err != nil {
t.Error(err)
}
})
}
return nil
})
if err != nil {
t.Error(err)
}
}
func TestValidation(t *testing.T) {
if os.Getenv("SSH_PRIVKEY") != "" {
t.Skip("SSH_PRIVKEY envvar is defined as running e2e test")
}
t.Run("ApplicationTargetRevision", testApplicationResources)
t.Run("CRDStatus", testCRDStatus)
t.Run("CertificateUsages", testCertificateUsages)
t.Run("GeneratedSecretName", testGeneratedSecretName)
t.Run("AlertRules", testAlertRules)
}
| {
overlayDirs[info.Name()] = path
} | conditional_block |
validation_test.go | package test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"testing"
"text/template"
argocd "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8sYaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
)
const (
manifestDir = "../"
)
var (
excludeDirs = []string{
filepath.Join(manifestDir, "bin"),
filepath.Join(manifestDir, "docs"),
filepath.Join(manifestDir, "test"),
filepath.Join(manifestDir, "vendor"),
}
)
func isKustomizationFile(name string) bool {
if name == "kustomization.yaml" || name == "kustomization.yml" || name == "Kustomization" {
return true
}
return false
}
func kustomizeBuild(dir string) ([]byte, []byte, error) {
outBuf := new(bytes.Buffer)
errBuf := new(bytes.Buffer)
cmd := exec.Command("kustomize", "build", dir)
cmd.Stdout = outBuf
cmd.Stderr = errBuf
err := cmd.Run()
return outBuf.Bytes(), errBuf.Bytes(), err
}
func testApplicationResources(t *testing.T) {
targetRevisions := map[string]string{
"gcp": "release",
"neco-dev": "release",
"osaka0": "release",
"stage0": "stage",
"tokyo0": "release",
}
syncWaves := map[string]string{
"namespaces": "1",
"argocd": "2",
"local-pv-provisioner": "3",
"secrets": "3",
"cert-manager": "4",
"external-dns": "4",
"metallb": "4",
"ingress": "5",
"topolvm": "5",
"unbound": "5",
"elastic": "6",
"rook": "6",
"monitoring": "7",
"sandbox": "7",
"teleport": "7",
"network-policy": "8",
"argocd-ingress": "9",
"bmc-reverse-proxy": "9",
"metrics-server": "9",
"neco-admission": "9",
"team-management": "9",
"maneki-apps": "10",
}
// Getting overlays list
overlayDirs := map[string]string{}
err := filepath.Walk(filepath.Join(manifestDir, "argocd-config", "overlays"), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && info.Name() != "overlays" {
overlayDirs[info.Name()] = path
}
return nil
})
if err != nil {
t.Error(err)
}
t.Parallel()
for overlay, targetDir := range overlayDirs {
t.Run(overlay, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(targetDir)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", targetDir, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
var app argocd.Application
err = yaml.Unmarshal(data, &app)
if err != nil {
t.Error(err)
}
// Check the tergetRevision
if targetRevisions[overlay] == "" {
t.Error(fmt.Errorf("targetRevision should exist. overlay: %s", overlay))
}
if app.Spec.Source.TargetRevision != targetRevisions[overlay] {
t.Error(fmt.Errorf("invalid targetRevision. application: %s, targetRevision: %s (should be %s)", app.Name, app.Spec.Source.TargetRevision, targetRevisions[overlay]))
}
// Check the sync wave
if syncWaves[app.Name] == "" {
t.Error(fmt.Errorf("sync-wave should exist. application: %s", app.Name))
}
if app.GetAnnotations()["argocd.argoproj.io/sync-wave"] != syncWaves[app.Name] {
t.Error(fmt.Errorf("invalid sync-wave. application: %s, sync-wave: %s (should be %s)", app.Name, app.GetAnnotations()["argocd.argoproj.io/sync-wave"], syncWaves[app.Name]))
}
}
})
}
}
// Use to check the existence of the status field in manifest files for CRDs.
// `apiextensionsv1beta1.CustomResourceDefinition` cannot be used because the status field always exists in the struct.
type crdValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Status *apiextensionsv1beta1.CustomResourceDefinitionStatus `json:"status"`
}
func testCRDStatus(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var crd crdValidation
err := yaml.Unmarshal(data, &crd)
if err != nil {
// Skip because this YAML might not be custom resource definition
return
}
if crd.Kind != "CustomResourceDefinition" {
// Skip because this YAML is not custom resource definition
return
}
if crd.Status != nil {
t.Error(errors.New(".status(Status) exists in " + crd.Metadata.Name + ", remove it to prevent occurring OutOfSync by Argo CD"))
}
})
}
type certificateValidation struct {
Kind string `json:"kind"`
Metadata struct {
Name string `json:"name"`
} `json:"metadata"`
Spec struct {
IsCA bool `json:"isCA"`
Usages []string `json:"usages"`
} `json:"spec"`
}
func testCertificateUsages(t *testing.T) {
t.Parallel()
doCheckKustomizedYaml(t, func(t *testing.T, data []byte) {
var cert certificateValidation
err := yaml.Unmarshal(data, &cert)
if err != nil {
// Skip because this YAML might not be certificate
return
}
if cert.Kind != "Certificate" {
// Skip because this YAML is not certificate
return
}
var expected []string
if cert.Spec.IsCA {
expected = []string{"digital signature", "key encipherment", "cert sign"}
} else {
expected = []string{"digital signature", "key encipherment", "server auth", "client auth"}
}
if !reflect.DeepEqual(cert.Spec.Usages, expected) {
t.Error(errors.New(".spec.usages has incorrect list in " + cert.Metadata.Name))
}
})
}
func doCheckKustomizedYaml(t *testing.T, checkFunc func(*testing.T, []byte)) {
targets := []string{}
err := filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if !isKustomizationFile(info.Name()) {
return nil
}
targets = append(targets, filepath.Dir(path))
// Skip other files in the directory
return filepath.SkipDir
})
if err != nil {
t.Error(err)
}
for _, path := range targets {
t.Run(path, func(t *testing.T) {
stdout, stderr, err := kustomizeBuild(path)
if err != nil {
t.Error(fmt.Errorf("kustomize build faled. path: %s, stderr: %s, err: %v", path, stderr, err))
}
y := k8sYaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(stdout)))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
t.Error(err)
}
checkFunc(t, data)
}
})
}
}
func readSecret(path string) ([]corev1.Secret, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var secrets []corev1.Secret
y := k8sYaml.NewYAMLReader(bufio.NewReader(f))
for {
data, err := y.Read()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
var s corev1.Secret
err = yaml.Unmarshal(data, &s)
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
return secrets, nil
}
func testGeneratedSecretName(t *testing.T) {
const currentSecretFile = "./current-secret.yaml"
expectedSecretFiles := []string{
"./expected-secret-osaka0.yaml",
"./expected-secret-stage0.yaml",
"./expected-secret-tokyo0.yaml",
}
t.Parallel()
defer func() {
for _, f := range expectedSecretFiles {
os.Remove(f)
}
os.Remove(currentSecretFile)
}()
dummySecrets, err := readSecret(currentSecretFile)
if err != nil {
t.Fatal(err)
}
for _, f := range expectedSecretFiles {
expected, err := readSecret(f)
if err != nil {
t.Fatal(err)
}
OUTER:
for _, es := range expected {
var appeared bool
err = filepath.Walk(manifestDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
for _, exDir := range excludeDirs {
if strings.HasPrefix(path, exDir) {
// Skip files in the directory
return filepath.SkipDir
}
}
if info.IsDir() || !strings.HasSuffix(path, ".yaml") {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
// These lines test all secrets to be used.
// grafana-admin-credentials is skipped because it is used internally in Grafana Operator.
if es.Name == "grafana-admin-credentials" {
appeared = true
}
if strings.Contains(string(str), "secretName: "+es.Name) {
appeared = true
}
// These lines test secrets to be used as references, such like:
// - secretRef:
// name: <key>
strCondensed := strings.Join(strings.Fields(string(str)), "")
if strings.Contains(strCondensed, "secretRef:name:"+es.Name) {
appeared = true
}
return nil
})
if err != nil {
t.Fatal("failed to walk manifest directories")
}
if !appeared {
t.Error("secret:", es.Name, "was not found in any manifests")
}
for _, cs := range dummySecrets {
if cs.Name == es.Name && cs.Namespace == es.Namespace {
continue OUTER
}
}
t.Error("secret:", es.Namespace+"/"+es.Name, "was not found in dummy secrets")
}
}
}
// These struct types are copied from the following link:
// https://github.com/prometheus/prometheus/blob/master/pkg/rulefmt/rulefmt.go
type alertRuleGroups struct {
Groups []alertRuleGroup `json:"groups"`
}
type alertRuleGroup struct {
Name string `json:"name"`
Alerts []alertRule `json:"rules"`
}
type alertRule struct {
Record string `json:"record,omitempty"`
Alert string `json:"alert,omitempty"`
Expr string `json:"expr"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations"`
}
type recordRuleGroups struct {
Groups []recordRuleGroup `json:"groups"`
}
type recordRuleGroup struct {
Name string `json:"name"`
Records []recordRule `json:"rules"`
}
type recordRule struct {
Record string `json:"record,omitempty"`
}
func testAlertRules(t *testing.T) {
var groups alertRuleGroups
str, err := ioutil.ReadFile("../monitoring/base/alertmanager/neco.template")
if err != nil {
t.Fatal(err)
}
tmpl := template.Must(template.New("alert").Parse(string(str))).Option("missingkey=error")
err = filepath.Walk("../monitoring/base/prometheus/alert_rules", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
str, err := ioutil.ReadFile(path)
if err != nil {
return err
}
err = yaml.Unmarshal(str, &groups)
if err != nil {
return fmt.Errorf("failed to unmarshal %s, err: %v", path, err)
}
for _, g := range groups.Groups {
t.Run(g.Name, func(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
err := tmpl.ExecuteTemplate(&buf, "slack.neco.text", g)
if err != nil {
t.Error(err)
}
})
}
return nil
})
if err != nil {
t.Error(err)
}
}
func TestValidation(t *testing.T) | {
if os.Getenv("SSH_PRIVKEY") != "" {
t.Skip("SSH_PRIVKEY envvar is defined as running e2e test")
}
t.Run("ApplicationTargetRevision", testApplicationResources)
t.Run("CRDStatus", testCRDStatus)
t.Run("CertificateUsages", testCertificateUsages)
t.Run("GeneratedSecretName", testGeneratedSecretName)
t.Run("AlertRules", testAlertRules)
} | identifier_body | |
messages.rs | //! Structures for some of the messages used in the Marionette protocol, these can
//! be used with the traits in serde to convert into the corresponding json.
//!
#![allow(non_snake_case)]
use std::fmt;
use std::path::Path;
use std::collections::HashMap;
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::ser::SerializeStruct;
use serde_json::{Value, to_value};
use serde::de::{Visitor, MapAccess};
use serde::de::Error as DeError;
use super::MarionetteError;
#[derive(Deserialize, Debug)]
pub struct ServerInfo {
pub marionetteProtocol: u64,
}
#[derive(Deserialize, Debug)]
pub struct ErrorObject {
pub error: String,
pub message: String,
pub stacktrace: String,
}
pub enum Capability {
PageLoadStrategy(String),
}
#[derive(Serialize, Debug)]
pub struct CapabilityRequest {
requiredCapabilities: HashMap<String, Value>,
}
#[derive(Deserialize, Debug)]
pub struct Capabilities {
pub timeouts: Option<Timeouts>,
}
#[derive(Serialize, Debug)]
pub struct NewSessionRequest {
capabilities: CapabilityRequest,
}
impl NewSessionRequest {
pub fn new() -> Self {
NewSessionRequest {
capabilities: CapabilityRequest {
requiredCapabilities: HashMap::new(),
}
}
}
pub fn required(&mut self, cap: Capability) {
match cap {
Capability::PageLoadStrategy(s) =>
self.capabilities.requiredCapabilities.insert("pageLoadStrategy".to_string(), Value::String(s)),
};
}
}
#[derive(Deserialize, Debug)]
pub struct NewSessionResponse {
pub sessionId: String,
pub capabilities: Capabilities,
}
#[derive(Deserialize, Debug, Serialize)]
pub struct Empty {}
/// Sets various timeout parameters (in ms)
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub struct Timeouts {
/// when to interrupt a script that is being evaluated
pub script: u64,
/// the timeout limit used to interrupt navigation of the browsing context
pub pageLoad: u64,
/// the timeout of when to abort when locating an element
pub implicit: u64,
}
/// Some responses use a type wrapped in a json object
/// with the value attribute
#[derive(Deserialize, Serialize, Debug)]
pub struct ResponseValue<T> {
pub value: T,
}
#[derive(Serialize, Debug)]
pub struct GetCommand {
pub url: String,
}
impl GetCommand {
pub fn from(url: &str) -> Self {
Self { url: url.to_owned() }
}
}
/// A log message to push to the marionette server. The message
/// includes an arbitrary level (INFO, DEBUG, etc).
#[derive(Serialize, Debug)]
pub struct LogMsg {
value: String,
level: String,
}
impl LogMsg {
pub fn new(msg: &str, lvl: &str) -> Self {
LogMsg {
value: msg.to_owned(),
level: lvl.to_owned(),
}
}
}
/// A log entry as returned by the getLogs command. This includes a message,
/// an arbitrary log level and a date.
#[derive(Deserialize, Debug)]
pub struct LogEntry(String, String, String);
impl LogEntry {
pub fn level(&self) -> &str { &self.0 }
pub fn msg(&self) -> &str { &self.1 }
}
/// An opaque handle to a window
///
/// This is deserialized from a regular string. But serialization creates
/// an object `{'name': 'handle'}`.
#[derive(Deserialize, Debug, PartialEq)]
pub struct WindowHandle(String);
impl WindowHandle {
pub fn from_str(handle: &str) -> Self {
WindowHandle(handle.to_owned())
}
}
impl fmt::Display for WindowHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl Serialize for WindowHandle {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
let mut ss = s.serialize_struct("WindowHandle", 1)?;
ss.serialize_field("name", &self.0)?;
// Starting with firefox 81, name is ignored and
// handle is used instead
ss.serialize_field("handle", &self.0)?;
ss.end()
}
}
/// The execution context
pub type ContextValue = ResponseValue<String>;
#[derive(Serialize, Debug)]
pub struct Script {
script: String,
sandbox: String,
args: Value,
scriptTimeout: Option<u64>,
}
impl Script {
pub fn new(src: &str) -> Self {
Script {
script: src.to_owned(),
sandbox: "default".to_owned(),
// execute_script accepts null here, but execute_async_script does not
// default to an empty array
args: Value::Array(Vec::new()),
scriptTimeout: None,
}
}
/// Set arguments for this script. This is usually an array that
/// is used as the `arguments` variable.
pub fn arguments<S: Serialize>(&mut self, args: S) -> Result<(), MarionetteError>{
self.args = to_value(args)?;
Ok(())
}
/// Execute the script in a named sandbox
pub fn sandbox(&mut self, name: &str) {
self.sandbox = name.to_owned()
}
/// Set execution timeout for script (ms)
///
/// This value overrides the global scriptTimeout.
///
/// This option was removed from firefox in Jan/2019, see
///
/// 9ed472d43600ca6ba1ced8a563dbaa4abdef5eaa
///
/// https://bugzilla.mozilla.org/show_bug.cgi?id=1510929
/// https://phabricator.services.mozilla.com/D15584
///
#[deprecated = "Unsupported since Jan/2009 see bug 1510929"]
pub fn timeout(&mut self, timeout_ms: u64) {
self.scriptTimeout = Some(timeout_ms)
}
}
#[derive(Debug)]
pub enum QueryMethod {
Id,
Name,
ClassName,
TagName,
CssSelector,
LinkText,
PartialLinkText,
XPath,
}
impl Serialize for QueryMethod {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
match self {
&QueryMethod::Id => s.serialize_str("id"),
&QueryMethod::Name => s.serialize_str("name"),
&QueryMethod::ClassName => s.serialize_str("class name"),
&QueryMethod::TagName => s.serialize_str("tag name"),
&QueryMethod::CssSelector => s.serialize_str("css selector"),
&QueryMethod::LinkText => s.serialize_str("link text"),
&QueryMethod::PartialLinkText => s.serialize_str("partial link text"),
&QueryMethod::XPath => s.serialize_str("xpath"),
}
}
}
#[derive(Serialize, Debug)]
pub struct FindElementQuery {
/// A query
pub value: String,
/// The method use to perform the query
pub using: QueryMethod,
// In recent versions of firefox (60) this field must not
// be set to null, skip it instead
#[serde(skip_serializing_if = "Option::is_none")]
pub element: Option<String>,
}
#[derive(PartialEq, Debug, Clone)]
pub struct ElementRef {
pub reference: String,
}
impl ElementRef {
pub fn from_str(handle: &str) -> ElementRef {
ElementRef { reference: handle.to_string() }
}
}
impl Serialize for ElementRef {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
let mut ss = s.serialize_struct("ElementRef", 2)?;
ss.serialize_field("ELEMENT", &self.reference)?;
ss.serialize_field("element-6066-11e4-a52e-4f735466cecf", &self.reference)?;
ss.end()
}
}
impl<'a> Deserialize<'a> for ElementRef {
fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Self, D::Error> {
enum Field { Reference, Ignored }
impl<'b> Deserialize<'b> for Field {
fn deserialize<D: Deserializer<'b>>(d: D) -> Result<Self, D::Error> {
struct FieldVisitor;
impl<'c> Visitor<'c> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("element-6066-11e4-a52e-4f735466cecf")
}
fn visit_str<E: DeError>(self, value: &str) -> Result<Field, E>
{
match value {
"element-6066-11e4-a52e-4f735466cecf" => Ok(Field::Reference),
// Ignore all other fields
_ => Ok(Field::Ignored),
}
}
}
d.deserialize_identifier(FieldVisitor)
}
}
struct ElementRefVisitor;
impl<'d> Visitor<'d> for ElementRefVisitor {
type Value = ElementRef;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct ElementRef")
}
fn visit_map<V>(self, mut visitor: V) -> Result<ElementRef, V::Error>
where V: MapAccess<'d>
{
let mut reference = None;
while let Some(key) = visitor.next_key()? {
match key {
Field::Reference => {
if reference.is_some() {
return Err(DeError::duplicate_field("element-6066-11e4-a52e-4f735466cecf"));
}
reference = Some(visitor.next_value()?);
}
Field::Ignored => (),
}
}
match reference {
Some(r) => Ok(ElementRef { reference: r }),
None => return Err(DeError::missing_field("element-6066-11e4-a52e-4f735466cecf")),
}
}
}
const FIELDS: &'static [&'static str] = &["element-6066-11e4-a52e-4f735466cecf"];
d.deserialize_struct("ElementRef", FIELDS, ElementRefVisitor)
}
}
/// Element operations are use a named id to select the Element
/// and other attributes to specify the operation.
#[derive(Serialize, Debug)]
pub struct ElementOp {
/// The element identifier
pub id: String,
/// The name of the attribute/property
pub name: Option<String>,
}
/// A `switchToFrame` request
#[derive(Serialize, Debug)]
pub struct | {
focus: bool,
element: Option<String>,
}
impl FrameSwitch {
/// Switch to the top level frame
pub fn top(focus: bool) -> Self {
FrameSwitch {
focus: focus,
element: None,
}
}
/// Switch to the frame given by passed element
pub fn from_element(focus: bool, element: Option<ElementRef>) -> Self {
FrameSwitch {
focus: focus,
element: element.map(|elem| elem.reference.to_owned()),
}
}
}
#[derive(Serialize, Debug)]
pub struct AddonInstall<'a> {
pub path: &'a Path,
}
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Cookie {
pub name: String,
pub value: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub domain: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub secure: Option<bool>,
}
#[derive(Serialize, Debug, PartialEq)]
pub struct AddCookie<'a> {
pub cookie: &'a Cookie,
}
| FrameSwitch | identifier_name |
messages.rs | //! Structures for some of the messages used in the Marionette protocol, these can
//! be used with the traits in serde to convert into the corresponding json.
//!
#![allow(non_snake_case)]
use std::fmt;
use std::path::Path;
use std::collections::HashMap;
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::ser::SerializeStruct;
use serde_json::{Value, to_value};
use serde::de::{Visitor, MapAccess};
use serde::de::Error as DeError;
use super::MarionetteError;
#[derive(Deserialize, Debug)]
pub struct ServerInfo {
pub marionetteProtocol: u64,
}
#[derive(Deserialize, Debug)]
pub struct ErrorObject {
pub error: String,
pub message: String,
pub stacktrace: String,
}
pub enum Capability {
PageLoadStrategy(String),
}
#[derive(Serialize, Debug)]
pub struct CapabilityRequest {
requiredCapabilities: HashMap<String, Value>,
}
#[derive(Deserialize, Debug)]
pub struct Capabilities {
pub timeouts: Option<Timeouts>,
}
#[derive(Serialize, Debug)]
pub struct NewSessionRequest {
capabilities: CapabilityRequest,
}
impl NewSessionRequest {
pub fn new() -> Self {
NewSessionRequest {
capabilities: CapabilityRequest {
requiredCapabilities: HashMap::new(),
}
}
}
pub fn required(&mut self, cap: Capability) {
match cap {
Capability::PageLoadStrategy(s) =>
self.capabilities.requiredCapabilities.insert("pageLoadStrategy".to_string(), Value::String(s)),
};
}
}
#[derive(Deserialize, Debug)]
pub struct NewSessionResponse {
pub sessionId: String,
pub capabilities: Capabilities,
}
#[derive(Deserialize, Debug, Serialize)]
pub struct Empty {}
/// Sets various timeout parameters (in ms)
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub struct Timeouts {
/// when to interrupt a script that is being evaluated
pub script: u64,
/// the timeout limit used to interrupt navigation of the browsing context
pub pageLoad: u64,
/// the timeout of when to abort when locating an element
pub implicit: u64,
}
/// Some responses use a type wrapped in a json object
/// with the value attribute
#[derive(Deserialize, Serialize, Debug)]
pub struct ResponseValue<T> {
pub value: T,
}
#[derive(Serialize, Debug)]
pub struct GetCommand {
pub url: String,
}
impl GetCommand {
pub fn from(url: &str) -> Self {
Self { url: url.to_owned() }
}
}
/// A log message to push to the marionette server. The message
/// includes an arbitrary level (INFO, DEBUG, etc).
#[derive(Serialize, Debug)]
pub struct LogMsg {
value: String,
level: String,
}
impl LogMsg {
pub fn new(msg: &str, lvl: &str) -> Self {
LogMsg {
value: msg.to_owned(),
level: lvl.to_owned(),
}
}
}
/// A log entry as returned by the getLogs command. This includes a message,
/// an arbitrary log level and a date.
#[derive(Deserialize, Debug)]
pub struct LogEntry(String, String, String);
impl LogEntry {
pub fn level(&self) -> &str { &self.0 }
pub fn msg(&self) -> &str { &self.1 }
}
/// An opaque handle to a window
///
/// This is deserialized from a regular string. But serialization creates
/// an object `{'name': 'handle'}`.
#[derive(Deserialize, Debug, PartialEq)]
pub struct WindowHandle(String);
impl WindowHandle {
pub fn from_str(handle: &str) -> Self {
WindowHandle(handle.to_owned())
}
}
impl fmt::Display for WindowHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl Serialize for WindowHandle {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
let mut ss = s.serialize_struct("WindowHandle", 1)?;
ss.serialize_field("name", &self.0)?;
// Starting with firefox 81, name is ignored and
// handle is used instead
ss.serialize_field("handle", &self.0)?;
ss.end()
}
}
/// The execution context
pub type ContextValue = ResponseValue<String>;
#[derive(Serialize, Debug)]
pub struct Script {
script: String,
sandbox: String,
args: Value,
scriptTimeout: Option<u64>,
}
impl Script {
pub fn new(src: &str) -> Self {
Script {
script: src.to_owned(),
sandbox: "default".to_owned(),
// execute_script accepts null here, but execute_async_script does not
// default to an empty array
args: Value::Array(Vec::new()),
scriptTimeout: None,
}
}
/// Set arguments for this script. This is usually an array that
/// is used as the `arguments` variable.
pub fn arguments<S: Serialize>(&mut self, args: S) -> Result<(), MarionetteError>{
self.args = to_value(args)?;
Ok(())
}
/// Execute the script in a named sandbox
pub fn sandbox(&mut self, name: &str) {
self.sandbox = name.to_owned()
}
/// Set execution timeout for script (ms)
///
/// This value overrides the global scriptTimeout.
///
/// This option was removed from firefox in Jan/2019, see
///
/// 9ed472d43600ca6ba1ced8a563dbaa4abdef5eaa
///
/// https://bugzilla.mozilla.org/show_bug.cgi?id=1510929
/// https://phabricator.services.mozilla.com/D15584
///
#[deprecated = "Unsupported since Jan/2009 see bug 1510929"]
pub fn timeout(&mut self, timeout_ms: u64) {
self.scriptTimeout = Some(timeout_ms)
}
}
#[derive(Debug)]
pub enum QueryMethod {
Id,
Name,
ClassName,
TagName,
CssSelector,
LinkText,
PartialLinkText,
XPath,
}
impl Serialize for QueryMethod {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
match self {
&QueryMethod::Id => s.serialize_str("id"),
&QueryMethod::Name => s.serialize_str("name"),
&QueryMethod::ClassName => s.serialize_str("class name"),
&QueryMethod::TagName => s.serialize_str("tag name"),
&QueryMethod::CssSelector => s.serialize_str("css selector"),
&QueryMethod::LinkText => s.serialize_str("link text"),
&QueryMethod::PartialLinkText => s.serialize_str("partial link text"),
&QueryMethod::XPath => s.serialize_str("xpath"),
}
}
}
#[derive(Serialize, Debug)]
pub struct FindElementQuery {
/// A query
pub value: String,
/// The method use to perform the query
pub using: QueryMethod,
// In recent versions of firefox (60) this field must not
// be set to null, skip it instead
#[serde(skip_serializing_if = "Option::is_none")]
pub element: Option<String>,
}
#[derive(PartialEq, Debug, Clone)]
pub struct ElementRef {
pub reference: String,
}
impl ElementRef {
pub fn from_str(handle: &str) -> ElementRef {
ElementRef { reference: handle.to_string() }
}
}
impl Serialize for ElementRef {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
let mut ss = s.serialize_struct("ElementRef", 2)?;
ss.serialize_field("ELEMENT", &self.reference)?;
ss.serialize_field("element-6066-11e4-a52e-4f735466cecf", &self.reference)?;
ss.end()
}
}
impl<'a> Deserialize<'a> for ElementRef {
fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Self, D::Error> {
enum Field { Reference, Ignored }
impl<'b> Deserialize<'b> for Field {
fn deserialize<D: Deserializer<'b>>(d: D) -> Result<Self, D::Error> {
struct FieldVisitor;
impl<'c> Visitor<'c> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("element-6066-11e4-a52e-4f735466cecf")
}
fn visit_str<E: DeError>(self, value: &str) -> Result<Field, E>
{
match value {
"element-6066-11e4-a52e-4f735466cecf" => Ok(Field::Reference),
// Ignore all other fields
_ => Ok(Field::Ignored),
}
}
}
d.deserialize_identifier(FieldVisitor)
}
}
struct ElementRefVisitor;
impl<'d> Visitor<'d> for ElementRefVisitor {
type Value = ElementRef;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct ElementRef")
}
fn visit_map<V>(self, mut visitor: V) -> Result<ElementRef, V::Error>
where V: MapAccess<'d>
{
let mut reference = None;
while let Some(key) = visitor.next_key()? {
match key {
Field::Reference => {
if reference.is_some() {
return Err(DeError::duplicate_field("element-6066-11e4-a52e-4f735466cecf"));
}
reference = Some(visitor.next_value()?);
}
Field::Ignored => (),
}
}
match reference {
Some(r) => Ok(ElementRef { reference: r }),
None => return Err(DeError::missing_field("element-6066-11e4-a52e-4f735466cecf")),
}
}
}
const FIELDS: &'static [&'static str] = &["element-6066-11e4-a52e-4f735466cecf"];
d.deserialize_struct("ElementRef", FIELDS, ElementRefVisitor)
}
}
/// Element operations are use a named id to select the Element
/// and other attributes to specify the operation.
#[derive(Serialize, Debug)]
pub struct ElementOp {
/// The element identifier
pub id: String,
/// The name of the attribute/property
pub name: Option<String>,
}
/// A `switchToFrame` request
#[derive(Serialize, Debug)]
pub struct FrameSwitch {
focus: bool,
element: Option<String>,
}
impl FrameSwitch {
/// Switch to the top level frame
pub fn top(focus: bool) -> Self {
FrameSwitch {
focus: focus,
element: None,
}
} | /// Switch to the frame given by passed element
pub fn from_element(focus: bool, element: Option<ElementRef>) -> Self {
FrameSwitch {
focus: focus,
element: element.map(|elem| elem.reference.to_owned()),
}
}
}
#[derive(Serialize, Debug)]
pub struct AddonInstall<'a> {
pub path: &'a Path,
}
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Cookie {
pub name: String,
pub value: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub domain: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub secure: Option<bool>,
}
#[derive(Serialize, Debug, PartialEq)]
pub struct AddCookie<'a> {
pub cookie: &'a Cookie,
} | random_line_split | |
rpc.rs | //! Types related the `ActorRef` Remote Procedure Call (RPC) mechanism.
//!
//! RPC is implemented by sending a [`RpcMessage`] to the actor, which contains
//! the request message and a [`RpcResponse`]. The `RpcResponse` allows the
//! receiving actor to send back a response to the sending actor.
//!
//! To support RPC the receiving actor needs to implement
//! [`From`]`<`[`RpcMessage`]`<Req, Res>>`, where `Req` is the type of the
//! request message and `Res` the type of the response. This can be done easily
//! by using the [`from_message`] macro. The RPC message can then be received
//! like any other message.
//!
//! The sending actor needs to call [`ActorRef::rpc`] with the correct request
//! type. That will return an [`Rpc`] [`Future`] which returns the response to
//! the call, or [`RpcError`] in case of an error. | //! [`from_message`]: crate::from_message
//!
//! # Examples
//!
//! Using RPC to communicate with another actor.
//!
//! ```
//! # #![feature(never_type)]
//! #
//! use heph::actor;
//! use heph::actor_ref::{ActorRef, RpcMessage};
//! use heph::rt::{self, Runtime, ThreadLocal};
//! use heph::spawn::ActorOptions;
//! use heph::supervisor::NoSupervisor;
//!
//! /// Message type for [`counter`].
//! struct Add(RpcMessage<usize, usize>);
//!
//! /// Required to support RPC.
//! impl From<RpcMessage<usize, usize>> for Add {
//! fn from(msg: RpcMessage<usize, usize>) -> Add {
//! Add(msg)
//! }
//! }
//!
//! /// Receiving actor of the RPC.
//! async fn counter(mut ctx: actor::Context<Add, ThreadLocal>) {
//! // State of the counter.
//! let mut count: usize = 0;
//! // Receive a message like normal.
//! while let Ok(Add(RpcMessage { request, response })) = ctx.receive_next().await {
//! count += request;
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! }
//! }
//!
//! /// Sending actor of the RPC.
//! async fn requester(_: actor::Context<!, ThreadLocal>, actor_ref: ActorRef<Add>) {
//! // Make the procedure call.
//! let response = actor_ref.rpc(10).await;
//! # assert!(response.is_ok());
//! match response {
//! // We got a response.
//! Ok(count) => println!("Current count: {}", count),
//! // Actor failed to respond.
//! Err(err) => eprintln!("Counter didn't reply: {}", err),
//! }
//! }
//!
//! # fn main() -> Result<(), rt::Error> {
//! # let mut runtime = Runtime::new()?;
//! # runtime.run_on_workers(|mut runtime_ref| -> Result<(), !> {
//! # let counter = counter as fn(_) -> _;
//! # let actor_ref = runtime_ref.spawn_local(NoSupervisor, counter, (), ActorOptions::default());
//! #
//! # let requester = requester as fn(_, _) -> _;
//! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default());
//! # Ok(())
//! # })?;
//! # runtime.start()
//! # }
//! ```
//!
//! Supporting multiple procedure within the same actor is possible by making
//! the message an `enum` as the example below shows. Furthermore synchronous
//! actors are supported.
//!
// FIXME: doesn't stop on CI.
//! ```ignore
//! # #![feature(never_type)]
//! #
//! use heph::actor::{self, SyncContext};
//! use heph::actor_ref::{ActorRef, RpcMessage};
//! use heph::from_message;
//! use heph::rt::{self, Runtime, ActorOptions, SyncActorOptions};
//! use heph::supervisor::NoSupervisor;
//!
//! /// Message type for [`counter`].
//! enum Message {
//! /// Increase the counter, returning the current state.
//! Add(RpcMessage<usize, usize>),
//! /// Get the current state of the counter.
//! Get(RpcMessage<(), usize>),
//! }
//!
//! // Implement the `From` trait for `Message`.
//! from_message!(Message::Add(usize) -> usize);
//! from_message!(Message::Get(()) -> usize);
//!
//! /// Receiving synchronous actor of the RPC.
//! fn counter(mut ctx: SyncContext<Message>) {
//! // State of the counter.
//! let mut count: usize = 0;
//!
//! // Receive messages in a loop.
//! while let Ok(msg) = ctx.receive_next() {
//! match msg {
//! Message::Add(RpcMessage { request, response }) => {
//! count += request;
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! },
//! Message::Get(RpcMessage { response, .. }) => {
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! },
//! }
//! }
//! }
//!
//! /// Sending actor of the RPC.
//! async fn requester(_: actor::Context<!>, actor_ref: ActorRef<Message>) {
//! // Increase the counter by ten.
//! // NOTE: do handle the errors correctly in practice, this is just an
//! // example.
//! let count = actor_ref.rpc(10).await.unwrap();
//! println!("Increased count to {}", count);
//!
//! // Retrieve the current count.
//! let count = actor_ref.rpc(()).await.unwrap();
//! # assert_eq!(count, 10);
//! println!("Current count {}", count);
//! }
//!
//! # fn main() -> Result<(), rt::Error> {
//! # let mut runtime = Runtime::new()?;
//! # let counter = counter as fn(_) -> _;
//! # let options = SyncActorOptions::default();
//! # let actor_ref = runtime.spawn_sync_actor(NoSupervisor, counter, (), options)?;
//! # runtime.run_on_workers(move |mut runtime_ref| -> Result<(), !> {
//! # let requester = requester as fn(_, _) -> _;
//! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default());
//! # Ok(())
//! # })?;
//! # runtime.start()
//! # }
//! ```
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{self, Poll};
use inbox::oneshot::{new_oneshot, RecvOnce, Sender};
use crate::actor_ref::{ActorRef, SendError, SendValue};
/// [`Future`] that resolves to a Remote Procedure Call (RPC) response.
///
/// Created by [`ActorRef::rpc`].
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Rpc<'r, 'fut, M, Res> {
send: Option<SendValue<'r, 'fut, M>>,
recv: RecvOnce<Res>,
}
impl<'r, 'fut, M, Res> Rpc<'r, 'fut, M, Res>
where
'r: 'fut,
{
/// Create a new RPC.
pub(super) fn new<Req>(actor_ref: &'r ActorRef<M>, request: Req) -> Rpc<'r, 'fut, M, Res>
where
M: From<RpcMessage<Req, Res>>,
{
let (sender, receiver) = new_oneshot();
let response = RpcResponse { sender };
let msg = RpcMessage { request, response };
let send = actor_ref.send(msg);
Rpc {
send: Some(send),
recv: receiver.recv_once(),
}
}
}
impl<'r, 'fut, M, Res> Future for Rpc<'r, 'fut, M, Res> {
type Output = Result<Res, RpcError>;
#[track_caller]
fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<Self::Output> {
// Safety: we're not moving `send` so this is safe.
let send = unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.as_pin_mut();
if let Some(send) = send {
match send.poll(ctx) {
Poll::Ready(Ok(())) => {}
Poll::Ready(Err(err)) => return Poll::Ready(Err(err.into())),
Poll::Pending => return Poll::Pending,
}
// Don't take this branch again.
// Safety: we're not moving `send` so this is safe.
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.set(None);
}
// Safety: we're not moving `recv` so this is safe.
match unsafe { self.map_unchecked_mut(|s| &mut s.recv) }.poll(ctx) {
Poll::Ready(Some(response)) => Poll::Ready(Ok(response)),
Poll::Ready(None) => Poll::Ready(Err(RpcError::NoResponse)),
Poll::Pending => Poll::Pending,
}
}
}
/// Error returned by [`Rpc`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RpcError {
/// Same error as [`SendError`].
SendError,
/// Returned when the other side returned no response.
NoResponse,
}
impl From<SendError> for RpcError {
fn from(_: SendError) -> RpcError {
RpcError::SendError
}
}
impl fmt::Display for RpcError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RpcError::SendError => SendError.fmt(f),
RpcError::NoResponse => f.write_str("no RPC response"),
}
}
}
impl Error for RpcError {}
/// Message type that holds an RPC request.
///
/// It holds both the request (`Req`) and the way to respond [`RpcResponse`].
#[derive(Debug)]
pub struct RpcMessage<Req, Res> {
/// The request object.
pub request: Req,
/// A way to [`respond`] to the call.
///
/// [`respond`]: RpcResponse::respond
pub response: RpcResponse<Res>,
}
impl<Req, Res> RpcMessage<Req, Res> {
/// Convenience method to handle a `Req`uest and return a `Res`ponse.
///
/// The function `f` is called with [`self.request`], the response returned by
/// the function `f` is than returned to the request maker via
/// [`self.response.respond`].
///
/// [`self.request`]: RpcMessage::request
/// [`self.response.respond`]: RpcResponse::respond
///
/// # Notes
///
/// If the receiving end is [no longer connected] the function `f` is not
/// called and `Ok(())` is returned instead.
///
/// [no longer connected]: RpcResponse::is_connected
pub fn handle<F>(self, f: F) -> Result<(), SendError>
where
F: FnOnce(Req) -> Res,
{
if self.response.is_connected() {
let response = f(self.request);
self.response.respond(response)
} else {
// If the receiving actor is no longer waiting we can skip the
// request.
Ok(())
}
}
}
/// Structure to respond to an [`Rpc`] request.
#[derive(Debug)]
pub struct RpcResponse<Res> {
sender: Sender<Res>,
}
impl<Res> RpcResponse<Res> {
/// Respond to a RPC request.
pub fn respond(self, response: Res) -> Result<(), SendError> {
self.sender.try_send(response).map_err(|_| SendError)
}
/// Returns `false` if the receiving side is disconnected.
///
/// # Notes
///
/// If this method returns `true` it doesn't mean that `respond` will
/// succeed. In fact the moment this function returns a result it could
/// already be invalid.
pub fn is_connected(&self) -> bool {
self.sender.is_connected()
}
} | //! | random_line_split |
rpc.rs | //! Types related the `ActorRef` Remote Procedure Call (RPC) mechanism.
//!
//! RPC is implemented by sending a [`RpcMessage`] to the actor, which contains
//! the request message and a [`RpcResponse`]. The `RpcResponse` allows the
//! receiving actor to send back a response to the sending actor.
//!
//! To support RPC the receiving actor needs to implement
//! [`From`]`<`[`RpcMessage`]`<Req, Res>>`, where `Req` is the type of the
//! request message and `Res` the type of the response. This can be done easily
//! by using the [`from_message`] macro. The RPC message can then be received
//! like any other message.
//!
//! The sending actor needs to call [`ActorRef::rpc`] with the correct request
//! type. That will return an [`Rpc`] [`Future`] which returns the response to
//! the call, or [`RpcError`] in case of an error.
//!
//! [`from_message`]: crate::from_message
//!
//! # Examples
//!
//! Using RPC to communicate with another actor.
//!
//! ```
//! # #![feature(never_type)]
//! #
//! use heph::actor;
//! use heph::actor_ref::{ActorRef, RpcMessage};
//! use heph::rt::{self, Runtime, ThreadLocal};
//! use heph::spawn::ActorOptions;
//! use heph::supervisor::NoSupervisor;
//!
//! /// Message type for [`counter`].
//! struct Add(RpcMessage<usize, usize>);
//!
//! /// Required to support RPC.
//! impl From<RpcMessage<usize, usize>> for Add {
//! fn from(msg: RpcMessage<usize, usize>) -> Add {
//! Add(msg)
//! }
//! }
//!
//! /// Receiving actor of the RPC.
//! async fn counter(mut ctx: actor::Context<Add, ThreadLocal>) {
//! // State of the counter.
//! let mut count: usize = 0;
//! // Receive a message like normal.
//! while let Ok(Add(RpcMessage { request, response })) = ctx.receive_next().await {
//! count += request;
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! }
//! }
//!
//! /// Sending actor of the RPC.
//! async fn requester(_: actor::Context<!, ThreadLocal>, actor_ref: ActorRef<Add>) {
//! // Make the procedure call.
//! let response = actor_ref.rpc(10).await;
//! # assert!(response.is_ok());
//! match response {
//! // We got a response.
//! Ok(count) => println!("Current count: {}", count),
//! // Actor failed to respond.
//! Err(err) => eprintln!("Counter didn't reply: {}", err),
//! }
//! }
//!
//! # fn main() -> Result<(), rt::Error> {
//! # let mut runtime = Runtime::new()?;
//! # runtime.run_on_workers(|mut runtime_ref| -> Result<(), !> {
//! # let counter = counter as fn(_) -> _;
//! # let actor_ref = runtime_ref.spawn_local(NoSupervisor, counter, (), ActorOptions::default());
//! #
//! # let requester = requester as fn(_, _) -> _;
//! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default());
//! # Ok(())
//! # })?;
//! # runtime.start()
//! # }
//! ```
//!
//! Supporting multiple procedure within the same actor is possible by making
//! the message an `enum` as the example below shows. Furthermore synchronous
//! actors are supported.
//!
// FIXME: doesn't stop on CI.
//! ```ignore
//! # #![feature(never_type)]
//! #
//! use heph::actor::{self, SyncContext};
//! use heph::actor_ref::{ActorRef, RpcMessage};
//! use heph::from_message;
//! use heph::rt::{self, Runtime, ActorOptions, SyncActorOptions};
//! use heph::supervisor::NoSupervisor;
//!
//! /// Message type for [`counter`].
//! enum Message {
//! /// Increase the counter, returning the current state.
//! Add(RpcMessage<usize, usize>),
//! /// Get the current state of the counter.
//! Get(RpcMessage<(), usize>),
//! }
//!
//! // Implement the `From` trait for `Message`.
//! from_message!(Message::Add(usize) -> usize);
//! from_message!(Message::Get(()) -> usize);
//!
//! /// Receiving synchronous actor of the RPC.
//! fn counter(mut ctx: SyncContext<Message>) {
//! // State of the counter.
//! let mut count: usize = 0;
//!
//! // Receive messages in a loop.
//! while let Ok(msg) = ctx.receive_next() {
//! match msg {
//! Message::Add(RpcMessage { request, response }) => {
//! count += request;
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! },
//! Message::Get(RpcMessage { response, .. }) => {
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! },
//! }
//! }
//! }
//!
//! /// Sending actor of the RPC.
//! async fn requester(_: actor::Context<!>, actor_ref: ActorRef<Message>) {
//! // Increase the counter by ten.
//! // NOTE: do handle the errors correctly in practice, this is just an
//! // example.
//! let count = actor_ref.rpc(10).await.unwrap();
//! println!("Increased count to {}", count);
//!
//! // Retrieve the current count.
//! let count = actor_ref.rpc(()).await.unwrap();
//! # assert_eq!(count, 10);
//! println!("Current count {}", count);
//! }
//!
//! # fn main() -> Result<(), rt::Error> {
//! # let mut runtime = Runtime::new()?;
//! # let counter = counter as fn(_) -> _;
//! # let options = SyncActorOptions::default();
//! # let actor_ref = runtime.spawn_sync_actor(NoSupervisor, counter, (), options)?;
//! # runtime.run_on_workers(move |mut runtime_ref| -> Result<(), !> {
//! # let requester = requester as fn(_, _) -> _;
//! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default());
//! # Ok(())
//! # })?;
//! # runtime.start()
//! # }
//! ```
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{self, Poll};
use inbox::oneshot::{new_oneshot, RecvOnce, Sender};
use crate::actor_ref::{ActorRef, SendError, SendValue};
/// [`Future`] that resolves to a Remote Procedure Call (RPC) response.
///
/// Created by [`ActorRef::rpc`].
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Rpc<'r, 'fut, M, Res> {
    // In-flight send of the request message; set to `None` by `poll` once the
    // send completed so it is never polled again.
    send: Option<SendValue<'r, 'fut, M>>,
    // One-shot receiver on which the responding actor's reply arrives.
    recv: RecvOnce<Res>,
}
impl<'r, 'fut, M, Res> Rpc<'r, 'fut, M, Res>
where
    'r: 'fut,
{
    /// Creates a new RPC future: pairs `request` with a fresh one-shot
    /// response channel inside an [`RpcMessage`] and starts sending it to the
    /// actor behind `actor_ref`.
    pub(super) fn new<Req>(actor_ref: &'r ActorRef<M>, request: Req) -> Rpc<'r, 'fut, M, Res>
    where
        M: From<RpcMessage<Req, Res>>,
    {
        // Channel over which the receiving actor will send the response back.
        let (sender, receiver) = new_oneshot();
        let msg = RpcMessage {
            request,
            response: RpcResponse { sender },
        };
        Rpc {
            send: Some(actor_ref.send(msg)),
            recv: receiver.recv_once(),
        }
    }
}
impl<'r, 'fut, M, Res> Future for Rpc<'r, 'fut, M, Res> {
    type Output = Result<Res, RpcError>;

    // Polls in two phases: first drive the send of the request to completion
    // (then clear `send` so it is never polled again), then wait on the
    // one-shot receiver for the response.
    #[track_caller]
    fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<Self::Output> {
        // Safety: we're not moving `send` so this is safe.
        let send = unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.as_pin_mut();
        if let Some(send) = send {
            match send.poll(ctx) {
                Poll::Ready(Ok(())) => {}
                Poll::Ready(Err(err)) => return Poll::Ready(Err(err.into())),
                Poll::Pending => return Poll::Pending,
            }
            // Don't take this branch again.
            // Safety: we're not moving `send` so this is safe.
            unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.set(None);
        }
        // Safety: we're not moving `recv` so this is safe.
        // `None` from the one-shot receive means no response will ever arrive.
        match unsafe { self.map_unchecked_mut(|s| &mut s.recv) }.poll(ctx) {
            Poll::Ready(Some(response)) => Poll::Ready(Ok(response)),
            Poll::Ready(None) => Poll::Ready(Err(RpcError::NoResponse)),
            Poll::Pending => Poll::Pending,
        }
    }
}
/// Error returned by [`Rpc`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RpcError {
    /// Same error as [`SendError`]: the request message could not be sent.
    SendError,
    /// Returned when the other side returned no response, i.e. the one-shot
    /// receive completed without a value.
    NoResponse,
}
impl From<SendError> for RpcError {
fn from(_: SendError) -> RpcError {
RpcError::SendError
}
}
impl fmt::Display for RpcError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            // Delegate to `SendError`'s own `Display` message.
            RpcError::SendError => SendError.fmt(f),
            RpcError::NoResponse => f.write_str("no RPC response"),
        }
    }
}

impl Error for RpcError {}
/// Message type that holds an RPC request.
///
/// It holds both the request (`Req`) and the way to respond [`RpcResponse`].
#[derive(Debug)]
pub struct | <Req, Res> {
/// The request object.
pub request: Req,
/// A way to [`respond`] to the call.
///
/// [`respond`]: RpcResponse::respond
pub response: RpcResponse<Res>,
}
impl<Req, Res> RpcMessage<Req, Res> {
    /// Convenience method to handle a `Req`uest and return a `Res`ponse.
    ///
    /// The function `f` is called with [`self.request`]; the response it
    /// returns is then sent back to the request maker via
    /// [`self.response.respond`].
    ///
    /// [`self.request`]: RpcMessage::request
    /// [`self.response.respond`]: RpcResponse::respond
    ///
    /// # Notes
    ///
    /// If the receiving end is [no longer connected] the function `f` is not
    /// called and `Ok(())` is returned instead.
    ///
    /// [no longer connected]: RpcResponse::is_connected
    pub fn handle<F>(self, f: F) -> Result<(), SendError>
    where
        F: FnOnce(Req) -> Res,
    {
        // If the requesting actor is no longer waiting, skip the work.
        if !self.response.is_connected() {
            return Ok(());
        }
        self.response.respond(f(self.request))
    }
}
/// Structure to respond to an [`Rpc`] request.
#[derive(Debug)]
pub struct RpcResponse<Res> {
    // One-shot sender paired with the `RecvOnce` held by the `Rpc` future.
    sender: Sender<Res>,
}
impl<Res> RpcResponse<Res> {
    /// Respond to a RPC request, consuming the responder.
    pub fn respond(self, response: Res) -> Result<(), SendError> {
        match self.sender.try_send(response) {
            Ok(()) => Ok(()),
            // The requester hung up; surface it as a send error.
            Err(_) => Err(SendError),
        }
    }

    /// Returns `false` if the receiving side is disconnected.
    ///
    /// # Notes
    ///
    /// If this method returns `true` it doesn't mean that `respond` will
    /// succeed. In fact the moment this function returns a result it could
    /// already be invalid.
    pub fn is_connected(&self) -> bool {
        self.sender.is_connected()
    }
}
| RpcMessage | identifier_name |
rpc.rs | //! Types related the `ActorRef` Remote Procedure Call (RPC) mechanism.
//!
//! RPC is implemented by sending a [`RpcMessage`] to the actor, which contains
//! the request message and a [`RpcResponse`]. The `RpcResponse` allows the
//! receiving actor to send back a response to the sending actor.
//!
//! To support RPC the receiving actor needs to implement
//! [`From`]`<`[`RpcMessage`]`<Req, Res>>`, where `Req` is the type of the
//! request message and `Res` the type of the response. This can be done easily
//! by using the [`from_message`] macro. The RPC message can then be received
//! like any other message.
//!
//! The sending actor needs to call [`ActorRef::rpc`] with the correct request
//! type. That will return an [`Rpc`] [`Future`] which returns the response to
//! the call, or [`RpcError`] in case of an error.
//!
//! [`from_message`]: crate::from_message
//!
//! # Examples
//!
//! Using RPC to communicate with another actor.
//!
//! ```
//! # #![feature(never_type)]
//! #
//! use heph::actor;
//! use heph::actor_ref::{ActorRef, RpcMessage};
//! use heph::rt::{self, Runtime, ThreadLocal};
//! use heph::spawn::ActorOptions;
//! use heph::supervisor::NoSupervisor;
//!
//! /// Message type for [`counter`].
//! struct Add(RpcMessage<usize, usize>);
//!
//! /// Required to support RPC.
//! impl From<RpcMessage<usize, usize>> for Add {
//! fn from(msg: RpcMessage<usize, usize>) -> Add {
//! Add(msg)
//! }
//! }
//!
//! /// Receiving actor of the RPC.
//! async fn counter(mut ctx: actor::Context<Add, ThreadLocal>) {
//! // State of the counter.
//! let mut count: usize = 0;
//! // Receive a message like normal.
//! while let Ok(Add(RpcMessage { request, response })) = ctx.receive_next().await {
//! count += request;
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! }
//! }
//!
//! /// Sending actor of the RPC.
//! async fn requester(_: actor::Context<!, ThreadLocal>, actor_ref: ActorRef<Add>) {
//! // Make the procedure call.
//! let response = actor_ref.rpc(10).await;
//! # assert!(response.is_ok());
//! match response {
//! // We got a response.
//! Ok(count) => println!("Current count: {}", count),
//! // Actor failed to respond.
//! Err(err) => eprintln!("Counter didn't reply: {}", err),
//! }
//! }
//!
//! # fn main() -> Result<(), rt::Error> {
//! # let mut runtime = Runtime::new()?;
//! # runtime.run_on_workers(|mut runtime_ref| -> Result<(), !> {
//! # let counter = counter as fn(_) -> _;
//! # let actor_ref = runtime_ref.spawn_local(NoSupervisor, counter, (), ActorOptions::default());
//! #
//! # let requester = requester as fn(_, _) -> _;
//! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default());
//! # Ok(())
//! # })?;
//! # runtime.start()
//! # }
//! ```
//!
//! Supporting multiple procedures within the same actor is possible by making
//! the message an `enum`, as the example below shows. Furthermore, synchronous
//! actors are supported.
//!
// FIXME: doesn't stop on CI.
//! ```ignore
//! # #![feature(never_type)]
//! #
//! use heph::actor::{self, SyncContext};
//! use heph::actor_ref::{ActorRef, RpcMessage};
//! use heph::from_message;
//! use heph::rt::{self, Runtime, ActorOptions, SyncActorOptions};
//! use heph::supervisor::NoSupervisor;
//!
//! /// Message type for [`counter`].
//! enum Message {
//! /// Increase the counter, returning the current state.
//! Add(RpcMessage<usize, usize>),
//! /// Get the current state of the counter.
//! Get(RpcMessage<(), usize>),
//! }
//!
//! // Implement the `From` trait for `Message`.
//! from_message!(Message::Add(usize) -> usize);
//! from_message!(Message::Get(()) -> usize);
//!
//! /// Receiving synchronous actor of the RPC.
//! fn counter(mut ctx: SyncContext<Message>) {
//! // State of the counter.
//! let mut count: usize = 0;
//!
//! // Receive messages in a loop.
//! while let Ok(msg) = ctx.receive_next() {
//! match msg {
//! Message::Add(RpcMessage { request, response }) => {
//! count += request;
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! },
//! Message::Get(RpcMessage { response, .. }) => {
//! // Send back the current state, ignoring any errors.
//! let _ = response.respond(count);
//! },
//! }
//! }
//! }
//!
//! /// Sending actor of the RPC.
//! async fn requester(_: actor::Context<!>, actor_ref: ActorRef<Message>) {
//! // Increase the counter by ten.
//! // NOTE: do handle the errors correctly in practice, this is just an
//! // example.
//! let count = actor_ref.rpc(10).await.unwrap();
//! println!("Increased count to {}", count);
//!
//! // Retrieve the current count.
//! let count = actor_ref.rpc(()).await.unwrap();
//! # assert_eq!(count, 10);
//! println!("Current count {}", count);
//! }
//!
//! # fn main() -> Result<(), rt::Error> {
//! # let mut runtime = Runtime::new()?;
//! # let counter = counter as fn(_) -> _;
//! # let options = SyncActorOptions::default();
//! # let actor_ref = runtime.spawn_sync_actor(NoSupervisor, counter, (), options)?;
//! # runtime.run_on_workers(move |mut runtime_ref| -> Result<(), !> {
//! # let requester = requester as fn(_, _) -> _;
//! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default());
//! # Ok(())
//! # })?;
//! # runtime.start()
//! # }
//! ```
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{self, Poll};
use inbox::oneshot::{new_oneshot, RecvOnce, Sender};
use crate::actor_ref::{ActorRef, SendError, SendValue};
/// [`Future`] that resolves to a Remote Procedure Call (RPC) response.
///
/// Created by [`ActorRef::rpc`].
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Rpc<'r, 'fut, M, Res> {
    // In-flight send of the request message; set to `None` by `poll` once the
    // send completed so it is never polled again.
    send: Option<SendValue<'r, 'fut, M>>,
    // One-shot receiver on which the responding actor's reply arrives.
    recv: RecvOnce<Res>,
}
impl<'r, 'fut, M, Res> Rpc<'r, 'fut, M, Res>
where
    'r: 'fut,
{
    /// Creates a new RPC future: pairs `request` with a fresh one-shot
    /// response channel inside an [`RpcMessage`] and starts sending it to the
    /// actor behind `actor_ref`.
    pub(super) fn new<Req>(actor_ref: &'r ActorRef<M>, request: Req) -> Rpc<'r, 'fut, M, Res>
    where
        M: From<RpcMessage<Req, Res>>,
    {
        // Channel over which the receiving actor will send the response back.
        let (sender, receiver) = new_oneshot();
        let msg = RpcMessage {
            request,
            response: RpcResponse { sender },
        };
        Rpc {
            send: Some(actor_ref.send(msg)),
            recv: receiver.recv_once(),
        }
    }
}
impl<'r, 'fut, M, Res> Future for Rpc<'r, 'fut, M, Res> {
    type Output = Result<Res, RpcError>;

    // Polls in two phases: first drive the send of the request to completion
    // (then clear `send` so it is never polled again), then wait on the
    // one-shot receiver for the response.
    #[track_caller]
    fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<Self::Output> {
        // Safety: we're not moving `send` so this is safe.
        let send = unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.as_pin_mut();
        if let Some(send) = send {
            match send.poll(ctx) {
                Poll::Ready(Ok(())) => {}
                Poll::Ready(Err(err)) => return Poll::Ready(Err(err.into())),
                Poll::Pending => return Poll::Pending,
            }
            // Don't take this branch again.
            // Safety: we're not moving `send` so this is safe.
            unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.set(None);
        }
        // Safety: we're not moving `recv` so this is safe.
        // `None` from the one-shot receive means no response will ever arrive.
        match unsafe { self.map_unchecked_mut(|s| &mut s.recv) }.poll(ctx) {
            Poll::Ready(Some(response)) => Poll::Ready(Ok(response)),
            Poll::Ready(None) => Poll::Ready(Err(RpcError::NoResponse)),
            Poll::Pending => Poll::Pending,
        }
    }
}
/// Error returned by [`Rpc`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RpcError {
    /// Same error as [`SendError`]: the request message could not be sent.
    SendError,
    /// Returned when the other side returned no response, i.e. the one-shot
    /// receive completed without a value.
    NoResponse,
}
impl From<SendError> for RpcError {
    /// `SendError` is a unit type, so the conversion simply selects the
    /// matching variant.
    fn from(_: SendError) -> RpcError {
        Self::SendError
    }
}

impl fmt::Display for RpcError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            // Delegate to `SendError`'s own `Display` message.
            RpcError::SendError => SendError.fmt(f),
            RpcError::NoResponse => f.write_str("no RPC response"),
        }
    }
}

impl Error for RpcError {}
/// Message type that holds an RPC request.
///
/// It holds both the request (`Req`) and the way to respond [`RpcResponse`].
/// `Req` is the request payload type, `Res` the response payload type.
#[derive(Debug)]
pub struct RpcMessage<Req, Res> {
    /// The request object.
    pub request: Req,
    /// A way to [`respond`] to the call.
    ///
    /// [`respond`]: RpcResponse::respond
    pub response: RpcResponse<Res>,
}
impl<Req, Res> RpcMessage<Req, Res> {
/// Convenience method to handle a `Req`uest and return a `Res`ponse.
///
/// The function `f` is called with [`self.request`], the response returned by
/// the function `f` is then returned to the request maker via
/// [`self.response.respond`].
///
/// [`self.request`]: RpcMessage::request
/// [`self.response.respond`]: RpcResponse::respond
///
/// # Notes
///
/// If the receiving end is [no longer connected] the function `f` is not
/// called and `Ok(())` is returned instead.
///
/// [no longer connected]: RpcResponse::is_connected
pub fn handle<F>(self, f: F) -> Result<(), SendError>
where
F: FnOnce(Req) -> Res,
|
}
/// Structure to respond to an [`Rpc`] request.
#[derive(Debug)]
pub struct RpcResponse<Res> {
    // One-shot sender paired with the `RecvOnce` held by the `Rpc` future.
    sender: Sender<Res>,
}

impl<Res> RpcResponse<Res> {
    /// Respond to a RPC request, consuming the responder.
    pub fn respond(self, response: Res) -> Result<(), SendError> {
        match self.sender.try_send(response) {
            Ok(()) => Ok(()),
            // The requester hung up; surface it as a send error.
            Err(_) => Err(SendError),
        }
    }

    /// Returns `false` if the receiving side is disconnected.
    ///
    /// # Notes
    ///
    /// If this method returns `true` it doesn't mean that `respond` will
    /// succeed. In fact the moment this function returns a result it could
    /// already be invalid.
    pub fn is_connected(&self) -> bool {
        self.sender.is_connected()
    }
}
| {
if self.response.is_connected() {
let response = f(self.request);
self.response.respond(response)
} else {
// If the receiving actor is no longer waiting we can skip the
// request.
Ok(())
}
} | identifier_body |
model_training.py | __author__ = "alvaro barbeira"
import sys
import os
import logging
import gzip
import collections
import numpy
import pandas
import scipy
import math
import statsmodels.formula.api as smf
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
import pyarrow.parquet as pq
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.individual_data import Utilities as StudyUtilities
from genomic_tools_lib.file_formats import Parquet, Miscellaneous
from genomic_tools_lib.miscellaneous import matrices, Genomics, Math
###############################################################################
import rpy2.robjects as robjects
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
def initialize():
    """Source the companion `elastic_net.R` script (expected in the same
    directory as the running script) and bind its `train_elastic_net` and
    `set_seed` functions as module-level globals."""
    global train_elastic_net
    global set_seed
    # The R script lives next to the entry-point script.
    path = os.path.split(sys.argv[0])[0]
    path = os.path.join(path, "elastic_net.R")
    robjects.r['source'](path)
    train_elastic_net = robjects.r['train_elastic_net']
    set_seed = robjects.r['set_seed']
###############################################################################
def _save(d_, features_, features_data_, gene):
    # Debugging helper: dump the response vector (y) and the feature matrix (x)
    # for `gene` as tab-separated files in the working directory.
    pandas.DataFrame({gene:d_[gene]}).to_csv("y.txt", index=False, sep="\t")
    pandas.DataFrame(collections.OrderedDict([(v,features_data_[v]) for v in features_.id.values])).to_csv("x.txt", index=False, sep="\t")
def get_weights(x_weights, id_whitelist):
    """Load per-variant penalty weights.

    `x_weights` is a (path, kind, threshold) triple; only kind "PIP" is
    supported. Returns a frame with columns (gene_id, w, id) where `w` is
    `1 - PIP`, so the more probable SNPs are penalized less.
    """
    if x_weights[1] != "PIP":
        raise RuntimeError("unsupported weights argument")
    w = Miscellaneous.dapg_signals(x_weights[0], float(x_weights[2]), id_whitelist)
    w = w.rename(columns={"gene": "gene_id", "pip": "w", "variant_id": "id"})
    # Invert so high-probability SNPs get less penalty.
    w.w = 1 - w.w
    return w
###############################################################################
def train_elastic_net_wrapper(features_data_, features_, d_, data_annotation_, x_w=None, prune=True, nested_folds=10):
    """Train an elastic net model for one gene via the R `train_elastic_net`
    function sourced in `initialize()`.

    NOTE: the function name was garbled in this copy; restored as
    `train_elastic_net_wrapper` to match its use in `run()`.
    `prune` is accepted for signature compatibility with `train_ols` but is
    unused here. Returns (weights, summary) as pandas data frames.
    """
    # Build the n_individuals x n_features matrix expected by glmnet,
    # with R dimnames (individual ids x variant ids).
    x = numpy.array([features_data_[v] for v in features_.id.values])
    dimnames = robjects.ListVector(
        [(1, robjects.StrVector(d_["individual"])), (2, robjects.StrVector(features_.id.values))])
    x = robjects.r["matrix"](robjects.FloatVector(x.flatten()), ncol=features_.shape[0], dimnames=dimnames)
    y = robjects.FloatVector(d_[data_annotation_.gene_id])
    nested_folds = robjects.FloatVector([nested_folds])
    # py2ri chokes on None, so only pass penalty factors when present.
    if x_w is None:
        res = train_elastic_net(y, x, n_train_test_folds=nested_folds)
    else:
        res = train_elastic_net(y, x, penalty_factor=x_w, n_train_test_folds=nested_folds)
    # res is a pair of R data frames: (weights, summary).
    return pandas2ri.ri2py(res[0]), pandas2ri.ri2py(res[1])
###############################################################################
def ols_pred_perf(data, n_folds=10):
    """Estimate out-of-sample performance of an OLS model via k-fold CV.

    `data` must hold the response in column "y"; every other column is used as
    a predictor. Returns a one-row data frame with averaged R^2, correlation
    (rho) and a Fisher-transform based z-score/p-value across folds.
    """
    # A bare `import scipy` (as at the top of this file) does not guarantee the
    # `stats` submodule is loaded; import it explicitly here.
    from scipy import stats
    kf = KFold(n_splits=n_folds, shuffle=True)
    rho_f = []
    R2_f = []
    zscore_f = []
    for train_index, test_index in kf.split(data):
        train_ = data.iloc[train_index]
        fold_ = smf.ols('y ~ {}'.format(" + ".join([x for x in train_.columns if x != "y"])), data=train_).fit()
        test_ = data.iloc[test_index]
        y_predicted = fold_.predict(test_)
        if numpy.std(y_predicted) != 0:
            score = r2_score(test_.y, y_predicted)
            rho = numpy.corrcoef(test_.y, y_predicted)[0, 1]
            # Fisher transformation: arctanh(rho)*sqrt(n-3) is ~N(0,1) under H0.
            zscore = numpy.arctanh(rho) * numpy.sqrt(len(y_predicted) - 3)
        else:
            # Constant prediction: correlation undefined, count the fold as zero.
            score = 0
            rho = 0
            zscore = 0
        R2_f.append(score)
        rho_f.append(rho)
        zscore_f.append(zscore)
    rho_avg = numpy.average(rho_f)
    # Combine per-fold z-scores (Stouffer's method); one-sided p-value.
    zscore_est = numpy.sum(zscore_f) / numpy.sqrt(n_folds)
    zscore_pval = stats.norm.sf(zscore_est)
    d = {"test_R2_avg": [numpy.average(R2_f)], "test_R2_sd": [numpy.std(R2_f)],
         "rho_avg": [rho_avg], "rho_avg_squared": [rho_avg**2], "rho_se": [numpy.std(rho_f)],
         "rho_zscore": [zscore_est], "zscore_pval": [zscore_pval], "nested_cv_fisher_pval": [None], "nested_cv_converged": n_folds}
    return pandas.DataFrame(d)
def prune(data):
    """Drop features that are (nearly) collinear with an earlier feature.

    Scans the correlation matrix and discards every column whose absolute
    correlation with a surviving, earlier column is >= 0.95. Returns `data`
    restricted to the surviving columns.
    """
    if data.shape[1] == 1:
        # Nothing to prune against.
        return data
    cor = numpy.corrcoef(data.values.T)
    discard = set()
    for i in range(0, cor.shape[0]):
        for j in range(i, cor.shape[1]):
            if i == j:
                continue
            if i in discard:
                # Column i was itself discarded; don't prune on its behalf.
                continue
            # Bug fix: `math.abs` does not exist in Python; use builtin abs().
            if abs(cor[i][j]) >= 0.95:
                discard.add(j)
    discard = data.columns[list(discard)].values
    return data.drop(discard, axis=1)
def train_ols(features_data_, features_, d_, data_annotation_, x_w=None, do_prune=True, nested_folds=10):
    """Train an ordinary-least-squares model for one gene.

    Bug fix: the flag parameter was renamed from `prune` to `do_prune` — the
    old name shadowed the module-level `prune()` function, so the call
    `prune(data)` below attempted to call a bool (TypeError). All in-file
    callers pass this argument positionally, so the rename is safe.
    `x_w` is ignored (OLS has no penalty factors).
    """
    ids = []
    data = {}
    # Standardize each feature; Math.standardize returns None for constant
    # columns, which are dropped.
    for v in features_.id.values:
        x = Math.standardize(features_data_[v])
        if x is not None:
            data[v] = x
            ids.append(v)
    data = pandas.DataFrame(data)
    if do_prune:
        data = prune(data)
        ids = data.columns.values
    if len(ids) == 0:
        # No usable features: return empty weight/summary frames with the
        # expected columns.
        w = pandas.DataFrame({"feature": [], "weight": []})
        s = pandas.DataFrame({"test_R2_avg": [], "test_R2_sd": [],
                              "rho_avg": [], "rho_avg_squared": [], "rho_se": [],
                              "rho_zscore": [], "zscore_pval": [], "nested_cv_fisher_pval": [],
                              "alpha": [], "n_snps_in_window": [], "cv_R2_avg": [], "cv_R2_sd": [], "in_sample_R2": [], "n.snps.in.model": []})
        return w, s
    data["y"] = Math.standardize(d_[data_annotation_.gene_id])
    results = smf.ols('y ~ {}'.format(" + ".join(ids)), data=data).fit()
    # Drop the intercept (first entry) and reshape to (feature, weight).
    weights = results.params[1:].to_frame().reset_index().rename(columns={"index": "feature", 0: "weight"})
    summary = ols_pred_perf(data, nested_folds)
    # OLS has no regularization path; fill the elastic-net-only fields with None.
    summary = summary.assign(alpha=None, n_snps_in_window=features_.shape[0],
                             cv_R2_avg=None, cv_R2_sd=None, in_sample_R2=None)
    summary["n.snps.in.model"] = len(ids)
    return weights, summary
########################################################################################################################
def process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, summary_fields, train, postfix=None, nested_folds=10):
    """Train a model for one gene and append its weights, summary and
    covariance rows to the open output streams `w`, `s` and `c`.

    `train` is the training callback (elastic net wrapper or OLS); `postfix`
    distinguishes repeated runs of the same gene.
    """
    # Output gene id; repeated runs get a "-<postfix>" suffix.
    gene_id_ = data_annotation_.gene_id if postfix is None else "{}-{}".format(data_annotation_.gene_id, postfix)
    logging.log(8, "loading data")
    d_ = Parquet._read(data, [data_annotation_.gene_id])
    # Variants within the configured window around the gene.
    features_ = Genomics.entries_for_gene_annotation(data_annotation_, args.window, features_metadata)
    if x_weights is not None:
        # Restrict to variants that have penalty weights for this gene and
        # build the R float vector the elastic net expects.
        x_w = features_[["id"]].merge(x_weights[x_weights.gene_id == data_annotation_.gene_id], on="id")
        features_ = features_[features_.id.isin(x_w.id)]
        x_w = robjects.FloatVector(x_w.w.values)
    else:
        x_w = None
    if features_.shape[0] == 0:
        logging.log(9, "No features available")
        return
    # Genotypes for the selected variants, limited to individuals that have
    # expression data for this gene.
    features_data_ = Parquet._read(features, [x for x in features_.id.values],
                                   specific_individuals=[x for x in d_["individual"]])
    logging.log(8, "training")
    weights, summary = train(features_data_, features_, d_, data_annotation_, x_w, not args.dont_prune, nested_folds)
    if weights.shape[0] == 0:
        logging.log(9, "no weights, skipping")
        return
    logging.log(8, "saving")
    # Attach variant metadata (alleles, rsid) and normalize column names to
    # the PredictDB-style output schema.
    weights = weights.assign(gene=data_annotation_.gene_id). \
        merge(features_.rename(columns={"id": "feature", "allele_0": "ref_allele", "allele_1": "eff_allele"}), on="feature"). \
        rename(columns={"feature": "varID"}). \
        assign(gene=gene_id_)
    weights = weights[["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]]
    if args.output_rsids:
        # Fall back to the variant id when no rsid is available.
        weights.loc[weights.rsid == "NA", "rsid"] = weights.loc[weights.rsid == "NA", "varID"]
    w.write(weights.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
    summary = summary. \
        assign(gene=gene_id_, genename=data_annotation_.gene_name,
               gene_type=data_annotation_.gene_type). \
        rename(columns={"n_features": "n_snps_in_window", "n_features_in_model": "n.snps.in.model",
                        "zscore_pval": "pred.perf.pval", "rho_avg_squared": "pred.perf.R2",
                        "cv_converged": "nested_cv_converged"})
    summary["pred.perf.qval"] = None
    summary = summary[summary_fields]
    s.write(summary.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
    # Sample covariance of the model variants' genotypes, flattened to the
    # long "GENE RSID1 RSID2 VALUE" format.
    var_ids = [x for x in weights.varID.values]
    cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
    ids = [x for x in weights.rsid.values] if args.output_rsids else var_ids
    cov = matrices._flatten_matrix_data([(gene_id_, ids, cov)])
    for cov_ in cov:
        l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2], cov_[3]).encode()
        c.write(l)
########################################################################################################################
def run(args):
    """Main driver: load inputs, train a model per gene, and write the
    weights, summary, covariance and (optional) run-metadata outputs."""
    # Refuse to overwrite any pre-existing output file.
    wp = args.output_prefix + "_weights.txt.gz"
    if os.path.exists(wp):
        logging.info("Weights output exists already, delete it or move it")
        return
    sp = args.output_prefix + "_summary.txt.gz"
    if os.path.exists(sp):
        logging.info("Summary output exists already, delete it or move it")
        return
    cp = args.output_prefix + "_covariance.txt.gz"
    # Bug fix: this check previously tested `wp` instead of `cp`.
    if os.path.exists(cp):
        logging.info("covariance output exists already, delete it or move it")
        return
    r = args.output_prefix + "_run.txt.gz"
    # Bug fix: this check previously tested `wp` instead of `r`.
    if os.path.exists(r):
        logging.info("run output exists already, delete it or move it")
        return
    logging.info("Starting")
    Utilities.ensure_requisite_folders(args.output_prefix)

    logging.info("Opening data")
    data = pq.ParquetFile(args.data)
    available_data = {x for x in data.metadata.schema.names}

    logging.info("Loading data annotation")
    data_annotation = StudyUtilities.load_gene_annotation(args.data_annotation, args.chromosome, args.sub_batches, args.sub_batch)
    # Keep only genes that actually have expression columns in the data file.
    data_annotation = data_annotation[data_annotation.gene_id.isin(available_data)]
    if args.gene_whitelist:
        logging.info("Applying gene whitelist")
        data_annotation = data_annotation[data_annotation.gene_id.isin(set(args.gene_whitelist))]
    logging.info("Kept %i entries", data_annotation.shape[0])

    logging.info("Opening features annotation")
    if not args.chromosome:
        features_metadata = pq.read_table(args.features_annotation).to_pandas()
    else:
        # Row groups are per chromosome (1-based chromosome, 0-based group).
        features_metadata = pq.ParquetFile(args.features_annotation).read_row_group(args.chromosome-1).to_pandas()

    if args.chromosome and args.sub_batches:
        logging.info("Trimming variants")
        features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(features_metadata, data_annotation, args.window)

    if args.rsid_whitelist:
        logging.info("Filtering features annotation")
        whitelist = TextFileTools.load_list(args.rsid_whitelist)
        whitelist = set(whitelist)
        features_metadata = features_metadata[features_metadata.rsid.isin(whitelist)]

    if args.features_weights:
        logging.info("Loading weights")
        x_weights = get_weights(args.features_weights, {x for x in features_metadata.id})
        logging.info("Filtering features metadata to those available in weights")
        features_metadata = features_metadata[features_metadata.id.isin(x_weights.id)]
        logging.info("Kept %d entries", features_metadata.shape[0])
    else:
        x_weights = None

    logging.info("Opening features")
    features = pq.ParquetFile(args.features)

    logging.info("Setting R seed")
    s = numpy.random.randint(1e8)
    set_seed(s)
    if args.run_tag:
        # Record the run tag and CV seed for reproducibility.
        d = pandas.DataFrame({"run":[args.run_tag], "cv_seed":[s]})[["run", "cv_seed"]]
        Utilities.save_dataframe(d, r)

    WEIGHTS_FIELDS=["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]
    SUMMARY_FIELDS=["gene", "genename", "gene_type", "alpha", "n_snps_in_window", "n.snps.in.model",
                    "test_R2_avg", "test_R2_sd", "cv_R2_avg", "cv_R2_sd", "in_sample_R2", "nested_cv_fisher_pval",
                    "nested_cv_converged", "rho_avg", "rho_se", "rho_zscore", "pred.perf.R2", "pred.perf.pval", "pred.perf.qval"]

    train = train_elastic_net_wrapper if args.mode == "elastic_net" else train_ols

    with gzip.open(wp, "w") as w:
        w.write(("\t".join(WEIGHTS_FIELDS) + "\n").encode())
        with gzip.open(sp, "w") as s:
            s.write(("\t".join(SUMMARY_FIELDS) + "\n").encode())
            with gzip.open(cp, "w") as c:
                c.write("GENE RSID1 RSID2 VALUE\n".encode())
                for i,data_annotation_ in enumerate(data_annotation.itertuples()):
                    if args.MAX_M and i>=args.MAX_M:
                        logging.info("Early abort")
                        break
                    logging.log(9, "processing %i/%i:%s", i+1, data_annotation.shape[0], data_annotation_.gene_id)
                    if args.repeat:
                        # Retrain the same gene several times (e.g. to assess
                        # model stability); each repeat gets its own postfix.
                        for j in range(0, args.repeat):
                            logging.log(9, "%i-th reiteration", j)
                            process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, j, args.nested_cv_folds)
                    else:
                        process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, nested_folds=args.nested_cv_folds)
    logging.info("Finished")
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser("Train Elastic Net prediction models from GLMNET")
    # Optional (path, kind, threshold) triple for per-variant penalty weights.
    parser.add_argument("--features_weights", nargs="+")
    parser.add_argument("-features")
    parser.add_argument("-features_annotation")
    parser.add_argument("-data")
    parser.add_argument("-data_annotation")
    # Window (in bases) around each gene from which variants are taken.
    parser.add_argument("-window", type = int)
    parser.add_argument("--run_tag")
    parser.add_argument("--output_rsids", action="store_true")
    parser.add_argument("--chromosome", type = int)
    parser.add_argument("--sub_batches", type = int)
    parser.add_argument("--sub_batch", type =int)
    parser.add_argument("--rsid_whitelist")
    # Cap on the number of genes to process (debugging aid).
    parser.add_argument("--MAX_M", type=int)
    parser.add_argument("--mode", default="elastic_net", help="'elastic_net' or 'ols'")
    parser.add_argument("--gene_whitelist", nargs="+", default=None)
    parser.add_argument("--dont_prune", action="store_true")
    parser.add_argument("-output_prefix")
    # Lower parsimony means more verbose logging.
    parser.add_argument("-parsimony", default=10, type=int)
    parser.add_argument("--repeat", default=None, type=int)
    parser.add_argument("--nested_cv_folds", default=5, type=int)
    args = parser.parse_args()
    Logging.configure_logging(args.parsimony)
    initialize()
run(args) | train_elastic_net_wrapper | identifier_name |
model_training.py | __author__ = "alvaro barbeira"
import sys
import os
import logging
import gzip
import collections
import numpy
import pandas
import scipy
import math
import statsmodels.formula.api as smf
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
import pyarrow.parquet as pq
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.individual_data import Utilities as StudyUtilities
from genomic_tools_lib.file_formats import Parquet, Miscellaneous
from genomic_tools_lib.miscellaneous import matrices, Genomics, Math
###############################################################################
import rpy2.robjects as robjects
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
def initialize():
    """Source the companion `elastic_net.R` script (expected in the same
    directory as the running script) and bind its `train_elastic_net` and
    `set_seed` functions as module-level globals."""
    global train_elastic_net
    global set_seed
    # The R script lives next to the entry-point script.
    path = os.path.split(sys.argv[0])[0]
    path = os.path.join(path, "elastic_net.R")
    robjects.r['source'](path)
    train_elastic_net = robjects.r['train_elastic_net']
    set_seed = robjects.r['set_seed']
###############################################################################
def _save(d_, features_, features_data_, gene):
    # Debugging helper: dump the response vector (y) and the feature matrix (x)
    # for `gene` as tab-separated files in the working directory.
    pandas.DataFrame({gene:d_[gene]}).to_csv("y.txt", index=False, sep="\t")
    pandas.DataFrame(collections.OrderedDict([(v,features_data_[v]) for v in features_.id.values])).to_csv("x.txt", index=False, sep="\t")
def get_weights(x_weights, id_whitelist):
    """Load per-variant penalty weights.

    `x_weights` is a (path, kind, threshold) triple; only kind "PIP" is
    supported. Returns a frame with columns (gene_id, w, id) where `w` is
    `1 - PIP`, so the more probable SNPs are penalized less.
    """
    if x_weights[1] == "PIP":
        w = Miscellaneous.dapg_signals(x_weights[0], float(x_weights[2]), id_whitelist)
        w = w.rename(columns={"gene":"gene_id", "pip":"w", "variant_id":"id"})
        w.w = 1 - w.w #Less penalty to the more probable snps
    else:
        raise RuntimeError("unsupported weights argument")
    return w
###############################################################################
def train_elastic_net_wrapper(features_data_, features_, d_, data_annotation_, x_w=None, prune=True, nested_folds=10):
    """Train an elastic net model for one gene via the R `train_elastic_net`
    function sourced in `initialize()`.

    `prune` is accepted for signature compatibility with `train_ols` but is
    unused here. Returns (weights, summary) as pandas data frames.
    """
    # n_individuals x n_features matrix with R dimnames (individuals x variants).
    x = numpy.array([features_data_[v] for v in features_.id.values])
    dimnames = robjects.ListVector(
        [(1, robjects.StrVector(d_["individual"])), (2, robjects.StrVector(features_.id.values))])
    x = robjects.r["matrix"](robjects.FloatVector(x.flatten()), ncol=features_.shape[0], dimnames=dimnames)
    y = robjects.FloatVector(d_[data_annotation_.gene_id])
    nested_folds = robjects.FloatVector([nested_folds])
    #py2ri chokes on None.
    if x_w is None:
        res = train_elastic_net(y, x, n_train_test_folds=nested_folds)
    else:
        res = train_elastic_net(y, x, penalty_factor=x_w, n_train_test_folds=nested_folds) # observation weights, not explanatory variable weight :( , x_weight = x_w)
    return pandas2ri.ri2py(res[0]), pandas2ri.ri2py(res[1])
###############################################################################
def ols_pred_perf(data, n_folds=10):
kf = KFold(n_splits=n_folds, shuffle=True)
rho_f=[]
R2_f=[]
zscore_f=[]
for train_index, test_index in kf.split(data):
train_ = data.iloc[train_index]
fold_= smf.ols('y ~ {}'.format(" + ".join([x for x in train_.columns if x !="y"])), data=train_).fit()
test_ = data.iloc[test_index]
y_predicted = fold_.predict(test_)
if numpy.std(y_predicted) != 0:
score = r2_score(test_.y, y_predicted)
rho = numpy.corrcoef(test_.y, y_predicted)[0,1]
zscore = numpy.arctanh(rho)*numpy.sqrt(len(y_predicted) - 3)
else:
score = 0
rho = 0
zscore = 0
R2_f.append(score)
rho_f.append(rho)
zscore_f.append(zscore)
rho_avg = numpy.average(rho_f)
zscore_est = numpy.sum(zscore_f)/numpy.sqrt(n_folds)
zscore_pval = scipy.stats.norm.sf(zscore_est)
d = {"test_R2_avg": [numpy.average(R2_f)], "test_R2_sd": [numpy.std(R2_f)],
"rho_avg": [rho_avg], "rho_avg_squared": [rho_avg**2], "rho_se":[numpy.std(rho_f)],
"rho_zscore":[zscore_est], "zscore_pval": [zscore_pval], "nested_cv_fisher_pval":[None], "nested_cv_converged":n_folds}
return pandas.DataFrame(d)
def prune(data):
if data.shape[1] == 1:
return data
cor = numpy.corrcoef(data.values.T)
discard=set()
for i in range(0, cor.shape[0]):
for j in range(i, cor.shape[1]):
if i==j:
continue
if i in discard:
continue
if math.abs(cor[i][j]) >= 0.95:
discard.add(j)
discard = data.columns[list(discard)].values
return data.drop(discard, axis=1)
def train_ols(features_data_, features_, d_, data_annotation_, x_w=None, prune=True, nested_folds=10):
ids=[]
data = {}
for v in features_.id.values:
x = Math.standardize(features_data_[v])
if x is not None:
data[v] = x
ids.append(v)
data = pandas.DataFrame(data)
if prune:
data = prune(data)
ids = data.columns.values
if len(ids) == 0:
w = pandas.DataFrame({"feature":[], "weight":[]})
s = pandas.DataFrame({"test_R2_avg": [], "test_R2_sd": [],
"rho_avg": [], "rho_avg_squared": [], "rho_se":[],
"rho_zscore":[], "zscore_pval": [], "nested_cv_fisher_pval":[],
"alpha":[], "n_snps_in_window":[], "cv_R2_avg":[], "cv_R2_sd":[], "in_sample_R2":[], "n.snps.in.model":[]})
return w,s
data["y"] = Math.standardize(d_[data_annotation_.gene_id])
results = smf.ols('y ~ {}'.format(" + ".join(ids)), data=data).fit()
weights = results.params[1:].to_frame().reset_index().rename(columns={"index": "feature", 0: "weight"})
summary = ols_pred_perf(data, nested_folds)
summary = summary.assign(alpha=None, n_snps_in_window=features_.shape[0],
cv_R2_avg=None, cv_R2_sd=None, in_sample_R2=None)
summary["n.snps.in.model"] = len(ids)
return weights, summary
########################################################################################################################
def process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, summary_fields, train, postfix=None, nested_folds=10):
gene_id_ = data_annotation_.gene_id if postfix is None else "{}-{}".format(data_annotation_.gene_id, postfix)
logging.log(8, "loading data")
d_ = Parquet._read(data, [data_annotation_.gene_id])
features_ = Genomics.entries_for_gene_annotation(data_annotation_, args.window, features_metadata)
if x_weights is not None:
x_w = features_[["id"]].merge(x_weights[x_weights.gene_id == data_annotation_.gene_id], on="id")
features_ = features_[features_.id.isin(x_w.id)]
x_w = robjects.FloatVector(x_w.w.values)
else:
x_w = None
if features_.shape[0] == 0:
logging.log(9, "No features available")
return
features_data_ = Parquet._read(features, [x for x in features_.id.values],
specific_individuals=[x for x in d_["individual"]])
logging.log(8, "training")
weights, summary = train(features_data_, features_, d_, data_annotation_, x_w, not args.dont_prune, nested_folds)
if weights.shape[0] == 0:
logging.log(9, "no weights, skipping")
return
logging.log(8, "saving")
weights = weights.assign(gene=data_annotation_.gene_id). \
merge(features_.rename(columns={"id": "feature", "allele_0": "ref_allele", "allele_1": "eff_allele"}), on="feature"). \
rename(columns={"feature": "varID"}). \
assign(gene=gene_id_)
weights = weights[["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]]
if args.output_rsids:
weights.loc[weights.rsid == "NA", "rsid"] = weights.loc[weights.rsid == "NA", "varID"]
w.write(weights.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
summary = summary. \
assign(gene=gene_id_, genename=data_annotation_.gene_name,
gene_type=data_annotation_.gene_type). \
rename(columns={"n_features": "n_snps_in_window", "n_features_in_model": "n.snps.in.model",
"zscore_pval": "pred.perf.pval", "rho_avg_squared": "pred.perf.R2",
"cv_converged":"nested_cv_converged"})
summary["pred.perf.qval"] = None
summary = summary[summary_fields]
s.write(summary.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
var_ids = [x for x in weights.varID.values]
cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
ids = [x for x in weights.rsid.values] if args.output_rsids else var_ids
cov = matrices._flatten_matrix_data([(gene_id_, ids, cov)])
for cov_ in cov:
l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2], cov_[3]).encode()
c.write(l)
########################################################################################################################
def run(args):
wp = args.output_prefix + "_weights.txt.gz"
if os.path.exists(wp):
logging.info("Weights output exists already, delete it or move it")
return
sp = args.output_prefix + "_summary.txt.gz"
if os.path.exists(sp):
logging.info("Summary output exists already, delete it or move it")
return
cp = args.output_prefix + "_covariance.txt.gz"
if os.path.exists(wp):
logging.info("covariance output exists already, delete it or move it")
return
r = args.output_prefix + "_run.txt.gz"
if os.path.exists(wp):
logging.info("run output exists already, delete it or move it")
return
logging.info("Starting")
Utilities.ensure_requisite_folders(args.output_prefix)
logging.info("Opening data")
data = pq.ParquetFile(args.data)
available_data = {x for x in data.metadata.schema.names}
logging.info("Loading data annotation")
data_annotation = StudyUtilities.load_gene_annotation(args.data_annotation, args.chromosome, args.sub_batches, args.sub_batch)
data_annotation = data_annotation[data_annotation.gene_id.isin(available_data)]
if args.gene_whitelist:
logging.info("Applying gene whitelist")
data_annotation = data_annotation[data_annotation.gene_id.isin(set(args.gene_whitelist))]
logging.info("Kept %i entries", data_annotation.shape[0])
logging.info("Opening features annotation")
if not args.chromosome:
features_metadata = pq.read_table(args.features_annotation).to_pandas()
else:
|
if args.chromosome and args.sub_batches:
logging.info("Trimming variants")
features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(features_metadata, data_annotation, args.window)
if args.rsid_whitelist:
logging.info("Filtering features annotation")
whitelist = TextFileTools.load_list(args.rsid_whitelist)
whitelist = set(whitelist)
features_metadata = features_metadata[features_metadata.rsid.isin(whitelist)]
if args.features_weights:
logging.info("Loading weights")
x_weights = get_weights(args.features_weights, {x for x in features_metadata.id})
logging.info("Filtering features metadata to those available in weights")
features_metadata = features_metadata[features_metadata.id.isin(x_weights.id)]
logging.info("Kept %d entries", features_metadata.shape[0])
else:
x_weights = None
logging.info("Opening features")
features = pq.ParquetFile(args.features)
logging.info("Setting R seed")
s = numpy.random.randint(1e8)
set_seed(s)
if args.run_tag:
d = pandas.DataFrame({"run":[args.run_tag], "cv_seed":[s]})[["run", "cv_seed"]]
Utilities.save_dataframe(d, r)
WEIGHTS_FIELDS=["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]
SUMMARY_FIELDS=["gene", "genename", "gene_type", "alpha", "n_snps_in_window", "n.snps.in.model",
"test_R2_avg", "test_R2_sd", "cv_R2_avg", "cv_R2_sd", "in_sample_R2", "nested_cv_fisher_pval",
"nested_cv_converged", "rho_avg", "rho_se", "rho_zscore", "pred.perf.R2", "pred.perf.pval", "pred.perf.qval"]
train = train_elastic_net_wrapper if args.mode == "elastic_net" else train_ols
with gzip.open(wp, "w") as w:
w.write(("\t".join(WEIGHTS_FIELDS) + "\n").encode())
with gzip.open(sp, "w") as s:
s.write(("\t".join(SUMMARY_FIELDS) + "\n").encode())
with gzip.open(cp, "w") as c:
c.write("GENE RSID1 RSID2 VALUE\n".encode())
for i,data_annotation_ in enumerate(data_annotation.itertuples()):
if args.MAX_M and i>=args.MAX_M:
logging.info("Early abort")
break
logging.log(9, "processing %i/%i:%s", i+1, data_annotation.shape[0], data_annotation_.gene_id)
if args.repeat:
for j in range(0, args.repeat):
logging.log(9, "%i-th reiteration", j)
process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, j, args.nested_cv_folds)
else:
process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, nested_folds=args.nested_cv_folds)
logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Train Elastic Net prediction models from GLMNET")
parser.add_argument("--features_weights", nargs="+")
parser.add_argument("-features")
parser.add_argument("-features_annotation")
parser.add_argument("-data")
parser.add_argument("-data_annotation")
parser.add_argument("-window", type = int)
parser.add_argument("--run_tag")
parser.add_argument("--output_rsids", action="store_true")
parser.add_argument("--chromosome", type = int)
parser.add_argument("--sub_batches", type = int)
parser.add_argument("--sub_batch", type =int)
parser.add_argument("--rsid_whitelist")
parser.add_argument("--MAX_M", type=int)
parser.add_argument("--mode", default="elastic_net", help="'elastic_net' or 'ols'")
parser.add_argument("--gene_whitelist", nargs="+", default=None)
parser.add_argument("--dont_prune", action="store_true")
parser.add_argument("-output_prefix")
parser.add_argument("-parsimony", default=10, type=int)
parser.add_argument("--repeat", default=None, type=int)
parser.add_argument("--nested_cv_folds", default=5, type=int)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
initialize()
run(args) | features_metadata = pq.ParquetFile(args.features_annotation).read_row_group(args.chromosome-1).to_pandas() | conditional_block |
model_training.py | __author__ = "alvaro barbeira"
import sys
import os
import logging
import gzip
import collections
import numpy
import pandas
import scipy
import math
import statsmodels.formula.api as smf
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
import pyarrow.parquet as pq
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.individual_data import Utilities as StudyUtilities
from genomic_tools_lib.file_formats import Parquet, Miscellaneous
from genomic_tools_lib.miscellaneous import matrices, Genomics, Math
###############################################################################
import rpy2.robjects as robjects
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
def initialize():
global train_elastic_net
global set_seed
path = os.path.split(sys.argv[0])[0]
path = os.path.join(path, "elastic_net.R")
robjects.r['source'](path)
train_elastic_net = robjects.r['train_elastic_net']
set_seed = robjects.r['set_seed']
###############################################################################
def _save(d_, features_, features_data_, gene):
pandas.DataFrame({gene:d_[gene]}).to_csv("y.txt", index=False, sep="\t")
pandas.DataFrame(collections.OrderedDict([(v,features_data_[v]) for v in features_.id.values])).to_csv("x.txt", index=False, sep="\t")
def get_weights(x_weights, id_whitelist):
if x_weights[1] == "PIP":
w = Miscellaneous.dapg_signals(x_weights[0], float(x_weights[2]), id_whitelist)
w = w.rename(columns={"gene":"gene_id", "pip":"w", "variant_id":"id"})
w.w = 1 - w.w #Less penalty to the more probable snps
else:
raise RuntimeError("unsupported weights argument")
return w
###############################################################################
def train_elastic_net_wrapper(features_data_, features_, d_, data_annotation_, x_w=None, prune=True, nested_folds=10):
x = numpy.array([features_data_[v] for v in features_.id.values])
dimnames = robjects.ListVector(
[(1, robjects.StrVector(d_["individual"])), (2, robjects.StrVector(features_.id.values))])
x = robjects.r["matrix"](robjects.FloatVector(x.flatten()), ncol=features_.shape[0], dimnames=dimnames)
y = robjects.FloatVector(d_[data_annotation_.gene_id])
nested_folds = robjects.FloatVector([nested_folds])
#py2ri chokes on None.
if x_w is None:
res = train_elastic_net(y, x, n_train_test_folds=nested_folds)
else:
res = train_elastic_net(y, x, penalty_factor=x_w, n_train_test_folds=nested_folds) # observation weights, not explanatory variable weight :( , x_weight = x_w)
return pandas2ri.ri2py(res[0]), pandas2ri.ri2py(res[1])
###############################################################################
def ols_pred_perf(data, n_folds=10):
kf = KFold(n_splits=n_folds, shuffle=True)
rho_f=[]
R2_f=[]
zscore_f=[]
for train_index, test_index in kf.split(data):
train_ = data.iloc[train_index]
fold_= smf.ols('y ~ {}'.format(" + ".join([x for x in train_.columns if x !="y"])), data=train_).fit()
test_ = data.iloc[test_index]
y_predicted = fold_.predict(test_)
if numpy.std(y_predicted) != 0:
score = r2_score(test_.y, y_predicted)
rho = numpy.corrcoef(test_.y, y_predicted)[0,1]
zscore = numpy.arctanh(rho)*numpy.sqrt(len(y_predicted) - 3)
else:
score = 0
rho = 0
zscore = 0
R2_f.append(score)
rho_f.append(rho)
zscore_f.append(zscore)
rho_avg = numpy.average(rho_f)
zscore_est = numpy.sum(zscore_f)/numpy.sqrt(n_folds)
zscore_pval = scipy.stats.norm.sf(zscore_est)
d = {"test_R2_avg": [numpy.average(R2_f)], "test_R2_sd": [numpy.std(R2_f)],
"rho_avg": [rho_avg], "rho_avg_squared": [rho_avg**2], "rho_se":[numpy.std(rho_f)],
"rho_zscore":[zscore_est], "zscore_pval": [zscore_pval], "nested_cv_fisher_pval":[None], "nested_cv_converged":n_folds}
return pandas.DataFrame(d)
def prune(data):
if data.shape[1] == 1:
return data
cor = numpy.corrcoef(data.values.T)
discard=set()
for i in range(0, cor.shape[0]):
for j in range(i, cor.shape[1]):
if i==j:
continue
if i in discard:
continue
if math.abs(cor[i][j]) >= 0.95:
discard.add(j)
discard = data.columns[list(discard)].values
return data.drop(discard, axis=1)
def train_ols(features_data_, features_, d_, data_annotation_, x_w=None, prune=True, nested_folds=10):
ids=[]
data = {}
for v in features_.id.values:
x = Math.standardize(features_data_[v])
if x is not None:
data[v] = x
ids.append(v)
data = pandas.DataFrame(data)
if prune:
data = prune(data)
ids = data.columns.values
if len(ids) == 0:
w = pandas.DataFrame({"feature":[], "weight":[]})
s = pandas.DataFrame({"test_R2_avg": [], "test_R2_sd": [],
"rho_avg": [], "rho_avg_squared": [], "rho_se":[],
"rho_zscore":[], "zscore_pval": [], "nested_cv_fisher_pval":[],
"alpha":[], "n_snps_in_window":[], "cv_R2_avg":[], "cv_R2_sd":[], "in_sample_R2":[], "n.snps.in.model":[]})
return w,s
data["y"] = Math.standardize(d_[data_annotation_.gene_id])
results = smf.ols('y ~ {}'.format(" + ".join(ids)), data=data).fit()
weights = results.params[1:].to_frame().reset_index().rename(columns={"index": "feature", 0: "weight"})
summary = ols_pred_perf(data, nested_folds)
summary = summary.assign(alpha=None, n_snps_in_window=features_.shape[0],
cv_R2_avg=None, cv_R2_sd=None, in_sample_R2=None)
summary["n.snps.in.model"] = len(ids)
return weights, summary
########################################################################################################################
def process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, summary_fields, train, postfix=None, nested_folds=10):
|
########################################################################################################################
def run(args):
wp = args.output_prefix + "_weights.txt.gz"
if os.path.exists(wp):
logging.info("Weights output exists already, delete it or move it")
return
sp = args.output_prefix + "_summary.txt.gz"
if os.path.exists(sp):
logging.info("Summary output exists already, delete it or move it")
return
cp = args.output_prefix + "_covariance.txt.gz"
if os.path.exists(wp):
logging.info("covariance output exists already, delete it or move it")
return
r = args.output_prefix + "_run.txt.gz"
if os.path.exists(wp):
logging.info("run output exists already, delete it or move it")
return
logging.info("Starting")
Utilities.ensure_requisite_folders(args.output_prefix)
logging.info("Opening data")
data = pq.ParquetFile(args.data)
available_data = {x for x in data.metadata.schema.names}
logging.info("Loading data annotation")
data_annotation = StudyUtilities.load_gene_annotation(args.data_annotation, args.chromosome, args.sub_batches, args.sub_batch)
data_annotation = data_annotation[data_annotation.gene_id.isin(available_data)]
if args.gene_whitelist:
logging.info("Applying gene whitelist")
data_annotation = data_annotation[data_annotation.gene_id.isin(set(args.gene_whitelist))]
logging.info("Kept %i entries", data_annotation.shape[0])
logging.info("Opening features annotation")
if not args.chromosome:
features_metadata = pq.read_table(args.features_annotation).to_pandas()
else:
features_metadata = pq.ParquetFile(args.features_annotation).read_row_group(args.chromosome-1).to_pandas()
if args.chromosome and args.sub_batches:
logging.info("Trimming variants")
features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(features_metadata, data_annotation, args.window)
if args.rsid_whitelist:
logging.info("Filtering features annotation")
whitelist = TextFileTools.load_list(args.rsid_whitelist)
whitelist = set(whitelist)
features_metadata = features_metadata[features_metadata.rsid.isin(whitelist)]
if args.features_weights:
logging.info("Loading weights")
x_weights = get_weights(args.features_weights, {x for x in features_metadata.id})
logging.info("Filtering features metadata to those available in weights")
features_metadata = features_metadata[features_metadata.id.isin(x_weights.id)]
logging.info("Kept %d entries", features_metadata.shape[0])
else:
x_weights = None
logging.info("Opening features")
features = pq.ParquetFile(args.features)
logging.info("Setting R seed")
s = numpy.random.randint(1e8)
set_seed(s)
if args.run_tag:
d = pandas.DataFrame({"run":[args.run_tag], "cv_seed":[s]})[["run", "cv_seed"]]
Utilities.save_dataframe(d, r)
WEIGHTS_FIELDS=["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]
SUMMARY_FIELDS=["gene", "genename", "gene_type", "alpha", "n_snps_in_window", "n.snps.in.model",
"test_R2_avg", "test_R2_sd", "cv_R2_avg", "cv_R2_sd", "in_sample_R2", "nested_cv_fisher_pval",
"nested_cv_converged", "rho_avg", "rho_se", "rho_zscore", "pred.perf.R2", "pred.perf.pval", "pred.perf.qval"]
train = train_elastic_net_wrapper if args.mode == "elastic_net" else train_ols
with gzip.open(wp, "w") as w:
w.write(("\t".join(WEIGHTS_FIELDS) + "\n").encode())
with gzip.open(sp, "w") as s:
s.write(("\t".join(SUMMARY_FIELDS) + "\n").encode())
with gzip.open(cp, "w") as c:
c.write("GENE RSID1 RSID2 VALUE\n".encode())
for i,data_annotation_ in enumerate(data_annotation.itertuples()):
if args.MAX_M and i>=args.MAX_M:
logging.info("Early abort")
break
logging.log(9, "processing %i/%i:%s", i+1, data_annotation.shape[0], data_annotation_.gene_id)
if args.repeat:
for j in range(0, args.repeat):
logging.log(9, "%i-th reiteration", j)
process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, j, args.nested_cv_folds)
else:
process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, nested_folds=args.nested_cv_folds)
logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Train Elastic Net prediction models from GLMNET")
parser.add_argument("--features_weights", nargs="+")
parser.add_argument("-features")
parser.add_argument("-features_annotation")
parser.add_argument("-data")
parser.add_argument("-data_annotation")
parser.add_argument("-window", type = int)
parser.add_argument("--run_tag")
parser.add_argument("--output_rsids", action="store_true")
parser.add_argument("--chromosome", type = int)
parser.add_argument("--sub_batches", type = int)
parser.add_argument("--sub_batch", type =int)
parser.add_argument("--rsid_whitelist")
parser.add_argument("--MAX_M", type=int)
parser.add_argument("--mode", default="elastic_net", help="'elastic_net' or 'ols'")
parser.add_argument("--gene_whitelist", nargs="+", default=None)
parser.add_argument("--dont_prune", action="store_true")
parser.add_argument("-output_prefix")
parser.add_argument("-parsimony", default=10, type=int)
parser.add_argument("--repeat", default=None, type=int)
parser.add_argument("--nested_cv_folds", default=5, type=int)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
initialize()
run(args) | gene_id_ = data_annotation_.gene_id if postfix is None else "{}-{}".format(data_annotation_.gene_id, postfix)
logging.log(8, "loading data")
d_ = Parquet._read(data, [data_annotation_.gene_id])
features_ = Genomics.entries_for_gene_annotation(data_annotation_, args.window, features_metadata)
if x_weights is not None:
x_w = features_[["id"]].merge(x_weights[x_weights.gene_id == data_annotation_.gene_id], on="id")
features_ = features_[features_.id.isin(x_w.id)]
x_w = robjects.FloatVector(x_w.w.values)
else:
x_w = None
if features_.shape[0] == 0:
logging.log(9, "No features available")
return
features_data_ = Parquet._read(features, [x for x in features_.id.values],
specific_individuals=[x for x in d_["individual"]])
logging.log(8, "training")
weights, summary = train(features_data_, features_, d_, data_annotation_, x_w, not args.dont_prune, nested_folds)
if weights.shape[0] == 0:
logging.log(9, "no weights, skipping")
return
logging.log(8, "saving")
weights = weights.assign(gene=data_annotation_.gene_id). \
merge(features_.rename(columns={"id": "feature", "allele_0": "ref_allele", "allele_1": "eff_allele"}), on="feature"). \
rename(columns={"feature": "varID"}). \
assign(gene=gene_id_)
weights = weights[["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]]
if args.output_rsids:
weights.loc[weights.rsid == "NA", "rsid"] = weights.loc[weights.rsid == "NA", "varID"]
w.write(weights.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
summary = summary. \
assign(gene=gene_id_, genename=data_annotation_.gene_name,
gene_type=data_annotation_.gene_type). \
rename(columns={"n_features": "n_snps_in_window", "n_features_in_model": "n.snps.in.model",
"zscore_pval": "pred.perf.pval", "rho_avg_squared": "pred.perf.R2",
"cv_converged":"nested_cv_converged"})
summary["pred.perf.qval"] = None
summary = summary[summary_fields]
s.write(summary.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
var_ids = [x for x in weights.varID.values]
cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
ids = [x for x in weights.rsid.values] if args.output_rsids else var_ids
cov = matrices._flatten_matrix_data([(gene_id_, ids, cov)])
for cov_ in cov:
l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2], cov_[3]).encode()
c.write(l) | identifier_body |
model_training.py | __author__ = "alvaro barbeira"
import sys
import os
import logging
import gzip
import collections
import numpy
import pandas
import scipy
import math
import statsmodels.formula.api as smf
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
import pyarrow.parquet as pq
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.individual_data import Utilities as StudyUtilities
from genomic_tools_lib.file_formats import Parquet, Miscellaneous
from genomic_tools_lib.miscellaneous import matrices, Genomics, Math
###############################################################################
import rpy2.robjects as robjects
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
def initialize():
global train_elastic_net
global set_seed
path = os.path.split(sys.argv[0])[0]
path = os.path.join(path, "elastic_net.R")
robjects.r['source'](path)
train_elastic_net = robjects.r['train_elastic_net']
set_seed = robjects.r['set_seed']
###############################################################################
def _save(d_, features_, features_data_, gene):
pandas.DataFrame({gene:d_[gene]}).to_csv("y.txt", index=False, sep="\t")
pandas.DataFrame(collections.OrderedDict([(v,features_data_[v]) for v in features_.id.values])).to_csv("x.txt", index=False, sep="\t")
def get_weights(x_weights, id_whitelist):
if x_weights[1] == "PIP":
w = Miscellaneous.dapg_signals(x_weights[0], float(x_weights[2]), id_whitelist)
w = w.rename(columns={"gene":"gene_id", "pip":"w", "variant_id":"id"})
w.w = 1 - w.w #Less penalty to the more probable snps
else:
raise RuntimeError("unsupported weights argument")
return w
###############################################################################
def train_elastic_net_wrapper(features_data_, features_, d_, data_annotation_, x_w=None, prune=True, nested_folds=10):
x = numpy.array([features_data_[v] for v in features_.id.values])
dimnames = robjects.ListVector(
[(1, robjects.StrVector(d_["individual"])), (2, robjects.StrVector(features_.id.values))])
x = robjects.r["matrix"](robjects.FloatVector(x.flatten()), ncol=features_.shape[0], dimnames=dimnames)
y = robjects.FloatVector(d_[data_annotation_.gene_id])
nested_folds = robjects.FloatVector([nested_folds])
#py2ri chokes on None.
if x_w is None:
res = train_elastic_net(y, x, n_train_test_folds=nested_folds)
else:
res = train_elastic_net(y, x, penalty_factor=x_w, n_train_test_folds=nested_folds) # observation weights, not explanatory variable weight :( , x_weight = x_w)
return pandas2ri.ri2py(res[0]), pandas2ri.ri2py(res[1])
###############################################################################
def ols_pred_perf(data, n_folds=10):
kf = KFold(n_splits=n_folds, shuffle=True)
rho_f=[]
R2_f=[]
zscore_f=[]
for train_index, test_index in kf.split(data):
train_ = data.iloc[train_index]
fold_= smf.ols('y ~ {}'.format(" + ".join([x for x in train_.columns if x !="y"])), data=train_).fit()
test_ = data.iloc[test_index]
y_predicted = fold_.predict(test_)
if numpy.std(y_predicted) != 0:
score = r2_score(test_.y, y_predicted)
rho = numpy.corrcoef(test_.y, y_predicted)[0,1]
zscore = numpy.arctanh(rho)*numpy.sqrt(len(y_predicted) - 3)
else:
score = 0
rho = 0
zscore = 0
R2_f.append(score)
rho_f.append(rho)
zscore_f.append(zscore)
rho_avg = numpy.average(rho_f)
zscore_est = numpy.sum(zscore_f)/numpy.sqrt(n_folds)
zscore_pval = scipy.stats.norm.sf(zscore_est)
d = {"test_R2_avg": [numpy.average(R2_f)], "test_R2_sd": [numpy.std(R2_f)],
"rho_avg": [rho_avg], "rho_avg_squared": [rho_avg**2], "rho_se":[numpy.std(rho_f)],
"rho_zscore":[zscore_est], "zscore_pval": [zscore_pval], "nested_cv_fisher_pval":[None], "nested_cv_converged":n_folds}
return pandas.DataFrame(d)
def prune(data):
if data.shape[1] == 1:
return data
cor = numpy.corrcoef(data.values.T)
discard=set()
for i in range(0, cor.shape[0]):
for j in range(i, cor.shape[1]):
if i==j:
continue
if i in discard:
continue
if math.abs(cor[i][j]) >= 0.95:
discard.add(j)
discard = data.columns[list(discard)].values
return data.drop(discard, axis=1)
def train_ols(features_data_, features_, d_, data_annotation_, x_w=None, prune=True, nested_folds=10):
ids=[]
data = {}
for v in features_.id.values:
x = Math.standardize(features_data_[v])
if x is not None:
data[v] = x
ids.append(v)
data = pandas.DataFrame(data)
if prune:
data = prune(data)
ids = data.columns.values
if len(ids) == 0:
w = pandas.DataFrame({"feature":[], "weight":[]})
s = pandas.DataFrame({"test_R2_avg": [], "test_R2_sd": [],
"rho_avg": [], "rho_avg_squared": [], "rho_se":[],
"rho_zscore":[], "zscore_pval": [], "nested_cv_fisher_pval":[],
"alpha":[], "n_snps_in_window":[], "cv_R2_avg":[], "cv_R2_sd":[], "in_sample_R2":[], "n.snps.in.model":[]})
return w,s
data["y"] = Math.standardize(d_[data_annotation_.gene_id])
results = smf.ols('y ~ {}'.format(" + ".join(ids)), data=data).fit()
weights = results.params[1:].to_frame().reset_index().rename(columns={"index": "feature", 0: "weight"})
summary = ols_pred_perf(data, nested_folds)
summary = summary.assign(alpha=None, n_snps_in_window=features_.shape[0],
cv_R2_avg=None, cv_R2_sd=None, in_sample_R2=None)
summary["n.snps.in.model"] = len(ids)
return weights, summary
########################################################################################################################
def process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, summary_fields, train, postfix=None, nested_folds=10):
gene_id_ = data_annotation_.gene_id if postfix is None else "{}-{}".format(data_annotation_.gene_id, postfix)
logging.log(8, "loading data")
d_ = Parquet._read(data, [data_annotation_.gene_id])
features_ = Genomics.entries_for_gene_annotation(data_annotation_, args.window, features_metadata)
if x_weights is not None:
x_w = features_[["id"]].merge(x_weights[x_weights.gene_id == data_annotation_.gene_id], on="id")
features_ = features_[features_.id.isin(x_w.id)]
x_w = robjects.FloatVector(x_w.w.values)
else:
x_w = None
if features_.shape[0] == 0:
logging.log(9, "No features available")
return
features_data_ = Parquet._read(features, [x for x in features_.id.values],
specific_individuals=[x for x in d_["individual"]])
logging.log(8, "training")
weights, summary = train(features_data_, features_, d_, data_annotation_, x_w, not args.dont_prune, nested_folds)
if weights.shape[0] == 0:
logging.log(9, "no weights, skipping")
return
logging.log(8, "saving")
weights = weights.assign(gene=data_annotation_.gene_id). \
merge(features_.rename(columns={"id": "feature", "allele_0": "ref_allele", "allele_1": "eff_allele"}), on="feature"). \
rename(columns={"feature": "varID"}). \
assign(gene=gene_id_)
weights = weights[["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]]
if args.output_rsids:
weights.loc[weights.rsid == "NA", "rsid"] = weights.loc[weights.rsid == "NA", "varID"]
w.write(weights.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
summary = summary. \
assign(gene=gene_id_, genename=data_annotation_.gene_name,
gene_type=data_annotation_.gene_type). \
rename(columns={"n_features": "n_snps_in_window", "n_features_in_model": "n.snps.in.model",
"zscore_pval": "pred.perf.pval", "rho_avg_squared": "pred.perf.R2",
"cv_converged":"nested_cv_converged"})
summary["pred.perf.qval"] = None
summary = summary[summary_fields]
s.write(summary.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())
var_ids = [x for x in weights.varID.values]
cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
ids = [x for x in weights.rsid.values] if args.output_rsids else var_ids
cov = matrices._flatten_matrix_data([(gene_id_, ids, cov)])
for cov_ in cov:
l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2], cov_[3]).encode()
c.write(l)
########################################################################################################################
def run(args):
    """Train per-gene prediction models and write four gzipped outputs:
    weights, summary, covariance and a run-metadata file.

    Aborts early if any of the four output files already exists.
    """
    # Refuse to clobber existing outputs; each path gets its own check.
    wp = args.output_prefix + "_weights.txt.gz"
    if os.path.exists(wp):
        logging.info("Weights output exists already, delete it or move it")
        return

    sp = args.output_prefix + "_summary.txt.gz"
    if os.path.exists(sp):
        logging.info("Summary output exists already, delete it or move it")
        return

    cp = args.output_prefix + "_covariance.txt.gz"
    # BUG FIX: originally tested `wp` here, so an existing covariance file
    # was never detected.
    if os.path.exists(cp):
        logging.info("covariance output exists already, delete it or move it")
        return

    r = args.output_prefix + "_run.txt.gz"
    # BUG FIX: originally tested `wp` here too, so an existing run file
    # was never detected.
    if os.path.exists(r):
        logging.info("run output exists already, delete it or move it")
        return

    logging.info("Starting")
    Utilities.ensure_requisite_folders(args.output_prefix)

    logging.info("Opening data")
    data = pq.ParquetFile(args.data)
    available_data = {x for x in data.metadata.schema.names}

    logging.info("Loading data annotation")
    data_annotation = StudyUtilities.load_gene_annotation(args.data_annotation, args.chromosome, args.sub_batches, args.sub_batch)
    # Keep only genes that actually have a column in the data file.
    data_annotation = data_annotation[data_annotation.gene_id.isin(available_data)]
    if args.gene_whitelist:
        logging.info("Applying gene whitelist")
        data_annotation = data_annotation[data_annotation.gene_id.isin(set(args.gene_whitelist))]
    logging.info("Kept %i entries", data_annotation.shape[0])

    logging.info("Opening features annotation")
    if not args.chromosome:
        features_metadata = pq.read_table(args.features_annotation).to_pandas()
    else:
        # Row groups are assumed to be one per chromosome (1-based).
        features_metadata = pq.ParquetFile(args.features_annotation).read_row_group(args.chromosome - 1).to_pandas()

    if args.chromosome and args.sub_batches:
        logging.info("Trimming variants")
        features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(features_metadata, data_annotation, args.window)

    if args.rsid_whitelist:
        logging.info("Filtering features annotation")
        whitelist = set(TextFileTools.load_list(args.rsid_whitelist))
        features_metadata = features_metadata[features_metadata.rsid.isin(whitelist)]

    if args.features_weights:
        logging.info("Loading weights")
        x_weights = get_weights(args.features_weights, {x for x in features_metadata.id})
        logging.info("Filtering features metadata to those available in weights")
        features_metadata = features_metadata[features_metadata.id.isin(x_weights.id)]
        logging.info("Kept %d entries", features_metadata.shape[0])
    else:
        x_weights = None

    logging.info("Opening features")
    features = pq.ParquetFile(args.features)

    logging.info("Setting R seed")
    # Renamed from `s` to avoid shadowing by the summary file handle below.
    seed = numpy.random.randint(1e8)
    set_seed(seed)
    if args.run_tag:
        d = pandas.DataFrame({"run": [args.run_tag], "cv_seed": [seed]})[["run", "cv_seed"]]
        Utilities.save_dataframe(d, r)

    WEIGHTS_FIELDS = ["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]
    SUMMARY_FIELDS = ["gene", "genename", "gene_type", "alpha", "n_snps_in_window", "n.snps.in.model",
                      "test_R2_avg", "test_R2_sd", "cv_R2_avg", "cv_R2_sd", "in_sample_R2", "nested_cv_fisher_pval",
                      "nested_cv_converged", "rho_avg", "rho_se", "rho_zscore", "pred.perf.R2", "pred.perf.pval", "pred.perf.qval"]

    train = train_elastic_net_wrapper if args.mode == "elastic_net" else train_ols

    with gzip.open(wp, "w") as w:
        w.write(("\t".join(WEIGHTS_FIELDS) + "\n").encode())
        with gzip.open(sp, "w") as s:
            s.write(("\t".join(SUMMARY_FIELDS) + "\n").encode())
            with gzip.open(cp, "w") as c:
                c.write("GENE RSID1 RSID2 VALUE\n".encode())
                for i, data_annotation_ in enumerate(data_annotation.itertuples()):
                    if args.MAX_M and i >= args.MAX_M:
                        logging.info("Early abort")
                        break
                    logging.log(9, "processing %i/%i:%s", i + 1, data_annotation.shape[0], data_annotation_.gene_id)
                    if args.repeat:
                        for j in range(0, args.repeat):
                            logging.log(9, "%i-th reiteration", j)
                            process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, j, args.nested_cv_folds)
                    else:
                        process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, nested_folds=args.nested_cv_folds)

    logging.info("Finished")
if __name__ == "__main__":
    import argparse
    # CLI entry point: parse arguments, configure logging, then train.
    parser = argparse.ArgumentParser("Train Elastic Net prediction models from GLMNET")
    parser.add_argument("--features_weights", nargs="+")
    parser.add_argument("-features")
    parser.add_argument("-features_annotation")
    parser.add_argument("-data")
    parser.add_argument("-data_annotation")
    parser.add_argument("-window", type=int)
    parser.add_argument("--run_tag")
    parser.add_argument("--output_rsids", action="store_true")
    parser.add_argument("--chromosome", type=int)
    parser.add_argument("--sub_batches", type=int)
    parser.add_argument("--sub_batch", type=int)
    parser.add_argument("--rsid_whitelist")
    parser.add_argument("--MAX_M", type=int)
    parser.add_argument("--mode", default="elastic_net", help="'elastic_net' or 'ols'")
    parser.add_argument("--gene_whitelist", nargs="+", default=None)
    parser.add_argument("--dont_prune", action="store_true")
    parser.add_argument("-output_prefix")
    parser.add_argument("-parsimony", default=10, type=int)
    parser.add_argument("--repeat", default=None, type=int)
    parser.add_argument("--nested_cv_folds", default=5, type=int)
    args = parser.parse_args()
    Logging.configure_logging(args.parsimony)
    initialize()
    run(args)
# -*- coding: utf-8 -*-
import os
import hmac
import time
import json
import mimetypes
import functools
from urlparse import urlparse
from urllib import urlencode
from base64 import urlsafe_b64encode
from hashlib import sha1
import requests
version_info = (0, 1, 2)
VERSION = __version__ = '.'.join( map(str, version_info) )
"""
Usage:
cow = Cow(ACCESS_KEY, SECRET_KEY)
b = cow.get_bucket(BUCKET)
b.put('a')
b.put('a', 'b')
b.put('a', names={'a': 'x'})
b.put('a', 'b', names={'a': 'x', 'b': 'y'})
b.stat('a')
b.stat('a', 'b')
b.delete('a')
b.delete('a', 'b')
b.copy('a', 'c')
b.copy(('a', 'c'), ('b', 'd'))
b.move('a', 'c')
b.move(('a', 'c'), ('b', 'd'))
"""
RS_HOST = 'http://rs.qbox.me'
UP_HOST = 'http://up.qbox.me'
RSF_HOST = 'http://rsf.qbox.me'
class CowException(Exception):
    """Raised when a qiniu API request fails.

    Keeps the raw response fields (url, status_code, reason, content)
    so callers can inspect the failure; str(exc) is "reason, content".
    """

    def __init__(self, url, status_code, reason, content):
        message = '%s, %s' % (reason, content)
        super(CowException, self).__init__(message)
        self.url = url
        self.status_code = status_code
        self.reason = reason
        self.content = content
def signing(secret_key, data):
    """HMAC-SHA1 sign `data` with `secret_key`, URL-safe base64 encoded."""
    digest = hmac.new(secret_key, data, sha1).digest()
    return urlsafe_b64encode(digest)
def requests_error_handler(func):
    """Decorator for API calls: a failed ``assert res.status_code == 200, res``
    inside `func` is converted into a CowException built from the response
    object carried by the AssertionError."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AssertionError as exc:
            failed_response = exc.args[0]
            raise CowException(
                failed_response.url,
                failed_response.status_code,
                failed_response.reason,
                failed_response.content,
            )
    return wrapper
def expected_argument_type(pos, types):
    """Decorator factory: raise TypeError unless positional argument
    number `pos` (counting self for methods) is an instance of `types`."""
    def deco(func):
        @functools.wraps(func)
        def wrap(*args, **kwargs):
            value = args[pos]
            if not isinstance(value, types):
                raise TypeError(
                    "{0} Type error, Expected {1}".format(value, types)
                )
            return func(*args, **kwargs)
        return wrap
    return deco
class UploadToken(object):
    """Caches a qiniu upload token for one `scope`, regenerating it when
    it is within a minute of expiring."""

    def __init__(self, access_key, secret_key, scope, ttl=3600):
        self.access_key = access_key
        self.secret_key = secret_key
        self.scope = scope
        self.ttl = ttl
        self._token = None
        self.generated = int(time.time())

    @property
    def token(self):
        # Treat the token as expired one minute early; also build it lazily
        # on first access.
        expiring = int(time.time()) - self.generated > self.ttl - 60
        if expiring or not self._token:
            self._token = self._make_token()
        return self._token

    def _make_token(self):
        self.generated = int(time.time())
        info = {
            'scope': self.scope,
            'deadline': self.generated + self.ttl
        }
        info = urlsafe_b64encode(json.dumps(info))
        token = signing(self.secret_key, info)
        # qiniu upload-token format: accessKey:signature:encodedPolicy
        return '%s:%s:%s' % (self.access_key, token, info)
class Cow(object):
    """Client for the qiniu (QBox) cloud-storage REST API.

    ``stat``/``delete``/``copy``/``move`` are bound in ``__init__`` as
    partial applications of the shared single-vs-batch handlers.
    """

    def __init__(self, access_key, secret_key):
        self.access_key = access_key
        self.secret_key = secret_key
        self.upload_tokens = {}
        # Route the four file operations through their shared dispatchers.
        self.stat = functools.partial(self._stat_rm_handler, 'stat')
        self.delete = functools.partial(self._stat_rm_handler, 'delete')
        self.copy = functools.partial(self._cp_mv_handler, 'copy')
        self.move = functools.partial(self._cp_mv_handler, 'move')

    def get_bucket(self, bucket):
        """Return a Bucket bound to this client (recommended entry point),
        so per-bucket operations only need file names."""
        return Bucket(self, bucket)

    def generate_access_token(self, url, params=None):
        # Sign "<path>?<query>\n<encoded-body>" per the QBox access-token
        # scheme and prefix with the access key.
        uri = urlparse(url)
        token = uri.path
        if uri.query:
            token = '%s?%s' % (token, uri.query)
        token = '%s\n' % token
        if params:
            if isinstance(params, basestring):
                token += params
            else:
                token += urlencode(params)
        return '%s:%s' % (self.access_key, signing(self.secret_key, token))

    def build_requests_headers(self, token):
        return {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': 'QBox %s' % token
        }

    @requests_error_handler
    def api_call(self, url, params=None):
        # NOTE(review): this body was split mid-word ("as|sert") by text
        # extraction in the original dump; reconstructed from the displaced
        # fragment found after the Bucket class.
        token = self.generate_access_token(url, params=params)
        if params:
            res = requests.post(url, data=params, headers=self.build_requests_headers(token))
        else:
            res = requests.post(url, headers=self.build_requests_headers(token))
        assert res.status_code == 200, res
        return res.json() if res.text else ''

    def list_buckets(self):
        """List all buckets."""
        url = '%s/buckets' % RS_HOST
        return self.api_call(url)

    def create_bucket(self, name):
        """Creating buckets via the API is discouraged: such buckets cannot
        be given the default <bucket_name>.qiniudn.com subdomain.
        Use the web console instead.
        """
        url = '%s/mkbucket/%s' % (RS_HOST, name)
        return self.api_call(url)

    def drop_bucket(self, bucket):
        """Delete an entire bucket."""
        url = '%s/drop/%s' % (RS_HOST, bucket)
        return self.api_call(url)

    def list_files(self, bucket, marker=None, limit=None, prefix=None):
        """List files within a bucket, with optional paging/filter params."""
        query = ['bucket=%s' % bucket]
        if marker:
            query.append('marker=%s' % marker)
        if limit:
            query.append('limit=%s' % limit)
        if prefix:
            query.append('prefix=%s' % prefix)
        url = '%s/list?%s' % (RSF_HOST, '&'.join(query))
        return self.api_call(url)

    def generate_upload_token(self, scope, ttl=3600):
        """Return an upload token for `scope`, cached per scope."""
        if scope not in self.upload_tokens:
            self.upload_tokens[scope] = UploadToken(self.access_key, self.secret_key, scope, ttl=ttl)
        return self.upload_tokens[scope].token

    @requests_error_handler
    @expected_argument_type(2, (basestring, list, tuple))
    def put(self, scope, filename, names=None):
        """Upload file(s).

        filename: a string uploads a single file; a list/tuple uploads many.
        names: optional dict mapping local filename -> stored name
               (defaults to the file's basename).
        """
        url = '%s/upload' % UP_HOST
        token = self.generate_upload_token(scope)
        names = names or {}

        def _uploaded_name(filename):
            return names.get(filename, None) or os.path.basename(filename)

        def _put(filename):
            files = {
                'file': (filename, open(filename, 'rb')),
            }
            action = '/rs-put/%s' % urlsafe_b64encode(
                '%s:%s' % (scope, _uploaded_name(filename))
            )
            _type, _encoding = mimetypes.guess_type(filename)
            if _type:
                action += '/mimeType/%s' % urlsafe_b64encode(_type)
            data = {
                'auth': token,
                'action': action,
            }
            res = requests.post(url, files=files, data=data)
            assert res.status_code == 200, res
            return res.json()

        if isinstance(filename, basestring):
            # Single file.
            return _put(filename)
        # Multiple files.
        return [_put(f) for f in filename]

    @expected_argument_type(2, (list, tuple))
    def _cp_mv_handler(self, action, args):
        """copy/move dispatcher — not called directly by users.

        action: 'copy' or 'move'
        args: [src_bucket, src_filename, des_bucket, des_filename]
              (single operation), or a sequence of such 4-tuples (batch).
        """
        if isinstance(args[0], basestring):
            return self._cp_mv_single(action, args)
        if isinstance(args[0], (list, tuple)):
            return self._cp_mv_batch(action, args)

    @expected_argument_type(3, (basestring, list, tuple))
    def _stat_rm_handler(self, action, bucket, filename):
        """stat/delete dispatcher — not called directly by users.

        action: 'stat' or 'delete'
        filename: a single name string, or a list/tuple of names (batch).
        """
        if isinstance(filename, basestring):
            return self._stat_rm_single(action, bucket, filename)
        if isinstance(filename, (list, tuple)):
            return self._stat_rm_batch(action, bucket, filename)

    def _cp_mv_single(self, action, args):
        src_bucket, src_filename, des_bucket, des_filename = args
        url = '%s/%s/%s/%s' % (
            RS_HOST,
            action,
            urlsafe_b64encode('%s:%s' % (src_bucket, src_filename)),
            urlsafe_b64encode('%s:%s' % (des_bucket, des_filename)),
        )
        return self.api_call(url)

    def _cp_mv_batch(self, action, args):
        url = '%s/batch' % RS_HOST

        def _one_param(arg):
            return 'op=/%s/%s/%s' % (
                action,
                urlsafe_b64encode('%s:%s' % (arg[0], arg[1])),
                urlsafe_b64encode('%s:%s' % (arg[2], arg[3])),
            )

        param = '&'.join(map(_one_param, args))
        return self.api_call(url, param)

    def _stat_rm_single(self, action, bucket, filename):
        url = '%s/%s/%s' % (
            RS_HOST, action, urlsafe_b64encode('%s:%s' % (bucket, filename))
        )
        return self.api_call(url)

    def _stat_rm_batch(self, action, bucket, filenames):
        url = '%s/batch' % RS_HOST
        param = '&'.join(
            'op=/%s/%s' % (
                action, urlsafe_b64encode('%s:%s' % (bucket, f))
            ) for f in filenames
        )
        return self.api_call(url, param)
def transform_argument(func):
    """Decorator for Bucket methods: a single positional argument is passed
    through as-is; multiple arguments are bundled into one tuple."""
    @functools.wraps(func)
    def deco(self, *args, **kwargs):
        filename = args if len(args) != 1 else args[0]
        return func(self, filename, **kwargs)
    return deco
class Bucket(object):
    """Binds a Cow client to a single bucket so callers only pass file
    names; every operation is delegated back to the client."""

    def __init__(self, cow, bucket):
        self.cow = cow
        self.bucket = bucket

    @transform_argument
    def put(self, *args, **kwargs):
        names = kwargs.get('names', None)
        if names and not isinstance(names, dict):
            raise TypeError(
                "names Type error, Expected dict, But got Type of {0}".format(type(names))
            )
        return self.cow.put(self.bucket, args[0], names=names)

    @transform_argument
    def stat(self, *args):
        return self.cow.stat(self.bucket, args[0])

    @transform_argument
    def delete(self, *args):
        return self.cow.delete(self.bucket, args[0])

    @transform_argument
    def copy(self, *args):
        return self.cow.copy(self._build_cp_mv_args(args[0]))

    @transform_argument
    def move(self, *args):
        return self.cow.move(self._build_cp_mv_args(args[0]))

    def list_files(self, marker=None, limit=None, prefix=None):
        return self.cow.list_files(self.bucket, marker=marker, limit=limit, prefix=prefix)

    def _build_cp_mv_args(self, filename):
        # A single (src, des) pair becomes one flat 4-item list; a sequence
        # of pairs becomes a list of 4-tuples, one per file.
        if isinstance(filename[0], basestring):
            return [self.bucket, filename[0], self.bucket, filename[1]]
        return [(self.bucket, src, self.bucket, des) for src, des in filename]
| res = requests.post(url, headers=self.build_requests_headers(token))
as | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.