file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
dp.rs | // 我们开始学习动态规划吧
use std::cmp::min;
// https://leetcode-cn.com/problems/maximum-subarray | let mut ans = nums[0];
for i in 1..nums.len() {
if sum > 0 {
// add positive sum means larger
sum += nums[i];
} else {
// start from new one means larger
sum = nums[i];
}
// ans always store the largest sum
ans = std::cmp::max(sum, ans);
}
ans
}
// https://leetcode-cn.com/problems/climbing-stairs/solution/
// basic dynamic programming
pub fn climb_stairs(n: i32) -> i32 {
if n == 0 || n == 1 {
return 1;
}
// f(n) = f(n-1) + f(n-2)
// iterative is harder than recursive
let mut n_1 = 1; // f(n-1)
let mut n_2 = 1; // f(n-2)
let mut ans = 0;
for _ in 1..n {
ans = n_1 + n_2;
n_1 = n_2;
n_2 = ans;
}
ans
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/
// sell stock using state machine
// this is the solution for infinite k
pub fn max_profit_infinite(prices: Vec<i32>) -> i32 {
let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty = 0;
for price in prices {
s_keep = std::cmp::max(s_keep, s_empty - price);
s_empty = std::cmp::max(s_empty, s_keep + price);
}
return s_empty;
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/
// 用有限状态机的方式去解题
use std::i32;
pub fn max_profit_cool(prices: Vec<i32>) -> i32 {
let n = prices.len();
let mut dp = vec![vec![i32::MIN; 3]; n+1];
// 0 可以买入的状态,买入之后转移到状态1。可以原地保持状态,或者从冷冻态转过来
// 1 可以卖出的状态,卖出之后转移到状态2。可以原地保持状态,或者从状态0转过来
// 2 冷冻期,过了一天转入状态0。可以从状态1转过来。
// 0 明天可买入,要么今天不买,要么今天是冷冻期
// 1 明天可卖出:要么今天买,要么今天不卖
// 2 明天是冷冻,那就今天卖了吧
dp[0][0] = 0;
for i in 0..n {
dp[i+1][0] = dp[i][0].max(dp[i][2]); // 来自 0 和 2 的转移
dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]);
dp[i+1][2] = dp[i][1] + prices[i];
// println!("dp[i][0]: {}", dp[i][0]);
// println!("dp[i][1]: {}", dp[i][1]);
// println!("dp[i][2]: {}", dp[i][2]);
}
return dp[n][0].max(dp[n][2]);
}
pub fn max_profit_once(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
}
return std::cmp::max(s_empty_1, 0);
}
pub fn max_profit_twice(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
let mut s_keep_2 = std::i32::MIN;
let mut s_empty_2 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price);
s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price);
}
return std::cmp::max(s_empty_2, 0);
}
// this one works but consume too much memory
pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 {
// from example above, we know the initial value is 0
// here, k become a variable, some we need a matrix to
// store different status
// how many status we have?
// empty or keep => 2
// trade times => k
// so we have 2k status
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
let k: usize = k as usize;
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// memory efficient version
pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 {
// here if k in unreasonable large, switch to infinite version
let k: usize = k as usize;
if k > prices.len()/2 {
return max_profit_infinite(prices);
}
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// shortest path
// https://leetcode-cn.com/problems/minimum-path-sum/
// way: set grid value as the cost to get there
// matrix:
// 1 0 1 1 1 2
// 2 3 5 => 3 4 7
// 5 3 2 8 7 9
pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
let mut cost = grid.clone();
for r in 0..row {
for c in 0..col {
if r == 0 && c == 0 {
cost[r][c] = grid[r][c];
} else if r == 0 {
cost[r][c] = grid[r][c] + cost[r][c-1];
} else if c == 0 {
cost[r][c] = grid[r][c] + cost[r-1][c];
} else {
cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]);
}
}
}
return cost[row-1][col-1];
}
// https://leetcode-cn.com/problems/generate-parentheses/solution/
pub fn generate_parenthesis(n: i32) -> Vec<String> {
if n == 0 {
return Vec::new();
}
let mut dp = vec![Vec::<String>::new(); (n+1) as usize];
dp[0] = vec![String::from("")];
for i in 1..=n {
println!("Round {}", i);
let mut cur = vec![];
for j in 0..i {
let left = &dp[j as usize];
let right = &dp[(i-j-1) as usize];
for l in left {
for r in right {
let tmp = format!("({}){}", l, r);
println!("new string {}", tmp);
cur.push(tmp);
}
}
}
dp[i as usize] = cur;
}
let res = dp.pop().unwrap();
return res
}
// https://leetcode-cn.com/problems/unique-paths/
// 到达P[i][j]的路径数 = P[i-1][j] + P[i][j-1]
pub fn unique_paths(m: i32, n: i32) -> i32 {
if m == 1 || n == 1 {
return 1;
} else {
return unique_paths(m - 1, n) + unique_paths(m, n - 1);
}
}
pub fn unique_paths_iter(m: i32, n: i32) -> i32 {
let m: usize = m as usize;
let n: usize = n as usize;
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if i == 0 || j == 0 {
cache[i][j] = 1;
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1] as i32;
}
// https://leetcode-cn.com/problems/unique-paths-ii/solution/
pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let m = obstacle_grid.len();
let n = obstacle_grid[0].len();
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if obstacle_grid[i][j] == 1 {
cache[i][j] = 0;
} else if i == 0 && j == 0 {
cache[i][j] = 1;
} else if i == 0 {
cache[i][j] = cache[i][j-1];
} else if j == 0 {
cache[i][j] = cache[i-1][j];
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1];
}
// https://leetcode-cn.com/problems/house-robber/submissions/
pub fn rob(nums: Vec<i32>) -> i32 {
let len = nums.len();
if len == 0 {
return 0;
} else if len == 1 {
return nums[0];
} else if len == 2 {
return nums[0].max(nums[1]);
} // else len > 2
let mut m1 = nums[0];
let mut m2 = nums[1].max(m1);
for i in 2..nums.len() {
println!("m1 {} m2 {}", m1, m2);
m1 = (m1 + nums[i]).max(m2);
let temp = m2;
m2 = m1;
m1 = temp;
}
println!("m1 {} m2 {}", m1, m2);
return m2;
}
// https://leetcode-cn.com/problems/maximum-product-subarray/submissions/
pub fn max_product(nums: Vec<i32>) -> i32 {
if nums.len() == 0 { return 0; }
let (mut max, mut min) = (1, 1);
let mut res = std::i32::MIN;
let len = nums.len();
// 由于有 if 在循环里面,所以速度慢!
for n in nums {
let t_max = max;
let t_min = min;
max = (t_max * n).max(n).max(t_min * n);
min = (t_min * n).min(n).min(t_max * n);
res = res.max(max);
}
println!("{}", res);
return res;
}
// https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/
// 由于只买卖一次,所以只需要记录最低价格就好了
pub fn max_profit(mut prices: Vec<i32>) -> i32 {
let mut profit = 0;
let mut cost = 1<<30;
for i in 0..prices.len() {
cost = cost.min(prices[i]);
profit = (prices[i] - cost).max(profit);
}
return profit;
}
// https://leetcode-cn.com/problems/word-break/
pub fn word_break(s: String, word_dict: Vec<String>) -> bool {
if word_dict.is_empty() { return false; }
let len = s.len();
let mut dp: Vec<bool> = vec![false; len+1];
dp[0] = true;
for i in 0..len {
if !dp[i] { continue; }
for w in &word_dict {
let end = i + w.len();
if end <= len && !dp[end] && &s[i..end] == w.as_str() {
dp[end] = true;
}
}
}
dp[len]
}
// https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/
// 相当于填表
pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 {
let row = a.len();
let col = b.len();
let mut dp = vec![vec![0; col]; row];
let mut res = 0;
for i in 0..row {
for j in 0..col {
if a[i] == b[j] {
let last = if ( i == 0 || j == 0 ) { 0 } else { dp[i-1][j-1] };
dp[i][j] = last + 1;
res = res.max(dp[i][j]);
} else {
dp[i][j] = 0;
}
}
}
return res as i32;
}
// https://leetcode-cn.com/problems/unique-paths-ii/
pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let row = obstacle_grid.len();
let col = obstacle_grid[0].len();
let mut dp = vec![vec![0; col]; row];
// init first row and col
for i in 0..row {
for j in 0..col {
if obstacle_grid[i][j] == 0 {
if i == 0 && j == 0 {
dp[i][j] = 1;
} else if i == 0 {
dp[i][j] = dp[i][j-1];
} else if j == 0 {
dp[i][j] = dp[i-1][j];
} else {
dp[i][j] = dp[i-1][j] + dp[i][j-1];
}
} else {
// 遇到障碍了,但一开始我们就是初始化为0的,所以这里其实可以不写
dp[i][j] = 0;
}
}
}
return dp[row-1][col-1];
}
// https://leetcode-cn.com/problems/re-space-lcci/
pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 {
42
}
// https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/
pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 0..row {
for j in 0..col {
if i == 0 && j == 0 {
// pass
} else if i == 0 {
grid[i][j] += grid[i][j-1];
} else if j == 0 {
grid[i][j] += grid[i-1][j];
} else {
grid[i][j] += grid[i-1][j].max(grid[i][j-1]);
}
}
}
return grid[row-1][col-1];
}
// https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/
pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 {
let n = triangle.len();
let mut dp = vec![0; n+1];
for i in (0..n).rev() {
for j in 0..=i {
println!("i, j = {}, {}", i, j);
dp[j] = dp[j].min(dp[j+1]) + triangle[i][j];
}
}
return dp[0];
}
// https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/
pub fn two_sum(n: i32) -> Vec<f64> {
let mut res = vec![1./6.;6];
for i in 1..n as usize {
let mut temp = vec![0.0; 5 * i + 6];
for j in 0..res.len() {
for k in 0..6 {
temp[j+k] += res[j] * 1.0/6.0;
}
}
res = temp;
}
return res;
}
// https://leetcode-cn.com/problems/minimum-path-sum/submissions/
pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 1..row {
grid[i][0] += grid[i-1][0];
}
for j in 1..col {
grid[0][j] += grid[0][j-1];
}
for i in 1..row {
for j in 1..col {
grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j];
}
}
return grid[row-1][col-1];
}
fn main()
{
// generate_parenthesis(4);
// println!("(1,1) {}", unique_paths_iter(1, 1));
// println!("(2,2) {}", unique_paths_iter(2, 2));
// println!("(3,2) {}", unique_paths_iter(3, 2));
// println!("(2,3) {}", unique_paths_iter(2, 3));
// rob([1, 3, 1, 3, 100].to_vec());
// max_product([-2,0,-1].to_vec());
// max_product([-1,-2,-9,-6].to_vec());
// max_profit([1,2,3].to_vec());
// word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec());
// dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec()));
// dbg!(max_profit_cool([1,2,3,0,2].to_vec()));
// let tri = [
// [2].to_vec(),
// [3,4].to_vec(),
// [6,5,7].to_vec(),
// [4,1,8,3].to_vec()
// ].to_vec();
// dbg!(minimum_total(tri));
// dbg!(two_sum(5));
min_path_sum2([
[1,3,1].to_vec(),
[1,5,1].to_vec(),
[4,2,1].to_vec(),
].to_vec());
} | // 最大子序各,好像看不出什么动态规则的意味,反而像滑动窗口
pub fn max_sub_array(nums: Vec<i32>) -> i32 {
let mut sum = nums[0]; | random_line_split |
tools.js | var reporting = require('./reporting')
module.exports = {
timing_section_c : 0,
timing_section_d : 0,
timing_section_e : 0,
calculateAverage: function(data_to_be_tested) {
//console.log(data_to_be_tested)
var sum = 0;
var len = data_to_be_tested.length
for (var j=0; j < len; j++) {
sum += data_to_be_tested[j].value_avg;
}
return parseFloat((sum/len).toFixed(2));
},
sum_last : 0,
sum : 0,
calculateAverageLoop: function(data_to_be_tested) {
// NEW WAY - FASTER
// BUT CAN ONLY BE USED IN CONTEXT OF SIM LOOP
var len = data_to_be_tested.length
if (this.sum === 0) {
// first time run - populate sum
for (var j=0; j < len; j++) {
this.sum += data_to_be_tested[j].value_avg;
}
} else {
// remove first value and add last
this.sum = this.sum - this.sum_last + data_to_be_tested[len - 1].value_avg
}
this.sum_last = data_to_be_tested[0].value_avg;
return parseFloat((this.sum/len).toFixed(2));
},
// return highest sell price
// *****IT IS USING BUY PRICE! which should it be?
calculateHigh: function(data_to_be_tested) {
var highest = 0
for (j=0; j < data_to_be_tested.length; j++) {
highest = (data_to_be_tested[j].value_buy > highest) ? data_to_be_tested[j].value_buy : highest;
}
return highest;
},
calculateLow: function(data_to_be_tested) {
// return lowest buy price
var lowest = data_to_be_tested[0].value_sell;
for (j=0; j < data_to_be_tested.length; j++) {
lowest = (data_to_be_tested[j].value_sell < lowest) ? data_to_be_tested[j].value_sell : lowest;
}
return lowest;
},
// returns float with 2 decimals
calculateAvgPlusHighThreshold: function(avg_for_period, high_threshold) {
return parseFloat((avg_for_period * (1 + high_threshold)).toFixed(2));
},
// returns float
calculateAvgMinusLowThreshold: function(avg_for_period, low_threshold) {
return parseFloat((avg_for_period * (1 - low_threshold)).toFixed(2));
},
/*
* this function takes a slide of the array (144 values for a day, fewer for other periods) and decides on selling or buying
*/
decideBuyOrSell: function(data_to_be_tested, latest_buy_price, latest_sell_price, low_threshold, high_threshold, buy_sell_method, print_full_debug, use_loop_fn) {
if (buy_sell_method === 'avg') {
//var start_c = new Date();
if (use_loop_fn) {
var avg_for_period = this.calculateAverageLoop(data_to_be_tested) // get avg for period
} else {
var avg_for_period = this.calculateAverage(data_to_be_tested) // get avg for period
}
// debug
//console.log(avg_for_period + ' should equal ' + this.calculateAverage(data_to_be_tested))
//this.timing_section_c += ((new Date() - start_c))
//var start_d = new Date();
var avg_plus_high_threshold = this.calculateAvgPlusHighThreshold(avg_for_period, high_threshold);
var avg_minus_low_threshold = this.calculateAvgMinusLowThreshold(avg_for_period, low_threshold)
//this.timing_section_d += ((new Date() - start_d))
var sell = (latest_sell_price > avg_plus_high_threshold) ? true : false;
var buy = (latest_buy_price < avg_minus_low_threshold) ? true : false;
// console.log('- avg_for_period ' + avg_for_period)
// console.log('- avg_plus_high_threshold ' + avg_plus_high_threshold)
// console.log('- avg_minus_low_threshold ' + avg_minus_low_threshold)
// console.log('- high_threshold ' + high_threshold)
// console.log('- low_threshold ' + low_threshold)
// console.log('- sell ' + sell)
// console.log('- buy ' + buy)
if (print_full_debug) {
reporting.debug('avg_for_period: $' + avg_for_period.toFixed(2) + '<br>');// print avg result to browser
reporting.debug('(avg price plus high threshold ('+high_threshold+'%) is $' + avg_plus_high_threshold.toFixed(2) + ')<br />');
reporting.debug('(avg price minus low threshold ('+low_threshold+'%) is $' + avg_minus_low_threshold.toFixed(2) + ')<br />');
}
} else if (buy_sell_method === 'peak') {
var high_for_period = this.calculateHigh(data_to_be_tested) // get avg for period
var low_for_period = this.calculateLow(data_to_be_tested) // get avg for period
var high_minus_high_threshold = (high_for_period * (1 - high_threshold)).toFixed(2);
var low_plus_low_threshold = (low_for_period * (1 + low_threshold)).toFixed(2);
var sell = (latest_sell_price > high_minus_high_threshold) ? true : false;
var buy = (latest_buy_price < low_plus_low_threshold) ? true : false;
if (print_full_debug) {
reporting.debug('high_for_period is: $' + high_for_period + '<br>');// print avg result to browser
reporting.debug('low_for_period is: $' + low_for_period + '<br>');// print avg result to browser
reporting.debug('high_minus_high_threshold is: $' + high_minus_high_threshold + '<br>');// print avg result to browser
reporting.debug('low_plus_low_threshold is: $' + low_plus_low_threshold + '<br>');// print avg result to browser
}
} else {
return;
}
if (sell) {
return 'sell';
} else if (buy) {
return 'buy';
} else {
return 'do_nothing';
}
},
calculateValuesForGivenPeriod: function(hrs_in_period, interval_in_minutes) {
return ((hrs_in_period * 60) / interval_in_minutes); // 144 10-min incremetns in a 24 hr period)
},
sellCoin: function(high_threshold, print_full_debug, sell_all, total_coins_owned, buy_sell_unit, current_coin_price_sell) {
// wrapper must check for 0 coins
if (sell_all) {
var number_of_coins_to_sell = total_coins_owned // SELL EVERYTHING
} else {
var number_of_coins_to_sell = (buy_sell_unit / current_coin_price_sell) // SELL LIMIT
if (number_of_coins_to_sell > total_coins_owned) {
number_of_coins_to_sell = total_coins_owned
}
}
var result_of_this_sale = (current_coin_price_sell * number_of_coins_to_sell)
var transaction_notes = 'Selling ' + number_of_coins_to_sell + ' coins values at $' + current_coin_price_sell.toFixed(2) + ' each for a total sale of $' + result_of_this_sale.toFixed(2);
if (print_full_debug) {
reporting.debug('<span style="color:red">TRANSACTION: SELL ' + number_of_coins_to_sell + ' of my ' + total_coins_owned + ' coins valued at $');
reporting.debug(current_coin_price_sell + ' = $' + result_of_this_sale + '</span><br />');
}
return {
number_of_coins_to_sell : number_of_coins_to_sell,
result_of_this_sale : result_of_this_sale,
transaction_notes : transaction_notes
}
},
buyCoin: function(total_coins_owned, buy_sell_unit, current_coin_price_buy, print_full_debug, latest_sell_price, total_spent, total_sold, money_in_bank, reinvest_profit) {
// expected number of coins to buy
var number_of_coins_to_buy = (buy_sell_unit / current_coin_price_buy);
var amount_spent_on_this_transaction = buy_sell_unit;
var transaction_notes = 'Buying ' + number_of_coins_to_buy + ' coins valued at $' + current_coin_price_buy.toFixed(2) + ' each for a total purchase of $' + amount_spent_on_this_transaction.toFixed(2);
if (print_full_debug) {
reporting.debug('buy_sell_unit: ' + buy_sell_unit + '<br />');
reporting.debug('current_coin_price_buy: ' + current_coin_price_buy + '<br />');
reporting.debug('total_coins_owned: ' + total_coins_owned + '<br />');
reporting.debug('number_of_coins_to_buy: ' + number_of_coins_to_buy + '<br />');
reporting.debug('amount_spent_on_this_transaction: ' + amount_spent_on_this_transaction + '<br />');
}
var current_position = this.calculateCurrentPosition(total_coins_owned, latest_sell_price, total_sold, total_spent)
// if currnet_positon is negative it screws this up, so i added in this little extra rule. if crurr pos is less than 0, then just check if we have money.
if (reinvest_profit || (current_position <= 0)) {
var reached_limit = (money_in_bank < buy_sell_unit) // THIS WILL SPEND PROFIT
} else {
var reached_limit = ((money_in_bank - buy_sell_unit) < current_position) // THIS WILL RETAIN PROFIT
// console.log('***checking limit:')
// console.log('total_coins_owned: ' + total_coins_owned);
// console.log('latest_sell_price: ' + latest_sell_price);
// console.log('total_sold: ' + total_sold);
// console.log('total_spent: ' + total_spent);
// console.log('current_position: ' + current_position);
// console.log('money_in_bank: ' + money_in_bank);
// console.log('buy_sell_unit: ' + buy_sell_unit);
// console.log('reached_limit: ' + reached_limit);
}
if (reached_limit) {
number_of_coins_to_buy = 0;
amount_spent_on_this_transaction = 0;
transaction_notes = '***reached limit*** setting number_of_coins_to_buy/amount_spent_on_this_transaction to 0';
transaction_notes += ' (money_in_bank: ' + money_in_bank + ', buy_sell_unit: ' + buy_sell_unit + ', current_position: ' + current_position + ')';
if (print_full_debug) {
reporting.debug('***reached limit!***<br />')
reporting.debug('---setting number_of_coins_to_buy and amount_spent_on_this_transaction to 0<br />');
} | if (print_full_debug) {
reporting.debug('LIMIT NOT REACHED: <br />')
reporting.debug('total_spent ('+total_spent+') - total_sold ('+total_sold+') + buy_sell_unit ('+buy_sell_unit+') = ('+(total_spent - total_sold + buy_sell_unit)+') is not greater than 2000<br />')
}
}
// if flag set, and already own coins -- dont buy again
// should mean you only ever buy one unit
// if (this.buy_only_once && (total_coins_owned > 0)) {
// if (this.print_full_debug) {
// reporting.debug('not buying -- already own');
// }
// return;
// }
// return some vars for sim to update
return {
number_of_coins_to_buy : number_of_coins_to_buy,
amount_spent_on_this_transaction : amount_spent_on_this_transaction,
transaction_notes : transaction_notes
}
},
calculateCurrentPosition: function(total_coins_owned, latest_sell_price, total_sold, total_spent) {
return ((total_coins_owned * latest_sell_price) + (total_sold - total_spent));
},
getArrayAverage: function(arr) {
var sum = arr.reduce(function add(a, b) {
return a + b;
}, 0);
return (sum / arr.length)
},
roundToPoint5: function(num) {
return (Math.round(num * 2) / 2);
}
} | }
else { | random_line_split |
tools.js | var reporting = require('./reporting')
module.exports = {
timing_section_c : 0,
timing_section_d : 0,
timing_section_e : 0,
calculateAverage: function(data_to_be_tested) {
//console.log(data_to_be_tested)
var sum = 0;
var len = data_to_be_tested.length
for (var j=0; j < len; j++) {
sum += data_to_be_tested[j].value_avg;
}
return parseFloat((sum/len).toFixed(2));
},
sum_last : 0,
sum : 0,
calculateAverageLoop: function(data_to_be_tested) {
// NEW WAY - FASTER
// BUT CAN ONLY BE USED IN CONTEXT OF SIM LOOP
var len = data_to_be_tested.length
if (this.sum === 0) {
// first time run - populate sum
for (var j=0; j < len; j++) {
this.sum += data_to_be_tested[j].value_avg;
}
} else {
// remove first value and add last
this.sum = this.sum - this.sum_last + data_to_be_tested[len - 1].value_avg
}
this.sum_last = data_to_be_tested[0].value_avg;
return parseFloat((this.sum/len).toFixed(2));
},
// return highest sell price
// *****IT IS USING BUY PRICE! which should it be?
calculateHigh: function(data_to_be_tested) {
var highest = 0
for (j=0; j < data_to_be_tested.length; j++) {
highest = (data_to_be_tested[j].value_buy > highest) ? data_to_be_tested[j].value_buy : highest;
}
return highest;
},
calculateLow: function(data_to_be_tested) {
// return lowest buy price
var lowest = data_to_be_tested[0].value_sell;
for (j=0; j < data_to_be_tested.length; j++) {
lowest = (data_to_be_tested[j].value_sell < lowest) ? data_to_be_tested[j].value_sell : lowest;
}
return lowest;
},
// returns float with 2 decimals
calculateAvgPlusHighThreshold: function(avg_for_period, high_threshold) {
return parseFloat((avg_for_period * (1 + high_threshold)).toFixed(2));
},
// returns float
calculateAvgMinusLowThreshold: function(avg_for_period, low_threshold) {
return parseFloat((avg_for_period * (1 - low_threshold)).toFixed(2));
},
/*
* this function takes a slide of the array (144 values for a day, fewer for other periods) and decides on selling or buying
*/
decideBuyOrSell: function(data_to_be_tested, latest_buy_price, latest_sell_price, low_threshold, high_threshold, buy_sell_method, print_full_debug, use_loop_fn) {
if (buy_sell_method === 'avg') {
//var start_c = new Date();
if (use_loop_fn) {
var avg_for_period = this.calculateAverageLoop(data_to_be_tested) // get avg for period
} else {
var avg_for_period = this.calculateAverage(data_to_be_tested) // get avg for period
}
// debug
//console.log(avg_for_period + ' should equal ' + this.calculateAverage(data_to_be_tested))
//this.timing_section_c += ((new Date() - start_c))
//var start_d = new Date();
var avg_plus_high_threshold = this.calculateAvgPlusHighThreshold(avg_for_period, high_threshold);
var avg_minus_low_threshold = this.calculateAvgMinusLowThreshold(avg_for_period, low_threshold)
//this.timing_section_d += ((new Date() - start_d))
var sell = (latest_sell_price > avg_plus_high_threshold) ? true : false;
var buy = (latest_buy_price < avg_minus_low_threshold) ? true : false;
// console.log('- avg_for_period ' + avg_for_period)
// console.log('- avg_plus_high_threshold ' + avg_plus_high_threshold)
// console.log('- avg_minus_low_threshold ' + avg_minus_low_threshold)
// console.log('- high_threshold ' + high_threshold)
// console.log('- low_threshold ' + low_threshold)
// console.log('- sell ' + sell)
// console.log('- buy ' + buy)
if (print_full_debug) {
reporting.debug('avg_for_period: $' + avg_for_period.toFixed(2) + '<br>');// print avg result to browser
reporting.debug('(avg price plus high threshold ('+high_threshold+'%) is $' + avg_plus_high_threshold.toFixed(2) + ')<br />');
reporting.debug('(avg price minus low threshold ('+low_threshold+'%) is $' + avg_minus_low_threshold.toFixed(2) + ')<br />');
}
} else if (buy_sell_method === 'peak') | else {
return;
}
if (sell) {
return 'sell';
} else if (buy) {
return 'buy';
} else {
return 'do_nothing';
}
},
calculateValuesForGivenPeriod: function(hrs_in_period, interval_in_minutes) {
return ((hrs_in_period * 60) / interval_in_minutes); // 144 10-min incremetns in a 24 hr period)
},
sellCoin: function(high_threshold, print_full_debug, sell_all, total_coins_owned, buy_sell_unit, current_coin_price_sell) {
// wrapper must check for 0 coins
if (sell_all) {
var number_of_coins_to_sell = total_coins_owned // SELL EVERYTHING
} else {
var number_of_coins_to_sell = (buy_sell_unit / current_coin_price_sell) // SELL LIMIT
if (number_of_coins_to_sell > total_coins_owned) {
number_of_coins_to_sell = total_coins_owned
}
}
var result_of_this_sale = (current_coin_price_sell * number_of_coins_to_sell)
var transaction_notes = 'Selling ' + number_of_coins_to_sell + ' coins values at $' + current_coin_price_sell.toFixed(2) + ' each for a total sale of $' + result_of_this_sale.toFixed(2);
if (print_full_debug) {
reporting.debug('<span style="color:red">TRANSACTION: SELL ' + number_of_coins_to_sell + ' of my ' + total_coins_owned + ' coins valued at $');
reporting.debug(current_coin_price_sell + ' = $' + result_of_this_sale + '</span><br />');
}
return {
number_of_coins_to_sell : number_of_coins_to_sell,
result_of_this_sale : result_of_this_sale,
transaction_notes : transaction_notes
}
},
buyCoin: function(total_coins_owned, buy_sell_unit, current_coin_price_buy, print_full_debug, latest_sell_price, total_spent, total_sold, money_in_bank, reinvest_profit) {
// expected number of coins to buy
var number_of_coins_to_buy = (buy_sell_unit / current_coin_price_buy);
var amount_spent_on_this_transaction = buy_sell_unit;
var transaction_notes = 'Buying ' + number_of_coins_to_buy + ' coins valued at $' + current_coin_price_buy.toFixed(2) + ' each for a total purchase of $' + amount_spent_on_this_transaction.toFixed(2);
if (print_full_debug) {
reporting.debug('buy_sell_unit: ' + buy_sell_unit + '<br />');
reporting.debug('current_coin_price_buy: ' + current_coin_price_buy + '<br />');
reporting.debug('total_coins_owned: ' + total_coins_owned + '<br />');
reporting.debug('number_of_coins_to_buy: ' + number_of_coins_to_buy + '<br />');
reporting.debug('amount_spent_on_this_transaction: ' + amount_spent_on_this_transaction + '<br />');
}
var current_position = this.calculateCurrentPosition(total_coins_owned, latest_sell_price, total_sold, total_spent)
// if currnet_positon is negative it screws this up, so i added in this little extra rule. if crurr pos is less than 0, then just check if we have money.
if (reinvest_profit || (current_position <= 0)) {
var reached_limit = (money_in_bank < buy_sell_unit) // THIS WILL SPEND PROFIT
} else {
var reached_limit = ((money_in_bank - buy_sell_unit) < current_position) // THIS WILL RETAIN PROFIT
// console.log('***checking limit:')
// console.log('total_coins_owned: ' + total_coins_owned);
// console.log('latest_sell_price: ' + latest_sell_price);
// console.log('total_sold: ' + total_sold);
// console.log('total_spent: ' + total_spent);
// console.log('current_position: ' + current_position);
// console.log('money_in_bank: ' + money_in_bank);
// console.log('buy_sell_unit: ' + buy_sell_unit);
// console.log('reached_limit: ' + reached_limit);
}
if (reached_limit) {
number_of_coins_to_buy = 0;
amount_spent_on_this_transaction = 0;
transaction_notes = '***reached limit*** setting number_of_coins_to_buy/amount_spent_on_this_transaction to 0';
transaction_notes += ' (money_in_bank: ' + money_in_bank + ', buy_sell_unit: ' + buy_sell_unit + ', current_position: ' + current_position + ')';
if (print_full_debug) {
reporting.debug('***reached limit!***<br />')
reporting.debug('---setting number_of_coins_to_buy and amount_spent_on_this_transaction to 0<br />');
}
}
else {
if (print_full_debug) {
reporting.debug('LIMIT NOT REACHED: <br />')
reporting.debug('total_spent ('+total_spent+') - total_sold ('+total_sold+') + buy_sell_unit ('+buy_sell_unit+') = ('+(total_spent - total_sold + buy_sell_unit)+') is not greater than 2000<br />')
}
}
// if flag set, and already own coins -- dont buy again
// should mean you only ever buy one unit
// if (this.buy_only_once && (total_coins_owned > 0)) {
// if (this.print_full_debug) {
// reporting.debug('not buying -- already own');
// }
// return;
// }
// return some vars for sim to update
return {
number_of_coins_to_buy : number_of_coins_to_buy,
amount_spent_on_this_transaction : amount_spent_on_this_transaction,
transaction_notes : transaction_notes
}
},
calculateCurrentPosition: function(total_coins_owned, latest_sell_price, total_sold, total_spent) {
return ((total_coins_owned * latest_sell_price) + (total_sold - total_spent));
},
getArrayAverage: function(arr) {
var sum = arr.reduce(function add(a, b) {
return a + b;
}, 0);
return (sum / arr.length)
},
roundToPoint5: function(num) {
return (Math.round(num * 2) / 2);
}
} | {
var high_for_period = this.calculateHigh(data_to_be_tested) // get avg for period
var low_for_period = this.calculateLow(data_to_be_tested) // get avg for period
var high_minus_high_threshold = (high_for_period * (1 - high_threshold)).toFixed(2);
var low_plus_low_threshold = (low_for_period * (1 + low_threshold)).toFixed(2);
var sell = (latest_sell_price > high_minus_high_threshold) ? true : false;
var buy = (latest_buy_price < low_plus_low_threshold) ? true : false;
if (print_full_debug) {
reporting.debug('high_for_period is: $' + high_for_period + '<br>');// print avg result to browser
reporting.debug('low_for_period is: $' + low_for_period + '<br>');// print avg result to browser
reporting.debug('high_minus_high_threshold is: $' + high_minus_high_threshold + '<br>');// print avg result to browser
reporting.debug('low_plus_low_threshold is: $' + low_plus_low_threshold + '<br>');// print avg result to browser
}
} | conditional_block |
lib.rs | //! # Ordnung
//!
//! Fast, vector-based map implementation that preserves insertion order.
//!
//! + Map is implemented as a binary tree over a `Vec` for storage, with only
//! two extra words per entry for book-keeping on 64-bit architectures.
//! + A fast hash function with good random distribution is used to balance the
//! tree. Ordnung makes no guarantees that the tree will be perfectly
//! balanced, but key lookup should be approaching `O(log n)` in most cases.
//! + Tree traversal is always breadth-first and happens over a single
//! continuous block of memory, which makes it cache friendly.
//! + Iterating over all entries is always `O(n)`, same as `Vec<(K, V)>`.
//! + Removing a value uses a sentinel and is `~O(log n)`.
//! + There are no buckets, so there is no need to re-bucket things when growing
//! the map.
//!
//! ## When should you use this?
//!
//! + You need to preserve insertion order of the map.
//! + Iterating over the map is very performance sensitive.
//! + Your average map has fewer than 100 entries.
//! + You have no a priori knowledge about the final size of the map when you
//! start creating it.
#![warn(missing_docs)]
#![cfg_attr(not(test), no_std)]
extern crate alloc;
use core::borrow::Borrow;
use core::cell::Cell;
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::num::NonZeroU32;
use core::ops::Index;
use core::{fmt, slice};
pub mod compact;
mod entry;
mod raw_entry;
use ahash::AHasher;
pub use compact::Vec;
pub use entry::*;
pub use raw_entry::*; | pub struct Keys<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
#[inline]
fn next(&mut self) -> Option<&'a K> {
self.inner.next().map(|(k, _)| k)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
//#[derive(Clone)]
/// Iterator over the values
pub struct Values<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[derive(Clone)]
struct Node<K, V> {
// Key
pub key: K,
// Hash of the key
pub hash: u64,
// Value stored. We'll use `None` as a sentinel value for removed
// entries.
pub value: Option<V>,
// Store vector index pointing to the `Node` for which `hash` is smaller
// than that of this `Node`.
pub left: Cell<Option<NonZeroU32>>,
// Same as above but for `Node`s with hash larger than this one. If the
// hash is the same, but keys are different, the lookup will default
// to the right branch as well.
pub right: Cell<Option<NonZeroU32>>,
}
impl<K, V> fmt::Debug for Node<K, V>
where
K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(
&(&self.key, &self.value, self.left.get(), self.right.get()),
f,
)
}
}
impl<K, V> PartialEq for Node<K, V>
where
K: PartialEq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.key == other.key && self.value == other.value
}
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
pub fn keys(&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
Miss(parent) => {
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, value, hash));
None
}
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked(idx) };
node.value.as_ref()
}
Miss(_) => None,
}
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.as_mut() },
Miss(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked(idx).value.is_some() },
Miss(_) => false,
}
}
/// Get a mutable reference to entry at key. Inserts a new entry by
/// calling `F` if absent.
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
F: FnOnce() -> V,
{
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked_mut(idx) };
if node.value.is_none() {
node.value = Some(fill());
}
node.value.as_mut().unwrap()
}
Miss(parent) => {
let idx = self.store.len();
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, fill(), hash));
self.store[idx].value.as_mut().unwrap()
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.take() },
Miss(_) => return None,
}
}
/// Returns the number of elements in the map.
#[inline]
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
self.store.clear();
}
#[inline]
fn find(&self, hash: u64) -> FindResult {
if self.len() == 0 {
return Miss(None);
}
let mut idx = 0;
loop {
let node = unsafe { self.store.get_unchecked(idx) };
if hash < node.hash {
match node.left.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.left)),
}
} else if hash > node.hash {
match node.right.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.right)),
}
} else {
return Hit(idx);
}
}
}
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
// let mut hasher = fnv::FnvHasher::default();
// let mut hasher = rustc_hash::FxHasher::default();
let mut hasher = H::default();
key.hash(&mut hasher);
hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialized the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
K: Eq + Clone,
{
for (idx, n) in self.store.iter().enumerate() {
if &key == &n.key {
return Entry::Occupied(OccupiedEntry::new(idx, key, self));
}
}
Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
/// The length of this iterator
pub fn len(&self) -> usize {
self.0.store.len()
}
/// If this iteratoris empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(n) = self.0.store.pop() {
if let Some(v) = n.value {
return Some((n.key, v));
}
} else {
return None;
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let l = self.0.store.len();
(l, Some(l))
}
}
impl<K, Q: ?Sized, V> Index<&Q> for Map<K, V>
where
K: Eq + Hash + Borrow<Q>,
Q: Eq + Hash,
{
type Output = V;
/// Returns a reference to the value corresponding to the supplied key.
///
/// # Panics
///
/// Panics if the key is not present in the HashMap.
fn index(&self, key: &Q) -> &V {
self.get(key).expect("Key not found in Map")
}
}
impl<'json, IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
IK: Into<K>,
IV: Into<V>,
K: Hash + Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (IK, IV)>,
{
let iter = iter.into_iter();
let mut map = Map::with_capacity(iter.size_hint().0);
for (key, value) in iter {
map.insert(key.into(), value.into());
}
map
}
}
// Because keys can inserted in different order, the safe way to
// compare `Map`s is to iterate over one and check if the other
// has all the same keys.
impl<K, V> PartialEq for Map<K, V>
where
K: Hash + Eq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
// Faster than .get() since we can avoid hashing
for &Node {
ref value, hash, ..
} in self.store.iter()
{
if let Hit(idx) = other.find(hash) {
if &other.store[idx].value == value {
continue;
}
}
return false;
}
true
}
}
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter_mut`](./struct.Map.html#method.iter_mut)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct IterMut<'a, K, V> {
inner: slice::IterMut<'a, Node<K, V>>,
}
impl<K, V> Iter<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self {
Iter { inner: [].iter() }
}
}
impl<'i, K, V> Iterator for Iter<'i, K, V> {
type Item = (&'i K, &'i V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next_back() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
impl<K, V> IterMut<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self {
IterMut {
inner: [].iter_mut(),
}
}
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next() {
let value = match node.value {
Some(ref mut value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next_back() {
let value = match node.value {
Some(ref mut value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
#[cfg(test)]
mod tests {
use super::Map;
#[test]
fn empty() {
let map: Map<&str, u64> = Map::new();
assert_eq!(map.get("foo"), None);
assert_eq!(map.len(), 0);
assert_eq!(map.is_empty(), true);
}
#[test]
fn simple() {
let mut map: Map<&str, u64> = Map::new();
map.insert("foo", 42);
assert_eq!(map.get("foo"), Some(&42));
assert_eq!(map.len(), 1);
assert_eq!(map.is_empty(), false);
}
} | // use alloc::vec::Vec;
/// Iterator over the keys | random_line_split |
lib.rs | //! # Ordnung
//!
//! Fast, vector-based map implementation that preserves insertion order.
//!
//! + Map is implemented as a binary tree over a `Vec` for storage, with only
//! two extra words per entry for book-keeping on 64-bit architectures.
//! + A fast hash function with good random distribution is used to balance the
//! tree. Ordnung makes no guarantees that the tree will be perfectly
//! balanced, but key lookup should be approaching `O(log n)` in most cases.
//! + Tree traversal is always breadth-first and happens over a single
//! continuous block of memory, which makes it cache friendly.
//! + Iterating over all entries is always `O(n)`, same as `Vec<(K, V)>`.
//! + Removing a value uses a sentinel and is `~O(log n)`.
//! + There are no buckets, so there is no need to re-bucket things when growing
//! the map.
//!
//! ## When should you use this?
//!
//! + You need to preserve insertion order of the map.
//! + Iterating over the map is very performance sensitive.
//! + Your average map has fewer than 100 entries.
//! + You have no a priori knowledge about the final size of the map when you
//! start creating it.
#![warn(missing_docs)]
#![cfg_attr(not(test), no_std)]
extern crate alloc;
use core::borrow::Borrow;
use core::cell::Cell;
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::num::NonZeroU32;
use core::ops::Index;
use core::{fmt, slice};
pub mod compact;
mod entry;
mod raw_entry;
use ahash::AHasher;
pub use compact::Vec;
pub use entry::*;
pub use raw_entry::*;
// use alloc::vec::Vec;
/// Iterator over the keys
pub struct Keys<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
#[inline]
fn next(&mut self) -> Option<&'a K> {
self.inner.next().map(|(k, _)| k)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
//#[derive(Clone)]
/// Iterator over the values
pub struct Values<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[derive(Clone)]
struct Node<K, V> {
// Key
pub key: K,
// Hash of the key
pub hash: u64,
// Value stored. We'll use `None` as a sentinel value for removed
// entries.
pub value: Option<V>,
// Store vector index pointing to the `Node` for which `hash` is smaller
// than that of this `Node`.
pub left: Cell<Option<NonZeroU32>>,
// Same as above but for `Node`s with hash larger than this one. If the
// hash is the same, but keys are different, the lookup will default
// to the right branch as well.
pub right: Cell<Option<NonZeroU32>>,
}
impl<K, V> fmt::Debug for Node<K, V>
where
K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(
&(&self.key, &self.value, self.left.get(), self.right.get()),
f,
)
}
}
impl<K, V> PartialEq for Node<K, V>
where
K: PartialEq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.key == other.key && self.value == other.value
}
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
pub fn | (&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
Miss(parent) => {
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, value, hash));
None
}
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked(idx) };
node.value.as_ref()
}
Miss(_) => None,
}
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.as_mut() },
Miss(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked(idx).value.is_some() },
Miss(_) => false,
}
}
/// Get a mutable reference to entry at key. Inserts a new entry by
/// calling `F` if absent.
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
F: FnOnce() -> V,
{
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked_mut(idx) };
if node.value.is_none() {
node.value = Some(fill());
}
node.value.as_mut().unwrap()
}
Miss(parent) => {
let idx = self.store.len();
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, fill(), hash));
self.store[idx].value.as_mut().unwrap()
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.take() },
Miss(_) => return None,
}
}
/// Returns the number of elements in the map.
#[inline]
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
self.store.clear();
}
#[inline]
fn find(&self, hash: u64) -> FindResult {
if self.len() == 0 {
return Miss(None);
}
let mut idx = 0;
loop {
let node = unsafe { self.store.get_unchecked(idx) };
if hash < node.hash {
match node.left.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.left)),
}
} else if hash > node.hash {
match node.right.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.right)),
}
} else {
return Hit(idx);
}
}
}
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
// let mut hasher = fnv::FnvHasher::default();
// let mut hasher = rustc_hash::FxHasher::default();
let mut hasher = H::default();
key.hash(&mut hasher);
hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialized the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
K: Eq + Clone,
{
for (idx, n) in self.store.iter().enumerate() {
if &key == &n.key {
return Entry::Occupied(OccupiedEntry::new(idx, key, self));
}
}
Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
    /// The number of nodes remaining in the backing store.
    ///
    /// NOTE(review): this counts sentinel nodes left behind by `Map::remove`
    /// (their `value` is `None`), so it may exceed the number of items the
    /// iterator will actually yield — confirm whether callers rely on it.
    pub fn len(&self) -> usize {
        self.0.store.len()
    }
    /// If this iterator is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
/// Consuming iteration over a `Map`.
///
/// Entries are yielded in **reverse insertion order** (nodes are popped
/// off the back of the backing vector). Sentinel nodes left behind by
/// `Map::remove` (`value == None`) are skipped.
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Pop nodes until one still holds a value; removed entries are
        // sentinels with `value == None` and are silently dropped here.
        while let Some(n) = self.0.store.pop() {
            if let Some(v) = n.value {
                return Some((n.key, v));
            }
        }
        None
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The store may contain removed (sentinel) entries that `next`
        // skips, so the raw length is only an *upper* bound. The previous
        // `(l, Some(l))` claimed an exact size, which violates the
        // `size_hint` contract once any entry has been removed.
        (0, Some(self.0.store.len()))
    }
}
impl<K, Q: ?Sized, V> Index<&Q> for Map<K, V>
where
    K: Eq + Hash + Borrow<Q>,
    Q: Eq + Hash,
{
    type Output = V;
    /// Returns a reference to the value corresponding to the supplied key.
    ///
    /// # Panics
    ///
    /// Panics if the key is not present in the HashMap.
    fn index(&self, key: &Q) -> &V {
        match self.get(key) {
            Some(value) => value,
            None => panic!("Key not found in Map"),
        }
    }
}
impl<IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
    IK: Into<K>,
    IV: Into<V>,
    K: Hash + Eq,
{
    /// Builds a `Map` from an iterator of convertible key/value pairs.
    ///
    /// The map is pre-sized from the iterator's lower size hint to avoid
    /// repeated reallocation during insertion.
    ///
    /// Note: the unused `'json` lifetime parameter on the original impl
    /// was dead and has been removed.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = (IK, IV)>,
    {
        let iter = iter.into_iter();
        let mut map = Map::with_capacity(iter.size_hint().0);
        for (key, value) in iter {
            map.insert(key.into(), value.into());
        }
        map
    }
}
// Because keys can be inserted in different order, the safe way to
// compare `Map`s is to iterate over one and check that the other holds
// an equal value under the same cached hash.
impl<K, V> PartialEq for Map<K, V>
where
    K: Hash + Eq,
    V: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        // `len()` is the raw store length, which includes sentinel
        // (removed) entries. NOTE(review): two maps with the same live
        // entries but different removal histories may compare unequal
        // here — confirm whether that is intended.
        if self.len() != other.len() {
            return false;
        }
        // Faster than .get() since we can avoid hashing: look each of
        // our nodes up in `other` by its already-computed hash.
        for &Node {
            ref value, hash, ..
        } in self.store.iter()
        {
            if let Hit(idx) = other.find(hash) {
                if &other.store[idx].value == value {
                    continue;
                }
            }
            // Hash missing from `other`, or the values differ.
            return false;
        }
        true
    }
}
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter_mut`](./struct.Map.html#method.iter_mut)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct IterMut<'a, K, V> {
inner: slice::IterMut<'a, Node<K, V>>,
}
impl<K, V> Iter<'_, K, V> {
    /// Construct an iterator over no entries; `next` always yields `None`.
    pub fn empty() -> Self {
        let inner = [].iter();
        Iter { inner }
    }
}
impl<'i, K, V> Iterator for Iter<'i, K, V> {
    type Item = (&'i K, &'i V);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Skip sentinel nodes (`value == None`) and yield the first live
        // key/value pair, if any remain.
        self.inner
            .by_ref()
            .find_map(|node| node.value.as_ref().map(|value| (&node.key, value)))
    }
}
impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        // Walk from the back, skipping sentinel (removed) nodes.
        loop {
            let node = self.inner.next_back()?;
            if let Some(value) = node.value.as_ref() {
                return Some((&node.key, value));
            }
        }
    }
}
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
    /// Length of the underlying node slice.
    ///
    /// NOTE(review): this counts sentinel (removed) nodes that `next`
    /// skips, so it can over-report the number of items actually
    /// yielded — confirm whether the `ExactSizeIterator` contract
    /// matters to callers here.
    fn len(&self) -> usize {
        self.inner.len()
    }
}
impl<K, V> IterMut<'_, K, V> {
    /// Construct an iterator over no entries; `next` always yields `None`.
    pub fn empty() -> Self {
        let inner = [].iter_mut();
        IterMut { inner }
    }
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Advance past sentinel nodes; hand out disjoint borrows of the
        // key (shared) and value (mutable) of the first live entry.
        loop {
            let node = self.inner.next()?;
            if let Some(ref mut value) = node.value {
                return Some((&node.key, value));
            }
        }
    }
}
impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        // Walk from the back, skipping sentinel (removed) nodes.
        loop {
            let node = self.inner.next_back()?;
            if let Some(ref mut value) = node.value {
                return Some((&node.key, value));
            }
        }
    }
}
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
    /// Length of the underlying node slice.
    ///
    /// NOTE(review): counts sentinel (removed) nodes that `next` skips,
    /// so it can over-report the number of items actually yielded.
    fn len(&self) -> usize {
        self.inner.len()
    }
}
#[cfg(test)]
mod tests {
    use super::Map;

    #[test]
    fn empty() {
        let map: Map<&str, u64> = Map::new();
        assert_eq!(map.get("foo"), None);
        assert_eq!(map.len(), 0);
        assert_eq!(map.is_empty(), true);
    }

    #[test]
    fn simple() {
        let mut map: Map<&str, u64> = Map::new();
        map.insert("foo", 42);
        assert_eq!(map.get("foo"), Some(&42));
        assert_eq!(map.len(), 1);
        assert_eq!(map.is_empty(), false);
    }

    // New: value overwrite, which `simple` did not exercise.
    #[test]
    fn overwrite() {
        let mut map: Map<&str, u64> = Map::new();
        assert_eq!(map.insert("foo", 1), None);
        assert_eq!(map.insert("foo", 2), Some(1));
        assert_eq!(map.get("foo"), Some(&2));
    }

    // New: removal and subsequent lookup of the removed key.
    #[test]
    fn remove() {
        let mut map: Map<&str, u64> = Map::new();
        map.insert("foo", 42);
        assert_eq!(map.remove("foo"), Some(42));
        assert_eq!(map.get("foo"), None);
    }
}
| keys | identifier_name |
lib.rs | //! # Ordnung
//!
//! Fast, vector-based map implementation that preserves insertion order.
//!
//! + Map is implemented as a binary tree over a `Vec` for storage, with only
//! two extra words per entry for book-keeping on 64-bit architectures.
//! + A fast hash function with good random distribution is used to balance the
//! tree. Ordnung makes no guarantees that the tree will be perfectly
//! balanced, but key lookup should be approaching `O(log n)` in most cases.
//! + Tree traversal is always breadth-first and happens over a single
//! continuous block of memory, which makes it cache friendly.
//! + Iterating over all entries is always `O(n)`, same as `Vec<(K, V)>`.
//! + Removing a value uses a sentinel and is `~O(log n)`.
//! + There are no buckets, so there is no need to re-bucket things when growing
//! the map.
//!
//! ## When should you use this?
//!
//! + You need to preserve insertion order of the map.
//! + Iterating over the map is very performance sensitive.
//! + Your average map has fewer than 100 entries.
//! + You have no a priori knowledge about the final size of the map when you
//! start creating it.
#![warn(missing_docs)]
#![cfg_attr(not(test), no_std)]
extern crate alloc;
use core::borrow::Borrow;
use core::cell::Cell;
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::num::NonZeroU32;
use core::ops::Index;
use core::{fmt, slice};
pub mod compact;
mod entry;
mod raw_entry;
use ahash::AHasher;
pub use compact::Vec;
pub use entry::*;
pub use raw_entry::*;
// use alloc::vec::Vec;
/// Iterator over the keys
pub struct Keys<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
    type Item = &'a K;
    #[inline]
    fn next(&mut self) -> Option<&'a K> {
        // Drop the value half of each entry.
        match self.inner.next() {
            Some((key, _)) => Some(key),
            None => None,
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
//#[derive(Clone)]
/// Iterator over the values
pub struct Values<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
    type Item = &'a V;
    #[inline]
    fn next(&mut self) -> Option<&'a V> {
        // Drop the key half of each entry.
        match self.inner.next() {
            Some((_, value)) => Some(value),
            None => None,
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[derive(Clone)]
struct Node<K, V> {
    // Key as provided at insertion time.
    pub key: K,
    // Cached hash of the key; all tree navigation compares this, not the key.
    pub hash: u64,
    // Value stored. `None` is the sentinel for a removed entry — the node
    // stays in place so the `left`/`right` links below remain valid.
    pub value: Option<V>,
    // Store vector index pointing to the `Node` for which `hash` is smaller
    // than that of this `Node`. `NonZeroU32` is usable because children are
    // pushed after the root, so a child index is never 0.
    pub left: Cell<Option<NonZeroU32>>,
    // Same as above but for `Node`s with hash larger than this one.
    // NOTE(review): the original comment claimed equal-hash/different-key
    // lookups default to the right branch, but `find` returns `Hit` on an
    // equal hash without comparing keys — confirm intended collision
    // behaviour.
    pub right: Cell<Option<NonZeroU32>>,
}
impl<K, V> fmt::Debug for Node<K, V>
where
    K: fmt::Debug,
    V: fmt::Debug,
{
    /// Formats the node as a `(key, value, left, right)` tuple.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let repr = (&self.key, &self.value, self.left.get(), self.right.get());
        fmt::Debug::fmt(&repr, f)
    }
}
impl<K, V> PartialEq for Node<K, V>
where
    K: PartialEq,
    V: PartialEq,
{
    /// Nodes are equal when the cached hash, key, and value all match;
    /// the tree links (`left`/`right`) are deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        if self.hash != other.hash {
            return false;
        }
        self.key == other.key && self.value == other.value
    }
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
pub fn keys(&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
    let hash = Self::hash_key(&key);
    match self.find(hash) {
        // Existing node for this hash (possibly a removed sentinel):
        // swap the value in and return the old one (`None` for a
        // sentinel, matching the "key absent" contract).
        Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
        Miss(parent) => {
            // Wire the about-to-be-pushed node into the tree. `parent`
            // is `None` only for the root (empty map). The new index is
            // the current length, which is non-zero whenever a parent
            // exists, so `NonZeroU32::new` yields `Some` here.
            if let Some(parent) = parent {
                parent.set(NonZeroU32::new(self.store.len() as u32));
            }
            self.store.push(Node::new(key, value, hash));
            None
        }
    }
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
    K: Borrow<Q>,
    Q: Hash + Eq + ?Sized,
{
    if let Hit(idx) = self.find(Self::hash_key(key)) {
        // SAFETY: `find` only returns indices of nodes it just read
        // from `store`, so the index is in bounds.
        let node = unsafe { self.store.get_unchecked(idx) };
        node.value.as_ref()
    } else {
        None
    }
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
    K: Borrow<Q>,
    Q: Hash + Eq + ?Sized,
{
    if let Hit(idx) = self.find(Self::hash_key(key)) {
        // SAFETY: `find` only returns in-bounds indices into `store`.
        unsafe { self.store.get_unchecked_mut(idx) }.value.as_mut()
    } else {
        None
    }
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
    K: Borrow<Q>,
    Q: Hash + Eq + ?Sized,
{
    if let Hit(idx) = self.find(Self::hash_key(key)) {
        // A hit can still point at a sentinel (removed) node, so the
        // value must be checked as well.
        // SAFETY: `find` only returns in-bounds indices into `store`.
        unsafe { self.store.get_unchecked(idx) }.value.is_some()
    } else {
        false
    }
}
/// Get a mutable reference to the value at `key`, inserting a fresh one
/// produced by `fill` if the key is absent (or was previously removed).
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
    F: FnOnce() -> V,
{
    let hash = Self::hash_key(&key);
    match self.find(hash) {
        Hit(idx) => {
            let node = unsafe { self.store.get_unchecked_mut(idx) };
            // The hit may be a sentinel left by `remove`; revive it.
            if node.value.is_none() {
                node.value = Some(fill());
            }
            node.value.as_mut().unwrap()
        }
        Miss(parent) => {
            // The new node lands at the end of the store; link the
            // parent to it first. The index is non-zero whenever a
            // parent exists, so `NonZeroU32::new` yields `Some`.
            let idx = self.store.len();
            if let Some(parent) = parent {
                parent.set(NonZeroU32::new(self.store.len() as u32));
            }
            self.store.push(Node::new(key, fill(), hash));
            self.store[idx].value.as_mut().unwrap()
        }
    }
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
    K: Borrow<Q>,
    Q: Hash + Eq + ?Sized,
{
    if let Hit(idx) = self.find(Self::hash_key(key)) {
        // Take the value out, leaving the node in place as a sentinel
        // so the tree links stay valid.
        // SAFETY: `find` only returns in-bounds indices into `store`.
        unsafe { self.store.get_unchecked_mut(idx) }.value.take()
    } else {
        None
    }
}
/// Returns the number of elements in the map.
///
/// NOTE(review): this is the raw store length, which still counts
/// entries removed via `remove` (they remain as sentinel nodes) —
/// confirm whether callers expect the live-entry count instead.
#[inline]
pub fn len(&self) -> usize {
    self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
    self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
    self.store.clear();
}
/// Locate the node for `hash` in the binary tree.
///
/// Returns `Hit(index)` when a node with exactly this hash exists, or
/// `Miss(parent_link)` holding the `Cell` that a newly pushed node's
/// index should be written into (`None` when the map is empty).
///
/// NOTE(review): equality is decided on the 64-bit hash alone — keys
/// are never compared here, so two distinct keys colliding on the full
/// hash would be treated as the same entry. Confirm this is an
/// accepted trade-off.
#[inline]
fn find(&self, hash: u64) -> FindResult {
    if self.len() == 0 {
        return Miss(None);
    }
    // The root is always the first node pushed.
    let mut idx = 0;
    loop {
        // `idx` is 0 or comes from a `left`/`right` link, all of which
        // index nodes previously pushed into `store`.
        let node = unsafe { self.store.get_unchecked(idx) };
        if hash < node.hash {
            match node.left.get() {
                Some(i) => idx = i.get() as usize,
                None => return Miss(Some(&node.left)),
            }
        } else if hash > node.hash {
            match node.right.get() {
                Some(i) => idx = i.get() as usize,
                None => return Miss(Some(&node.right)),
            }
        } else {
            return Hit(idx);
        }
    }
}
/// Hash `key` with a fresh instance of the map's hasher type `H`.
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
    let mut hasher = H::default();
    key.hash(&mut hasher);
    hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialized the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
    K: Eq + Clone,
{
    // Linear scan comparing actual keys rather than using `find`'s
    // hash-based lookup — this cannot confuse hash-colliding keys, but
    // it is O(n). NOTE(review): sentinel (removed) nodes are matched
    // too; confirm `OccupiedEntry` handles `value == None`.
    for (idx, n) in self.store.iter().enumerate() {
        if &key == &n.key {
            return Entry::Occupied(OccupiedEntry::new(idx, key, self));
        }
    }
    Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
    /// The number of nodes remaining in the backing store.
    ///
    /// NOTE(review): this counts sentinel nodes left behind by `Map::remove`
    /// (their `value` is `None`), so it may exceed the number of items the
    /// iterator will actually yield — confirm whether callers rely on it.
    pub fn len(&self) -> usize {
        self.0.store.len()
    }
    /// If this iterator is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
/// Consuming iteration over a `Map`.
///
/// Entries are yielded in **reverse insertion order** (nodes are popped
/// off the back of the backing vector). Sentinel nodes left behind by
/// `Map::remove` (`value == None`) are skipped.
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Pop nodes until one still holds a value; removed entries are
        // sentinels with `value == None` and are silently dropped here.
        while let Some(n) = self.0.store.pop() {
            if let Some(v) = n.value {
                return Some((n.key, v));
            }
        }
        None
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The store may contain removed (sentinel) entries that `next`
        // skips, so the raw length is only an *upper* bound. The previous
        // `(l, Some(l))` claimed an exact size, which violates the
        // `size_hint` contract once any entry has been removed.
        (0, Some(self.0.store.len()))
    }
}
impl<K, Q: ?Sized, V> Index<&Q> for Map<K, V>
where
    K: Eq + Hash + Borrow<Q>,
    Q: Eq + Hash,
{
    type Output = V;
    /// Returns a reference to the value corresponding to the supplied key.
    ///
    /// # Panics
    ///
    /// Panics if the key is not present in the HashMap.
    fn index(&self, key: &Q) -> &V {
        match self.get(key) {
            Some(value) => value,
            None => panic!("Key not found in Map"),
        }
    }
}
impl<IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
    IK: Into<K>,
    IV: Into<V>,
    K: Hash + Eq,
{
    /// Builds a `Map` from an iterator of convertible key/value pairs.
    ///
    /// The map is pre-sized from the iterator's lower size hint to avoid
    /// repeated reallocation during insertion.
    ///
    /// Note: the unused `'json` lifetime parameter on the original impl
    /// was dead and has been removed.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = (IK, IV)>,
    {
        let iter = iter.into_iter();
        let mut map = Map::with_capacity(iter.size_hint().0);
        for (key, value) in iter {
            map.insert(key.into(), value.into());
        }
        map
    }
}
// Because keys can be inserted in different order, the safe way to
// compare `Map`s is to iterate over one and check that the other holds
// an equal value under the same cached hash.
impl<K, V> PartialEq for Map<K, V>
where
    K: Hash + Eq,
    V: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        // `len()` is the raw store length, which includes sentinel
        // (removed) entries. NOTE(review): two maps with the same live
        // entries but different removal histories may compare unequal
        // here — confirm whether that is intended.
        if self.len() != other.len() {
            return false;
        }
        // Faster than .get() since we can avoid hashing: look each of
        // our nodes up in `other` by its already-computed hash.
        for &Node {
            ref value, hash, ..
        } in self.store.iter()
        {
            if let Hit(idx) = other.find(hash) {
                if &other.store[idx].value == value {
                    continue;
                }
            }
            // Hash missing from `other`, or the values differ.
            return false;
        }
        true
    }
}
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter_mut`](./struct.Map.html#method.iter_mut)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct IterMut<'a, K, V> {
inner: slice::IterMut<'a, Node<K, V>>,
}
impl<K, V> Iter<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self {
Iter { inner: [].iter() }
}
}
impl<'i, K, V> Iterator for Iter<'i, K, V> {
type Item = (&'i K, &'i V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next_back() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
impl<K, V> IterMut<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self |
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next() {
let value = match node.value {
Some(ref mut value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next_back() {
let value = match node.value {
Some(ref mut value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
#[cfg(test)]
mod tests {
    use super::Map;

    #[test]
    fn empty() {
        let map: Map<&str, u64> = Map::new();
        assert_eq!(map.get("foo"), None);
        assert_eq!(map.len(), 0);
        assert_eq!(map.is_empty(), true);
    }

    #[test]
    fn simple() {
        let mut map: Map<&str, u64> = Map::new();
        map.insert("foo", 42);
        assert_eq!(map.get("foo"), Some(&42));
        assert_eq!(map.len(), 1);
        assert_eq!(map.is_empty(), false);
    }

    // New: value overwrite, which `simple` did not exercise.
    #[test]
    fn overwrite() {
        let mut map: Map<&str, u64> = Map::new();
        assert_eq!(map.insert("foo", 1), None);
        assert_eq!(map.insert("foo", 2), Some(1));
        assert_eq!(map.get("foo"), Some(&2));
    }

    // New: removal and subsequent lookup of the removed key.
    #[test]
    fn remove() {
        let mut map: Map<&str, u64> = Map::new();
        map.insert("foo", 42);
        assert_eq!(map.remove("foo"), Some(42));
        assert_eq!(map.get("foo"), None);
    }
}
| {
IterMut {
inner: [].iter_mut(),
}
} | identifier_body |
lib.rs | //! # Ordnung
//!
//! Fast, vector-based map implementation that preserves insertion order.
//!
//! + Map is implemented as a binary tree over a `Vec` for storage, with only
//! two extra words per entry for book-keeping on 64-bit architectures.
//! + A fast hash function with good random distribution is used to balance the
//! tree. Ordnung makes no guarantees that the tree will be perfectly
//! balanced, but key lookup should be approaching `O(log n)` in most cases.
//! + Tree traversal is always breadth-first and happens over a single
//! continuous block of memory, which makes it cache friendly.
//! + Iterating over all entries is always `O(n)`, same as `Vec<(K, V)>`.
//! + Removing a value uses a sentinel and is `~O(log n)`.
//! + There are no buckets, so there is no need to re-bucket things when growing
//! the map.
//!
//! ## When should you use this?
//!
//! + You need to preserve insertion order of the map.
//! + Iterating over the map is very performance sensitive.
//! + Your average map has fewer than 100 entries.
//! + You have no a priori knowledge about the final size of the map when you
//! start creating it.
#![warn(missing_docs)]
#![cfg_attr(not(test), no_std)]
extern crate alloc;
use core::borrow::Borrow;
use core::cell::Cell;
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::num::NonZeroU32;
use core::ops::Index;
use core::{fmt, slice};
pub mod compact;
mod entry;
mod raw_entry;
use ahash::AHasher;
pub use compact::Vec;
pub use entry::*;
pub use raw_entry::*;
// use alloc::vec::Vec;
/// Iterator over the keys
pub struct Keys<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
#[inline]
fn next(&mut self) -> Option<&'a K> {
self.inner.next().map(|(k, _)| k)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
//#[derive(Clone)]
/// Iterator over the values
pub struct Values<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[derive(Clone)]
struct Node<K, V> {
// Key
pub key: K,
// Hash of the key
pub hash: u64,
// Value stored. We'll use `None` as a sentinel value for removed
// entries.
pub value: Option<V>,
// Store vector index pointing to the `Node` for which `hash` is smaller
// than that of this `Node`.
pub left: Cell<Option<NonZeroU32>>,
// Same as above but for `Node`s with hash larger than this one. If the
// hash is the same, but keys are different, the lookup will default
// to the right branch as well.
pub right: Cell<Option<NonZeroU32>>,
}
impl<K, V> fmt::Debug for Node<K, V>
where
K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(
&(&self.key, &self.value, self.left.get(), self.right.get()),
f,
)
}
}
impl<K, V> PartialEq for Node<K, V>
where
K: PartialEq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.key == other.key && self.value == other.value
}
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
pub fn keys(&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
Miss(parent) => {
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, value, hash));
None
}
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked(idx) };
node.value.as_ref()
}
Miss(_) => None,
}
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.as_mut() },
Miss(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked(idx).value.is_some() },
Miss(_) => false,
}
}
/// Get a mutable reference to entry at key. Inserts a new entry by
/// calling `F` if absent.
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
F: FnOnce() -> V,
{
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked_mut(idx) };
if node.value.is_none() {
node.value = Some(fill());
}
node.value.as_mut().unwrap()
}
Miss(parent) => {
let idx = self.store.len();
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, fill(), hash));
self.store[idx].value.as_mut().unwrap()
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.take() },
Miss(_) => return None,
}
}
/// Returns the number of elements in the map.
#[inline]
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
self.store.clear();
}
#[inline]
fn find(&self, hash: u64) -> FindResult {
if self.len() == 0 {
return Miss(None);
}
let mut idx = 0;
loop {
let node = unsafe { self.store.get_unchecked(idx) };
if hash < node.hash {
match node.left.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.left)),
}
} else if hash > node.hash {
match node.right.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.right)),
}
} else {
return Hit(idx);
}
}
}
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
// let mut hasher = fnv::FnvHasher::default();
// let mut hasher = rustc_hash::FxHasher::default();
let mut hasher = H::default();
key.hash(&mut hasher);
hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialized the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
K: Eq + Clone,
{
for (idx, n) in self.store.iter().enumerate() {
if &key == &n.key {
return Entry::Occupied(OccupiedEntry::new(idx, key, self));
}
}
Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
/// The length of this iterator
pub fn len(&self) -> usize {
self.0.store.len()
}
/// If this iteratoris empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(n) = self.0.store.pop() {
if let Some(v) = n.value {
return Some((n.key, v));
}
} else {
return None;
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let l = self.0.store.len();
(l, Some(l))
}
}
impl<K, Q: ?Sized, V> Index<&Q> for Map<K, V>
where
K: Eq + Hash + Borrow<Q>,
Q: Eq + Hash,
{
type Output = V;
/// Returns a reference to the value corresponding to the supplied key.
///
/// # Panics
///
/// Panics if the key is not present in the HashMap.
fn index(&self, key: &Q) -> &V {
self.get(key).expect("Key not found in Map")
}
}
impl<'json, IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
IK: Into<K>,
IV: Into<V>,
K: Hash + Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (IK, IV)>,
{
let iter = iter.into_iter();
let mut map = Map::with_capacity(iter.size_hint().0);
for (key, value) in iter {
map.insert(key.into(), value.into());
}
map
}
}
// Because keys can inserted in different order, the safe way to
// compare `Map`s is to iterate over one and check if the other
// has all the same keys.
impl<K, V> PartialEq for Map<K, V>
where
K: Hash + Eq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
// Faster than .get() since we can avoid hashing
for &Node {
ref value, hash, ..
} in self.store.iter()
{
if let Hit(idx) = other.find(hash) {
if &other.store[idx].value == value |
}
return false;
}
true
}
}
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter_mut`](./struct.Map.html#method.iter_mut)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct IterMut<'a, K, V> {
inner: slice::IterMut<'a, Node<K, V>>,
}
impl<K, V> Iter<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self {
Iter { inner: [].iter() }
}
}
impl<'i, K, V> Iterator for Iter<'i, K, V> {
type Item = (&'i K, &'i V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next_back() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
impl<K, V> IterMut<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self {
IterMut {
inner: [].iter_mut(),
}
}
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next() {
let value = match node.value {
Some(ref mut value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next_back() {
let value = match node.value {
Some(ref mut value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
#[cfg(test)]
mod tests {
use super::Map;
#[test]
fn empty() {
let map: Map<&str, u64> = Map::new();
assert_eq!(map.get("foo"), None);
assert_eq!(map.len(), 0);
assert_eq!(map.is_empty(), true);
}
#[test]
fn simple() {
let mut map: Map<&str, u64> = Map::new();
map.insert("foo", 42);
assert_eq!(map.get("foo"), Some(&42));
assert_eq!(map.len(), 1);
assert_eq!(map.is_empty(), false);
}
}
| {
continue;
} | conditional_block |
main_api_sentence.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/23 4:56 下午
# @File : api.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
######################################################
# 使用没有蒸馏的模型预测,改造成一个flask api, 句子级情感预测
# 包括训练接口api和预测接口api
# /api/train
# /api/predict
######################################################
import logging
import logging.config
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os, random, time
import re
import glob
import numpy as np
import torch
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from modeling import BertSPCSimple, BertForGLUESimpleAdaptorTraining
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from utils import divide_parameters
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from optimization import BERTAdam
from functools import partial
from tqdm import tqdm
from utils_glue import InputExample, convert_examples_to_features
import argparse
import scipy
from flask import Flask, request, jsonify, abort
app = Flask(__name__) | :param max_seq_length:
:param tokenizer: 初始化后的tokenizer
:param label_list:
:return:
"""
examples = []
for guid, content in enumerate(contents):
examples.append(
InputExample(guid=guid, text_a=content))
features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
output_mode="classification",
cls_token_segment_id=0, pad_token_segment_id=0,reverse_truncate=reverse_truncate)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
class TorchAsBertModel(object):
def __init__(self, verbose=0):
self.verbose = verbose
self.label_list = ["消极","中性","积极"]
self.num_labels = len(self.label_list)
# 判断使用的设备
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
# 预测的batch_size大小
self.train_batch_size = 8
# 预测的batch_size大小
self.predict_batch_size = 64
self.max_seq_length = 128
# self.load_predict_model()
self.load_macbert_model()
# self.load_train_model()
self.reverse_truncate = True
def load_train_model(self):
"""
初始化训练的模型
:return:
"""
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.learning_rate = 2e-05
#学习率 warmup的比例
self.warmup_proportion = 0.1
self.num_train_epochs = 1
#使用的学习率scheduler
self.schedule = 'slanted_triangular'
self.s_opt1 = 30.0
self.s_opt2 = 0.0
self.s_opt3 = 1.0
self.weight_decay_rate = 0.01
#训练多少epcoh保存一次模型
self.ckpt_frequency = 1
#模型和日志保存的位置
self.output_dir = "output_root_dir/train_api"
#梯度累积步数
self.gradient_accumulation_steps = 1
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "mac_bert_model/pytorch_model.bin"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
state_weight = {k[5:]: v for k, v in state_dict_S.items() if k.startswith('bert.')}
missing_keys, _ = model_S.bert.load_state_dict(state_weight, strict=False)
#验证下参数没有丢失
assert len(missing_keys) == 0
self.train_tokenizer = tokenizer
self.train_model = model_S
logger.info(f"训练模型{self.tuned_checkpoint_S}加载完成")
def load_predict_model(self, model_file="trained_teacher_model/gs3024.pkl"):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "bert_model/config.json"
self.tuned_checkpoint_S = model_file
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"预测模型{model_file}加载完成")
def load_macbert_model(self):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "trained_teacher_model/macbert_2290_cosmetics_weibo.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_894_cosmetics.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_teacher_max75len_5000.pkl"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"macbert预测模型加载完成")
def compute_metrics(self, predid, labelid):
"""
计算准确度
:return:
"""
return (predid == labelid).mean()
def predict_batch(self, data, model_file=None, print_acc=False):
"""
batch_size数据处理
:param data: 是一个要处理的数据列表[(content,aspect),...,]
:param truncated: 是否要截断数据
:param model_file: 模型文件
:param print_acc: 如果要打印准确率,需要data的第三个位置时label
:return:, 返回格式是 [(predicted_label, predict_score),...]
"""
#如果为None,就不改变加载的模型,否则就改变加载的模型
if model_file:
self.load_predict_model(model_file=model_file)
eval_dataset = load_examples(data, self.max_seq_length, self.predict_tokenizer, self.label_list, self.reverse_truncate)
if self.verbose:
print("评估数据集已加载")
predictids, probability = self.do_predict(model=self.predict_model, eval_dataset=eval_dataset)
if self.verbose:
print(f"预测的结果是: {predictids}, {[self.label_list[id] for id in predictids]}")
#把id变成标签
predict_labels = [self.label_list[r] for r in predictids]
results = list(zip(predict_labels, probability, data))
if print_acc:
label_ids = [self.label_list.index(d[2]) for d in data]
accuracy = self.compute_metrics(np.array(predictids), np.array(label_ids))
return results, accuracy
return results
def do_predict(self, model, eval_dataset, step=0):
"""
:param eval_dataset:
:param model 参数必须携带,因为训练的callback会调用评估模型时会传入model
:return: 2个list,一个是预测的id列表,一个是预测的probability列表
"""
# 任务名字
if self.verbose:
print("***** 开始预测 *****")
print(" 样本数 = %d", len(eval_dataset))
print(" Step数 = %d", step)
print(" Batch size = %d", self.predict_batch_size)
# 评估样本
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.predict_batch_size)
model.eval()
model.to(self.device)
# 起始时间
start_time = time.time()
# 存储预测值
pred_logits = []
for batch in tqdm(eval_dataloader, desc="评估中", disable=True):
input_ids, input_mask, segment_ids, _ = batch
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
pred_logits = np.array(pred_logits)
# 找到最大的概率label
preds = np.argmax(pred_logits, axis=1)
if self.verbose:
print(f"preds: {preds}")
predictids = preds.tolist()
#获取最大概率的可能性,即分数
pred_logits_softmax = scipy.special.softmax(pred_logits, axis=1)
probability = np.max(pred_logits_softmax, axis=1)
probability = probability.tolist()
cost_time = time.time() - start_time
if self.verbose:
print(
f"--- 评估{len(eval_dataset)}条数据的总耗时是 {cost_time} seconds, 每条耗时 {cost_time / len(eval_dataset)} seconds ---")
return predictids, probability
def do_train(self, data, truncated=False):
"""
训练模型, 数据集分成2部分,训练集和验证集, 默认比例9:1
:param data: 输入的数据,注意如果做truncated,那么输入的数据为 [(content,aspect,start_idx, end_idx, label),...,]
:param truncated: 是否要截断,截断按照 self.left_max_seq_len, self.right_max_seq_len进行
:return:
"""
if truncated:
data, locations = self.do_truncate_data(data)
train_data_len = int(len(data) * 0.9)
train_data = data[:train_data_len]
eval_data = data[train_data_len:]
train_dataset = load_examples(train_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
eval_dataset = load_examples(eval_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
logger.info("训练数据集已加载,开始训练")
num_train_steps = int(len(train_dataset) / self.train_batch_size) * self.num_train_epochs
forward_batch_size = int(self.train_batch_size / self.gradient_accumulation_steps)
# 开始训练
params = list(self.train_model.named_parameters())
all_trainable_params = divide_parameters(params, lr=self.learning_rate, weight_decay_rate=self.weight_decay_rate)
# 优化器设置
optimizer = BERTAdam(all_trainable_params, lr=self.learning_rate,
warmup=self.warmup_proportion, t_total=num_train_steps, schedule=self.schedule,
s_opt1=self.s_opt1, s_opt2=self.s_opt2, s_opt3=self.s_opt3)
logger.info("***** 开始训练 *****")
logger.info(" 训练样本数是 = %d", len(train_dataset))
logger.info(" 评估样本数是 = %d", len(eval_dataset))
logger.info(" 前向 batch size = %d", forward_batch_size)
logger.info(" 训练的steps = %d", num_train_steps)
########### 训练的配置 ###########
train_config = TrainingConfig(
gradient_accumulation_steps = self.gradient_accumulation_steps,
ckpt_frequency = self.ckpt_frequency,
log_dir = self.output_dir,
output_dir = self.output_dir,
device = self.device)
#初始化trainer,执行监督训练,而不是蒸馏。它可以把model_S模型训练成为teacher模型
distiller = BasicTrainer(train_config = train_config,
model = self.train_model,
adaptor = BertForGLUESimpleAdaptorTraining)
train_sampler = RandomSampler(train_dataset)
#训练的dataloader
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=forward_batch_size, drop_last=True)
#执行callbakc函数,对eval数据集
callback_func = partial(self.do_predict, eval_dataset=eval_dataset)
with distiller:
#开始训练
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=self.num_train_epochs, callback=callback_func)
logger.info(f"训练完成")
return "Done"
@app.route("/api/predict", methods=['POST'])
def predict():
"""
接收POST请求,获取data参数
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate", methods=['POST'])
def predict_truncate():
"""
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate_model", methods=['POST'])
def predict_truncate_model():
"""
使用self.output_dir 下的最新的模型的文件
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
list_of_files = glob.glob(os.path.join(model.output_dir, "*.pkl"))
latest_model_file = max(list_of_files, key=os.path.getctime)
results = model.predict_batch(test_data, model_file=latest_model_file)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_macbert", methods=['POST'])
def predict_macbert():
"""
加载macbert模型,返回预测结果
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score, data),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
model.load_macbert_model()
results, accuracy = model.predict_batch(test_data, print_acc=True)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
logger.info(f"模型准确率为:{accuracy}")
return jsonify(results)
@app.route("/api/train", methods=['POST'])
def train():
"""
接收data参数,
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data)
return jsonify(results)
@app.route("/api/train_truncate", methods=['POST'])
def train_truncate():
"""
接收data参数,data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,start_idx, end_idx, label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data, truncated=True)
return jsonify(results)
if __name__ == "__main__":
model = TorchAsBertModel()
app.run(host='0.0.0.0', port=5016, debug=False, threaded=True) |
def load_examples(contents, max_seq_length, tokenizer, label_list, reverse_truncate=False):
"""
:param contents: eg: [('苹果很好用', '苹果')] 或者 [('苹果很好用', '苹果', '积极')] | random_line_split |
main_api_sentence.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/23 4:56 下午
# @File : api.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
######################################################
# 使用没有蒸馏的模型预测,改造成一个flask api, 句子级情感预测
# 包括训练接口api和预测接口api
# /api/train
# /api/predict
######################################################
import logging
import logging.config
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os, random, time
import re
import glob
import numpy as np
import torch
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from modeling import BertSPCSimple, BertForGLUESimpleAdaptorTraining
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from utils import divide_parameters
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from optimization import BERTAdam
from functools import partial
from tqdm import tqdm
from utils_glue import InputExample, convert_examples_to_features
import argparse
import scipy
from flask import Flask, request, jsonify, abort
app = Flask(__name__)
def load_examples(contents, max_seq_length, tokenizer, label_list, reverse_truncate=False):
"""
:param contents: eg: [('苹果很好用', '苹果')] 或者 [('苹果很好用', '苹果', '积极')]
:param max_seq_length:
:param tokenizer: 初始化后的tokenizer
:param label_list:
:return:
"""
examples = []
for guid, content in enumerate(contents):
examples.append(
InputExample(guid=guid, text_a=content))
features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
output_mode="classification",
cls_token_segment_id=0, pad_token_segment_id=0,reverse_truncate=reverse_truncate)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
class TorchAsBertModel(object):
def __init__(self, verbose=0):
self.verbose = verbose
self.label_list = ["消极","中性","积极"]
self.num_labels = len(self.label_list)
# 判断使用的设备
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
# 预测的batch_size大小
self.train_batch_size = 8
# 预测的batch_size大小
self.predict_batch_size = 64
self.max_seq_length = 128
# self.load_predict_model()
self.load_macbert_model()
# self.load_train_model()
self.reverse_truncate = True
def load_train_model(self):
"""
初始化训练的模型
:return:
"""
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.learning_rate = 2e-05
#学习率 warmup的比例
self.warmup_proportion = 0.1
self.num_train_epochs = 1
#使用的学习率scheduler
self.schedule = 'slanted_triangular'
self.s_opt1 = 30.0
self.s_opt2 = 0.0
self.s_opt3 = 1.0
self.weight_decay_rate = 0.01
#训练多少epcoh保存一次模型
self.ckpt_frequency = 1
#模型和日志保存的位置
self.output_dir = "output_root_dir/train_api"
#梯度累积步数
self.gradient_accumulation_steps = 1
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "mac_bert_model/pytorch_model.bin"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
state_weight = {k[5:]: v for k, v in state_dict_S.items() if k.startswith('bert.')}
missing_keys, _ = model_S.bert.load_state_dict(state_weight, strict=False)
#验证下参数没有丢失
assert len(missing_keys) == 0
self.train_tokenizer = tokenizer
self.train_model = model_S
logger.info(f"训练模型{self.tuned_checkpoint_S}加载完成")
def load_predict_model(self, model_file="trained_teacher_model/gs3024.pkl"):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "bert_model/config.json"
self.tuned_checkpoint_S = model_file
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"预测模型{model_file}加载完成")
def load_macbert_model(self):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "trained_teacher_model/macbert_2290_cosmetics_weibo.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_894_cosmetics.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_teacher_max75len_5000.pkl"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"macbert预测模型加载完成")
def compute_metrics(self, predid, labelid):
"""
计算准确度
:return:
"""
return (predid == labelid).mean()
def predict_batch(self, data, model_file=None, print_acc=False):
"""
batch_size数据处理
:param data: 是一个要处理的数据列表[(content,aspect),...,]
:param truncated: 是否要截断数据
:param model_file: 模型文件
:param print_acc: 如果要打印准确率,需要data的第三个位置时label
:return:, 返回格式是 [(predicted_label, predict_score),...]
"""
#如果为None,就不改变加载的模型,否则就改变加载的模型
if model_file:
self.load_predict_model(model_file=model_file)
eval_dataset = load_examples(data, self.max_seq_length, self.predict_tokenizer, self.label_list, self.reverse_truncate)
if self.verbose:
print("评估数据集已加载")
predictids, probability = self.do_predict(model=self.predict_model, eval_dataset=eval_dataset)
if self.verbose:
print(f"预测的结果是: {predictids}, {[self.label_list[id] for id in predictids]}")
#把id变成标签
predict_labels = [self.label_list[r] for r in predictids]
results = list(zip(predict_labels, probability, data))
if print_acc:
label_ids = [self.label_list.index(d[2]) for d in data]
accuracy = self.compute_metrics(np.array(predictids), np.array(label_ids))
return results, accuracy
return results
def do_predict(self, model, eval_dataset, step=0):
"""
:param eval_dataset:
:param model 参数必须携带,因为训练的callback会调用评估模型时会传入model
:return: 2个list,一个是预测的id列表,一个是预测的probability列表
"""
# 任务名字
if self.verbose:
print("***** 开始预测 *****")
print(" 样本数 = %d", len(eval_dataset))
print(" Step数 = %d", step)
print(" Batch size = %d", self.predict_batch_size)
# 评估样本
e | dataset, sampler=eval_sampler, batch_size=self.predict_batch_size)
model.eval()
model.to(self.device)
# 起始时间
start_time = time.time()
# 存储预测值
pred_logits = []
for batch in tqdm(eval_dataloader, desc="评估中", disable=True):
input_ids, input_mask, segment_ids, _ = batch
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
pred_logits = np.array(pred_logits)
# 找到最大的概率label
preds = np.argmax(pred_logits, axis=1)
if self.verbose:
print(f"preds: {preds}")
predictids = preds.tolist()
#获取最大概率的可能性,即分数
pred_logits_softmax = scipy.special.softmax(pred_logits, axis=1)
probability = np.max(pred_logits_softmax, axis=1)
probability = probability.tolist()
cost_time = time.time() - start_time
if self.verbose:
print(
f"--- 评估{len(eval_dataset)}条数据的总耗时是 {cost_time} seconds, 每条耗时 {cost_time / len(eval_dataset)} seconds ---")
return predictids, probability
def do_train(self, data, truncated=False):
"""
训练模型, 数据集分成2部分,训练集和验证集, 默认比例9:1
:param data: 输入的数据,注意如果做truncated,那么输入的数据为 [(content,aspect,start_idx, end_idx, label),...,]
:param truncated: 是否要截断,截断按照 self.left_max_seq_len, self.right_max_seq_len进行
:return:
"""
if truncated:
data, locations = self.do_truncate_data(data)
train_data_len = int(len(data) * 0.9)
train_data = data[:train_data_len]
eval_data = data[train_data_len:]
train_dataset = load_examples(train_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
eval_dataset = load_examples(eval_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
logger.info("训练数据集已加载,开始训练")
num_train_steps = int(len(train_dataset) / self.train_batch_size) * self.num_train_epochs
forward_batch_size = int(self.train_batch_size / self.gradient_accumulation_steps)
# 开始训练
params = list(self.train_model.named_parameters())
all_trainable_params = divide_parameters(params, lr=self.learning_rate, weight_decay_rate=self.weight_decay_rate)
# 优化器设置
optimizer = BERTAdam(all_trainable_params, lr=self.learning_rate,
warmup=self.warmup_proportion, t_total=num_train_steps, schedule=self.schedule,
s_opt1=self.s_opt1, s_opt2=self.s_opt2, s_opt3=self.s_opt3)
logger.info("***** 开始训练 *****")
logger.info(" 训练样本数是 = %d", len(train_dataset))
logger.info(" 评估样本数是 = %d", len(eval_dataset))
logger.info(" 前向 batch size = %d", forward_batch_size)
logger.info(" 训练的steps = %d", num_train_steps)
########### 训练的配置 ###########
train_config = TrainingConfig(
gradient_accumulation_steps = self.gradient_accumulation_steps,
ckpt_frequency = self.ckpt_frequency,
log_dir = self.output_dir,
output_dir = self.output_dir,
device = self.device)
#初始化trainer,执行监督训练,而不是蒸馏。它可以把model_S模型训练成为teacher模型
distiller = BasicTrainer(train_config = train_config,
model = self.train_model,
adaptor = BertForGLUESimpleAdaptorTraining)
train_sampler = RandomSampler(train_dataset)
#训练的dataloader
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=forward_batch_size, drop_last=True)
#执行callbakc函数,对eval数据集
callback_func = partial(self.do_predict, eval_dataset=eval_dataset)
with distiller:
#开始训练
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=self.num_train_epochs, callback=callback_func)
logger.info(f"训练完成")
return "Done"
@app.route("/api/predict", methods=['POST'])
def predict():
"""
接收POST请求,获取data参数
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate", methods=['POST'])
def predict_truncate():
"""
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate_model", methods=['POST'])
def predict_truncate_model():
"""
使用self.output_dir 下的最新的模型的文件
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
list_of_files = glob.glob(os.path.join(model.output_dir, "*.pkl"))
latest_model_file = max(list_of_files, key=os.path.getctime)
results = model.predict_batch(test_data, model_file=latest_model_file)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_macbert", methods=['POST'])
def predict_macbert():
"""
加载macbert模型,返回预测结果
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score, data),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
model.load_macbert_model()
results, accuracy = model.predict_batch(test_data, print_acc=True)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
logger.info(f"模型准确率为:{accuracy}")
return jsonify(results)
@app.route("/api/train", methods=['POST'])
def train():
"""
接收data参数,
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data)
return jsonify(results)
@app.route("/api/train_truncate", methods=['POST'])
def train_truncate():
"""
接收data参数,data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,start_idx, end_idx, label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data, truncated=True)
return jsonify(results)
if __name__ == "__main__":
model = TorchAsBertModel()
app.run(host='0.0.0.0', port=5016, debug=False, threaded=True)
| val_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_ | conditional_block |
main_api_sentence.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/23 4:56 下午
# @File : api.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
######################################################
# 使用没有蒸馏的模型预测,改造成一个flask api, 句子级情感预测
# 包括训练接口api和预测接口api
# /api/train
# /api/predict
######################################################
import logging
import logging.config
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os, random, time
import re
import glob
import numpy as np
import torch
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from modeling import BertSPCSimple, BertForGLUESimpleAdaptorTraining
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from utils import divide_parameters
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from optimization import BERTAdam
from functools import partial
from tqdm import tqdm
from utils_glue import InputExample, convert_examples_to_features
import argparse
import scipy
from flask import Flask, request, jsonify, abort
app = Flask(__name__)
def load_examples(contents, max_seq_length, tokenizer, label_list, reverse_truncate=False):
"""
:param contents: eg: [('苹果很好用', '苹果')] 或者 [('苹果很好用', '苹果', '积极')]
:param max_seq_length:
:param tokenizer: 初始化后的tokenizer
:param label_list:
:return:
"""
examples = []
for guid, content in enumerate(contents):
examples.append(
InputExample(guid=guid, text_a=content))
features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
output_mode="classification",
cls_token_segment_id=0, pad_token_segment_id=0,reverse_truncate=reverse_truncate)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
class TorchAsBertModel(object):
def __init__(self, verbose=0):
self.verbose = verbose
self.label_list = ["消极","中性","积极"]
self.num_labels = len(self.label_list)
# 判断使用的设备
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
# 预测的batch_size大小
self.train_batch_size = 8
# 预测的batch_size大小
self.predict_batch_size = 64
self.max_seq_length = 128
# self.load_predict_model()
self.load_macbert_model()
# self.load_train_model()
self.reverse_truncate = True
def load_train_model(self):
"""
初始化训练的模型
:return:
"""
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.learning_rate = 2e-05
#学习率 warmup的比例
self.warmup_proportion = 0.1
self.num_train_epochs = 1
#使用的学习率scheduler
self.schedule = 'slanted_triangular'
self.s_opt1 = 30.0
self.s_opt2 = 0.0
self.s_opt3 = 1.0
self.weight_decay_rate = 0.01
#训练多少epcoh保存一次模型
self.ckpt_frequency = 1
#模型和日志保存的位置
self.output_dir = "output_root_dir/train_api"
#梯度累积步数
self.gradient_accumulation_steps = 1
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "mac_bert_model/pytorch_model.bin"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
state_weight = {k[5:]: v for k, v in state_dict_S.items() if k.startswith('bert.')}
missing_keys, _ = model_S.bert.load_state_dict(state_weight, strict=False)
#验证下参数没有丢失
assert len(missing_keys) == 0
self.train_tokenizer = tokenizer
self.train_model = model_S
logger.info(f"训练模型{self.tuned_checkpoint_S}加载完成")
def load_predict_model(self, model_file="trained_teacher_model/gs3024.pkl"):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "bert_model/config.json"
self.tuned_checkpoint_S = model_file
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"预测模型{model_file}加载完成")
def load_macbert_model(self):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "trained_teacher_model/macbert_2290_cosmetics_weibo.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_894_cosmetics.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_teacher_max75len_5000.pkl"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"macbert预测模型加载完成")
def compute_metrics(self, predid, labelid):
"""
计算准确度
:return:
"""
return (predid == labelid).mean()
def predict_batch(self, data, model_file=None, print_acc=False):
"""
batch_size数据处理
:param data: 是一个要处理的数据列表[(content,aspect),...,]
:param truncated: 是否要截断数据
:param model_file: 模型文件
:param print_acc: 如果要打印准确率,需要data的第三个位置时label
:return:, 返回格式是 [(predicted_label, predict_score),...]
"""
#如果为None,就不改变加载的模型,否则就改变加载的模型
if model_file:
self.load_predict_model(model_file=model_file)
eval_dataset = load_examples(data, self.max_seq_length, self.predict_tokenizer, self.label_list, self.reverse_truncate)
if self.verbose:
print("评估数据集已加载")
predictids, probability = self.do_predict(model=self.predict_model, eval_dataset=eval_dataset)
if self.verbose:
print(f"预测的结果是: {predictids}, {[self.label_list[id] for id in predictids]}")
#把id变成标签
predict_labels = [self.label_list[r] for r in predictids]
results = list(zip(predict_labels, probability, data))
if print_acc:
label_ids = [self.label_list.index(d[2]) for d in data]
accuracy = self.compute_metrics(np.array(predictids), np.array(label_ids))
return results, accuracy
return results
def do_predict(self, model, eval_dataset, step=0):
"""
:param eval_dataset:
:param model 参数必须携带,因为训练的callback会调用评估模型时会传入model
:return: 2个list,一个是预测的id列表,一个是预测的probability列表
"""
# 任务名字
if self.verbose:
print("***** 开始预测 *****")
print(" 样本数 = %d", len(eval_dataset))
print(" Step数 = %d", step)
print(" Batch size = %d", self.predict_batch_size)
# 评估样本
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.predict_batch_size)
model.eval()
model.to(self.device)
# 起始时间
start_time = time.time()
# 存储预测值
pred_logits = []
for batch in tqdm(eval_dataloader, desc="评估中", disable=True):
input_ids, input_mask, segment_ids, _ = batch
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
pred_logits = np.array(pred_logits)
# 找到最大的概率label
preds = np.argmax(pred_logits, axis=1)
if self.verbose:
print(f"preds: {preds}")
predictids = preds.tolist()
#获取最大概率的可能性,即分数
pred_logits_softmax = scipy.special.softmax(pred_logits, axis=1)
probability = np.max(pred_logits_softmax, axis=1)
probability = probability.tolist()
cost_time = time.time() - start_time
if self.verbose:
print(
f"--- 评估{len(eval_dataset)}条数据的总耗时是 {cost_time} seconds, 每条耗时 {cost_time / len(eval_dataset)} seconds ---")
return predictids, probability
def do_train(self, data, truncated=False):
"""
训练模型, 数据集分成2部分,训练集和验证集, 默认比例9:1
:param data: 输入的数据,注意如果做truncated,那么输入的数据为 [(content,aspect,start_idx, end_idx, label),...,]
:param truncated: 是否要截断,截断按照 self.left_max_seq_len, self.right_max_seq_len进行
:return:
"""
if truncated:
data, locations = self.do_truncate_data(data)
train_data_len = int(len(data) * 0.9)
train_data = data[:train_data_len]
eval_data = data[train_data_len:]
train_dataset = load_examples(train_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
eval_dataset = load_examples(eval_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
logger.info("训练数据集已加载,开始训练")
num_train_steps = int(len(train_dataset) / self.train_batch_size) * self.num_train_epochs
forward_batch_size = int(self.train_batch_size / self.gradient_accumulation_steps)
# 开始训练
params = list(self.train_model.named_parameters())
all_trainable_params = divide_parameters(params, lr=self.learning_rate, weight_decay_rate=self.weight_decay_rate)
# 优化器设置
optimizer = BERTAdam(all_trainable_params, lr=self.learning_rate,
warmup=self.warmup_proportion, t_total=num_train_steps, schedule=self.schedule,
s_opt1=self.s_opt1, s_opt2=self.s_opt2, s_opt3=self.s_opt3)
logger.info("***** 开始训练 *****")
logger.info(" 训练样本数是 = %d", len(train_dataset))
logger.info(" 评估样本数是 = %d", len(eval_dataset))
logger.info(" 前向 batch size = %d", forward_batch_size)
logger.info(" 训练的steps = %d", num_train_steps)
########### 训练的配置 ###########
train_config = TrainingConfig(
gradient_accumulation_steps = self.gradient_accumulation_steps,
ckpt_frequency = self.ckpt_frequency,
log_dir = self.output_dir,
output_dir = self.output_dir,
device = self.device)
#初始化trainer,执行监督训练,而不是蒸馏。它可以把model_S模型训练成为teacher模型
distiller = BasicTrainer(train_config = train_config,
model = self.train_model,
adaptor = BertForGLUESimpleAdaptorTraining)
train_sampler = RandomSampler(train_dataset)
#训练的dataloader
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=forward_batch_size, drop_last=True)
#执行callbakc函数,对eval数据集
callback_func = partial(self.do_predict, eval_dataset=eval_dataset)
with distiller:
#开始训练
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=self.num_train_epochs, callback=callback_func)
logger.info(f"训练完成")
return "Done"
@app.route("/api/predict", methods=['POST'])
def predict():
"""
接收POST请求,获取data参数
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate", methods=['POST'])
def predict_truncate():
"""
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate_model", methods=['POST'])
def predict_truncate_model():
"""
使用self.output_dir 下的最新的模型的文件
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
list_of_files = glob.glob(os.path.join(model.output_dir, "*.pkl"))
latest_model_file = max(list_of_files, key=os.path.getctime)
results = model.predict_batch(test_data, model_file=latest_model_file)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_macbert", methods=['POST'])
def predict_macbert():
"""
加载macbert模型,返回预测结果
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score, data),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
model.load_macbert_model()
results, accuracy = model.predict_batch(test_data, print_acc=True)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
logger.info(f"模型准确率为:{accuracy}")
return jsonify(results)
@app.route("/api/train", methods=['POST'])
def train():
"""
接收data参数,
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data)
return jsonify(results)
@app.route("/api/train_truncate", methods=['POST'])
def train_truncate():
"""
接收data参数,data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,start_idx, end_idx, label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data, truncated=True)
return jsonify(results)
if __name__ == "__main__":
model = TorchAsBertModel()
app.run(host='0.0.0.0', port=5016, debug=False, threaded=True)
| identifier_name | ||
main_api_sentence.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/23 4:56 下午
# @File : api.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
######################################################
# 使用没有蒸馏的模型预测,改造成一个flask api, 句子级情感预测
# 包括训练接口api和预测接口api
# /api/train
# /api/predict
######################################################
import logging
import logging.config
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os, random, time
import re
import glob
import numpy as np
import torch
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from modeling import BertSPCSimple, BertForGLUESimpleAdaptorTraining
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from utils import divide_parameters
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from optimization import BERTAdam
from functools import partial
from tqdm import tqdm
from utils_glue import InputExample, convert_examples_to_features
import argparse
import scipy
from flask import Flask, request, jsonify, abort
app = Flask(__name__)
def load_examples(contents, max_seq_length, tokenizer, label_list, reverse_truncate=False):
"""
:param contents: eg: [('苹果很好用', '苹果')] 或者 [('苹果很好用', '苹果', '积极')]
:param max_seq_length:
:param tokenizer: 初始化后的tokenizer
:param label_list:
:return:
"""
examples = []
for guid, content in enumerate(contents):
examples.append(
InputExample(guid=guid, text_a=content))
features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
output_mode="classification",
cls_token_segment_id=0, pad_token_segment_id=0,reverse_truncate=reverse_truncate)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
class TorchAsBertModel(object):
def __init__(self, verbose=0):
self.verbose = verbose
self.label_list = ["消极","中性","积极"]
self.num_labels = len(self.label_list)
# 判断使用的设备
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
# 预测的batch_size大小
self.train_batch_size = 8
# 预测的batch_size大小
self.predict_batch_size = 64
self.max_seq_length = 128
# self.load_predict_model()
self.load_macbert_model()
# self.load_train_model()
self.reverse_truncate = True
def load_train_model(self):
"""
初始化训练的模型
:return:
"""
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.learning_rate = 2e-05
#学习率 warmup的比例
self.warmup_proportion = 0.1
self.num_train_epochs = 1
#使用的学习率scheduler
self.schedule = 'slanted_triangular'
self.s_opt1 = 30.0
self.s_opt2 = 0.0
self.s_opt3 = 1.0
self.weight_decay_rate = 0.01
#训练多少epcoh保存一次模型
self.ckpt_frequency = 1
#模型和日志保存的位置
self.output_dir = "output_root_dir/train_api"
#梯度累积步数
self.gradient_accumulation_steps = 1
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "mac_bert_model/pytorch_model.bin"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
state_weight = {k[5:]: v for k, v in state_dict_S.items() if k.startswith('bert.')}
missing_keys, _ = model_S.bert.load_state_dict(state_weight, strict=False)
#验证下参数没有丢失
assert len(missing_keys) == 0
self.train_tokenizer = tokenizer
self.train_model = model_S
logger.info(f"训练模型{self.tuned_checkpoint_S}加载完成")
def load_predict_model(self, model_file="trained_teacher_model/gs3024.pkl"):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "bert_model/config.json"
self.tuned_checkpoint_S = model_file
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"预测模型{model_file}加载完成")
def load_macbert_model(self):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
# 解析配置文件, 教师模型和student模型的vocab是不变的
self.vocab_file = "mac_bert_model/vocab.txt"
# 这里是使用的teacher的config和微调后的teacher模型, 也可以换成student的config和蒸馏后的student模型
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "mac_bert_model/config.json"
self.tuned_checkpoint_S = "trained_teacher_model/macbert_2290_cosmetics_weibo.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_894_cosmetics.pkl"
# self.tuned_checkpoint_S = "trained_teacher_model/macbert_teacher_max75len_5000.pkl"
# 加载student的配置文件, 校验最大序列长度小于我们的配置中的序列长度
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
# 加载tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
# 加载模型
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
print("模型已加载")
self.predict_tokenizer = tokenizer
self.predict_model = model_S
logger.info(f"macbert预测模型加载完成")
def compute_metrics(self, predid, labelid):
"""
计算准确度
:return:
"""
return (predid == labelid).mean()
def predict_batch(self, data, model_file=None, print_acc=False):
"""
batch_size数据处理
:param data: 是一个要处理的数据列表[(content,aspect),...,]
:param truncated: 是否要截断数据
:param model_file: 模型文件
:param print_acc: 如果要打印准确率,需要data的第三个位置时label
:return:, 返回格式是 [(predicted_label, predict_score),...]
"""
#如果为None,就不改变加载的模型,否则就改变加载的模型
if model_file:
self.load_predict_model(model_file=model_file)
eval_dataset = load_examples(data, self.max_seq_length, self.predict_tokenizer, self.label_list, self.reverse_truncate)
if self.verbose:
print("评估数据集已加载")
predictids, probability = self.do_predict(model=self.predict_model, eval_dataset=eval_dataset)
if self.verbose:
print(f"预测的结果是: {predictids}, {[self.label_list[id] for id in predictids]}")
#把id变成标签
predict_labels = [self.label_list[r] for r in predictids]
results = list(zip(predict_labels, probability, data))
if print_acc:
label_ids = [self.label_list.index(d[2]) for d in data]
accuracy = self.compute_metrics(np.array(predictids), np.array(label_ids))
return results, accuracy
return results
def do_predict(self, model, eval_dataset, step=0):
"""
:param eval_dataset:
:param model 参数必须携带,因为训练的callback会调用评估模型时会传入model
:return: 2个list,一个是预测的id列表,一个是预测的probability列表
"""
# 任务名字
if self.verbose:
print("***** 开始预测 *****")
print(" 样本数 = %d", len(eval_dataset))
print(" Step数 = %d", step)
print(" Batch size = %d", self.predict_batch_size)
# 评估样本
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.predict_batch_size)
model.eval()
model.to(self.device)
# 起始时间
start_time = time.time()
# 存储预测值
pred_logits = []
for batch in tqdm(eval_dataloader, desc="评估中", disable=True):
input_ids, input_mask, segment_ids, _ = batch
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids | weight_decay_rate=self.weight_decay_rate)
# 优化器设置
optimizer = BERTAdam(all_trainable_params, lr=self.learning_rate,
warmup=self.warmup_proportion, t_total=num_train_steps, schedule=self.schedule,
s_opt1=self.s_opt1, s_opt2=self.s_opt2, s_opt3=self.s_opt3)
logger.info("***** 开始训练 *****")
logger.info(" 训练样本数是 = %d", len(train_dataset))
logger.info(" 评估样本数是 = %d", len(eval_dataset))
logger.info(" 前向 batch size = %d", forward_batch_size)
logger.info(" 训练的steps = %d", num_train_steps)
########### 训练的配置 ###########
train_config = TrainingConfig(
gradient_accumulation_steps = self.gradient_accumulation_steps,
ckpt_frequency = self.ckpt_frequency,
log_dir = self.output_dir,
output_dir = self.output_dir,
device = self.device)
#初始化trainer,执行监督训练,而不是蒸馏。它可以把model_S模型训练成为teacher模型
distiller = BasicTrainer(train_config = train_config,
model = self.train_model,
adaptor = BertForGLUESimpleAdaptorTraining)
train_sampler = RandomSampler(train_dataset)
#训练的dataloader
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=forward_batch_size, drop_last=True)
#执行callbakc函数,对eval数据集
callback_func = partial(self.do_predict, eval_dataset=eval_dataset)
with distiller:
#开始训练
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=self.num_train_epochs, callback=callback_func)
logger.info(f"训练完成")
return "Done"
@app.route("/api/predict", methods=['POST'])
def predict():
"""
接收POST请求,获取data参数
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate", methods=['POST'])
def predict_truncate():
"""
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
results = model.predict_batch(test_data)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_truncate_model", methods=['POST'])
def predict_truncate_model():
"""
使用self.output_dir 下的最新的模型的文件
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
list_of_files = glob.glob(os.path.join(model.output_dir, "*.pkl"))
latest_model_file = max(list_of_files, key=os.path.getctime)
results = model.predict_batch(test_data, model_file=latest_model_file)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
return jsonify(results)
@app.route("/api/predict_macbert", methods=['POST'])
def predict_macbert():
"""
加载macbert模型,返回预测结果
接收POST请求,获取data参数, data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
test_data: 需要预测的数据,是一个文字列表, [(content,aspect,start_idx, end_idx),...,]
如果传过来的数据没有索引,那么需要自己去查找索引 [(content,aspect),...,]
Returns: 返回格式是 [(predicted_label, predict_score, data),...]
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
# model = TorchAsBertModel()
model.load_macbert_model()
results, accuracy = model.predict_batch(test_data, print_acc=True)
logger.info(f"收到的数据是:{test_data}")
logger.info(f"预测的结果是:{results}")
logger.info(f"模型准确率为:{accuracy}")
return jsonify(results)
@app.route("/api/train", methods=['POST'])
def train():
"""
接收data参数,
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data)
return jsonify(results)
@app.route("/api/train_truncate", methods=['POST'])
def train_truncate():
"""
接收data参数,data信息包含aspect关键在在句子中的位置信息,方便我们截取,我们截取aspect关键字的前后一定的字符作为输入
例如关键字前后的25个字作为sentenceA,aspect关键字作为sentenceB,输入模型
Args:
data: 训练的数据,是一个文字列表, [(content,aspect,start_idx, end_idx, label),...,]
Returns:
"""
jsonres = request.get_json()
data = jsonres.get('data', None)
logger.info(f"收到的数据是:{data}, 进行训练")
# model = TorchAsBertModel()
results = model.do_train(data, truncated=True)
return jsonify(results)
if __name__ == "__main__":
model = TorchAsBertModel()
app.run(host='0.0.0.0', port=5016, debug=False, threaded=True)
| = segment_ids.to(self.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
pred_logits = np.array(pred_logits)
# 找到最大的概率label
preds = np.argmax(pred_logits, axis=1)
if self.verbose:
print(f"preds: {preds}")
predictids = preds.tolist()
#获取最大概率的可能性,即分数
pred_logits_softmax = scipy.special.softmax(pred_logits, axis=1)
probability = np.max(pred_logits_softmax, axis=1)
probability = probability.tolist()
cost_time = time.time() - start_time
if self.verbose:
print(
f"--- 评估{len(eval_dataset)}条数据的总耗时是 {cost_time} seconds, 每条耗时 {cost_time / len(eval_dataset)} seconds ---")
return predictids, probability
def do_train(self, data, truncated=False):
"""
训练模型, 数据集分成2部分,训练集和验证集, 默认比例9:1
:param data: 输入的数据,注意如果做truncated,那么输入的数据为 [(content,aspect,start_idx, end_idx, label),...,]
:param truncated: 是否要截断,截断按照 self.left_max_seq_len, self.right_max_seq_len进行
:return:
"""
if truncated:
data, locations = self.do_truncate_data(data)
train_data_len = int(len(data) * 0.9)
train_data = data[:train_data_len]
eval_data = data[train_data_len:]
train_dataset = load_examples(train_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
eval_dataset = load_examples(eval_data, self.max_seq_length, self.train_tokenizer, self.label_list, self.reverse_truncate)
logger.info("训练数据集已加载,开始训练")
num_train_steps = int(len(train_dataset) / self.train_batch_size) * self.num_train_epochs
forward_batch_size = int(self.train_batch_size / self.gradient_accumulation_steps)
# 开始训练
params = list(self.train_model.named_parameters())
all_trainable_params = divide_parameters(params, lr=self.learning_rate, | identifier_body |
tf_train_loop.py | from numpy import linalg
from laika.lib.coordinates import ecef2geodetic, geodetic2ecef
from laika import AstroDog
from laika.gps_time import GPSTime
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.ops.array_ops import zeros
from tensorflow.python.training.tracking import base
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
import pymap3d as pm
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from skimage.io import imread, imsave
import time
import pandas as pd
import loader
import read_log
import itertools
from coords_tools import *
from matplotlib import pyplot
import datetime
from slac import loadSlac
from loader import *
import tf_phone_model
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
autotune = tf.data.experimental.AUTOTUNE
tf.keras.backend.set_floatx('float64')
def get_track_path(folder, track):
| phone_glob = next(os.walk(folder+"/"+track))[1]
print(folder, track, end=' ')
phones = {}
phone_names = []
if "train" in folder:
df_baseline = pd.read_csv("data/baseline_locations_train.csv")
else:
df_baseline = pd.read_csv("data/baseline_locations_test.csv")
df_baseline = df_baseline[df_baseline['collectionName'] == track]
df_baseline.rename(columns = {'latDeg':'baseLatDeg', 'lngDeg':'baseLngDeg', 'heightAboveWgs84EllipsoidM':'baseHeightAboveWgs84EllipsoidM'}, inplace = True)
df_baseline.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline[~df_baseline.index.duplicated(keep='first')]
df_baseline.sort_index(inplace=True)
if "train" in folder:
for phonepath in phone_glob:
truepos = pd.read_csv(folder+"/" + track + "/" + phonepath + "/ground_truth.csv")
truepos.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline.combine_first(truepos)
else:
df_baseline['latDeg'] = df_baseline['baseLatDeg']
df_baseline['lngDeg'] = df_baseline['baseLngDeg']
df_baseline['heightAboveWgs84EllipsoidM'] = df_baseline['baseHeightAboveWgs84EllipsoidM']
baseline_times = []
baseline_ecef_coords = []
gt_ecef_coords = []
for timemili, row in df_baseline.iterrows():
latbl, lonbl, altbl = float(row['baseLatDeg']),float(row['baseLngDeg']),float(row['baseHeightAboveWgs84EllipsoidM'])
baseline_times.append(timemili)
baseline_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
latbl, lonbl, altbl = float(row['latDeg']),float(row['lngDeg']),float(row['heightAboveWgs84EllipsoidM'] - 61)
gt_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
#baseline_ecef_coords = gt_ecef_coords.copy()
mat_local = np.zeros((3,3))
mat_local[2] = baseline_ecef_coords[0]/np.linalg.norm(baseline_ecef_coords[0], axis = -1)
mat_local[0] = np.array([0,0,1])
mat_local[0] = mat_local[0] - mat_local[2]*np.sum(mat_local[2]*mat_local[0])
mat_local[0] = mat_local[0]/np.linalg.norm(mat_local[0], axis = -1)
mat_local[1] = np.cross(mat_local[0], mat_local[2])
mat_local = np.transpose(mat_local)
#mat_local = np.eye(3)
gt_ecef_coords = np.array(gt_ecef_coords)
baseline_times = np.array(baseline_times)
baseline_ecef_coords = np.array(baseline_ecef_coords)
gt_ecef_coords = np.matmul(gt_ecef_coords,mat_local)
timeshift = 3657*24*60*60
datetimenow = int(baseline_times[0])//1000+timeshift
datetimenow = datetime.datetime.utcfromtimestamp(datetimenow)
slac_file = loadSlac(datetimenow)
slac = myLoadRinexPrevdoIndexed(slac_file)
slac_coords = np.array(myLoadRinex(slac_file).position)
slac_times = np.array([r[0] for r in slac])
slac_values = np.array([r[1] for r in slac])
phone_models = []
phone_times = []
constellations = ['GPS', 'GLONASS', 'BEIDOU','GALILEO']
dog = AstroDog(valid_const=constellations, pull_orbit=True)
phones = {}
phone_names = []
max_time = min_time = 0
bufx = []
bufy = []
bufz = []
window = 8
for i in range(len(baseline_ecef_coords)):
bufx.append(baseline_ecef_coords[i,0])
bufy.append(baseline_ecef_coords[i,1])
bufz.append(baseline_ecef_coords[i,2])
if len(bufx) > window*2+1:
bufx = bufx[1:]
bufy = bufy[1:]
bufz = bufz[1:]
if i >= window:
baseline_ecef_coords[i-window,0] = sorted(bufx)[len(bufx)//2]
baseline_ecef_coords[i-window,1] = sorted(bufy)[len(bufy)//2]
baseline_ecef_coords[i-window,2] = sorted(bufz)[len(bufz)//2]
#baseline_ecef_coords = scipy.signal.medfilt(baseline_ecef_coords, [1025,1])
baseline_ecef_coords += np.random.normal(0.,20.,baseline_ecef_coords.shape)
try:
with open(folder + "/" + track + "/export.dat", 'rb') as f:
data_file = pickle.load(f)
except:
data_file = None
for phonepath in phone_glob:
phone = phonepath
phones[phone] = len(phones)
phone_names.append(phone)
print(phone, end=' ')
if False: #data_file != None:
model, times = tf_phone_model.createGpsPhoneModelFromDataFile(data_file,phone,{ 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
else:
try:
df_raw = pd.read_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
except:
logs = read_log.gnss_log_to_dataframes(folder + "/" + track + "/" + phone + "/" + phone + "_GnssLog.txt")
df_raw = logs['Raw']
df_raw.to_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
model, times = tf_phone_model.createGpsPhoneModel(df_raw,{ 'times':baseline_times, 'values':baseline_ecef_coords},mat_local,dog, { 'times':slac_times, 'values':slac_values, 'coords':slac_coords})
phone_models.append(model)
phone_times.append(times)
if min_time == 0 or min_time > times[0]:
min_time = times[0]
if max_time == 0 or max_time < times[-1]:
max_time = times[-1]
model_track, track_model_error, num_measures, start_nanos, time_tick = tf_phone_model.createTrackModel(min_time,max_time, { 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
istart = np.searchsorted(baseline_times, start_nanos*1e-6)
iend = np.searchsorted(baseline_times, (start_nanos+time_tick*num_measures)*1e-6)
baseline_ecef_coords = baseline_ecef_coords[istart:iend]
baseline_times = baseline_times[istart:iend]
gt_ecef_coords = gt_ecef_coords[istart:iend]
track_input = np.arange(num_measures)
track_input = np.reshape(track_input,(-1,1))
def kernel_init(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape)
kernel[:,0,0] = np.array([-1,1]).astype(np.float64)
return kernel
derivative = tf.keras.layers.Conv1D(1,2,use_bias=False,kernel_initializer=kernel_init, dtype = tf.float64)
def kernel_init_epoch(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape).astype(np.float64)
kin = np.zeros((3)).astype(np.float64)
kin[0] = -1
kin[-1] = 1
kernel[:,0,0] = kin
return kernel
derivative_epoch = tf.keras.layers.Conv1D(1,3,use_bias=False,kernel_initializer=kernel_init_epoch, dtype = tf.float64)
@tf.function
def train_step_gnss(optimizer, physics):
for _ in range(16):
with tf.GradientTape(persistent=True) as tape:
total_loss_psevdo = 0
total_loss_delta = 0
accs_loss_large = 0
accs_loss_small = 0
speed_loss_small = 0
for i in range(len(phone_models)):
poses = model_track(phone_times[i], training=True)
poses = tf.reshape(poses,(1,-1,3))
psevdo_loss,delta_loss,delta_dif, psev_error = phone_models[i](poses, training=True)
total_loss_psevdo += psevdo_loss/10
total_loss_delta += delta_loss*2
total_loss = total_loss_delta +total_loss_psevdo
poses = track_model_error(track_input, training=True)
poses = tf.reshape(poses,(-1, 3))
poses_batch = tf.transpose(poses)
poses_batch = tf.expand_dims(poses_batch, axis=-1)
speed = derivative_epoch(poses_batch)
speed = tf.pad(speed,[[0,0],[0,1], [0,0]])
shift1 = derivative(poses_batch)
shift2 = speed*0.5
shift_loss = tf.reduce_mean(tf.abs(shift1-shift2)) * 0.01
accel = derivative(speed)
accel = tf.squeeze(accel)
accel = tf.transpose(accel)
accs_loss_large = tf.reduce_mean(tf.nn.relu(tf.abs(accel) - 4))
accs_loss_small = tf.reduce_mean(tf.abs(accel)) * 0.01
speed_loss_small = tf.reduce_mean(tf.abs(speed[2])) * 0.01 + shift_loss
'''
speed = (poses[3:] - poses[:-3])
speed_loss_small += tf.reduce_mean(tf.abs(poses[2:-1] - poses[1:-2]-speed/3))*0.01
accs = speed[1:] - speed[:-1]
acs2 = tf.linalg.norm(tf.abs(accs)+1.e-7, axis = -1)
accs_loss_small = tf.reduce_mean(acs2) / 100
accs_loss_large = tf.reduce_mean(tf.nn.relu(acs2-5))
'''
total_loss += (accs_loss_small + accs_loss_large + speed_loss_small)*5
for i in range(len(phone_models)):
grads = tape.gradient(total_loss, phone_models[i].trainable_weights)
optimizer.apply_gradients(zip(grads, phone_models[i].trainable_weights))
grads = tape.gradient(total_loss, model_track.trainable_weights)
optimizer.apply_gradients(zip(grads, model_track.trainable_weights))
grads = tape.gradient(total_loss, track_model_error.trainable_weights)
optimizer.apply_gradients(zip(grads, track_model_error.trainable_weights))
del tape
return total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error
lr = 0.5
#optimizer = keras.optimizers.SGD(learning_rate=100., nesterov=True, momentum=0.5)
#optimizer = keras.optimizers.Adam(learning_rate=0.5)
optimizer = keras.optimizers.Adam(learning_rate=0.01)#, epsilon= 0.0001)
#optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=100. )
for step in range(32*60):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small = 0,0,0,0
physics = 0
for _ in range(32):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error = train_step_gnss(optimizer, physics)
pred_pos = model_track(baseline_times*1000000).numpy()
poses = poses.numpy()
psev_error = psev_error.numpy()
psev_error = psev_error[np.abs(psev_error) > 0]
percents_good_psev = np.sum(np.abs(psev_error) < 1)*100/len(psev_error)
shift = pred_pos - gt_ecef_coords
meanshift = np.mean(shift,axis=0,keepdims=True)
shift = shift - meanshift
err3d = np.mean(np.linalg.norm(shift,axis = -1))
dist_2d = np.linalg.norm(shift[:,:2],axis = -1)
err2d = np.mean(dist_2d)
dist_2d = np.sort(dist_2d)
err50 = dist_2d[len(dist_2d)//2]
err95 = dist_2d[len(dist_2d)*95//100]
delta_dif = delta_dif.numpy()
delta_dif = delta_dif[np.abs(delta_dif) > 0]
percents_good = np.sum(np.abs(delta_dif) < 0.1)*100/len(delta_dif)
print( "Training loss at step %d (%.2f (%.2f),%.2f,%.2f,%.2f,%.4f): %.4f (%.2f),%.4f (%.2f),%.4f,%.4f,%.4f lr %.4f" % (step, err3d, np.linalg.norm(meanshift[0,:2]), err2d, err50, err95, (err50+err95)/2, float(total_loss_psevdo), percents_good_psev, float(total_loss_delta),percents_good,float(accs_loss_large),float(accs_loss_small), float(speed_loss_small), float(lr)), end='\r')
if(step % 32 == 0):
lr *= 0.90
optimizer.learning_rate = lr
if(step > 32):
physics = 1.
print()
if True:
plt.clf()
plt.scatter(pred_pos[:,1], pred_pos[:,0], s=0.2)
plt.scatter(gt_ecef_coords[:,1], gt_ecef_coords[:,0], s=0.2)
#fig1.canvas.start_event_loop(sys.float_info.min) #workaround for Exception in Tkinter callback
plt.savefig("fig/"+track+str(step+10000)+".png", dpi = 1000)
plt.close()
poses = track_model_error(track_input)
times = start_nanos + time_tick*track_input
poses = np.matmul(poses, mat_local.T)
d = {'nanos': np.reshape(times,(-1)), 'X': poses[:,0], 'Y': poses[:,1], 'Z': poses[:,2]}
df = pd.DataFrame(data=d)
df.to_csv(folder + "/" + track + "/track.csv") | identifier_body | |
tf_train_loop.py | from numpy import linalg
from laika.lib.coordinates import ecef2geodetic, geodetic2ecef
from laika import AstroDog
from laika.gps_time import GPSTime
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.ops.array_ops import zeros
from tensorflow.python.training.tracking import base
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
import pymap3d as pm
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from skimage.io import imread, imsave
import time
import pandas as pd
import loader
import read_log
import itertools
from coords_tools import *
from matplotlib import pyplot
import datetime
from slac import loadSlac
from loader import *
import tf_phone_model
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
autotune = tf.data.experimental.AUTOTUNE
tf.keras.backend.set_floatx('float64')
def get_track_path(folder, track):
phone_glob = next(os.walk(folder+"/"+track))[1]
print(folder, track, end=' ')
phones = {}
phone_names = []
if "train" in folder:
df_baseline = pd.read_csv("data/baseline_locations_train.csv")
else:
df_baseline = pd.read_csv("data/baseline_locations_test.csv")
df_baseline = df_baseline[df_baseline['collectionName'] == track]
df_baseline.rename(columns = {'latDeg':'baseLatDeg', 'lngDeg':'baseLngDeg', 'heightAboveWgs84EllipsoidM':'baseHeightAboveWgs84EllipsoidM'}, inplace = True)
df_baseline.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline[~df_baseline.index.duplicated(keep='first')]
df_baseline.sort_index(inplace=True)
if "train" in folder:
for phonepath in phone_glob:
truepos = pd.read_csv(folder+"/" + track + "/" + phonepath + "/ground_truth.csv")
truepos.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline.combine_first(truepos)
else:
df_baseline['latDeg'] = df_baseline['baseLatDeg']
df_baseline['lngDeg'] = df_baseline['baseLngDeg']
df_baseline['heightAboveWgs84EllipsoidM'] = df_baseline['baseHeightAboveWgs84EllipsoidM']
baseline_times = []
baseline_ecef_coords = []
gt_ecef_coords = []
for timemili, row in df_baseline.iterrows():
latbl, lonbl, altbl = float(row['baseLatDeg']),float(row['baseLngDeg']),float(row['baseHeightAboveWgs84EllipsoidM'])
baseline_times.append(timemili)
baseline_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
latbl, lonbl, altbl = float(row['latDeg']),float(row['lngDeg']),float(row['heightAboveWgs84EllipsoidM'] - 61)
gt_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
#baseline_ecef_coords = gt_ecef_coords.copy()
mat_local = np.zeros((3,3))
mat_local[2] = baseline_ecef_coords[0]/np.linalg.norm(baseline_ecef_coords[0], axis = -1)
mat_local[0] = np.array([0,0,1])
mat_local[0] = mat_local[0] - mat_local[2]*np.sum(mat_local[2]*mat_local[0])
mat_local[0] = mat_local[0]/np.linalg.norm(mat_local[0], axis = -1)
mat_local[1] = np.cross(mat_local[0], mat_local[2])
mat_local = np.transpose(mat_local)
#mat_local = np.eye(3)
gt_ecef_coords = np.array(gt_ecef_coords)
baseline_times = np.array(baseline_times)
baseline_ecef_coords = np.array(baseline_ecef_coords)
gt_ecef_coords = np.matmul(gt_ecef_coords,mat_local)
timeshift = 3657*24*60*60
datetimenow = int(baseline_times[0])//1000+timeshift
datetimenow = datetime.datetime.utcfromtimestamp(datetimenow)
slac_file = loadSlac(datetimenow)
slac = myLoadRinexPrevdoIndexed(slac_file)
slac_coords = np.array(myLoadRinex(slac_file).position)
slac_times = np.array([r[0] for r in slac])
slac_values = np.array([r[1] for r in slac])
phone_models = []
phone_times = []
constellations = ['GPS', 'GLONASS', 'BEIDOU','GALILEO']
dog = AstroDog(valid_const=constellations, pull_orbit=True)
phones = {}
phone_names = []
max_time = min_time = 0
bufx = []
bufy = []
bufz = []
window = 8
for i in range(len(baseline_ecef_coords)):
bufx.append(baseline_ecef_coords[i,0])
bufy.append(baseline_ecef_coords[i,1])
bufz.append(baseline_ecef_coords[i,2])
if len(bufx) > window*2+1:
bufx = bufx[1:]
bufy = bufy[1:]
bufz = bufz[1:]
if i >= window:
baseline_ecef_coords[i-window,0] = sorted(bufx)[len(bufx)//2]
baseline_ecef_coords[i-window,1] = sorted(bufy)[len(bufy)//2]
baseline_ecef_coords[i-window,2] = sorted(bufz)[len(bufz)//2]
#baseline_ecef_coords = scipy.signal.medfilt(baseline_ecef_coords, [1025,1])
baseline_ecef_coords += np.random.normal(0.,20.,baseline_ecef_coords.shape)
try:
with open(folder + "/" + track + "/export.dat", 'rb') as f:
data_file = pickle.load(f)
except:
data_file = None
for phonepath in phone_glob:
phone = phonepath
phones[phone] = len(phones)
phone_names.append(phone)
print(phone, end=' ')
if False: #data_file != None:
model, times = tf_phone_model.createGpsPhoneModelFromDataFile(data_file,phone,{ 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
else:
try:
df_raw = pd.read_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
except:
logs = read_log.gnss_log_to_dataframes(folder + "/" + track + "/" + phone + "/" + phone + "_GnssLog.txt")
df_raw = logs['Raw']
df_raw.to_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
model, times = tf_phone_model.createGpsPhoneModel(df_raw,{ 'times':baseline_times, 'values':baseline_ecef_coords},mat_local,dog, { 'times':slac_times, 'values':slac_values, 'coords':slac_coords})
phone_models.append(model)
phone_times.append(times)
if min_time == 0 or min_time > times[0]:
min_time = times[0]
if max_time == 0 or max_time < times[-1]:
max_time = times[-1]
model_track, track_model_error, num_measures, start_nanos, time_tick = tf_phone_model.createTrackModel(min_time,max_time, { 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
istart = np.searchsorted(baseline_times, start_nanos*1e-6)
iend = np.searchsorted(baseline_times, (start_nanos+time_tick*num_measures)*1e-6)
baseline_ecef_coords = baseline_ecef_coords[istart:iend]
baseline_times = baseline_times[istart:iend]
gt_ecef_coords = gt_ecef_coords[istart:iend]
track_input = np.arange(num_measures)
track_input = np.reshape(track_input,(-1,1))
def kernel_init(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape)
kernel[:,0,0] = np.array([-1,1]).astype(np.float64)
return kernel
derivative = tf.keras.layers.Conv1D(1,2,use_bias=False,kernel_initializer=kernel_init, dtype = tf.float64)
def kernel_init_epoch(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape).astype(np.float64)
kin = np.zeros((3)).astype(np.float64)
kin[0] = -1
kin[-1] = 1
kernel[:,0,0] = kin
return kernel
derivative_epoch = tf.keras.layers.Conv1D(1,3,use_bias=False,kernel_initializer=kernel_init_epoch, dtype = tf.float64)
@tf.function
def train_step_gnss(optimizer, physics):
for _ in range(16):
with tf.GradientTape(persistent=True) as tape:
total_loss_psevdo = 0
total_loss_delta = 0
accs_loss_large = 0
accs_loss_small = 0
speed_loss_small = 0
for i in range(len(phone_models)):
poses = model_track(phone_times[i], training=True)
poses = tf.reshape(poses,(1,-1,3))
psevdo_loss,delta_loss,delta_dif, psev_error = phone_models[i](poses, training=True)
total_loss_psevdo += psevdo_loss/10
total_loss_delta += delta_loss*2
total_loss = total_loss_delta +total_loss_psevdo
poses = track_model_error(track_input, training=True)
poses = tf.reshape(poses,(-1, 3))
poses_batch = tf.transpose(poses)
poses_batch = tf.expand_dims(poses_batch, axis=-1)
speed = derivative_epoch(poses_batch)
speed = tf.pad(speed,[[0,0],[0,1], [0,0]])
shift1 = derivative(poses_batch) |
shift_loss = tf.reduce_mean(tf.abs(shift1-shift2)) * 0.01
accel = derivative(speed)
accel = tf.squeeze(accel)
accel = tf.transpose(accel)
accs_loss_large = tf.reduce_mean(tf.nn.relu(tf.abs(accel) - 4))
accs_loss_small = tf.reduce_mean(tf.abs(accel)) * 0.01
speed_loss_small = tf.reduce_mean(tf.abs(speed[2])) * 0.01 + shift_loss
'''
speed = (poses[3:] - poses[:-3])
speed_loss_small += tf.reduce_mean(tf.abs(poses[2:-1] - poses[1:-2]-speed/3))*0.01
accs = speed[1:] - speed[:-1]
acs2 = tf.linalg.norm(tf.abs(accs)+1.e-7, axis = -1)
accs_loss_small = tf.reduce_mean(acs2) / 100
accs_loss_large = tf.reduce_mean(tf.nn.relu(acs2-5))
'''
total_loss += (accs_loss_small + accs_loss_large + speed_loss_small)*5
for i in range(len(phone_models)):
grads = tape.gradient(total_loss, phone_models[i].trainable_weights)
optimizer.apply_gradients(zip(grads, phone_models[i].trainable_weights))
grads = tape.gradient(total_loss, model_track.trainable_weights)
optimizer.apply_gradients(zip(grads, model_track.trainable_weights))
grads = tape.gradient(total_loss, track_model_error.trainable_weights)
optimizer.apply_gradients(zip(grads, track_model_error.trainable_weights))
del tape
return total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error
lr = 0.5
#optimizer = keras.optimizers.SGD(learning_rate=100., nesterov=True, momentum=0.5)
#optimizer = keras.optimizers.Adam(learning_rate=0.5)
optimizer = keras.optimizers.Adam(learning_rate=0.01)#, epsilon= 0.0001)
#optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=100. )
for step in range(32*60):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small = 0,0,0,0
physics = 0
for _ in range(32):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error = train_step_gnss(optimizer, physics)
pred_pos = model_track(baseline_times*1000000).numpy()
poses = poses.numpy()
psev_error = psev_error.numpy()
psev_error = psev_error[np.abs(psev_error) > 0]
percents_good_psev = np.sum(np.abs(psev_error) < 1)*100/len(psev_error)
shift = pred_pos - gt_ecef_coords
meanshift = np.mean(shift,axis=0,keepdims=True)
shift = shift - meanshift
err3d = np.mean(np.linalg.norm(shift,axis = -1))
dist_2d = np.linalg.norm(shift[:,:2],axis = -1)
err2d = np.mean(dist_2d)
dist_2d = np.sort(dist_2d)
err50 = dist_2d[len(dist_2d)//2]
err95 = dist_2d[len(dist_2d)*95//100]
delta_dif = delta_dif.numpy()
delta_dif = delta_dif[np.abs(delta_dif) > 0]
percents_good = np.sum(np.abs(delta_dif) < 0.1)*100/len(delta_dif)
print( "Training loss at step %d (%.2f (%.2f),%.2f,%.2f,%.2f,%.4f): %.4f (%.2f),%.4f (%.2f),%.4f,%.4f,%.4f lr %.4f" % (step, err3d, np.linalg.norm(meanshift[0,:2]), err2d, err50, err95, (err50+err95)/2, float(total_loss_psevdo), percents_good_psev, float(total_loss_delta),percents_good,float(accs_loss_large),float(accs_loss_small), float(speed_loss_small), float(lr)), end='\r')
if(step % 32 == 0):
lr *= 0.90
optimizer.learning_rate = lr
if(step > 32):
physics = 1.
print()
if True:
plt.clf()
plt.scatter(pred_pos[:,1], pred_pos[:,0], s=0.2)
plt.scatter(gt_ecef_coords[:,1], gt_ecef_coords[:,0], s=0.2)
#fig1.canvas.start_event_loop(sys.float_info.min) #workaround for Exception in Tkinter callback
plt.savefig("fig/"+track+str(step+10000)+".png", dpi = 1000)
plt.close()
poses = track_model_error(track_input)
times = start_nanos + time_tick*track_input
poses = np.matmul(poses, mat_local.T)
d = {'nanos': np.reshape(times,(-1)), 'X': poses[:,0], 'Y': poses[:,1], 'Z': poses[:,2]}
df = pd.DataFrame(data=d)
df.to_csv(folder + "/" + track + "/track.csv") | shift2 = speed*0.5 | random_line_split |
tf_train_loop.py | from numpy import linalg
from laika.lib.coordinates import ecef2geodetic, geodetic2ecef
from laika import AstroDog
from laika.gps_time import GPSTime
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.ops.array_ops import zeros
from tensorflow.python.training.tracking import base
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
import pymap3d as pm
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from skimage.io import imread, imsave
import time
import pandas as pd
import loader
import read_log
import itertools
from coords_tools import *
from matplotlib import pyplot
import datetime
from slac import loadSlac
from loader import *
import tf_phone_model
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
autotune = tf.data.experimental.AUTOTUNE
tf.keras.backend.set_floatx('float64')
def get_track_path(folder, track):
phone_glob = next(os.walk(folder+"/"+track))[1]
print(folder, track, end=' ')
phones = {}
phone_names = []
if "train" in folder:
df_baseline = pd.read_csv("data/baseline_locations_train.csv")
else:
df_baseline = pd.read_csv("data/baseline_locations_test.csv")
df_baseline = df_baseline[df_baseline['collectionName'] == track]
df_baseline.rename(columns = {'latDeg':'baseLatDeg', 'lngDeg':'baseLngDeg', 'heightAboveWgs84EllipsoidM':'baseHeightAboveWgs84EllipsoidM'}, inplace = True)
df_baseline.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline[~df_baseline.index.duplicated(keep='first')]
df_baseline.sort_index(inplace=True)
if "train" in folder:
for phonepath in phone_glob:
truepos = pd.read_csv(folder+"/" + track + "/" + phonepath + "/ground_truth.csv")
truepos.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline.combine_first(truepos)
else:
df_baseline['latDeg'] = df_baseline['baseLatDeg']
df_baseline['lngDeg'] = df_baseline['baseLngDeg']
df_baseline['heightAboveWgs84EllipsoidM'] = df_baseline['baseHeightAboveWgs84EllipsoidM']
baseline_times = []
baseline_ecef_coords = []
gt_ecef_coords = []
for timemili, row in df_baseline.iterrows():
latbl, lonbl, altbl = float(row['baseLatDeg']),float(row['baseLngDeg']),float(row['baseHeightAboveWgs84EllipsoidM'])
baseline_times.append(timemili)
baseline_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
latbl, lonbl, altbl = float(row['latDeg']),float(row['lngDeg']),float(row['heightAboveWgs84EllipsoidM'] - 61)
gt_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
#baseline_ecef_coords = gt_ecef_coords.copy()
mat_local = np.zeros((3,3))
mat_local[2] = baseline_ecef_coords[0]/np.linalg.norm(baseline_ecef_coords[0], axis = -1)
mat_local[0] = np.array([0,0,1])
mat_local[0] = mat_local[0] - mat_local[2]*np.sum(mat_local[2]*mat_local[0])
mat_local[0] = mat_local[0]/np.linalg.norm(mat_local[0], axis = -1)
mat_local[1] = np.cross(mat_local[0], mat_local[2])
mat_local = np.transpose(mat_local)
#mat_local = np.eye(3)
gt_ecef_coords = np.array(gt_ecef_coords)
baseline_times = np.array(baseline_times)
baseline_ecef_coords = np.array(baseline_ecef_coords)
gt_ecef_coords = np.matmul(gt_ecef_coords,mat_local)
timeshift = 3657*24*60*60
datetimenow = int(baseline_times[0])//1000+timeshift
datetimenow = datetime.datetime.utcfromtimestamp(datetimenow)
slac_file = loadSlac(datetimenow)
slac = myLoadRinexPrevdoIndexed(slac_file)
slac_coords = np.array(myLoadRinex(slac_file).position)
slac_times = np.array([r[0] for r in slac])
slac_values = np.array([r[1] for r in slac])
phone_models = []
phone_times = []
constellations = ['GPS', 'GLONASS', 'BEIDOU','GALILEO']
dog = AstroDog(valid_const=constellations, pull_orbit=True)
phones = {}
phone_names = []
max_time = min_time = 0
bufx = []
bufy = []
bufz = []
window = 8
for i in range(len(baseline_ecef_coords)):
bufx.append(baseline_ecef_coords[i,0])
bufy.append(baseline_ecef_coords[i,1])
bufz.append(baseline_ecef_coords[i,2])
if len(bufx) > window*2+1:
bufx = bufx[1:]
bufy = bufy[1:]
bufz = bufz[1:]
if i >= window:
baseline_ecef_coords[i-window,0] = sorted(bufx)[len(bufx)//2]
baseline_ecef_coords[i-window,1] = sorted(bufy)[len(bufy)//2]
baseline_ecef_coords[i-window,2] = sorted(bufz)[len(bufz)//2]
#baseline_ecef_coords = scipy.signal.medfilt(baseline_ecef_coords, [1025,1])
baseline_ecef_coords += np.random.normal(0.,20.,baseline_ecef_coords.shape)
try:
with open(folder + "/" + track + "/export.dat", 'rb') as f:
data_file = pickle.load(f)
except:
data_file = None
for phonepath in phone_glob:
phone = phonepath
phones[phone] = len(phones)
phone_names.append(phone)
print(phone, end=' ')
if False: #data_file != None:
model, times = tf_phone_model.createGpsPhoneModelFromDataFile(data_file,phone,{ 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
else:
try:
df_raw = pd.read_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
except:
logs = read_log.gnss_log_to_dataframes(folder + "/" + track + "/" + phone + "/" + phone + "_GnssLog.txt")
df_raw = logs['Raw']
df_raw.to_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
model, times = tf_phone_model.createGpsPhoneModel(df_raw,{ 'times':baseline_times, 'values':baseline_ecef_coords},mat_local,dog, { 'times':slac_times, 'values':slac_values, 'coords':slac_coords})
phone_models.append(model)
phone_times.append(times)
if min_time == 0 or min_time > times[0]:
min_time = times[0]
if max_time == 0 or max_time < times[-1]:
max_time = times[-1]
model_track, track_model_error, num_measures, start_nanos, time_tick = tf_phone_model.createTrackModel(min_time,max_time, { 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
istart = np.searchsorted(baseline_times, start_nanos*1e-6)
iend = np.searchsorted(baseline_times, (start_nanos+time_tick*num_measures)*1e-6)
baseline_ecef_coords = baseline_ecef_coords[istart:iend]
baseline_times = baseline_times[istart:iend]
gt_ecef_coords = gt_ecef_coords[istart:iend]
track_input = np.arange(num_measures)
track_input = np.reshape(track_input,(-1,1))
def kernel_init(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape)
kernel[:,0,0] = np.array([-1,1]).astype(np.float64)
return kernel
derivative = tf.keras.layers.Conv1D(1,2,use_bias=False,kernel_initializer=kernel_init, dtype = tf.float64)
def kernel_init_epoch(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape).astype(np.float64)
kin = np.zeros((3)).astype(np.float64)
kin[0] = -1
kin[-1] = 1
kernel[:,0,0] = kin
return kernel
derivative_epoch = tf.keras.layers.Conv1D(1,3,use_bias=False,kernel_initializer=kernel_init_epoch, dtype = tf.float64)
@tf.function
def train_step_gnss(optimizer, physics):
for _ in range(16):
with tf.GradientTape(persistent=True) as tape:
total_loss_psevdo = 0
total_loss_delta = 0
accs_loss_large = 0
accs_loss_small = 0
speed_loss_small = 0
for i in range(len(phone_models)):
poses = model_track(phone_times[i], training=True)
poses = tf.reshape(poses,(1,-1,3))
psevdo_loss,delta_loss,delta_dif, psev_error = phone_models[i](poses, training=True)
total_loss_psevdo += psevdo_loss/10
total_loss_delta += delta_loss*2
total_loss = total_loss_delta +total_loss_psevdo
poses = track_model_error(track_input, training=True)
poses = tf.reshape(poses,(-1, 3))
poses_batch = tf.transpose(poses)
poses_batch = tf.expand_dims(poses_batch, axis=-1)
speed = derivative_epoch(poses_batch)
speed = tf.pad(speed,[[0,0],[0,1], [0,0]])
shift1 = derivative(poses_batch)
shift2 = speed*0.5
shift_loss = tf.reduce_mean(tf.abs(shift1-shift2)) * 0.01
accel = derivative(speed)
accel = tf.squeeze(accel)
accel = tf.transpose(accel)
accs_loss_large = tf.reduce_mean(tf.nn.relu(tf.abs(accel) - 4))
accs_loss_small = tf.reduce_mean(tf.abs(accel)) * 0.01
speed_loss_small = tf.reduce_mean(tf.abs(speed[2])) * 0.01 + shift_loss
'''
speed = (poses[3:] - poses[:-3])
speed_loss_small += tf.reduce_mean(tf.abs(poses[2:-1] - poses[1:-2]-speed/3))*0.01
accs = speed[1:] - speed[:-1]
acs2 = tf.linalg.norm(tf.abs(accs)+1.e-7, axis = -1)
accs_loss_small = tf.reduce_mean(acs2) / 100
accs_loss_large = tf.reduce_mean(tf.nn.relu(acs2-5))
'''
total_loss += (accs_loss_small + accs_loss_large + speed_loss_small)*5
for i in range(len(phone_models)):
grads = tape.gradient(total_loss, phone_models[i].trainable_weights)
optimizer.apply_gradients(zip(grads, phone_models[i].trainable_weights))
grads = tape.gradient(total_loss, model_track.trainable_weights)
optimizer.apply_gradients(zip(grads, model_track.trainable_weights))
grads = tape.gradient(total_loss, track_model_error.trainable_weights)
optimizer.apply_gradients(zip(grads, track_model_error.trainable_weights))
del tape
return total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error
lr = 0.5
#optimizer = keras.optimizers.SGD(learning_rate=100., nesterov=True, momentum=0.5)
#optimizer = keras.optimizers.Adam(learning_rate=0.5)
optimizer = keras.optimizers.Adam(learning_rate=0.01)#, epsilon= 0.0001)
#optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=100. )
for step in range(32*60):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small = 0,0,0,0
physics = 0
for _ in range(32):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error = train_step_gnss(optimizer, physics)
pred_pos = model_track(baseline_times*1000000).numpy()
poses = poses.numpy()
psev_error = psev_error.numpy()
psev_error = psev_error[np.abs(psev_error) > 0]
percents_good_psev = np.sum(np.abs(psev_error) < 1)*100/len(psev_error)
shift = pred_pos - gt_ecef_coords
meanshift = np.mean(shift,axis=0,keepdims=True)
shift = shift - meanshift
err3d = np.mean(np.linalg.norm(shift,axis = -1))
dist_2d = np.linalg.norm(shift[:,:2],axis = -1)
err2d = np.mean(dist_2d)
dist_2d = np.sort(dist_2d)
err50 = dist_2d[len(dist_2d)//2]
err95 = dist_2d[len(dist_2d)*95//100]
delta_dif = delta_dif.numpy()
delta_dif = delta_dif[np.abs(delta_dif) > 0]
percents_good = np.sum(np.abs(delta_dif) < 0.1)*100/len(delta_dif)
print( "Training loss at step %d (%.2f (%.2f),%.2f,%.2f,%.2f,%.4f): %.4f (%.2f),%.4f (%.2f),%.4f,%.4f,%.4f lr %.4f" % (step, err3d, np.linalg.norm(meanshift[0,:2]), err2d, err50, err95, (err50+err95)/2, float(total_loss_psevdo), percents_good_psev, float(total_loss_delta),percents_good,float(accs_loss_large),float(accs_loss_small), float(speed_loss_small), float(lr)), end='\r')
if(step % 32 == 0):
lr *= 0.90
optimizer.learning_rate = lr
if(step > 32):
|
print()
if True:
plt.clf()
plt.scatter(pred_pos[:,1], pred_pos[:,0], s=0.2)
plt.scatter(gt_ecef_coords[:,1], gt_ecef_coords[:,0], s=0.2)
#fig1.canvas.start_event_loop(sys.float_info.min) #workaround for Exception in Tkinter callback
plt.savefig("fig/"+track+str(step+10000)+".png", dpi = 1000)
plt.close()
poses = track_model_error(track_input)
times = start_nanos + time_tick*track_input
poses = np.matmul(poses, mat_local.T)
d = {'nanos': np.reshape(times,(-1)), 'X': poses[:,0], 'Y': poses[:,1], 'Z': poses[:,2]}
df = pd.DataFrame(data=d)
df.to_csv(folder + "/" + track + "/track.csv")
| physics = 1. | conditional_block |
tf_train_loop.py | from numpy import linalg
from laika.lib.coordinates import ecef2geodetic, geodetic2ecef
from laika import AstroDog
from laika.gps_time import GPSTime
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.ops.array_ops import zeros
from tensorflow.python.training.tracking import base
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
import pymap3d as pm
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from skimage.io import imread, imsave
import time
import pandas as pd
import loader
import read_log
import itertools
from coords_tools import *
from matplotlib import pyplot
import datetime
from slac import loadSlac
from loader import *
import tf_phone_model
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
autotune = tf.data.experimental.AUTOTUNE
tf.keras.backend.set_floatx('float64')
def get_track_path(folder, track):
phone_glob = next(os.walk(folder+"/"+track))[1]
print(folder, track, end=' ')
phones = {}
phone_names = []
if "train" in folder:
df_baseline = pd.read_csv("data/baseline_locations_train.csv")
else:
df_baseline = pd.read_csv("data/baseline_locations_test.csv")
df_baseline = df_baseline[df_baseline['collectionName'] == track]
df_baseline.rename(columns = {'latDeg':'baseLatDeg', 'lngDeg':'baseLngDeg', 'heightAboveWgs84EllipsoidM':'baseHeightAboveWgs84EllipsoidM'}, inplace = True)
df_baseline.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline[~df_baseline.index.duplicated(keep='first')]
df_baseline.sort_index(inplace=True)
if "train" in folder:
for phonepath in phone_glob:
truepos = pd.read_csv(folder+"/" + track + "/" + phonepath + "/ground_truth.csv")
truepos.set_index('millisSinceGpsEpoch', inplace = True)
df_baseline = df_baseline.combine_first(truepos)
else:
df_baseline['latDeg'] = df_baseline['baseLatDeg']
df_baseline['lngDeg'] = df_baseline['baseLngDeg']
df_baseline['heightAboveWgs84EllipsoidM'] = df_baseline['baseHeightAboveWgs84EllipsoidM']
baseline_times = []
baseline_ecef_coords = []
gt_ecef_coords = []
for timemili, row in df_baseline.iterrows():
latbl, lonbl, altbl = float(row['baseLatDeg']),float(row['baseLngDeg']),float(row['baseHeightAboveWgs84EllipsoidM'])
baseline_times.append(timemili)
baseline_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
latbl, lonbl, altbl = float(row['latDeg']),float(row['lngDeg']),float(row['heightAboveWgs84EllipsoidM'] - 61)
gt_ecef_coords.append(np.array(pm.geodetic2ecef(latbl,lonbl,altbl, deg = True)))
#baseline_ecef_coords = gt_ecef_coords.copy()
mat_local = np.zeros((3,3))
mat_local[2] = baseline_ecef_coords[0]/np.linalg.norm(baseline_ecef_coords[0], axis = -1)
mat_local[0] = np.array([0,0,1])
mat_local[0] = mat_local[0] - mat_local[2]*np.sum(mat_local[2]*mat_local[0])
mat_local[0] = mat_local[0]/np.linalg.norm(mat_local[0], axis = -1)
mat_local[1] = np.cross(mat_local[0], mat_local[2])
mat_local = np.transpose(mat_local)
#mat_local = np.eye(3)
gt_ecef_coords = np.array(gt_ecef_coords)
baseline_times = np.array(baseline_times)
baseline_ecef_coords = np.array(baseline_ecef_coords)
gt_ecef_coords = np.matmul(gt_ecef_coords,mat_local)
timeshift = 3657*24*60*60
datetimenow = int(baseline_times[0])//1000+timeshift
datetimenow = datetime.datetime.utcfromtimestamp(datetimenow)
slac_file = loadSlac(datetimenow)
slac = myLoadRinexPrevdoIndexed(slac_file)
slac_coords = np.array(myLoadRinex(slac_file).position)
slac_times = np.array([r[0] for r in slac])
slac_values = np.array([r[1] for r in slac])
phone_models = []
phone_times = []
constellations = ['GPS', 'GLONASS', 'BEIDOU','GALILEO']
dog = AstroDog(valid_const=constellations, pull_orbit=True)
phones = {}
phone_names = []
max_time = min_time = 0
bufx = []
bufy = []
bufz = []
window = 8
for i in range(len(baseline_ecef_coords)):
bufx.append(baseline_ecef_coords[i,0])
bufy.append(baseline_ecef_coords[i,1])
bufz.append(baseline_ecef_coords[i,2])
if len(bufx) > window*2+1:
bufx = bufx[1:]
bufy = bufy[1:]
bufz = bufz[1:]
if i >= window:
baseline_ecef_coords[i-window,0] = sorted(bufx)[len(bufx)//2]
baseline_ecef_coords[i-window,1] = sorted(bufy)[len(bufy)//2]
baseline_ecef_coords[i-window,2] = sorted(bufz)[len(bufz)//2]
#baseline_ecef_coords = scipy.signal.medfilt(baseline_ecef_coords, [1025,1])
baseline_ecef_coords += np.random.normal(0.,20.,baseline_ecef_coords.shape)
try:
with open(folder + "/" + track + "/export.dat", 'rb') as f:
data_file = pickle.load(f)
except:
data_file = None
for phonepath in phone_glob:
phone = phonepath
phones[phone] = len(phones)
phone_names.append(phone)
print(phone, end=' ')
if False: #data_file != None:
model, times = tf_phone_model.createGpsPhoneModelFromDataFile(data_file,phone,{ 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
else:
try:
df_raw = pd.read_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
except:
logs = read_log.gnss_log_to_dataframes(folder + "/" + track + "/" + phone + "/" + phone + "_GnssLog.txt")
df_raw = logs['Raw']
df_raw.to_csv(folder + "/" + track + "/" + phone + "/" + phone + "_raw.csv")
model, times = tf_phone_model.createGpsPhoneModel(df_raw,{ 'times':baseline_times, 'values':baseline_ecef_coords},mat_local,dog, { 'times':slac_times, 'values':slac_values, 'coords':slac_coords})
phone_models.append(model)
phone_times.append(times)
if min_time == 0 or min_time > times[0]:
min_time = times[0]
if max_time == 0 or max_time < times[-1]:
max_time = times[-1]
model_track, track_model_error, num_measures, start_nanos, time_tick = tf_phone_model.createTrackModel(min_time,max_time, { 'times':baseline_times, 'values':baseline_ecef_coords}, mat_local)
istart = np.searchsorted(baseline_times, start_nanos*1e-6)
iend = np.searchsorted(baseline_times, (start_nanos+time_tick*num_measures)*1e-6)
baseline_ecef_coords = baseline_ecef_coords[istart:iend]
baseline_times = baseline_times[istart:iend]
gt_ecef_coords = gt_ecef_coords[istart:iend]
track_input = np.arange(num_measures)
track_input = np.reshape(track_input,(-1,1))
def kernel_init(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape)
kernel[:,0,0] = np.array([-1,1]).astype(np.float64)
return kernel
derivative = tf.keras.layers.Conv1D(1,2,use_bias=False,kernel_initializer=kernel_init, dtype = tf.float64)
def kernel_init_epoch(shape, dtype=None, partition_info=None):
kernel = np.zeros(shape).astype(np.float64)
kin = np.zeros((3)).astype(np.float64)
kin[0] = -1
kin[-1] = 1
kernel[:,0,0] = kin
return kernel
derivative_epoch = tf.keras.layers.Conv1D(1,3,use_bias=False,kernel_initializer=kernel_init_epoch, dtype = tf.float64)
@tf.function
def | (optimizer, physics):
for _ in range(16):
with tf.GradientTape(persistent=True) as tape:
total_loss_psevdo = 0
total_loss_delta = 0
accs_loss_large = 0
accs_loss_small = 0
speed_loss_small = 0
for i in range(len(phone_models)):
poses = model_track(phone_times[i], training=True)
poses = tf.reshape(poses,(1,-1,3))
psevdo_loss,delta_loss,delta_dif, psev_error = phone_models[i](poses, training=True)
total_loss_psevdo += psevdo_loss/10
total_loss_delta += delta_loss*2
total_loss = total_loss_delta +total_loss_psevdo
poses = track_model_error(track_input, training=True)
poses = tf.reshape(poses,(-1, 3))
poses_batch = tf.transpose(poses)
poses_batch = tf.expand_dims(poses_batch, axis=-1)
speed = derivative_epoch(poses_batch)
speed = tf.pad(speed,[[0,0],[0,1], [0,0]])
shift1 = derivative(poses_batch)
shift2 = speed*0.5
shift_loss = tf.reduce_mean(tf.abs(shift1-shift2)) * 0.01
accel = derivative(speed)
accel = tf.squeeze(accel)
accel = tf.transpose(accel)
accs_loss_large = tf.reduce_mean(tf.nn.relu(tf.abs(accel) - 4))
accs_loss_small = tf.reduce_mean(tf.abs(accel)) * 0.01
speed_loss_small = tf.reduce_mean(tf.abs(speed[2])) * 0.01 + shift_loss
'''
speed = (poses[3:] - poses[:-3])
speed_loss_small += tf.reduce_mean(tf.abs(poses[2:-1] - poses[1:-2]-speed/3))*0.01
accs = speed[1:] - speed[:-1]
acs2 = tf.linalg.norm(tf.abs(accs)+1.e-7, axis = -1)
accs_loss_small = tf.reduce_mean(acs2) / 100
accs_loss_large = tf.reduce_mean(tf.nn.relu(acs2-5))
'''
total_loss += (accs_loss_small + accs_loss_large + speed_loss_small)*5
for i in range(len(phone_models)):
grads = tape.gradient(total_loss, phone_models[i].trainable_weights)
optimizer.apply_gradients(zip(grads, phone_models[i].trainable_weights))
grads = tape.gradient(total_loss, model_track.trainable_weights)
optimizer.apply_gradients(zip(grads, model_track.trainable_weights))
grads = tape.gradient(total_loss, track_model_error.trainable_weights)
optimizer.apply_gradients(zip(grads, track_model_error.trainable_weights))
del tape
return total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error
lr = 0.5
#optimizer = keras.optimizers.SGD(learning_rate=100., nesterov=True, momentum=0.5)
#optimizer = keras.optimizers.Adam(learning_rate=0.5)
optimizer = keras.optimizers.Adam(learning_rate=0.01)#, epsilon= 0.0001)
#optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=100. )
for step in range(32*60):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small = 0,0,0,0
physics = 0
for _ in range(32):
total_loss, accs_loss_small, accs_loss_large, speed_loss_small, total_loss_psevdo, total_loss_delta, delta_dif, poses, psev_error = train_step_gnss(optimizer, physics)
pred_pos = model_track(baseline_times*1000000).numpy()
poses = poses.numpy()
psev_error = psev_error.numpy()
psev_error = psev_error[np.abs(psev_error) > 0]
percents_good_psev = np.sum(np.abs(psev_error) < 1)*100/len(psev_error)
shift = pred_pos - gt_ecef_coords
meanshift = np.mean(shift,axis=0,keepdims=True)
shift = shift - meanshift
err3d = np.mean(np.linalg.norm(shift,axis = -1))
dist_2d = np.linalg.norm(shift[:,:2],axis = -1)
err2d = np.mean(dist_2d)
dist_2d = np.sort(dist_2d)
err50 = dist_2d[len(dist_2d)//2]
err95 = dist_2d[len(dist_2d)*95//100]
delta_dif = delta_dif.numpy()
delta_dif = delta_dif[np.abs(delta_dif) > 0]
percents_good = np.sum(np.abs(delta_dif) < 0.1)*100/len(delta_dif)
print( "Training loss at step %d (%.2f (%.2f),%.2f,%.2f,%.2f,%.4f): %.4f (%.2f),%.4f (%.2f),%.4f,%.4f,%.4f lr %.4f" % (step, err3d, np.linalg.norm(meanshift[0,:2]), err2d, err50, err95, (err50+err95)/2, float(total_loss_psevdo), percents_good_psev, float(total_loss_delta),percents_good,float(accs_loss_large),float(accs_loss_small), float(speed_loss_small), float(lr)), end='\r')
if(step % 32 == 0):
lr *= 0.90
optimizer.learning_rate = lr
if(step > 32):
physics = 1.
print()
if True:
plt.clf()
plt.scatter(pred_pos[:,1], pred_pos[:,0], s=0.2)
plt.scatter(gt_ecef_coords[:,1], gt_ecef_coords[:,0], s=0.2)
#fig1.canvas.start_event_loop(sys.float_info.min) #workaround for Exception in Tkinter callback
plt.savefig("fig/"+track+str(step+10000)+".png", dpi = 1000)
plt.close()
poses = track_model_error(track_input)
times = start_nanos + time_tick*track_input
poses = np.matmul(poses, mat_local.T)
d = {'nanos': np.reshape(times,(-1)), 'X': poses[:,0], 'Y': poses[:,1], 'Z': poses[:,2]}
df = pd.DataFrame(data=d)
df.to_csv(folder + "/" + track + "/track.csv")
| train_step_gnss | identifier_name |
dashboard.go | package dashboard
import (
"arrowcloudapi/models"
"arrowcloudapi/mongo"
"arrowcloudapi/utils/log"
"errors"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"gopkg.in/mgo.v2/bson"
"github.com/jeffail/gabs"
)
const (
host360 = "platform.appcelerator.com"
authPath = "/api/v1/auth/login"
logoutPath = "/api/v1/auth/logout"
orgInfoPath = "/api/v1/user/organizations"
thisEnvAdminURL = "http://admin.cloudapp-1.appctest.com"
)
// Auth implements Authenticator interface to authenticate user against DB.
type Auth struct{}
/**
* Authenticate user against appcelerator 360 (dashboard). This is for enterprise user only.
* @param username
* @param password
* @param cb
*/
//function validateThrough360(mid, username, password, callback) {
func (d *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
//check whether the dashboard session is still valid first
/*
>db.dashboard_sessions.findOne()
{
"_id" : ObjectId("53d07fcba38d8ba60518c900"),
"username" : "rdong@appcelerator.com",
"sid_360": "s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E",
"cookie": [
"connect.sid=s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E; Domain=360-preprod.appcelerator.com; Path=/; HttpOnly; Secure"
]
}
*/
//TODO find and invalidate previous 360 session
loginUrl := "https://" + host360 + authPath
username := m.Principal
creds := url.Values{}
creds.Set("username", username)
creds.Add("password", m.Password)
// v.Encode() == "name=Ava&friend=Jess&friend=Sarah&friend=Zoe"
//curl -i -b cookies.txt -c cookies.txt -F "username=mgoff@appcelerator.com" -F "password=food" http://360-dev.appcelerator.com/api/v1/auth/login
/*
response for bad username/password
HTTP/1.1 400 Bad Request
X-Powered-By: Express
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, DELETE, PUT
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 79
Date: Fri, 19 Apr 2013 01:25:24 GMT
Connection: keep-alive
{"success":false,"description":"Invalid password.","code":400,"internalCode":2}
*/
resp, err := http.PostForm(loginUrl, creds)
if err != nil {
log.Errorf("Failed to login to dashboard. %v", err)
return nil, err
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
return nil, errors.New("authentication failed")
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return nil, err
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil |
// 'set-cookie': ['t=UpnUzNztGWO7K8A%2BCYihZz056Bk%3D; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'un=mgoff%40appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'sid=33f33a6b7f8fef7b0fc649654187d467; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'dvid=2019bea3-9e7b-48e3-890f-00e3e22b39e2; Path=/; Expires=Sat, 17 Oct 2015 06:27:19 GMT',
// 'connect.sid=s%3Aj0kX71OMFpIQ11Vf1ruhqJLH.on4RLy9q9tpVqnUeoQJBWlDPiB6bS8rWWhq8sOCDGPc; Domain=360-dev.appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT; HttpOnly'
// ]
// {
// "success": true,
// "result": {
// "success": true,
// "username": "mgoff@appcelerator.com",
// "email": "mgoff@appcelerator.com",
// "guid": "ae6150453b3599b2875b311c40785b40",
// "org_id": 14301,
// "connect.sid": "s:QGW1cqj5h9B3fL6jwJTtjkuT.iuwQ23WOgiK/E+QfkRNVWi7G5S9DA00Li6BQPLGkROM"
// }
cookie := resp.Header.Get("set-cookie")
if cookie == "" {
log.Error("No cookie found in response")
return nil, errors.New("authentication failed")
}
sid := strings.Split(strings.Split(cookie, ";")[0], "=")[1]
log.Debugf("sid: %s", sid)
// for _, cookie := range cookies.([]string) {
// log.Debugf("cookie: %s", cookie)
// }
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
return nil, errors.New("authentication failed")
}
err = handleDashboardSession(username, sid, cookie)
if err != nil {
return nil, err
}
haveAccess, orgs, err := getAndVerifyOrgInfoFrom360(username, sid)
if err != nil {
return nil, err
}
if !haveAccess {
log.Errorf("user's organizations do not have access to this domain")
return nil, errors.New("No access to this domain")
}
user := bson.M{
"username": jsonBody.Path("result.username").Data().(string),
"email": jsonBody.Path("result.email").Data().(string),
"guid": jsonBody.Path("result.guid").Data().(string),
}
if jsonBody.Path("result.firstname").Data() != nil {
user["firstname"] = jsonBody.Path("result.firstname").Data().(string)
} else {
user["firstname"] = jsonBody.Path("result.username").Data().(string)
}
//user's organization info returned from dashboard. It's an array since a user can belong to
//multiple organizations.
user["orgs_360"] = orgs
user["orgs_360_updated_at"] = time.Now()
savedUser, err := saveUser(user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
mUser := &models.User{
ID: savedUser["_id"].(bson.ObjectId).Hex(),
Username: jsonBody.Path("result.username").Data().(string),
Email: jsonBody.Path("result.email").Data().(string),
Orgs: orgs,
}
if jsonBody.Path("result.firstname").Data() != nil {
mUser.Firstname = jsonBody.Path("result.firstname").Data().(string)
} else {
mUser.Firstname = jsonBody.Path("result.username").Data().(string)
}
return mUser, nil
}
func handleDashboardSession(username, sid_360, cookie string) error {
old_db_session, err := findDashboardSession(username)
if err != nil && !strings.Contains(err.Error(), "not found") {
return err
}
if _, ok := old_db_session["sid_360"]; ok {
err = logoutFromDashboard(old_db_session["sid_360"].(string))
if err != nil {
return err
}
}
// save 360 session to DB
db_session := bson.M{
"username": username,
"sid_360": sid_360,
"cookie": cookie,
}
return saveDashboardSession(db_session)
}
func getAndVerifyOrgInfoFrom360(username, sid string) (haveAccess bool, orgs []models.Org, err error) {
// reqTimeout := 20000; //20s
//curl -i -b connect.sid=s%3AaJaL7IWQ_cDvmVBeQRY997hf.vVzLV2aFvrYiEKmfdTARTuHessesQ0Xm87JvFESaus http://dashboard.appcelerator.com/api/v1/user/organizations
/*
response for invalid session
HTTP/1.1 401 Unauthorized
X-Frame-Options: SAMEORIGIN
Cache-Control: no-cache, max-age=0, must-revalidate
Pragma: no-cache
Vary: Accept-Encoding
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, PATCH, DELETE
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 59
Set-Cookie: connect.sid=s%3AIEpzWmzs4MQJGJMEcLmjlZm_.Cyi4LlO8gP%2B4sPHR0bdEGqjiqjuW3RJlZe6O2bt8QkI; Domain=dashboard.appcelerator.com; Path=/; Expires=Sat, 12 Apr 2014 13:04:07 GMT; HttpOnly; Secure
Date: Thu, 13 Mar 2014 13:04:07 GMT
Connection: close
{"success":false,"description":"Login Required","code":401}
*/
log.Debug("Get user organization information from " + host360 + orgInfoPath)
orgInfoUrl := "https://" + host360 + orgInfoPath
//https://webcache.googleusercontent.com/search?q=cache:OVK76hrG4T8J:https://medium.com/%40nate510/don-t-use-go-s-default-http-client-4804cb19f779+&cd=4&hl=en&ct=clnk&gl=jp
client := &http.Client{}
req, err := http.NewRequest("GET", orgInfoUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to get organization info from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 401 {
log.Warning("getAndVerifyOrgInfoFrom360 - Failed to get organization information. Session is invalid")
err = errors.New("Failed to get organization information. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to get organization info")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to get organization info")
return
}
/*
{
"success": true,
"result": [{
"_id": "51c40b4497a98c6046000002",
"org_id": 14301,
"name": "Appcelerator, Inc",
"guid": "64310644-794b-c8d0-a8b8-0a373d20dabc",
"user_count": 97,
"current_users_role": "normal",
"is_node_acs_admin": false,
"trial_end_date": "",
"created": "2012-01-11 10:58:09.0",
"reseller": false,
"active": true,
"envs": [{
"_id": "production",
"name": "production",
"isProduction": true,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}, {
"_id": "development",
"name": "development",
"isProduction": false,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}],
"parent_org_guid": ""
}]
}
*/
organizations := jsonBody.Path("result").Data().([]interface{})
if !validateOrgs(organizations) {
log.Errorf("getAndVerifyOrgInfoFrom360 - Bad response from dashboard: invalid organization info. %v", organizations)
err = errors.New("Bad response from dashboard")
return
//TODO send mail
}
//check if the user's organizations have access to current deployment (identified by admin host)
orgs, haveAccess = checkOrgs(organizations)
return
}
func checkOrgs(orgArray []interface{}) (orgs []models.Org, haveAccess bool) {
log.Debugf("check if user's organizations have access to this domain")
re := regexp.MustCompile("^(http|https)://") //https://golang.org/pkg/regexp/#MustCompile
thisEnvHost := re.ReplaceAllString(thisEnvAdminURL, "")
orgs = []models.Org{} //organizations which can access this domain (deployment)
userOrgIds := []string{}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
orgToSave := models.Org{
ID: strconv.FormatFloat(orgDoc["org_id"].(float64), 'f', -1, 64),
Name: orgDoc["name"].(string),
Admin: orgDoc["current_users_role"].(string) == "admin",
Node_acs_admin: orgDoc["is_node_acs_admin"].(bool),
}
userOrgIds = append(userOrgIds, orgToSave.ID)
//check if the org has access to this domain (deployment)
//if yes save it in "orgs"
if envsData, ok := orgDoc["envs"]; ok {
envs := envsData.([]interface{})
for _, envData := range envs {
env := envData.(map[string]interface{})
adminHost, hok := env["nodeACSEndpoint"].(string)
if hok {
re := regexp.MustCompile("^(http|https)://")
adminHost := re.ReplaceAllString(adminHost, "")
log.Debugf("org %s(%s) have access to %s", orgToSave.Name, orgToSave.ID, adminHost)
if adminHost == thisEnvHost {
orgs = append(orgs, orgToSave)
break
}
}
}
}
}
//workaround for testing - start
// userOrgIds.push('14301');
// orgs.push({id:'14301', name:'appcelerator Inc.', admin: true, node_acs_admin: true});
//workaround for testing - end
if len(orgs) < 1 {
log.Errorf("getAndVerifyOrgInfoFrom360 - User's organization(s) %v doesn't have access to current deployment (%s).", userOrgIds, thisEnvHost)
haveAccess = false
return
}
haveAccess = true
return
}
/**
* Validate the organization info got from 360 for a user is valid.
* @param orgArray
* @returns {boolean}
*/
func validateOrgs(orgArray []interface{}) bool {
if len(orgArray) == 0 {
return false
}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
if _, ok := orgDoc["org_id"]; !ok {
return false
}
if _, ok := orgDoc["name"]; !ok {
return false
}
if _, ok := orgDoc["is_node_acs_admin"]; !ok {
return false
}
}
return true
}
/**
* Load user's 360 session information based on username.
*/
func findDashboardSession(username string) (bson.M, error) {
log.Debugf("find dashboard session for user %s", username)
re, err := mongo.FindOneDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": username})
if err != nil {
log.Errorf("Failed to find dashboard session. %v", err)
return nil, err
}
return re, err
}
/**
* insert or update user's 360 session information upon login to 360
*/
func saveDashboardSession(session bson.M) error {
log.Debugf("save dashboard session for user %v", session["username"])
_, err := mongo.UpsertDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": session["username"]}, session)
if err != nil {
log.Errorf("Failed to save dashboard session. %v", err)
return err
}
log.Debugf("Upserted %v into %s collection.", session, mongo.STRATUS_DASHBOARD_SESSIONS_COLL)
return nil
}
/**
* insert or update user information upon login to Appcelerator's sso interface
*/
func saveUser(user bson.M) (bson.M, error) {
saved, err := mongo.UpsertDocument(mongo.STRATUS_USERS_COLL,
bson.M{"guid": user["guid"]}, user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
log.Debugf("Upserted %v into %s collection.", user, mongo.STRATUS_USERS_COLL)
return saved, nil
}
//TOOD use request module to support proxy
func logoutFromDashboard(sid_360 string) (err error) {
log.Debugf("Logout session %s from Appcelerator 360.", sid_360)
logOutUrl := "https://" + host360 + logoutPath
client := &http.Client{}
req, err := http.NewRequest("GET", logOutUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid_360)
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to logout from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 400 {
log.Warning("Failed to logout from dashboard. Session is invalid")
err = errors.New("Failed to logout from dashboard. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to logout from dashboard")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to logout from dashboard")
return
}
return
}
| {
log.Errorf("Failed to parse response body. %v", err)
return nil, err
} | conditional_block |
dashboard.go | package dashboard
import (
"arrowcloudapi/models"
"arrowcloudapi/mongo"
"arrowcloudapi/utils/log"
"errors"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"gopkg.in/mgo.v2/bson"
"github.com/jeffail/gabs"
)
const (
host360 = "platform.appcelerator.com"
authPath = "/api/v1/auth/login"
logoutPath = "/api/v1/auth/logout"
orgInfoPath = "/api/v1/user/organizations"
thisEnvAdminURL = "http://admin.cloudapp-1.appctest.com"
)
// Auth implements Authenticator interface to authenticate user against DB.
type Auth struct{}
/**
* Authenticate user against appcelerator 360 (dashboard). This is for enterprise user only.
* @param username
* @param password
* @param cb
*/
//function validateThrough360(mid, username, password, callback) {
func (d *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
//check whether the dashboard session is still valid first
/*
>db.dashboard_sessions.findOne()
{
"_id" : ObjectId("53d07fcba38d8ba60518c900"),
"username" : "rdong@appcelerator.com",
"sid_360": "s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E",
"cookie": [
"connect.sid=s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E; Domain=360-preprod.appcelerator.com; Path=/; HttpOnly; Secure"
]
}
*/
//TODO find and invalidate previous 360 session
loginUrl := "https://" + host360 + authPath
username := m.Principal
creds := url.Values{}
creds.Set("username", username)
creds.Add("password", m.Password)
// v.Encode() == "name=Ava&friend=Jess&friend=Sarah&friend=Zoe"
//curl -i -b cookies.txt -c cookies.txt -F "username=mgoff@appcelerator.com" -F "password=food" http://360-dev.appcelerator.com/api/v1/auth/login
/*
response for bad username/password
HTTP/1.1 400 Bad Request
X-Powered-By: Express
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, DELETE, PUT
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 79
Date: Fri, 19 Apr 2013 01:25:24 GMT
Connection: keep-alive
{"success":false,"description":"Invalid password.","code":400,"internalCode":2}
*/
resp, err := http.PostForm(loginUrl, creds)
if err != nil {
log.Errorf("Failed to login to dashboard. %v", err)
return nil, err
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
return nil, errors.New("authentication failed")
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return nil, err
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return nil, err
}
// 'set-cookie': ['t=UpnUzNztGWO7K8A%2BCYihZz056Bk%3D; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'un=mgoff%40appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'sid=33f33a6b7f8fef7b0fc649654187d467; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'dvid=2019bea3-9e7b-48e3-890f-00e3e22b39e2; Path=/; Expires=Sat, 17 Oct 2015 06:27:19 GMT',
// 'connect.sid=s%3Aj0kX71OMFpIQ11Vf1ruhqJLH.on4RLy9q9tpVqnUeoQJBWlDPiB6bS8rWWhq8sOCDGPc; Domain=360-dev.appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT; HttpOnly'
// ]
// {
// "success": true,
// "result": {
// "success": true,
// "username": "mgoff@appcelerator.com",
// "email": "mgoff@appcelerator.com",
// "guid": "ae6150453b3599b2875b311c40785b40",
// "org_id": 14301,
// "connect.sid": "s:QGW1cqj5h9B3fL6jwJTtjkuT.iuwQ23WOgiK/E+QfkRNVWi7G5S9DA00Li6BQPLGkROM"
// }
cookie := resp.Header.Get("set-cookie")
if cookie == "" {
log.Error("No cookie found in response")
return nil, errors.New("authentication failed")
}
sid := strings.Split(strings.Split(cookie, ";")[0], "=")[1]
log.Debugf("sid: %s", sid)
// for _, cookie := range cookies.([]string) {
// log.Debugf("cookie: %s", cookie)
// }
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
return nil, errors.New("authentication failed")
}
err = handleDashboardSession(username, sid, cookie)
if err != nil {
return nil, err
}
haveAccess, orgs, err := getAndVerifyOrgInfoFrom360(username, sid)
if err != nil {
return nil, err
}
if !haveAccess {
log.Errorf("user's organizations do not have access to this domain")
return nil, errors.New("No access to this domain")
}
user := bson.M{
"username": jsonBody.Path("result.username").Data().(string),
"email": jsonBody.Path("result.email").Data().(string),
"guid": jsonBody.Path("result.guid").Data().(string),
}
if jsonBody.Path("result.firstname").Data() != nil {
user["firstname"] = jsonBody.Path("result.firstname").Data().(string)
} else {
user["firstname"] = jsonBody.Path("result.username").Data().(string)
}
//user's organization info returned from dashboard. It's an array since a user can belong to
//multiple organizations.
user["orgs_360"] = orgs
user["orgs_360_updated_at"] = time.Now()
savedUser, err := saveUser(user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
mUser := &models.User{
ID: savedUser["_id"].(bson.ObjectId).Hex(),
Username: jsonBody.Path("result.username").Data().(string),
Email: jsonBody.Path("result.email").Data().(string),
Orgs: orgs,
}
if jsonBody.Path("result.firstname").Data() != nil {
mUser.Firstname = jsonBody.Path("result.firstname").Data().(string)
} else {
mUser.Firstname = jsonBody.Path("result.username").Data().(string)
}
return mUser, nil
}
func handleDashboardSession(username, sid_360, cookie string) error {
old_db_session, err := findDashboardSession(username)
if err != nil && !strings.Contains(err.Error(), "not found") {
return err
}
if _, ok := old_db_session["sid_360"]; ok {
err = logoutFromDashboard(old_db_session["sid_360"].(string))
if err != nil {
return err
}
}
// save 360 session to DB
db_session := bson.M{
"username": username,
"sid_360": sid_360,
"cookie": cookie,
}
return saveDashboardSession(db_session)
}
func getAndVerifyOrgInfoFrom360(username, sid string) (haveAccess bool, orgs []models.Org, err error) {
// reqTimeout := 20000; //20s
//curl -i -b connect.sid=s%3AaJaL7IWQ_cDvmVBeQRY997hf.vVzLV2aFvrYiEKmfdTARTuHessesQ0Xm87JvFESaus http://dashboard.appcelerator.com/api/v1/user/organizations
/*
response for invalid session
HTTP/1.1 401 Unauthorized
X-Frame-Options: SAMEORIGIN
Cache-Control: no-cache, max-age=0, must-revalidate
Pragma: no-cache
Vary: Accept-Encoding
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, PATCH, DELETE
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 59
Set-Cookie: connect.sid=s%3AIEpzWmzs4MQJGJMEcLmjlZm_.Cyi4LlO8gP%2B4sPHR0bdEGqjiqjuW3RJlZe6O2bt8QkI; Domain=dashboard.appcelerator.com; Path=/; Expires=Sat, 12 Apr 2014 13:04:07 GMT; HttpOnly; Secure
Date: Thu, 13 Mar 2014 13:04:07 GMT
Connection: close
{"success":false,"description":"Login Required","code":401}
*/
log.Debug("Get user organization information from " + host360 + orgInfoPath)
orgInfoUrl := "https://" + host360 + orgInfoPath
//https://webcache.googleusercontent.com/search?q=cache:OVK76hrG4T8J:https://medium.com/%40nate510/don-t-use-go-s-default-http-client-4804cb19f779+&cd=4&hl=en&ct=clnk&gl=jp
client := &http.Client{}
req, err := http.NewRequest("GET", orgInfoUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to get organization info from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 401 {
log.Warning("getAndVerifyOrgInfoFrom360 - Failed to get organization information. Session is invalid")
err = errors.New("Failed to get organization information. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to get organization info")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to get organization info")
return
}
/*
{
"success": true,
"result": [{
"_id": "51c40b4497a98c6046000002",
"org_id": 14301,
"name": "Appcelerator, Inc",
"guid": "64310644-794b-c8d0-a8b8-0a373d20dabc",
"user_count": 97,
"current_users_role": "normal",
"is_node_acs_admin": false,
"trial_end_date": "",
"created": "2012-01-11 10:58:09.0",
"reseller": false,
"active": true,
"envs": [{
"_id": "production",
"name": "production",
"isProduction": true,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}, {
"_id": "development",
"name": "development",
"isProduction": false,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}],
"parent_org_guid": ""
}]
}
*/
organizations := jsonBody.Path("result").Data().([]interface{})
if !validateOrgs(organizations) {
log.Errorf("getAndVerifyOrgInfoFrom360 - Bad response from dashboard: invalid organization info. %v", organizations)
err = errors.New("Bad response from dashboard")
return
//TODO send mail
}
//check if the user's organizations have access to current deployment (identified by admin host)
orgs, haveAccess = checkOrgs(organizations)
return
}
func checkOrgs(orgArray []interface{}) (orgs []models.Org, haveAccess bool) {
log.Debugf("check if user's organizations have access to this domain")
re := regexp.MustCompile("^(http|https)://") //https://golang.org/pkg/regexp/#MustCompile
thisEnvHost := re.ReplaceAllString(thisEnvAdminURL, "")
orgs = []models.Org{} //organizations which can access this domain (deployment)
userOrgIds := []string{}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
orgToSave := models.Org{
ID: strconv.FormatFloat(orgDoc["org_id"].(float64), 'f', -1, 64),
Name: orgDoc["name"].(string),
Admin: orgDoc["current_users_role"].(string) == "admin",
Node_acs_admin: orgDoc["is_node_acs_admin"].(bool),
}
userOrgIds = append(userOrgIds, orgToSave.ID)
//check if the org has access to this domain (deployment)
//if yes save it in "orgs"
if envsData, ok := orgDoc["envs"]; ok {
envs := envsData.([]interface{})
for _, envData := range envs {
env := envData.(map[string]interface{})
adminHost, hok := env["nodeACSEndpoint"].(string)
if hok {
re := regexp.MustCompile("^(http|https)://")
adminHost := re.ReplaceAllString(adminHost, "")
log.Debugf("org %s(%s) have access to %s", orgToSave.Name, orgToSave.ID, adminHost)
if adminHost == thisEnvHost {
orgs = append(orgs, orgToSave)
break
}
}
}
}
}
//workaround for testing - start
// userOrgIds.push('14301');
// orgs.push({id:'14301', name:'appcelerator Inc.', admin: true, node_acs_admin: true});
//workaround for testing - end
if len(orgs) < 1 {
log.Errorf("getAndVerifyOrgInfoFrom360 - User's organization(s) %v doesn't have access to current deployment (%s).", userOrgIds, thisEnvHost)
haveAccess = false
return
}
haveAccess = true
return
}
/**
* Validate the organization info got from 360 for a user is valid.
* @param orgArray
* @returns {boolean}
*/
func validateOrgs(orgArray []interface{}) bool {
if len(orgArray) == 0 {
return false
}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
if _, ok := orgDoc["org_id"]; !ok {
return false
}
if _, ok := orgDoc["name"]; !ok {
return false
}
if _, ok := orgDoc["is_node_acs_admin"]; !ok {
return false
}
}
return true
}
/**
* Load user's 360 session information based on username.
*/
func | (username string) (bson.M, error) {
log.Debugf("find dashboard session for user %s", username)
re, err := mongo.FindOneDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": username})
if err != nil {
log.Errorf("Failed to find dashboard session. %v", err)
return nil, err
}
return re, err
}
/**
* insert or update user's 360 session information upon login to 360
*/
func saveDashboardSession(session bson.M) error {
log.Debugf("save dashboard session for user %v", session["username"])
_, err := mongo.UpsertDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": session["username"]}, session)
if err != nil {
log.Errorf("Failed to save dashboard session. %v", err)
return err
}
log.Debugf("Upserted %v into %s collection.", session, mongo.STRATUS_DASHBOARD_SESSIONS_COLL)
return nil
}
/**
* insert or update user information upon login to Appcelerator's sso interface
*/
func saveUser(user bson.M) (bson.M, error) {
saved, err := mongo.UpsertDocument(mongo.STRATUS_USERS_COLL,
bson.M{"guid": user["guid"]}, user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
log.Debugf("Upserted %v into %s collection.", user, mongo.STRATUS_USERS_COLL)
return saved, nil
}
//TOOD use request module to support proxy
func logoutFromDashboard(sid_360 string) (err error) {
log.Debugf("Logout session %s from Appcelerator 360.", sid_360)
logOutUrl := "https://" + host360 + logoutPath
client := &http.Client{}
req, err := http.NewRequest("GET", logOutUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid_360)
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to logout from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 400 {
log.Warning("Failed to logout from dashboard. Session is invalid")
err = errors.New("Failed to logout from dashboard. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to logout from dashboard")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to logout from dashboard")
return
}
return
}
| findDashboardSession | identifier_name |
dashboard.go | package dashboard
import (
"arrowcloudapi/models"
"arrowcloudapi/mongo"
"arrowcloudapi/utils/log"
"errors"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"gopkg.in/mgo.v2/bson"
"github.com/jeffail/gabs"
)
const (
host360 = "platform.appcelerator.com"
authPath = "/api/v1/auth/login"
logoutPath = "/api/v1/auth/logout"
orgInfoPath = "/api/v1/user/organizations"
thisEnvAdminURL = "http://admin.cloudapp-1.appctest.com"
)
// Auth implements Authenticator interface to authenticate user against DB.
type Auth struct{}
/**
* Authenticate user against appcelerator 360 (dashboard). This is for enterprise user only.
* @param username
* @param password
* @param cb
*/
//function validateThrough360(mid, username, password, callback) {
func (d *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
//check whether the dashboard session is still valid first
/*
>db.dashboard_sessions.findOne()
{
"_id" : ObjectId("53d07fcba38d8ba60518c900"),
"username" : "rdong@appcelerator.com",
"sid_360": "s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E",
"cookie": [
"connect.sid=s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E; Domain=360-preprod.appcelerator.com; Path=/; HttpOnly; Secure"
]
}
*/
//TODO find and invalidate previous 360 session
loginUrl := "https://" + host360 + authPath
username := m.Principal
creds := url.Values{}
creds.Set("username", username)
creds.Add("password", m.Password)
// v.Encode() == "name=Ava&friend=Jess&friend=Sarah&friend=Zoe"
//curl -i -b cookies.txt -c cookies.txt -F "username=mgoff@appcelerator.com" -F "password=food" http://360-dev.appcelerator.com/api/v1/auth/login
/*
response for bad username/password
HTTP/1.1 400 Bad Request
X-Powered-By: Express
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, DELETE, PUT
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 79
Date: Fri, 19 Apr 2013 01:25:24 GMT
Connection: keep-alive
{"success":false,"description":"Invalid password.","code":400,"internalCode":2}
*/
resp, err := http.PostForm(loginUrl, creds)
if err != nil {
log.Errorf("Failed to login to dashboard. %v", err)
return nil, err
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
return nil, errors.New("authentication failed")
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return nil, err
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return nil, err
}
// 'set-cookie': ['t=UpnUzNztGWO7K8A%2BCYihZz056Bk%3D; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'un=mgoff%40appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'sid=33f33a6b7f8fef7b0fc649654187d467; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'dvid=2019bea3-9e7b-48e3-890f-00e3e22b39e2; Path=/; Expires=Sat, 17 Oct 2015 06:27:19 GMT',
// 'connect.sid=s%3Aj0kX71OMFpIQ11Vf1ruhqJLH.on4RLy9q9tpVqnUeoQJBWlDPiB6bS8rWWhq8sOCDGPc; Domain=360-dev.appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT; HttpOnly'
// ]
// {
// "success": true,
// "result": {
// "success": true,
// "username": "mgoff@appcelerator.com",
// "email": "mgoff@appcelerator.com",
// "guid": "ae6150453b3599b2875b311c40785b40",
// "org_id": 14301,
// "connect.sid": "s:QGW1cqj5h9B3fL6jwJTtjkuT.iuwQ23WOgiK/E+QfkRNVWi7G5S9DA00Li6BQPLGkROM"
// }
cookie := resp.Header.Get("set-cookie")
if cookie == "" {
log.Error("No cookie found in response")
return nil, errors.New("authentication failed")
}
sid := strings.Split(strings.Split(cookie, ";")[0], "=")[1]
log.Debugf("sid: %s", sid)
// for _, cookie := range cookies.([]string) {
// log.Debugf("cookie: %s", cookie)
// }
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
return nil, errors.New("authentication failed")
}
err = handleDashboardSession(username, sid, cookie)
if err != nil {
return nil, err
}
haveAccess, orgs, err := getAndVerifyOrgInfoFrom360(username, sid)
if err != nil {
return nil, err
}
if !haveAccess {
log.Errorf("user's organizations do not have access to this domain")
return nil, errors.New("No access to this domain")
}
user := bson.M{
"username": jsonBody.Path("result.username").Data().(string),
"email": jsonBody.Path("result.email").Data().(string),
"guid": jsonBody.Path("result.guid").Data().(string),
}
if jsonBody.Path("result.firstname").Data() != nil {
user["firstname"] = jsonBody.Path("result.firstname").Data().(string)
} else {
user["firstname"] = jsonBody.Path("result.username").Data().(string)
}
//user's organization info returned from dashboard. It's an array since a user can belong to
//multiple organizations.
user["orgs_360"] = orgs
user["orgs_360_updated_at"] = time.Now()
savedUser, err := saveUser(user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
mUser := &models.User{
ID: savedUser["_id"].(bson.ObjectId).Hex(),
Username: jsonBody.Path("result.username").Data().(string),
Email: jsonBody.Path("result.email").Data().(string),
Orgs: orgs,
}
if jsonBody.Path("result.firstname").Data() != nil {
mUser.Firstname = jsonBody.Path("result.firstname").Data().(string)
} else {
mUser.Firstname = jsonBody.Path("result.username").Data().(string)
}
return mUser, nil
}
func handleDashboardSession(username, sid_360, cookie string) error {
old_db_session, err := findDashboardSession(username)
if err != nil && !strings.Contains(err.Error(), "not found") {
return err
}
if _, ok := old_db_session["sid_360"]; ok {
err = logoutFromDashboard(old_db_session["sid_360"].(string))
if err != nil {
return err
}
}
// save 360 session to DB
db_session := bson.M{
"username": username,
"sid_360": sid_360,
"cookie": cookie,
}
return saveDashboardSession(db_session)
}
func getAndVerifyOrgInfoFrom360(username, sid string) (haveAccess bool, orgs []models.Org, err error) {
// reqTimeout := 20000; //20s
//curl -i -b connect.sid=s%3AaJaL7IWQ_cDvmVBeQRY997hf.vVzLV2aFvrYiEKmfdTARTuHessesQ0Xm87JvFESaus http://dashboard.appcelerator.com/api/v1/user/organizations
/*
response for invalid session
HTTP/1.1 401 Unauthorized
X-Frame-Options: SAMEORIGIN
Cache-Control: no-cache, max-age=0, must-revalidate
Pragma: no-cache
Vary: Accept-Encoding
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, PATCH, DELETE
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 59
Set-Cookie: connect.sid=s%3AIEpzWmzs4MQJGJMEcLmjlZm_.Cyi4LlO8gP%2B4sPHR0bdEGqjiqjuW3RJlZe6O2bt8QkI; Domain=dashboard.appcelerator.com; Path=/; Expires=Sat, 12 Apr 2014 13:04:07 GMT; HttpOnly; Secure
Date: Thu, 13 Mar 2014 13:04:07 GMT
Connection: close
{"success":false,"description":"Login Required","code":401}
*/
log.Debug("Get user organization information from " + host360 + orgInfoPath)
orgInfoUrl := "https://" + host360 + orgInfoPath
//https://webcache.googleusercontent.com/search?q=cache:OVK76hrG4T8J:https://medium.com/%40nate510/don-t-use-go-s-default-http-client-4804cb19f779+&cd=4&hl=en&ct=clnk&gl=jp
client := &http.Client{}
req, err := http.NewRequest("GET", orgInfoUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to get organization info from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 401 {
log.Warning("getAndVerifyOrgInfoFrom360 - Failed to get organization information. Session is invalid")
err = errors.New("Failed to get organization information. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to get organization info")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to get organization info")
return
}
/*
{
"success": true,
"result": [{
"_id": "51c40b4497a98c6046000002",
"org_id": 14301,
"name": "Appcelerator, Inc",
"guid": "64310644-794b-c8d0-a8b8-0a373d20dabc",
"user_count": 97,
"current_users_role": "normal",
"is_node_acs_admin": false,
"trial_end_date": "",
"created": "2012-01-11 10:58:09.0",
"reseller": false,
"active": true,
"envs": [{
"_id": "production",
"name": "production",
"isProduction": true,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}, {
"_id": "development",
"name": "development",
"isProduction": false,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}],
"parent_org_guid": ""
}]
}
*/
organizations := jsonBody.Path("result").Data().([]interface{})
if !validateOrgs(organizations) {
log.Errorf("getAndVerifyOrgInfoFrom360 - Bad response from dashboard: invalid organization info. %v", organizations)
err = errors.New("Bad response from dashboard")
return
//TODO send mail
}
//check if the user's organizations have access to current deployment (identified by admin host)
orgs, haveAccess = checkOrgs(organizations)
return
}
func checkOrgs(orgArray []interface{}) (orgs []models.Org, haveAccess bool) {
log.Debugf("check if user's organizations have access to this domain")
re := regexp.MustCompile("^(http|https)://") //https://golang.org/pkg/regexp/#MustCompile
thisEnvHost := re.ReplaceAllString(thisEnvAdminURL, "")
orgs = []models.Org{} //organizations which can access this domain (deployment)
userOrgIds := []string{}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
orgToSave := models.Org{
ID: strconv.FormatFloat(orgDoc["org_id"].(float64), 'f', -1, 64),
Name: orgDoc["name"].(string),
Admin: orgDoc["current_users_role"].(string) == "admin",
Node_acs_admin: orgDoc["is_node_acs_admin"].(bool),
}
userOrgIds = append(userOrgIds, orgToSave.ID)
//check if the org has access to this domain (deployment)
//if yes save it in "orgs"
if envsData, ok := orgDoc["envs"]; ok {
envs := envsData.([]interface{})
for _, envData := range envs {
env := envData.(map[string]interface{})
adminHost, hok := env["nodeACSEndpoint"].(string)
if hok {
re := regexp.MustCompile("^(http|https)://")
adminHost := re.ReplaceAllString(adminHost, "")
log.Debugf("org %s(%s) have access to %s", orgToSave.Name, orgToSave.ID, adminHost)
if adminHost == thisEnvHost {
orgs = append(orgs, orgToSave)
break
}
}
}
}
}
//workaround for testing - start
// userOrgIds.push('14301');
// orgs.push({id:'14301', name:'appcelerator Inc.', admin: true, node_acs_admin: true});
//workaround for testing - end
if len(orgs) < 1 {
log.Errorf("getAndVerifyOrgInfoFrom360 - User's organization(s) %v doesn't have access to current deployment (%s).", userOrgIds, thisEnvHost)
haveAccess = false
return
}
haveAccess = true
return
}
/**
* Validate the organization info got from 360 for a user is valid.
* @param orgArray
* @returns {boolean}
*/
func validateOrgs(orgArray []interface{}) bool {
if len(orgArray) == 0 {
return false
}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
if _, ok := orgDoc["org_id"]; !ok {
return false
}
if _, ok := orgDoc["name"]; !ok {
return false
}
if _, ok := orgDoc["is_node_acs_admin"]; !ok {
return false
}
}
return true
}
/**
* Load user's 360 session information based on username.
*/
func findDashboardSession(username string) (bson.M, error) {
log.Debugf("find dashboard session for user %s", username)
re, err := mongo.FindOneDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": username})
if err != nil {
log.Errorf("Failed to find dashboard session. %v", err)
return nil, err
}
return re, err
}
/**
* insert or update user's 360 session information upon login to 360
*/
func saveDashboardSession(session bson.M) error |
/**
* insert or update user information upon login to Appcelerator's sso interface
*/
func saveUser(user bson.M) (bson.M, error) {
saved, err := mongo.UpsertDocument(mongo.STRATUS_USERS_COLL,
bson.M{"guid": user["guid"]}, user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
log.Debugf("Upserted %v into %s collection.", user, mongo.STRATUS_USERS_COLL)
return saved, nil
}
//TOOD use request module to support proxy
func logoutFromDashboard(sid_360 string) (err error) {
log.Debugf("Logout session %s from Appcelerator 360.", sid_360)
logOutUrl := "https://" + host360 + logoutPath
client := &http.Client{}
req, err := http.NewRequest("GET", logOutUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid_360)
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to logout from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 400 {
log.Warning("Failed to logout from dashboard. Session is invalid")
err = errors.New("Failed to logout from dashboard. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to logout from dashboard")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to logout from dashboard")
return
}
return
}
| {
log.Debugf("save dashboard session for user %v", session["username"])
_, err := mongo.UpsertDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": session["username"]}, session)
if err != nil {
log.Errorf("Failed to save dashboard session. %v", err)
return err
}
log.Debugf("Upserted %v into %s collection.", session, mongo.STRATUS_DASHBOARD_SESSIONS_COLL)
return nil
} | identifier_body |
dashboard.go | package dashboard
import (
"arrowcloudapi/models"
"arrowcloudapi/mongo"
"arrowcloudapi/utils/log"
"errors"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"gopkg.in/mgo.v2/bson"
"github.com/jeffail/gabs"
)
const (
host360 = "platform.appcelerator.com"
authPath = "/api/v1/auth/login"
logoutPath = "/api/v1/auth/logout"
orgInfoPath = "/api/v1/user/organizations"
thisEnvAdminURL = "http://admin.cloudapp-1.appctest.com"
)
// Auth implements Authenticator interface to authenticate user against DB.
type Auth struct{}
/**
* Authenticate user against appcelerator 360 (dashboard). This is for enterprise user only.
* @param username
* @param password
* @param cb
*/
//function validateThrough360(mid, username, password, callback) {
func (d *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
//check whether the dashboard session is still valid first
/*
>db.dashboard_sessions.findOne()
{
"_id" : ObjectId("53d07fcba38d8ba60518c900"),
"username" : "rdong@appcelerator.com",
"sid_360": "s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E",
"cookie": [
"connect.sid=s%3ANpiTvlGoViClfe_peVLfBJFN.r7IEVTSaVKnz2a6nQ8joUn2Uf8o1QMKv40YRnnime3E; Domain=360-preprod.appcelerator.com; Path=/; HttpOnly; Secure"
]
}
*/
//TODO find and invalidate previous 360 session
loginUrl := "https://" + host360 + authPath
username := m.Principal
creds := url.Values{}
creds.Set("username", username)
creds.Add("password", m.Password)
// v.Encode() == "name=Ava&friend=Jess&friend=Sarah&friend=Zoe"
//curl -i -b cookies.txt -c cookies.txt -F "username=mgoff@appcelerator.com" -F "password=food" http://360-dev.appcelerator.com/api/v1/auth/login
/*
response for bad username/password
HTTP/1.1 400 Bad Request
X-Powered-By: Express
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, DELETE, PUT
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 79
Date: Fri, 19 Apr 2013 01:25:24 GMT
Connection: keep-alive
{"success":false,"description":"Invalid password.","code":400,"internalCode":2}
*/
resp, err := http.PostForm(loginUrl, creds)
if err != nil {
log.Errorf("Failed to login to dashboard. %v", err)
return nil, err
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
return nil, errors.New("authentication failed")
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return nil, err
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return nil, err
}
// 'set-cookie': ['t=UpnUzNztGWO7K8A%2BCYihZz056Bk%3D; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'un=mgoff%40appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'sid=33f33a6b7f8fef7b0fc649654187d467; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT',
// 'dvid=2019bea3-9e7b-48e3-890f-00e3e22b39e2; Path=/; Expires=Sat, 17 Oct 2015 06:27:19 GMT',
// 'connect.sid=s%3Aj0kX71OMFpIQ11Vf1ruhqJLH.on4RLy9q9tpVqnUeoQJBWlDPiB6bS8rWWhq8sOCDGPc; Domain=360-dev.appcelerator.com; Path=/; Expires=Sat, 16 Nov 2013 06:27:19 GMT; HttpOnly'
// ]
// {
// "success": true,
// "result": {
// "success": true,
// "username": "mgoff@appcelerator.com",
// "email": "mgoff@appcelerator.com",
// "guid": "ae6150453b3599b2875b311c40785b40",
// "org_id": 14301,
// "connect.sid": "s:QGW1cqj5h9B3fL6jwJTtjkuT.iuwQ23WOgiK/E+QfkRNVWi7G5S9DA00Li6BQPLGkROM"
// }
cookie := resp.Header.Get("set-cookie")
if cookie == "" {
log.Error("No cookie found in response")
return nil, errors.New("authentication failed")
}
sid := strings.Split(strings.Split(cookie, ";")[0], "=")[1]
log.Debugf("sid: %s", sid)
// for _, cookie := range cookies.([]string) {
// log.Debugf("cookie: %s", cookie)
// }
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
return nil, errors.New("authentication failed")
}
err = handleDashboardSession(username, sid, cookie)
if err != nil {
return nil, err
}
haveAccess, orgs, err := getAndVerifyOrgInfoFrom360(username, sid)
if err != nil {
return nil, err
}
if !haveAccess {
log.Errorf("user's organizations do not have access to this domain")
return nil, errors.New("No access to this domain")
}
user := bson.M{
"username": jsonBody.Path("result.username").Data().(string),
"email": jsonBody.Path("result.email").Data().(string),
"guid": jsonBody.Path("result.guid").Data().(string),
}
if jsonBody.Path("result.firstname").Data() != nil {
user["firstname"] = jsonBody.Path("result.firstname").Data().(string)
} else {
user["firstname"] = jsonBody.Path("result.username").Data().(string)
}
//user's organization info returned from dashboard. It's an array since a user can belong to
//multiple organizations.
user["orgs_360"] = orgs
user["orgs_360_updated_at"] = time.Now()
savedUser, err := saveUser(user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
mUser := &models.User{
ID: savedUser["_id"].(bson.ObjectId).Hex(),
Username: jsonBody.Path("result.username").Data().(string),
Email: jsonBody.Path("result.email").Data().(string),
Orgs: orgs,
}
if jsonBody.Path("result.firstname").Data() != nil {
mUser.Firstname = jsonBody.Path("result.firstname").Data().(string)
} else {
mUser.Firstname = jsonBody.Path("result.username").Data().(string)
}
return mUser, nil
}
func handleDashboardSession(username, sid_360, cookie string) error {
old_db_session, err := findDashboardSession(username)
if err != nil && !strings.Contains(err.Error(), "not found") {
return err
}
if _, ok := old_db_session["sid_360"]; ok {
err = logoutFromDashboard(old_db_session["sid_360"].(string))
if err != nil {
return err
}
}
// save 360 session to DB
db_session := bson.M{
"username": username,
"sid_360": sid_360,
"cookie": cookie,
}
return saveDashboardSession(db_session)
}
func getAndVerifyOrgInfoFrom360(username, sid string) (haveAccess bool, orgs []models.Org, err error) {
// reqTimeout := 20000; //20s
//curl -i -b connect.sid=s%3AaJaL7IWQ_cDvmVBeQRY997hf.vVzLV2aFvrYiEKmfdTARTuHessesQ0Xm87JvFESaus http://dashboard.appcelerator.com/api/v1/user/organizations
/*
response for invalid session
HTTP/1.1 401 Unauthorized
X-Frame-Options: SAMEORIGIN
Cache-Control: no-cache, max-age=0, must-revalidate
Pragma: no-cache
Vary: Accept-Encoding
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, PATCH, DELETE
Access-Control-Allow-Headers: Content-Type, api_key
Content-Type: application/json; charset=utf-8
Content-Length: 59
Set-Cookie: connect.sid=s%3AIEpzWmzs4MQJGJMEcLmjlZm_.Cyi4LlO8gP%2B4sPHR0bdEGqjiqjuW3RJlZe6O2bt8QkI; Domain=dashboard.appcelerator.com; Path=/; Expires=Sat, 12 Apr 2014 13:04:07 GMT; HttpOnly; Secure
Date: Thu, 13 Mar 2014 13:04:07 GMT
Connection: close
{"success":false,"description":"Login Required","code":401}
*/
log.Debug("Get user organization information from " + host360 + orgInfoPath)
orgInfoUrl := "https://" + host360 + orgInfoPath
//https://webcache.googleusercontent.com/search?q=cache:OVK76hrG4T8J:https://medium.com/%40nate510/don-t-use-go-s-default-http-client-4804cb19f779+&cd=4&hl=en&ct=clnk&gl=jp
client := &http.Client{}
req, err := http.NewRequest("GET", orgInfoUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to get organization info from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 401 {
log.Warning("getAndVerifyOrgInfoFrom360 - Failed to get organization information. Session is invalid")
err = errors.New("Failed to get organization information. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to get organization info")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to get organization info")
return
}
/*
{
"success": true,
"result": [{
"_id": "51c40b4497a98c6046000002",
"org_id": 14301,
"name": "Appcelerator, Inc",
"guid": "64310644-794b-c8d0-a8b8-0a373d20dabc",
"user_count": 97,
"current_users_role": "normal",
"is_node_acs_admin": false,
"trial_end_date": "",
"created": "2012-01-11 10:58:09.0",
"reseller": false,
"active": true,
"envs": [{
"_id": "production",
"name": "production",
"isProduction": true,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}, {
"_id": "development",
"name": "development",
"isProduction": false,
"acsBaseUrl": "https://preprod-api.cloud.appcelerator.com",
"acsAuthBaseUrl": "https://dolphin-secure-identity.cloud.appcelerator.com",
"nodeACSEndpoint": "https://admin.cloudapp-enterprise-preprod.appcelerator.com"
}],
"parent_org_guid": ""
}]
}
*/
organizations := jsonBody.Path("result").Data().([]interface{})
if !validateOrgs(organizations) {
log.Errorf("getAndVerifyOrgInfoFrom360 - Bad response from dashboard: invalid organization info. %v", organizations)
err = errors.New("Bad response from dashboard")
return
//TODO send mail
}
//check if the user's organizations have access to current deployment (identified by admin host)
orgs, haveAccess = checkOrgs(organizations)
return
}
func checkOrgs(orgArray []interface{}) (orgs []models.Org, haveAccess bool) {
log.Debugf("check if user's organizations have access to this domain")
re := regexp.MustCompile("^(http|https)://") //https://golang.org/pkg/regexp/#MustCompile
thisEnvHost := re.ReplaceAllString(thisEnvAdminURL, "")
orgs = []models.Org{} //organizations which can access this domain (deployment)
userOrgIds := []string{}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
orgToSave := models.Org{
ID: strconv.FormatFloat(orgDoc["org_id"].(float64), 'f', -1, 64),
Name: orgDoc["name"].(string),
Admin: orgDoc["current_users_role"].(string) == "admin",
Node_acs_admin: orgDoc["is_node_acs_admin"].(bool),
}
userOrgIds = append(userOrgIds, orgToSave.ID)
//check if the org has access to this domain (deployment)
//if yes save it in "orgs"
if envsData, ok := orgDoc["envs"]; ok {
envs := envsData.([]interface{})
for _, envData := range envs {
env := envData.(map[string]interface{})
adminHost, hok := env["nodeACSEndpoint"].(string)
if hok {
re := regexp.MustCompile("^(http|https)://")
adminHost := re.ReplaceAllString(adminHost, "")
log.Debugf("org %s(%s) have access to %s", orgToSave.Name, orgToSave.ID, adminHost)
if adminHost == thisEnvHost {
orgs = append(orgs, orgToSave)
break
}
}
}
}
}
//workaround for testing - start
// userOrgIds.push('14301');
// orgs.push({id:'14301', name:'appcelerator Inc.', admin: true, node_acs_admin: true});
//workaround for testing - end
if len(orgs) < 1 {
log.Errorf("getAndVerifyOrgInfoFrom360 - User's organization(s) %v doesn't have access to current deployment (%s).", userOrgIds, thisEnvHost)
haveAccess = false
return
}
haveAccess = true
return
}
/**
* Validate the organization info got from 360 for a user is valid.
* @param orgArray
* @returns {boolean}
*/
func validateOrgs(orgArray []interface{}) bool {
if len(orgArray) == 0 {
return false
}
for _, orgData := range orgArray {
orgDoc := orgData.(map[string]interface{})
if _, ok := orgDoc["org_id"]; !ok {
return false
}
if _, ok := orgDoc["name"]; !ok {
return false
}
if _, ok := orgDoc["is_node_acs_admin"]; !ok {
return false
}
}
return true
}
/**
* Load user's 360 session information based on username.
*/
func findDashboardSession(username string) (bson.M, error) {
log.Debugf("find dashboard session for user %s", username)
re, err := mongo.FindOneDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": username}) | return nil, err
}
return re, err
}
/**
* insert or update user's 360 session information upon login to 360
*/
func saveDashboardSession(session bson.M) error {
log.Debugf("save dashboard session for user %v", session["username"])
_, err := mongo.UpsertDocument(mongo.STRATUS_DASHBOARD_SESSIONS_COLL,
bson.M{"username": session["username"]}, session)
if err != nil {
log.Errorf("Failed to save dashboard session. %v", err)
return err
}
log.Debugf("Upserted %v into %s collection.", session, mongo.STRATUS_DASHBOARD_SESSIONS_COLL)
return nil
}
/**
* insert or update user information upon login to Appcelerator's sso interface
*/
func saveUser(user bson.M) (bson.M, error) {
saved, err := mongo.UpsertDocument(mongo.STRATUS_USERS_COLL,
bson.M{"guid": user["guid"]}, user)
if err != nil {
log.Errorf("Failed to save user. %v", err)
return nil, err
}
log.Debugf("Upserted %v into %s collection.", user, mongo.STRATUS_USERS_COLL)
return saved, nil
}
//TOOD use request module to support proxy
func logoutFromDashboard(sid_360 string) (err error) {
log.Debugf("Logout session %s from Appcelerator 360.", sid_360)
logOutUrl := "https://" + host360 + logoutPath
client := &http.Client{}
req, err := http.NewRequest("GET", logOutUrl, nil)
req.Header.Add("Cookie", "connect.sid="+sid_360)
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to logout from dashboard. %v", err)
return
}
//log.Debugf("resp: %v", resp)
if resp.StatusCode == 400 {
log.Warning("Failed to logout from dashboard. Session is invalid")
err = errors.New("Failed to logout from dashboard. Session is invalid.")
return
}
if resp.StatusCode != 200 {
log.Debugf("dashboard returns status %s", resp.Status)
err = errors.New("Failed to logout from dashboard")
return
}
bodyBuf, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Errorf("Failed to read response body. %v", err)
return
}
jsonBody, err := gabs.ParseJSON(bodyBuf)
if err != nil {
log.Errorf("Failed to parse response body. %v", err)
return
}
success := jsonBody.Path("success").Data().(bool)
if !success {
log.Error("dashboard returns false for success field")
err = errors.New("Failed to logout from dashboard")
return
}
return
} |
if err != nil {
log.Errorf("Failed to find dashboard session. %v", err) | random_line_split |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceeded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To faciliate more advanced use-cases, each of the two maps also carry some customizeable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice of that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improvate
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
pub struct Options<M, S>
where
S: BuildHasher,
{
meta: M,
hasher: S,
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
S: BuildHasher,
M: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Options")
.field("meta", &self.meta)
.field("capacity", &self.capacity)
.finish()
}
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
Options {
meta,
hasher: self.hasher,
capacity: self.capacity,
}
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
S2: BuildHasher + Clone,
{
Options {
meta: self.meta,
hasher: hash_builder,
capacity: self.capacity,
}
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
Options {
meta: self.meta,
hasher: self.hasher,
capacity: Some(capacity),
}
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the
/// same inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn | <K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
{
let inner = if let Some(cap) = self.capacity {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
}
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: StableHashEq + Clone,
V: StableHashEq,
{
Options::default().construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
{
Options::default().assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
meta: M,
hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
M: 'static + Clone,
S: BuildHasher + Clone,
{
Options::default()
.with_hasher(hasher)
.with_meta(meta)
.assert_stable()
}
| assert_stable | identifier_name |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceeded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To faciliate more advanced use-cases, each of the two maps also carry some customizeable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice of that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improvate
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
pub struct Options<M, S>
where
S: BuildHasher,
{
meta: M,
hasher: S,
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
S: BuildHasher,
M: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Options")
.field("meta", &self.meta)
.field("capacity", &self.capacity)
.finish()
}
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
Options {
meta,
hasher: self.hasher,
capacity: self.capacity,
}
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
S2: BuildHasher + Clone,
{
Options {
meta: self.meta,
hasher: hash_builder,
capacity: self.capacity,
}
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
Options {
meta: self.meta,
hasher: self.hasher,
capacity: Some(capacity),
}
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the
/// same inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn assert_stable<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
{
let inner = if let Some(cap) = self.capacity | else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
}
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: StableHashEq + Clone,
V: StableHashEq,
{
Options::default().construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
{
Options::default().assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
meta: M,
hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
M: 'static + Clone,
S: BuildHasher + Clone,
{
Options::default()
.with_hasher(hasher)
.with_meta(meta)
.assert_stable()
}
| {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} | conditional_block |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceeded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To faciliate more advanced use-cases, each of the two maps also carry some customizeable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice of that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improvate
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
pub struct Options<M, S>
where
S: BuildHasher,
{
meta: M,
hasher: S,
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
S: BuildHasher,
M: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Options")
.field("meta", &self.meta)
.field("capacity", &self.capacity)
.finish()
}
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
Options {
meta,
hasher: self.hasher,
capacity: self.capacity,
}
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
S2: BuildHasher + Clone,
{
Options {
meta: self.meta,
hasher: hash_builder,
capacity: self.capacity,
}
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
Options {
meta: self.meta,
hasher: self.hasher,
capacity: Some(capacity),
}
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the | /// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn assert_stable<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
{
let inner = if let Some(cap) = self.capacity {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
}
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: StableHashEq + Clone,
V: StableHashEq,
{
Options::default().construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
{
Options::default().assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
meta: M,
hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
M: 'static + Clone,
S: BuildHasher + Clone,
{
Options::default()
.with_hasher(hasher)
.with_meta(meta)
.assert_stable()
} | /// same inputs. For keys of type `K`, the result must also be consistent between different clones | random_line_split |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceeded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To facilitate more advanced use-cases, each of the two maps also carries some customizable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice of that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improve
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
pub struct Options<M, S>
where
S: BuildHasher,
{
// Initial meta value for the map; set via `with_meta`.
meta: M,
// Hash builder for the underlying maps; set via `with_hasher`.
hasher: S,
// Initial capacity to pre-allocate, if any; set via `with_capacity`.
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
    S: BuildHasher,
    M: fmt::Debug,
{
    /// Debug-format the options; the hasher is omitted since `S` need not be `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Options");
        dbg.field("meta", &self.meta);
        dbg.field("capacity", &self.capacity);
        dbg.finish()
    }
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
    // Rebuild with the new meta; hasher and capacity carry over unchanged.
    let Options { hasher, capacity, .. } = self;
    Options {
        meta,
        hasher,
        capacity,
    }
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
    S2: BuildHasher + Clone,
{
    // Rebuild with the new hash builder; meta and capacity carry over.
    let Options { meta, capacity, .. } = self;
    Options {
        meta,
        hasher: hash_builder,
        capacity,
    }
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
    // Only the capacity changes; every other option is kept as-is.
    Options {
        capacity: Some(capacity),
        ..self
    }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
// SAFETY: the `StableHashEq` bounds on `K` and `V` certify that their
// `Hash`/`Eq` implementations are deterministic, which is exactly the
// contract `assert_stable` requires.
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the
/// same inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn assert_stable<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
|
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
    WriteHandle<K, V, (), RandomState>,
    ReadHandle<K, V, (), RandomState>,
)
where
    K: StableHashEq + Clone,
    V: StableHashEq,
{
    // Default options: `()` meta, `RandomState` hasher, no capacity hint.
    let opts = Options::default();
    opts.construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
    WriteHandle<K, V, (), RandomState>,
    ReadHandle<K, V, (), RandomState>,
)
where
    K: Eq + Hash + Clone,
    V: Eq + Hash,
{
    // The caller upholds the determinism contract stated above.
    let opts = Options::default();
    opts.assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
    meta: M,
    hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
    K: Eq + Hash + Clone,
    V: Eq + Hash,
    M: 'static + Clone,
    S: BuildHasher + Clone,
{
    // The two builder steps commute; set the meta first, then swap the hasher.
    Options::default()
        .with_meta(meta)
        .with_hasher(hasher)
        .assert_stable()
}
| {
let inner = if let Some(cap) = self.capacity {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
} | identifier_body |
results.js | /**
* ALMViz
* See https://github.com/lagotto/almviz for more details
* Distributed under the GNU GPL v2. For full terms see the file docs/COPYING.
*
* @brief Article level metrics visualization controller.
*/
/*global d3 */
// Global configuration handed to the AlmViz constructor below.
var options = {
baseUrl: '',
// Minimum number of events/periods required before a chart is offered at a
// given granularity (daily / monthly / yearly).
minItemsToShowGraph: {
minEventsForYearly: 1,
minEventsForMonthly: 1,
minEventsForDaily: 1,
minYearsForYearly: 1,
minMonthsForMonthly: 1,
minDaysForDaily: 1
},
// CSS selector of the container the visualization is rendered into.
vizDiv: "#panel-results",
// Filled in asynchronously from the Lagotto API responses below.
work: {},
sources: [],
groups: []
};
// Pull the API key and work identifier from the "#api_key" element's data attributes.
var params = d3.select("#api_key");
if (!params.empty()) {
var api_key = params.attr('data-api-key');
var work_id = params.attr('data-pid');
var query = encodeURI("/api/works/" + pathForWork(work_id) + "/results");
}
// NOTE(review): `work_id` and `query` are hoisted `var`s — if "#api_key" is
// absent they stay undefined and the API requests below will be malformed.
// Confirm the element is always rendered on this page.
// asynchronously load data from the Lagotto API
// Fetch the work, source list, group list, and per-source results in
// parallel; hand everything to AlmViz once all four responses have arrived.
queue()
.defer(d3.json, encodeURI("/api/works/" + pathForWork(work_id)))
.defer(d3.json, encodeURI("/api/sources"))
.defer(d3.json, encodeURI("/api/groups"))
.defer(d3.json, query)
.await(function(error, w, s, g, r) {
if (error) { return console.warn(error); }
options.work = w.work;
options.sources = s.sources;
options.groups = g.groups;
options.almStatsJson = r.results;
var almviz = new AlmViz(options);
almviz.initViz();
});
function AlmViz(options) {
// allow jQuery object to be passed in
// in case a different version of jQuery is needed from the one globally defined
// NOTE(review): this assigns the global `$` (no var), clobbering the page's
// jQuery for all other scripts; confirm that is intended.
$ = options.jQuery || $;
// Init data
// remove group not needed for the following visualizations
var work_ = options.work;
var groups_ = options.groups.filter(function(d) { return d.id !== "other"; });
var sources_ = options.sources;
var data = options.almStatsJson;
// Abort if data are missing
if (!data || !data[0]) {
console.log('Error: missing data');
d3.select("#loading-results").remove();
d3.select("#content-results").text("")
.insert("div")
.attr("class", "alert alert-info")
.text("There are currently no results");
return;
}
// Init basic options
var baseUrl_ = options.baseUrl;
var minItems_ = options.minItemsToShowGraph;
var formatNumber_ = d3.format(",d");
// extract publication date
// Construct date object from date parts, using "1" for missing day and month
var timestamp = Date.parse(work_["issued"]);
// Works issued as "0000" have no usable date; fall back to the year 2000.
if (work_["issued"] === "0000") { timestamp = Date.parse("2000"); }
var pub_date = new Date(timestamp);
var vizDiv;
// Get the Div where the viz should go (default to one with ID "alm")
if (options.vizDiv) {
vizDiv = d3.select(options.vizDiv);
} else {
vizDiv = d3.select("#alm");
}
// look to make sure browser support SVG
var hasSVG_ = document.implementation.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure", "1.1");
// to track if any metrics have been found
var metricsFound_;
/**
 * Initialize the visualization: clear the loading indicator, render every
 * group, and show a fallback message when nothing was rendered.
 * NB: needs to be accessible from the outside for initialization
 */
this.initViz = function() {
vizDiv.select("#loading-results").remove();
// render each group in turn
for (var gi = 0; gi < groups_.length; gi++) {
addGroup_(vizDiv, groups_[gi], sources_, data);
}
if (metricsFound_) { return; }
vizDiv.append("p")
.attr("class", "text-muted")
.text("No results found.");
};
/**
 * Build each article level statistics group.
 * @param {Object} canvas d3 element
 * @param {Array} group Information about the group.
 * @param {Array} sources_ Source descriptors to scan for this group.
 * @param {Object} data Statistics.
 * @return {JQueryObject|boolean}
 */
var addGroup_ = function(canvas, group, sources_, data) {
var $groupRow = false;
// Loop through sources to add statistics data to the group.
sources_.forEach(function(source) {
if (source.group_id !== group.id) { return; }
// `results` was assigned without `var`, leaking a global (and a ReferenceError
// in strict mode); keep it function-local.
var results = data.filter(function(d) { return d.source_id === source.id; })[0];
if (typeof results === "undefined" || results.total === 0) { return; }
// Only add the group row the first time
if (!$groupRow) {
$groupRow = getgroupRow_(canvas, group);
}
// Flag that there is at least one metric
metricsFound_ = true;
var label = source.title;
addSource_(source, label, results, results.total, group, "total", $groupRow);
});
};
/**
 * Get group row d3 HTML element. It will automatically
 * add the element to the passed canvas.
 * @param {d3Object} canvas d3 HTML element
 * @param {Array} group group information.
 * @return {d3Object}
 */
var getgroupRow_ = function(canvas, group) {
// Container that per-source rows for this group are appended into.
// (The unused `groupTitle`/`tooltip` locals of the old version are dropped.)
return canvas.append("div")
.attr("class", "alm-group")
.attr("id", "group-" + group.id);
};
/**
 * Add source information to the passed group row element and, when the
 * thresholds in minItems_ are met, render a bar chart with granularity
 * controls for it.
 * @param {Object} source source descriptor (id, title, group_id)
 * @param {String} label display label for the source
 * @param {Object} results result data for this source (total, by_year, by_month)
 * @param {integer} sourceTotalValue
 * @param {Object} group
 * @param {String} subgroup key into each datum (here always "total")
 * @param {JQueryObject} $groupRow
 * @return {JQueryObject}
 */
var addSource_ = function(source, label, results, sourceTotalValue, group, subgroup, $groupRow) {
var $row, $countLabel, $count,
total = sourceTotalValue;
$row = $groupRow
.append("div")
.attr("class", "alm-source")
.attr("id", "source-" + source.id + "-" + subgroup);
$countLabel = $row.append("div")
.attr("class", "alm-label " + group.id);
$count = $countLabel.append("p")
.attr("class", "alm-count")
.attr("id", "alm-count-" + source.id + "-" + group.id);
$count
.text(formatNumber_(total));
if (source.id === 'pkpTimedViews') {
$countLabel.append("p")
.text(label);
} else {
// link the source name
$countLabel.append("p").append("a")
.attr("href", baseUrl_ + "/sources/" + source.id)
.text(label);
}
// Only add a chart if the browser supports SVG
if (hasSVG_) {
var level = false;
// check what levels we can show
// NOTE(review): showDaily is never set to true in this version (there is no
// results.by_day check), so the daily branches below are currently dead
// code — confirm whether daily support was intentionally removed.
var showDaily = false;
var showMonthly = false;
var showYearly = false;
if (results.by_year) {
var level_data = getData_('year', results);
var yearTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numYears = d3.time.year.utc.range(pub_date, new Date()).length;
if (yearTotal >= minItems_.minEventsForYearly &&
numYears >= minItems_.minYearsForYearly) {
showYearly = true;
level = 'year';
}
}
if (results.by_month) {
var level_data = getData_('month', results);
var monthTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numMonths = d3.time.month.utc.range(pub_date, new Date()).length;
if (monthTotal >= minItems_.minEventsForMonthly &&
numMonths >= minItems_.minMonthsForMonthly) {
showMonthly = true;
level = 'month';
}
}
// The level and level_data should be set to the finest level
// of granularity that we can show
// NOTE(review): `timeInterval` is assigned without `var` (implicit global)
// and is not read again in this function — looks like leftover code.
timeInterval = getTimeInterval_(level);
// only draw a chart when at least one granularity has enough data
if (showDaily || showMonthly || showYearly) {
$row
.attr('class', 'alm-source with-chart');
var $chartDiv = $row.append("div")
.attr("class", "alm-chart");
var viz = getViz_($chartDiv, source, group, subgroup, results);
loadData_(viz, level);
// toggles the active state of the clicked granularity control
var update_controls = function(control) {
control.siblings('.alm-control').removeClass('active');
control.addClass('active');
};
var $levelControlsDiv = $chartDiv.append("div")
.attr("class", "alm-control-label")
.attr("style", "width: " + (viz.margin.left + viz.width) + "px;");
if (showDaily) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showDaily)
.classed("active", (level === 'day'))
.text("daily (first 30)")
.on("click", function() {
if (showDaily && !$(this).hasClass('active')) {
loadData_(viz, 'day');
update_controls($(this));
}
}
);
$levelControlsDiv.append("text").text(" | ");
}
if (showMonthly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showMonthly || !showYearly)
.classed("active", (level === 'month'))
.text("monthly")
.on("click", function() { if (showMonthly && !$(this).hasClass('active')) {
loadData_(viz, 'month');
update_controls($(this));
} });
if (showYearly) {
$levelControlsDiv.append("text")
.text(" | ");
}
}
if (showYearly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showYearly || !showMonthly)
.classed("active", (level === 'year'))
.text("yearly")
.on("click", function() {
if (showYearly && !$(this).hasClass('active')) {
loadData_(viz, 'year');
update_controls($(this));
}
}
);
}
}
}
return $row;
};
/**
 * Build a JS `Date` from a datum at the given granularity.
 * JS `Date` months are 0-indexed while the API's `d.month` is 1-indexed,
 * hence the `- 1` shifts.
 * @param level (day|month|year)
 * @param d the datum
 * @return {Date} (undefined when level is unrecognized)
 */
var getDate_ = function(level, d) {
if (level === 'year') {
return new Date(d.year, 0, 1);
}
if (level === 'month') {
return new Date(d.year, d.month - 1, 1);
}
if (level === 'day') {
return new Date(d.year, d.month - 1, d.day);
}
};
/**
 * Format a datum's date for tooltip display.
 * @param level (day|month|year)
 * @param d the datum
 * @return {String} (undefined when level is unrecognized)
 */
var getFormattedDate_ = function(level, d) {
// Pick the d3 format string for the granularity, then apply it.
var patterns = { year: "%Y", month: "%b %y", day: "%d %b %y" };
var pattern = patterns[level];
if (pattern) {
return d3.time.format(pattern)(getDate_(level, d));
}
};
/**
 * Extract the per-period data series from the results.
 * @param {string} level (day|month|year)
 * @param {Object} results per-source results object
 * @return {Array} Metrics (undefined when the series is absent)
 */
var getData_ = function(level, results) {
switch (level) {
case 'year':
return results.by_year;
case 'month':
return results.by_month;
case 'day':
// was missing: the documented 'day' level fell through and the daily
// chart path received undefined even when daily data existed
return results.by_day;
}
};
/**
 * Returns a d3 timeInterval for date operations.
 * @param {string} level (day|month|year)
 * @return {Object} d3 time Interval (undefined when level is unrecognized)
 */
var getTimeInterval_ = function(level) {
if (level === 'year') { return d3.time.year.utc; }
if (level === 'month') { return d3.time.month.utc; }
if (level === 'day') { return d3.time.day.utc; }
};
/**
 * The basic general set up of the graph itself.
 * Builds the scales, svg container, and axis groups; no data is bound yet
 * (see loadData_ for that).
 * @param {JQueryElement} chartDiv The div where the chart should go
 * @param {Object} source
 * @param {Array} group The group for the chart
 * @param {String} subgroup key into each datum (here "total")
 * @param {Object} results per-source results
 * @return {Object} viz state bag consumed by loadData_
 */
var getViz_ = function(chartDiv, source, group, subgroup, results) {
var viz = {};
// size parameters
viz.margin = {top: 10, right: 20, bottom: 5, left: 50};
viz.width = 760 - viz.margin.left - viz.margin.right;
viz.height = 115 - viz.margin.top - viz.margin.bottom;
// div where everything goes
viz.chartDiv = chartDiv;
// source data and which group
viz.group = group;
viz.subgroup = subgroup;
viz.source = source;
viz.results = results;
// just for record keeping
viz.name = source.id + '-' + group.id + '-' + viz.subgroup;
// x: time scale across the chart width; y: counts (range inverted since SVG y grows downward)
viz.x = d3.time.scale();
viz.x.range([0, viz.width]);
viz.y = d3.scale.linear();
viz.y.range([viz.height, 0]);
// z: alternates two CSS classes so adjacent periods are visually distinct
viz.z = d3.scale.ordinal();
viz.z.range([group.id, group.id + '-alt']);
// the chart
viz.svg = viz.chartDiv.append("svg")
.attr("width", viz.width + viz.margin.left + viz.margin.right)
.attr("height", viz.height + viz.margin.top + viz.margin.bottom + 1)
.append("g")
.attr("transform", "translate(" + viz.margin.left + "," + viz.margin.top + ")");
// draw the bars g first so it ends up underneath the axes
viz.bars = viz.svg.append("g");
// and the shadow bars on top for the tooltips
viz.barsForTooltips = viz.svg.append("g");
viz.svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + viz.height + ")");
viz.svg.append("g")
.attr("class", "y axis");
return viz;
};
/**
 * Takes in the basic set up of a graph and loads the data itself.
 * Binds the per-period data to bars, (re)draws axes, and attaches tooltips;
 * safe to call repeatedly when the user switches granularity.
 * @param {Object} viz AlmViz object
 * @param {string} level (day|month|year)
 */
var loadData_ = function(viz, level) {
var group = viz.group;
var subgroup = viz.subgroup;
var level_data = getData_(level, viz.results);
var timeInterval = getTimeInterval_(level);
var end_date = new Date();
// use only first 29 days if using day view
// close out the year otherwise
if (level === 'day') {
end_date = timeInterval.offset(pub_date, 29);
} else {
end_date = d3.time.year.utc.ceil(end_date);
}
//
// Domains for x and y
//
// a time x axis, between pub_date and end_date
viz.x.domain([timeInterval.floor(pub_date), end_date]);
// a linear axis from 0 to max value found
viz.y.domain([0, d3.max(level_data, function(d) { return d[subgroup]; })]);
//
// Axis
//
// a linear axis between publication date and current date
viz.xAxis = d3.svg.axis()
.scale(viz.x)
.tickSize(0)
.ticks(0);
// a linear y axis between 0 and max value found in data
viz.yAxis = d3.svg.axis()
.scale(viz.y)
.orient("left")
.tickSize(0)
.tickValues([d3.max(viz.y.domain())]) // only one tick at max
.tickFormat(d3.format(",d"));
//
// The chart itself
//
// TODO: these transitions could use a little work
// add more padding to wider bars
var rawWidth = (viz.width/(timeInterval.range(pub_date, end_date).length + 1));
var barWidth = Math.max(rawWidth - rawWidth/5, 1);
// key bars by their date so enter/update/exit match up across redraws
var barsForTooltips = viz.barsForTooltips.selectAll(".barsForTooltip")
.data(level_data, function(d) { return getDate_(level, d); });
barsForTooltips
.exit()
.remove();
var bars = viz.bars.selectAll(".bar")
.data(level_data, function(d) { return getDate_(level, d); });
bars
.enter().append("rect")
.attr("class", function(d) { return "bar " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); })
.attr("y", viz.height)
.attr("height", 0);
bars
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 2; })
.attr("width", barWidth);
bars.transition()
.duration(1000)
.attr("width", barWidth)
.attr("y", function(d) { return viz.y(d[subgroup]); })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]); });
bars
.exit().transition()
.attr("y", viz.height)
.attr("height", 0);
bars
.exit()
.remove();
viz.svg
.select(".x.axis")
.call(viz.xAxis);
viz.svg
.transition().duration(1000)
.select(".y.axis")
.call(viz.yAxis);
// invisible overlay bars, slightly larger than the visible ones, give a
// bigger hover target for the tooltips
barsForTooltips
.enter().append("rect")
.attr("class", function(d) { return "barsForTooltip " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); });
barsForTooltips
.attr("width", barWidth + 2)
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 1; })
.attr("y", function(d) { return viz.y(d[subgroup]) - 1; })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]) + 1; });
// add in some tool tips
viz.barsForTooltips.selectAll("rect").each(
function(d){
$(this).tooltip('destroy'); // need to destroy so all bars get updated
$(this).tooltip({title: formatNumber_(d[subgroup]) + " in " + getFormattedDate_(level, d), container: "body"});
}
);
};
}
| AlmViz | identifier_name |
results.js | /**
* ALMViz
* See https://github.com/lagotto/almviz for more details
* Distributed under the GNU GPL v2. For full terms see the file docs/COPYING.
*
* @brief Article level metrics visualization controller.
*/
/*global d3 */
// Global configuration handed to the AlmViz constructor below.
var options = {
baseUrl: '',
// Minimum number of events/periods required before a chart is offered at a
// given granularity (daily / monthly / yearly).
minItemsToShowGraph: {
minEventsForYearly: 1,
minEventsForMonthly: 1,
minEventsForDaily: 1,
minYearsForYearly: 1,
minMonthsForMonthly: 1,
minDaysForDaily: 1
},
// CSS selector of the container the visualization is rendered into.
vizDiv: "#panel-results",
// Filled in asynchronously from the Lagotto API responses below.
work: {},
sources: [],
groups: []
};
// Pull the API key and work identifier from the "#api_key" element's data attributes.
var params = d3.select("#api_key");
if (!params.empty()) {
var api_key = params.attr('data-api-key');
var work_id = params.attr('data-pid');
var query = encodeURI("/api/works/" + pathForWork(work_id) + "/results");
}
// NOTE(review): `work_id` and `query` are hoisted `var`s — if "#api_key" is
// absent they stay undefined and the API requests below will be malformed.
// asynchronously load data from the Lagotto API
// Fetch the work, source list, group list, and per-source results in
// parallel; hand everything to AlmViz once all four responses have arrived.
queue()
.defer(d3.json, encodeURI("/api/works/" + pathForWork(work_id)))
.defer(d3.json, encodeURI("/api/sources"))
.defer(d3.json, encodeURI("/api/groups"))
.defer(d3.json, query)
.await(function(error, w, s, g, r) {
if (error) { return console.warn(error); }
options.work = w.work;
options.sources = s.sources;
options.groups = g.groups;
options.almStatsJson = r.results;
var almviz = new AlmViz(options);
almviz.initViz();
});
function AlmViz(options) {
// allow jQuery object to be passed in
// in case a different version of jQuery is needed from the one globally defined
$ = options.jQuery || $;
// Init data
// remove group not needed for the following visualizations
var work_ = options.work;
var groups_ = options.groups.filter(function(d) { return d.id !== "other"; });
var sources_ = options.sources;
var data = options.almStatsJson;
// Abort if data are missing
if (!data || !data[0]) {
console.log('Error: missing data');
d3.select("#loading-results").remove();
d3.select("#content-results").text("")
.insert("div")
.attr("class", "alert alert-info")
.text("There are currently no results");
return;
}
// Init basic options
var baseUrl_ = options.baseUrl;
var minItems_ = options.minItemsToShowGraph;
var formatNumber_ = d3.format(",d");
// extract publication date
// Construct date object from date parts, using "1" for missing day and month
var timestamp = Date.parse(work_["issued"]);
if (work_["issued"] === "0000") { timestamp = Date.parse("2000"); }
var pub_date = new Date(timestamp);
var vizDiv;
// Get the Div where the viz should go (default to one with ID "alm')
if (options.vizDiv) {
vizDiv = d3.select(options.vizDiv);
} else {
vizDiv = d3.select("#alm");
}
// look to make sure browser support SVG
var hasSVG_ = document.implementation.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure", "1.1");
// to track if any metrics have been found
var metricsFound_;
/**
* Initialize the visualization.
* NB: needs to be accessible from the outside for initialization
*/
this.initViz = function() {
vizDiv.select("#loading-results").remove();
// loop through groups
groups_.forEach(function(group) {
addGroup_(vizDiv, group, sources_, data);
});
if (!metricsFound_) {
vizDiv.append("p")
.attr("class", "text-muted")
.text("No results found.");
}
};
/**
* Build each article level statistics group.
* @param {Object} canvas d3 element
* @param {Array} group Information about the group.
* @param {Object} data Statistics.
* @return {JQueryObject|boolean}
*/
var addGroup_ = function(canvas, group, sources_, data) {
var $groupRow = false;
// Loop through sources to add statistics data to the group.
sources_.forEach(function(source) {
if (source.group_id !== group.id) { return; }
results = data.filter(function(d) { return d.source_id === source.id; })[0];
if (typeof results === "undefined" || results.total === 0) { return; }
// Only add the group row the first time
if (!$groupRow) {
$groupRow = getgroupRow_(canvas, group);
}
// Flag that there is at least one metric
metricsFound_ = true;
var label = source.title;
addSource_(source, label, results, results.total, group, "total", $groupRow);
});
};
/**
* Get group row d3 HTML element. It will automatically
* add the element to the passed canvas.
* @param {d3Object} canvas d3 HTML element
* @param {Array} group group information.
* @param {d3Object}
*/
var getgroupRow_ = function(canvas, group) {
var groupRow, groupTitle, tooltip;
// Build group html objects.
groupRow = canvas.append("div")
.attr("class", "alm-group")
.attr("id", "group-" + group.id);
return groupRow;
};
/**
* Add source information to the passed group row element.
* @param {Object} source
* @param {integer} sourceTotalValue
* @param {Object} group
* @param {JQueryObject} $groupRow
* @return {JQueryObject}
*/
var addSource_ = function(source, label, results, sourceTotalValue, group, subgroup, $groupRow) {
var $row, $countLabel, $count,
total = sourceTotalValue;
$row = $groupRow
.append("div")
.attr("class", "alm-source")
.attr("id", "source-" + source.id + "-" + subgroup);
$countLabel = $row.append("div")
.attr("class", "alm-label " + group.id);
$count = $countLabel.append("p")
.attr("class", "alm-count")
.attr("id", "alm-count-" + source.id + "-" + group.id);
$count
.text(formatNumber_(total));
if (source.id === 'pkpTimedViews') {
$countLabel.append("p")
.text(label);
} else {
// link the source name
$countLabel.append("p").append("a")
.attr("href", baseUrl_ + "/sources/" + source.id)
.text(label);
}
// Only add a chart if the browser supports SVG
if (hasSVG_) {
var level = false;
// check what levels we can show
var showDaily = false;
var showMonthly = false;
var showYearly = false;
if (results.by_year) {
var level_data = getData_('year', results);
var yearTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numYears = d3.time.year.utc.range(pub_date, new Date()).length;
if (yearTotal >= minItems_.minEventsForYearly &&
numYears >= minItems_.minYearsForYearly) {
showYearly = true;
level = 'year';
}
}
if (results.by_month) {
var level_data = getData_('month', results);
var monthTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numMonths = d3.time.month.utc.range(pub_date, new Date()).length;
if (monthTotal >= minItems_.minEventsForMonthly &&
numMonths >= minItems_.minMonthsForMonthly) {
showMonthly = true;
level = 'month';
}
}
// The level and level_data should be set to the finest level
// of granularity that we can show
timeInterval = getTimeInterval_(level);
// check there is data for
if (showDaily || showMonthly || showYearly) {
$row
.attr('class', 'alm-source with-chart');
var $chartDiv = $row.append("div")
.attr("class", "alm-chart");
var viz = getViz_($chartDiv, source, group, subgroup, results);
loadData_(viz, level);
var update_controls = function(control) {
control.siblings('.alm-control').removeClass('active');
control.addClass('active');
};
var $levelControlsDiv = $chartDiv.append("div")
.attr("class", "alm-control-label")
.attr("style", "width: " + (viz.margin.left + viz.width) + "px;");
if (showDaily) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showDaily)
.classed("active", (level === 'day'))
.text("daily (first 30)")
.on("click", function() {
if (showDaily && !$(this).hasClass('active')) {
loadData_(viz, 'day');
update_controls($(this));
}
}
);
$levelControlsDiv.append("text").text(" | ");
}
if (showMonthly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showMonthly || !showYearly)
.classed("active", (level === 'month'))
.text("monthly")
.on("click", function() { if (showMonthly && !$(this).hasClass('active')) {
loadData_(viz, 'month');
update_controls($(this));
} });
if (showYearly) {
$levelControlsDiv.append("text")
.text(" | ");
}
}
if (showYearly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showYearly || !showMonthly)
.classed("active", (level === 'year'))
.text("yearly")
.on("click", function() {
if (showYearly && !$(this).hasClass('active')) {
loadData_(viz, 'year');
update_controls($(this));
}
}
);
}
}
}
return $row;
};
/**
* Extract the date from the source
* @param level (day|month|year)
* @param d the datum
* @return {Date}
*/
var getDate_ = function(level, d) {
switch (level) {
case 'year':
return new Date(d.year, 0, 1);
case 'month':
// js Date indexes months at 0
return new Date(d.year, d.month - 1, 1);
case 'day':
// js Date indexes months at 0
return new Date(d.year, d.month - 1, d.day);
}
};
/**
* Format the date for display
* @param level (day|month|year)
* @param d the datum
* @return {String}
*/
var getFormattedDate_ = function(level, d) {
switch (level) {
case 'year':
return d3.time.format("%Y")(getDate_(level, d));
case 'month':
return d3.time.format("%b %y")(getDate_(level, d));
case 'day':
return d3.time.format("%d %b %y")(getDate_(level, d));
}
};
/**
* Extract the data from the source.
* @param {string} level (day|month|year)
* @param {Object} source
* @return {Array} Metrics
*/
var getData_ = function(level, results) {
switch (level) {
case 'year':
return results.by_year;
case 'month':
return results.by_month;
}
};
/**
* Returns a d3 timeInterval for date operations.
* @param {string} level (day|month|year
* @return {Object} d3 time Interval
*/
var getTimeInterval_ = function(level) {
switch (level) {
case 'year':
return d3.time.year.utc;
case 'month':
return d3.time.month.utc;
case 'day':
return d3.time.day.utc;
}
};
/**
* The basic general set up of the graph itself
* @param {JQueryElement} chartDiv The div where the chart should go
* @param {Object} source
* @param {Array} group The group for 86 chart
* @return {Object}
*/
var getViz_ = function(chartDiv, source, group, subgroup, results) {
var viz = {};
// size parameters
viz.margin = {top: 10, right: 20, bottom: 5, left: 50};
viz.width = 760 - viz.margin.left - viz.margin.right;
viz.height = 115 - viz.margin.top - viz.margin.bottom;
// div where everything goes
viz.chartDiv = chartDiv;
// source data and which group
viz.group = group;
viz.subgroup = subgroup;
viz.source = source;
viz.results = results;
// just for record keeping
viz.name = source.id + '-' + group.id + '-' + viz.subgroup;
viz.x = d3.time.scale();
viz.x.range([0, viz.width]);
viz.y = d3.scale.linear();
viz.y.range([viz.height, 0]);
viz.z = d3.scale.ordinal();
viz.z.range([group.id, group.id + '-alt']);
// the chart
viz.svg = viz.chartDiv.append("svg")
.attr("width", viz.width + viz.margin.left + viz.margin.right)
.attr("height", viz.height + viz.margin.top + viz.margin.bottom + 1)
.append("g")
.attr("transform", "translate(" + viz.margin.left + "," + viz.margin.top + ")");
// draw the bars g first so it ends up underneath the axes
viz.bars = viz.svg.append("g");
// and the shadow bars on top for the tooltips
viz.barsForTooltips = viz.svg.append("g");
viz.svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + viz.height + ")");
|
return viz;
};
/**
* Takes in the basic set up of a graph and loads the data itself
* @param {Object} viz AlmViz object
* @param {string} level (day|month|year)
*/
var loadData_ = function(viz, level) {
var group = viz.group;
var subgroup = viz.subgroup;
var level_data = getData_(level, viz.results);
var timeInterval = getTimeInterval_(level);
var end_date = new Date();
// use only first 29 days if using day view
// close out the year otherwise
if (level === 'day') {
end_date = timeInterval.offset(pub_date, 29);
} else {
end_date = d3.time.year.utc.ceil(end_date);
}
//
// Domains for x and y
//
// a time x axis, between pub_date and end_date
viz.x.domain([timeInterval.floor(pub_date), end_date]);
// a linear axis from 0 to max value found
viz.y.domain([0, d3.max(level_data, function(d) { return d[subgroup]; })]);
//
// Axis
//
// a linear axis between publication date and current date
viz.xAxis = d3.svg.axis()
.scale(viz.x)
.tickSize(0)
.ticks(0);
// a linear y axis between 0 and max value found in data
viz.yAxis = d3.svg.axis()
.scale(viz.y)
.orient("left")
.tickSize(0)
.tickValues([d3.max(viz.y.domain())]) // only one tick at max
.tickFormat(d3.format(",d"));
//
// The chart itself
//
// TODO: these transitions could use a little work
// add more padding to wider bars
var rawWidth = (viz.width/(timeInterval.range(pub_date, end_date).length + 1));
var barWidth = Math.max(rawWidth - rawWidth/5, 1);
var barsForTooltips = viz.barsForTooltips.selectAll(".barsForTooltip")
.data(level_data, function(d) { return getDate_(level, d); });
barsForTooltips
.exit()
.remove();
var bars = viz.bars.selectAll(".bar")
.data(level_data, function(d) { return getDate_(level, d); });
bars
.enter().append("rect")
.attr("class", function(d) { return "bar " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); })
.attr("y", viz.height)
.attr("height", 0);
bars
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 2; })
.attr("width", barWidth);
bars.transition()
.duration(1000)
.attr("width", barWidth)
.attr("y", function(d) { return viz.y(d[subgroup]); })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]); });
bars
.exit().transition()
.attr("y", viz.height)
.attr("height", 0);
bars
.exit()
.remove();
viz.svg
.select(".x.axis")
.call(viz.xAxis);
viz.svg
.transition().duration(1000)
.select(".y.axis")
.call(viz.yAxis);
barsForTooltips
.enter().append("rect")
.attr("class", function(d) { return "barsForTooltip " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); });
barsForTooltips
.attr("width", barWidth + 2)
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 1; })
.attr("y", function(d) { return viz.y(d[subgroup]) - 1; })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]) + 1; });
// add in some tool tips
viz.barsForTooltips.selectAll("rect").each(
function(d){
$(this).tooltip('destroy'); // need to destroy so all bars get updated
$(this).tooltip({title: formatNumber_(d[subgroup]) + " in " + getFormattedDate_(level, d), container: "body"});
}
);
};
} | viz.svg.append("g")
.attr("class", "y axis"); | random_line_split |
results.js | /**
* ALMViz
* See https://github.com/lagotto/almviz for more details
* Distributed under the GNU GPL v2. For full terms see the file docs/COPYING.
*
* @brief Article level metrics visualization controller.
*/
/*global d3 */
var options = {
baseUrl: '',
minItemsToShowGraph: {
minEventsForYearly: 1,
minEventsForMonthly: 1,
minEventsForDaily: 1,
minYearsForYearly: 1,
minMonthsForMonthly: 1,
minDaysForDaily: 1
},
vizDiv: "#panel-results",
work: {},
sources: [],
groups: []
};
var params = d3.select("#api_key");
if (!params.empty()) {
var api_key = params.attr('data-api-key');
var work_id = params.attr('data-pid');
var query = encodeURI("/api/works/" + pathForWork(work_id) + "/results");
}
// asynchronously load data from the Lagotto API
queue()
.defer(d3.json, encodeURI("/api/works/" + pathForWork(work_id)))
.defer(d3.json, encodeURI("/api/sources"))
.defer(d3.json, encodeURI("/api/groups"))
.defer(d3.json, query)
.await(function(error, w, s, g, r) {
if (error) { return console.warn(error); }
options.work = w.work;
options.sources = s.sources;
options.groups = g.groups;
options.almStatsJson = r.results;
var almviz = new AlmViz(options);
almviz.initViz();
});
function AlmViz(options) | {
// allow jQuery object to be passed in
// in case a different version of jQuery is needed from the one globally defined
$ = options.jQuery || $;
// Init data
// remove group not needed for the following visualizations
var work_ = options.work;
var groups_ = options.groups.filter(function(d) { return d.id !== "other"; });
var sources_ = options.sources;
var data = options.almStatsJson;
// Abort if data are missing
if (!data || !data[0]) {
console.log('Error: missing data');
d3.select("#loading-results").remove();
d3.select("#content-results").text("")
.insert("div")
.attr("class", "alert alert-info")
.text("There are currently no results");
return;
}
// Init basic options
var baseUrl_ = options.baseUrl;
var minItems_ = options.minItemsToShowGraph;
var formatNumber_ = d3.format(",d");
// extract publication date
// Construct date object from date parts, using "1" for missing day and month
var timestamp = Date.parse(work_["issued"]);
if (work_["issued"] === "0000") { timestamp = Date.parse("2000"); }
var pub_date = new Date(timestamp);
var vizDiv;
// Get the Div where the viz should go (default to one with ID "alm')
if (options.vizDiv) {
vizDiv = d3.select(options.vizDiv);
} else {
vizDiv = d3.select("#alm");
}
// look to make sure browser support SVG
var hasSVG_ = document.implementation.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure", "1.1");
// to track if any metrics have been found
var metricsFound_;
/**
* Initialize the visualization.
* NB: needs to be accessible from the outside for initialization
*/
this.initViz = function() {
vizDiv.select("#loading-results").remove();
// loop through groups
groups_.forEach(function(group) {
addGroup_(vizDiv, group, sources_, data);
});
if (!metricsFound_) {
vizDiv.append("p")
.attr("class", "text-muted")
.text("No results found.");
}
};
/**
* Build each article level statistics group.
* @param {Object} canvas d3 element
* @param {Array} group Information about the group.
* @param {Object} data Statistics.
* @return {JQueryObject|boolean}
*/
var addGroup_ = function(canvas, group, sources_, data) {
var $groupRow = false;
// Loop through sources to add statistics data to the group.
sources_.forEach(function(source) {
if (source.group_id !== group.id) { return; }
results = data.filter(function(d) { return d.source_id === source.id; })[0];
if (typeof results === "undefined" || results.total === 0) { return; }
// Only add the group row the first time
if (!$groupRow) {
$groupRow = getgroupRow_(canvas, group);
}
// Flag that there is at least one metric
metricsFound_ = true;
var label = source.title;
addSource_(source, label, results, results.total, group, "total", $groupRow);
});
};
/**
* Get group row d3 HTML element. It will automatically
* add the element to the passed canvas.
* @param {d3Object} canvas d3 HTML element
* @param {Array} group group information.
* @param {d3Object}
*/
var getgroupRow_ = function(canvas, group) {
var groupRow, groupTitle, tooltip;
// Build group html objects.
groupRow = canvas.append("div")
.attr("class", "alm-group")
.attr("id", "group-" + group.id);
return groupRow;
};
/**
* Add source information to the passed group row element.
* @param {Object} source
* @param {integer} sourceTotalValue
* @param {Object} group
* @param {JQueryObject} $groupRow
* @return {JQueryObject}
*/
var addSource_ = function(source, label, results, sourceTotalValue, group, subgroup, $groupRow) {
var $row, $countLabel, $count,
total = sourceTotalValue;
$row = $groupRow
.append("div")
.attr("class", "alm-source")
.attr("id", "source-" + source.id + "-" + subgroup);
$countLabel = $row.append("div")
.attr("class", "alm-label " + group.id);
$count = $countLabel.append("p")
.attr("class", "alm-count")
.attr("id", "alm-count-" + source.id + "-" + group.id);
$count
.text(formatNumber_(total));
if (source.id === 'pkpTimedViews') {
$countLabel.append("p")
.text(label);
} else {
// link the source name
$countLabel.append("p").append("a")
.attr("href", baseUrl_ + "/sources/" + source.id)
.text(label);
}
// Only add a chart if the browser supports SVG
if (hasSVG_) {
var level = false;
// check what levels we can show
var showDaily = false;
var showMonthly = false;
var showYearly = false;
if (results.by_year) {
var level_data = getData_('year', results);
var yearTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numYears = d3.time.year.utc.range(pub_date, new Date()).length;
if (yearTotal >= minItems_.minEventsForYearly &&
numYears >= minItems_.minYearsForYearly) {
showYearly = true;
level = 'year';
}
}
if (results.by_month) {
var level_data = getData_('month', results);
var monthTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numMonths = d3.time.month.utc.range(pub_date, new Date()).length;
if (monthTotal >= minItems_.minEventsForMonthly &&
numMonths >= minItems_.minMonthsForMonthly) {
showMonthly = true;
level = 'month';
}
}
// The level and level_data should be set to the finest level
// of granularity that we can show
timeInterval = getTimeInterval_(level);
// check there is data for
if (showDaily || showMonthly || showYearly) {
$row
.attr('class', 'alm-source with-chart');
var $chartDiv = $row.append("div")
.attr("class", "alm-chart");
var viz = getViz_($chartDiv, source, group, subgroup, results);
loadData_(viz, level);
var update_controls = function(control) {
control.siblings('.alm-control').removeClass('active');
control.addClass('active');
};
var $levelControlsDiv = $chartDiv.append("div")
.attr("class", "alm-control-label")
.attr("style", "width: " + (viz.margin.left + viz.width) + "px;");
if (showDaily) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showDaily)
.classed("active", (level === 'day'))
.text("daily (first 30)")
.on("click", function() {
if (showDaily && !$(this).hasClass('active')) {
loadData_(viz, 'day');
update_controls($(this));
}
}
);
$levelControlsDiv.append("text").text(" | ");
}
if (showMonthly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showMonthly || !showYearly)
.classed("active", (level === 'month'))
.text("monthly")
.on("click", function() { if (showMonthly && !$(this).hasClass('active')) {
loadData_(viz, 'month');
update_controls($(this));
} });
if (showYearly) {
$levelControlsDiv.append("text")
.text(" | ");
}
}
if (showYearly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showYearly || !showMonthly)
.classed("active", (level === 'year'))
.text("yearly")
.on("click", function() {
if (showYearly && !$(this).hasClass('active')) {
loadData_(viz, 'year');
update_controls($(this));
}
}
);
}
}
}
return $row;
};
/**
* Extract the date from the source
* @param level (day|month|year)
* @param d the datum
* @return {Date}
*/
var getDate_ = function(level, d) {
switch (level) {
case 'year':
return new Date(d.year, 0, 1);
case 'month':
// js Date indexes months at 0
return new Date(d.year, d.month - 1, 1);
case 'day':
// js Date indexes months at 0
return new Date(d.year, d.month - 1, d.day);
}
};
/**
* Format the date for display
* @param level (day|month|year)
* @param d the datum
* @return {String}
*/
var getFormattedDate_ = function(level, d) {
switch (level) {
case 'year':
return d3.time.format("%Y")(getDate_(level, d));
case 'month':
return d3.time.format("%b %y")(getDate_(level, d));
case 'day':
return d3.time.format("%d %b %y")(getDate_(level, d));
}
};
/**
* Extract the data from the source.
* @param {string} level (day|month|year)
* @param {Object} source
* @return {Array} Metrics
*/
var getData_ = function(level, results) {
switch (level) {
case 'year':
return results.by_year;
case 'month':
return results.by_month;
}
};
/**
* Returns a d3 timeInterval for date operations.
* @param {string} level (day|month|year
* @return {Object} d3 time Interval
*/
var getTimeInterval_ = function(level) {
switch (level) {
case 'year':
return d3.time.year.utc;
case 'month':
return d3.time.month.utc;
case 'day':
return d3.time.day.utc;
}
};
/**
* The basic general set up of the graph itself
* @param {JQueryElement} chartDiv The div where the chart should go
* @param {Object} source
* @param {Array} group The group for 86 chart
* @return {Object}
*/
var getViz_ = function(chartDiv, source, group, subgroup, results) {
var viz = {};
// size parameters
viz.margin = {top: 10, right: 20, bottom: 5, left: 50};
viz.width = 760 - viz.margin.left - viz.margin.right;
viz.height = 115 - viz.margin.top - viz.margin.bottom;
// div where everything goes
viz.chartDiv = chartDiv;
// source data and which group
viz.group = group;
viz.subgroup = subgroup;
viz.source = source;
viz.results = results;
// just for record keeping
viz.name = source.id + '-' + group.id + '-' + viz.subgroup;
viz.x = d3.time.scale();
viz.x.range([0, viz.width]);
viz.y = d3.scale.linear();
viz.y.range([viz.height, 0]);
viz.z = d3.scale.ordinal();
viz.z.range([group.id, group.id + '-alt']);
// the chart
viz.svg = viz.chartDiv.append("svg")
.attr("width", viz.width + viz.margin.left + viz.margin.right)
.attr("height", viz.height + viz.margin.top + viz.margin.bottom + 1)
.append("g")
.attr("transform", "translate(" + viz.margin.left + "," + viz.margin.top + ")");
// draw the bars g first so it ends up underneath the axes
viz.bars = viz.svg.append("g");
// and the shadow bars on top for the tooltips
viz.barsForTooltips = viz.svg.append("g");
viz.svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + viz.height + ")");
viz.svg.append("g")
.attr("class", "y axis");
return viz;
};
/**
* Takes in the basic set up of a graph and loads the data itself
* @param {Object} viz AlmViz object
* @param {string} level (day|month|year)
*/
var loadData_ = function(viz, level) {
var group = viz.group;
var subgroup = viz.subgroup;
var level_data = getData_(level, viz.results);
var timeInterval = getTimeInterval_(level);
var end_date = new Date();
// use only first 29 days if using day view
// close out the year otherwise
if (level === 'day') {
end_date = timeInterval.offset(pub_date, 29);
} else {
end_date = d3.time.year.utc.ceil(end_date);
}
//
// Domains for x and y
//
// a time x axis, between pub_date and end_date
viz.x.domain([timeInterval.floor(pub_date), end_date]);
// a linear axis from 0 to max value found
viz.y.domain([0, d3.max(level_data, function(d) { return d[subgroup]; })]);
//
// Axis
//
// a linear axis between publication date and current date
viz.xAxis = d3.svg.axis()
.scale(viz.x)
.tickSize(0)
.ticks(0);
// a linear y axis between 0 and max value found in data
viz.yAxis = d3.svg.axis()
.scale(viz.y)
.orient("left")
.tickSize(0)
.tickValues([d3.max(viz.y.domain())]) // only one tick at max
.tickFormat(d3.format(",d"));
//
// The chart itself
//
// TODO: these transitions could use a little work
// add more padding to wider bars
var rawWidth = (viz.width/(timeInterval.range(pub_date, end_date).length + 1));
var barWidth = Math.max(rawWidth - rawWidth/5, 1);
var barsForTooltips = viz.barsForTooltips.selectAll(".barsForTooltip")
.data(level_data, function(d) { return getDate_(level, d); });
barsForTooltips
.exit()
.remove();
var bars = viz.bars.selectAll(".bar")
.data(level_data, function(d) { return getDate_(level, d); });
bars
.enter().append("rect")
.attr("class", function(d) { return "bar " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); })
.attr("y", viz.height)
.attr("height", 0);
bars
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 2; })
.attr("width", barWidth);
bars.transition()
.duration(1000)
.attr("width", barWidth)
.attr("y", function(d) { return viz.y(d[subgroup]); })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]); });
bars
.exit().transition()
.attr("y", viz.height)
.attr("height", 0);
bars
.exit()
.remove();
viz.svg
.select(".x.axis")
.call(viz.xAxis);
viz.svg
.transition().duration(1000)
.select(".y.axis")
.call(viz.yAxis);
barsForTooltips
.enter().append("rect")
.attr("class", function(d) { return "barsForTooltip " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); });
barsForTooltips
.attr("width", barWidth + 2)
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 1; })
.attr("y", function(d) { return viz.y(d[subgroup]) - 1; })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]) + 1; });
// add in some tool tips
viz.barsForTooltips.selectAll("rect").each(
function(d){
$(this).tooltip('destroy'); // need to destroy so all bars get updated
$(this).tooltip({title: formatNumber_(d[subgroup]) + " in " + getFormattedDate_(level, d), container: "body"});
}
);
};
} | identifier_body | |
results.js | /**
* ALMViz
* See https://github.com/lagotto/almviz for more details
* Distributed under the GNU GPL v2. For full terms see the file docs/COPYING.
*
* @brief Article level metrics visualization controller.
*/
/*global d3 */
var options = {
baseUrl: '',
minItemsToShowGraph: {
minEventsForYearly: 1,
minEventsForMonthly: 1,
minEventsForDaily: 1,
minYearsForYearly: 1,
minMonthsForMonthly: 1,
minDaysForDaily: 1
},
vizDiv: "#panel-results",
work: {},
sources: [],
groups: []
};
var params = d3.select("#api_key");
if (!params.empty()) {
var api_key = params.attr('data-api-key');
var work_id = params.attr('data-pid');
var query = encodeURI("/api/works/" + pathForWork(work_id) + "/results");
}
// asynchronously load data from the Lagotto API
queue()
.defer(d3.json, encodeURI("/api/works/" + pathForWork(work_id)))
.defer(d3.json, encodeURI("/api/sources"))
.defer(d3.json, encodeURI("/api/groups"))
.defer(d3.json, query)
.await(function(error, w, s, g, r) {
if (error) { return console.warn(error); }
options.work = w.work;
options.sources = s.sources;
options.groups = g.groups;
options.almStatsJson = r.results;
var almviz = new AlmViz(options);
almviz.initViz();
});
function AlmViz(options) {
// allow jQuery object to be passed in
// in case a different version of jQuery is needed from the one globally defined
$ = options.jQuery || $;
// Init data
// remove group not needed for the following visualizations
var work_ = options.work;
var groups_ = options.groups.filter(function(d) { return d.id !== "other"; });
var sources_ = options.sources;
var data = options.almStatsJson;
// Abort if data are missing
if (!data || !data[0]) {
console.log('Error: missing data');
d3.select("#loading-results").remove();
d3.select("#content-results").text("")
.insert("div")
.attr("class", "alert alert-info")
.text("There are currently no results");
return;
}
// Init basic options
var baseUrl_ = options.baseUrl;
var minItems_ = options.minItemsToShowGraph;
var formatNumber_ = d3.format(",d");
// extract publication date
// Construct date object from date parts, using "1" for missing day and month
var timestamp = Date.parse(work_["issued"]);
if (work_["issued"] === "0000") { timestamp = Date.parse("2000"); }
var pub_date = new Date(timestamp);
var vizDiv;
// Get the Div where the viz should go (default to one with ID "alm')
if (options.vizDiv) {
vizDiv = d3.select(options.vizDiv);
} else |
// look to make sure browser support SVG
var hasSVG_ = document.implementation.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure", "1.1");
// to track if any metrics have been found
var metricsFound_;
/**
* Initialize the visualization.
* NB: needs to be accessible from the outside for initialization
*/
this.initViz = function() {
vizDiv.select("#loading-results").remove();
// loop through groups
groups_.forEach(function(group) {
addGroup_(vizDiv, group, sources_, data);
});
if (!metricsFound_) {
vizDiv.append("p")
.attr("class", "text-muted")
.text("No results found.");
}
};
/**
* Build each article level statistics group.
* @param {Object} canvas d3 element
* @param {Array} group Information about the group.
* @param {Object} data Statistics.
* @return {JQueryObject|boolean}
*/
var addGroup_ = function(canvas, group, sources_, data) {
var $groupRow = false;
// Loop through sources to add statistics data to the group.
sources_.forEach(function(source) {
if (source.group_id !== group.id) { return; }
results = data.filter(function(d) { return d.source_id === source.id; })[0];
if (typeof results === "undefined" || results.total === 0) { return; }
// Only add the group row the first time
if (!$groupRow) {
$groupRow = getgroupRow_(canvas, group);
}
// Flag that there is at least one metric
metricsFound_ = true;
var label = source.title;
addSource_(source, label, results, results.total, group, "total", $groupRow);
});
};
/**
* Get group row d3 HTML element. It will automatically
* add the element to the passed canvas.
* @param {d3Object} canvas d3 HTML element
* @param {Array} group group information.
* @param {d3Object}
*/
var getgroupRow_ = function(canvas, group) {
var groupRow, groupTitle, tooltip;
// Build group html objects.
groupRow = canvas.append("div")
.attr("class", "alm-group")
.attr("id", "group-" + group.id);
return groupRow;
};
/**
* Add source information to the passed group row element.
* @param {Object} source
* @param {integer} sourceTotalValue
* @param {Object} group
* @param {JQueryObject} $groupRow
* @return {JQueryObject}
*/
var addSource_ = function(source, label, results, sourceTotalValue, group, subgroup, $groupRow) {
var $row, $countLabel, $count,
total = sourceTotalValue;
$row = $groupRow
.append("div")
.attr("class", "alm-source")
.attr("id", "source-" + source.id + "-" + subgroup);
$countLabel = $row.append("div")
.attr("class", "alm-label " + group.id);
$count = $countLabel.append("p")
.attr("class", "alm-count")
.attr("id", "alm-count-" + source.id + "-" + group.id);
$count
.text(formatNumber_(total));
if (source.id === 'pkpTimedViews') {
$countLabel.append("p")
.text(label);
} else {
// link the source name
$countLabel.append("p").append("a")
.attr("href", baseUrl_ + "/sources/" + source.id)
.text(label);
}
// Only add a chart if the browser supports SVG
if (hasSVG_) {
var level = false;
// check what levels we can show
var showDaily = false;
var showMonthly = false;
var showYearly = false;
if (results.by_year) {
var level_data = getData_('year', results);
var yearTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numYears = d3.time.year.utc.range(pub_date, new Date()).length;
if (yearTotal >= minItems_.minEventsForYearly &&
numYears >= minItems_.minYearsForYearly) {
showYearly = true;
level = 'year';
}
}
if (results.by_month) {
var level_data = getData_('month', results);
var monthTotal = level_data.reduce(function(i, d) { return i + d[subgroup]; }, 0);
var numMonths = d3.time.month.utc.range(pub_date, new Date()).length;
if (monthTotal >= minItems_.minEventsForMonthly &&
numMonths >= minItems_.minMonthsForMonthly) {
showMonthly = true;
level = 'month';
}
}
// The level and level_data should be set to the finest level
// of granularity that we can show
timeInterval = getTimeInterval_(level);
// check there is data for
if (showDaily || showMonthly || showYearly) {
$row
.attr('class', 'alm-source with-chart');
var $chartDiv = $row.append("div")
.attr("class", "alm-chart");
var viz = getViz_($chartDiv, source, group, subgroup, results);
loadData_(viz, level);
var update_controls = function(control) {
control.siblings('.alm-control').removeClass('active');
control.addClass('active');
};
var $levelControlsDiv = $chartDiv.append("div")
.attr("class", "alm-control-label")
.attr("style", "width: " + (viz.margin.left + viz.width) + "px;");
if (showDaily) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showDaily)
.classed("active", (level === 'day'))
.text("daily (first 30)")
.on("click", function() {
if (showDaily && !$(this).hasClass('active')) {
loadData_(viz, 'day');
update_controls($(this));
}
}
);
$levelControlsDiv.append("text").text(" | ");
}
if (showMonthly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showMonthly || !showYearly)
.classed("active", (level === 'month'))
.text("monthly")
.on("click", function() { if (showMonthly && !$(this).hasClass('active')) {
loadData_(viz, 'month');
update_controls($(this));
} });
if (showYearly) {
$levelControlsDiv.append("text")
.text(" | ");
}
}
if (showYearly) {
$levelControlsDiv.append("a")
.attr("href", "javascript:void(0)")
.classed("alm-control", true)
.classed("disabled", !showYearly || !showMonthly)
.classed("active", (level === 'year'))
.text("yearly")
.on("click", function() {
if (showYearly && !$(this).hasClass('active')) {
loadData_(viz, 'year');
update_controls($(this));
}
}
);
}
}
}
return $row;
};
/**
* Extract the date from the source
* @param level (day|month|year)
* @param d the datum
* @return {Date}
*/
var getDate_ = function(level, d) {
switch (level) {
case 'year':
return new Date(d.year, 0, 1);
case 'month':
// js Date indexes months at 0
return new Date(d.year, d.month - 1, 1);
case 'day':
// js Date indexes months at 0
return new Date(d.year, d.month - 1, d.day);
}
};
/**
* Format the date for display
* @param level (day|month|year)
* @param d the datum
* @return {String}
*/
var getFormattedDate_ = function(level, d) {
switch (level) {
case 'year':
return d3.time.format("%Y")(getDate_(level, d));
case 'month':
return d3.time.format("%b %y")(getDate_(level, d));
case 'day':
return d3.time.format("%d %b %y")(getDate_(level, d));
}
};
/**
* Extract the data from the source.
* @param {string} level (day|month|year)
* @param {Object} source
* @return {Array} Metrics
*/
var getData_ = function(level, results) {
switch (level) {
case 'year':
return results.by_year;
case 'month':
return results.by_month;
}
};
/**
* Returns a d3 timeInterval for date operations.
* @param {string} level (day|month|year
* @return {Object} d3 time Interval
*/
var getTimeInterval_ = function(level) {
switch (level) {
case 'year':
return d3.time.year.utc;
case 'month':
return d3.time.month.utc;
case 'day':
return d3.time.day.utc;
}
};
/**
* The basic general set up of the graph itself
* @param {JQueryElement} chartDiv The div where the chart should go
* @param {Object} source
* @param {Array} group The group for 86 chart
* @return {Object}
*/
var getViz_ = function(chartDiv, source, group, subgroup, results) {
var viz = {};
// size parameters
viz.margin = {top: 10, right: 20, bottom: 5, left: 50};
viz.width = 760 - viz.margin.left - viz.margin.right;
viz.height = 115 - viz.margin.top - viz.margin.bottom;
// div where everything goes
viz.chartDiv = chartDiv;
// source data and which group
viz.group = group;
viz.subgroup = subgroup;
viz.source = source;
viz.results = results;
// just for record keeping
viz.name = source.id + '-' + group.id + '-' + viz.subgroup;
viz.x = d3.time.scale();
viz.x.range([0, viz.width]);
viz.y = d3.scale.linear();
viz.y.range([viz.height, 0]);
viz.z = d3.scale.ordinal();
viz.z.range([group.id, group.id + '-alt']);
// the chart
viz.svg = viz.chartDiv.append("svg")
.attr("width", viz.width + viz.margin.left + viz.margin.right)
.attr("height", viz.height + viz.margin.top + viz.margin.bottom + 1)
.append("g")
.attr("transform", "translate(" + viz.margin.left + "," + viz.margin.top + ")");
// draw the bars g first so it ends up underneath the axes
viz.bars = viz.svg.append("g");
// and the shadow bars on top for the tooltips
viz.barsForTooltips = viz.svg.append("g");
viz.svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + viz.height + ")");
viz.svg.append("g")
.attr("class", "y axis");
return viz;
};
/**
* Takes in the basic set up of a graph and loads the data itself
* @param {Object} viz AlmViz object
* @param {string} level (day|month|year)
*/
var loadData_ = function(viz, level) {
var group = viz.group;
var subgroup = viz.subgroup;
var level_data = getData_(level, viz.results);
var timeInterval = getTimeInterval_(level);
var end_date = new Date();
// use only first 29 days if using day view
// close out the year otherwise
if (level === 'day') {
end_date = timeInterval.offset(pub_date, 29);
} else {
end_date = d3.time.year.utc.ceil(end_date);
}
//
// Domains for x and y
//
// a time x axis, between pub_date and end_date
viz.x.domain([timeInterval.floor(pub_date), end_date]);
// a linear axis from 0 to max value found
viz.y.domain([0, d3.max(level_data, function(d) { return d[subgroup]; })]);
//
// Axis
//
// a linear axis between publication date and current date
viz.xAxis = d3.svg.axis()
.scale(viz.x)
.tickSize(0)
.ticks(0);
// a linear y axis between 0 and max value found in data
viz.yAxis = d3.svg.axis()
.scale(viz.y)
.orient("left")
.tickSize(0)
.tickValues([d3.max(viz.y.domain())]) // only one tick at max
.tickFormat(d3.format(",d"));
//
// The chart itself
//
// TODO: these transitions could use a little work
// add more padding to wider bars
var rawWidth = (viz.width/(timeInterval.range(pub_date, end_date).length + 1));
var barWidth = Math.max(rawWidth - rawWidth/5, 1);
var barsForTooltips = viz.barsForTooltips.selectAll(".barsForTooltip")
.data(level_data, function(d) { return getDate_(level, d); });
barsForTooltips
.exit()
.remove();
var bars = viz.bars.selectAll(".bar")
.data(level_data, function(d) { return getDate_(level, d); });
bars
.enter().append("rect")
.attr("class", function(d) { return "bar " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); })
.attr("y", viz.height)
.attr("height", 0);
bars
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 2; })
.attr("width", barWidth);
bars.transition()
.duration(1000)
.attr("width", barWidth)
.attr("y", function(d) { return viz.y(d[subgroup]); })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]); });
bars
.exit().transition()
.attr("y", viz.height)
.attr("height", 0);
bars
.exit()
.remove();
viz.svg
.select(".x.axis")
.call(viz.xAxis);
viz.svg
.transition().duration(1000)
.select(".y.axis")
.call(viz.yAxis);
barsForTooltips
.enter().append("rect")
.attr("class", function(d) { return "barsForTooltip " + viz.z((level === 'day' ? d3.time.weekOfYear(getDate_(level, d)) : d.year)); });
barsForTooltips
.attr("width", barWidth + 2)
.attr("x", function(d) { return viz.x(getDate_(level, d)) + 1; })
.attr("y", function(d) { return viz.y(d[subgroup]) - 1; })
.attr("height", function(d) { return viz.height - viz.y(d[subgroup]) + 1; });
// add in some tool tips
viz.barsForTooltips.selectAll("rect").each(
function(d){
$(this).tooltip('destroy'); // need to destroy so all bars get updated
$(this).tooltip({title: formatNumber_(d[subgroup]) + " in " + getFormattedDate_(level, d), container: "body"});
}
);
};
}
| {
vizDiv = d3.select("#alm");
} | conditional_block |
provider.go | package azurerm
import (
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"reflect"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
riviera "github.com/jen20/riviera/azure"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
var p *schema.Provider
p = &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
},
"client_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
},
"client_secret": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
},
"tenant_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
},
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true, | DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
},
},
DataSourcesMap: map[string]*schema.Resource{
"azurerm_client_config": dataSourceArmClientConfig(),
},
ResourcesMap: map[string]*schema.Resource{
// These resources use the Azure ARM SDK
"azurerm_availability_set": resourceArmAvailabilitySet(),
"azurerm_cdn_endpoint": resourceArmCdnEndpoint(),
"azurerm_cdn_profile": resourceArmCdnProfile(),
"azurerm_container_registry": resourceArmContainerRegistry(),
"azurerm_container_service": resourceArmContainerService(),
"azurerm_eventhub": resourceArmEventHub(),
"azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(),
"azurerm_eventhub_consumer_group": resourceArmEventHubConsumerGroup(),
"azurerm_eventhub_namespace": resourceArmEventHubNamespace(),
"azurerm_lb": resourceArmLoadBalancer(),
"azurerm_lb_backend_address_pool": resourceArmLoadBalancerBackendAddressPool(),
"azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(),
"azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(),
"azurerm_lb_probe": resourceArmLoadBalancerProbe(),
"azurerm_lb_rule": resourceArmLoadBalancerRule(),
"azurerm_managed_disk": resourceArmManagedDisk(),
"azurerm_key_vault": resourceArmKeyVault(),
"azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
"azurerm_network_interface": resourceArmNetworkInterface(),
"azurerm_network_security_group": resourceArmNetworkSecurityGroup(),
"azurerm_network_security_rule": resourceArmNetworkSecurityRule(),
"azurerm_public_ip": resourceArmPublicIp(),
"azurerm_redis_cache": resourceArmRedisCache(),
"azurerm_route": resourceArmRoute(),
"azurerm_route_table": resourceArmRouteTable(),
"azurerm_servicebus_namespace": resourceArmServiceBusNamespace(),
"azurerm_servicebus_subscription": resourceArmServiceBusSubscription(),
"azurerm_servicebus_topic": resourceArmServiceBusTopic(),
"azurerm_storage_account": resourceArmStorageAccount(),
"azurerm_storage_blob": resourceArmStorageBlob(),
"azurerm_storage_container": resourceArmStorageContainer(),
"azurerm_storage_share": resourceArmStorageShare(),
"azurerm_storage_queue": resourceArmStorageQueue(),
"azurerm_storage_table": resourceArmStorageTable(),
"azurerm_subnet": resourceArmSubnet(),
"azurerm_template_deployment": resourceArmTemplateDeployment(),
"azurerm_traffic_manager_endpoint": resourceArmTrafficManagerEndpoint(),
"azurerm_traffic_manager_profile": resourceArmTrafficManagerProfile(),
"azurerm_virtual_machine_extension": resourceArmVirtualMachineExtensions(),
"azurerm_virtual_machine": resourceArmVirtualMachine(),
"azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(),
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_virtual_network_peering": resourceArmVirtualNetworkPeering(),
// These resources use the Riviera SDK
"azurerm_dns_a_record": resourceArmDnsARecord(),
"azurerm_dns_aaaa_record": resourceArmDnsAAAARecord(),
"azurerm_dns_cname_record": resourceArmDnsCNameRecord(),
"azurerm_dns_mx_record": resourceArmDnsMxRecord(),
"azurerm_dns_ns_record": resourceArmDnsNsRecord(),
"azurerm_dns_srv_record": resourceArmDnsSrvRecord(),
"azurerm_dns_txt_record": resourceArmDnsTxtRecord(),
"azurerm_dns_zone": resourceArmDnsZone(),
"azurerm_resource_group": resourceArmResourceGroup(),
"azurerm_search_service": resourceArmSearchService(),
"azurerm_sql_database": resourceArmSqlDatabase(),
"azurerm_sql_firewall_rule": resourceArmSqlFirewallRule(),
"azurerm_sql_server": resourceArmSqlServer(),
},
}
p.ConfigureFunc = providerConfigure(p)
return p
}
// Config is the configuration structure used to instantiate a
// new Azure management client.
type Config struct {
ManagementURL string
SubscriptionID string
ClientID string
ClientSecret string
TenantID string
Environment string
SkipProviderRegistration bool
validateCredentialsOnce sync.Once
}
func (c *Config) validate() error {
var err *multierror.Error
if c.SubscriptionID == "" {
err = multierror.Append(err, fmt.Errorf("Subscription ID must be configured for the AzureRM provider"))
}
if c.ClientID == "" {
err = multierror.Append(err, fmt.Errorf("Client ID must be configured for the AzureRM provider"))
}
if c.ClientSecret == "" {
err = multierror.Append(err, fmt.Errorf("Client Secret must be configured for the AzureRM provider"))
}
if c.TenantID == "" {
err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider"))
}
if c.Environment == "" {
err = multierror.Append(err, fmt.Errorf("Environment must be configured for the AzureRM provider"))
}
return err.ErrorOrNil()
}
func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
return func(d *schema.ResourceData) (interface{}, error) {
config := &Config{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
Environment: d.Get("environment").(string),
SkipProviderRegistration: d.Get("skip_provider_registration").(bool),
}
if err := config.validate(); err != nil {
return nil, err
}
client, err := config.getArmClient()
if err != nil {
return nil, err
}
client.StopContext = p.StopContext()
// replaces the context between tests
p.MetaReset = func() error {
client.StopContext = p.StopContext()
return nil
}
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.providers.List(nil, "")
if err != nil {
return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
if !config.SkipProviderRegistration {
err = registerAzureResourceProvidersWithSubscription(*providerList.Value, client.providers)
if err != nil {
return nil, err
}
}
return client, nil
}
}
func registerProviderWithSubscription(providerName string, client resources.ProvidersClient) error {
_, err := client.Register(providerName)
if err != nil {
return fmt.Errorf("Cannot register provider %s with Azure Resource Manager: %s.", providerName, err)
}
return nil
}
var providerRegistrationOnce sync.Once
// registerAzureResourceProvidersWithSubscription uses the providers client to register
// all Azure resource providers which the Terraform provider may require (regardless of
// whether they are actually used by the configuration or not). It was confirmed by Microsoft
// that this is the approach their own internal tools also take.
func registerAzureResourceProvidersWithSubscription(providerList []resources.Provider, client resources.ProvidersClient) error {
var err error
providerRegistrationOnce.Do(func() {
providers := map[string]struct{}{
"Microsoft.Compute": struct{}{},
"Microsoft.Cache": struct{}{},
"Microsoft.ContainerRegistry": struct{}{},
"Microsoft.ContainerService": struct{}{},
"Microsoft.Network": struct{}{},
"Microsoft.Cdn": struct{}{},
"Microsoft.Storage": struct{}{},
"Microsoft.Sql": struct{}{},
"Microsoft.Search": struct{}{},
"Microsoft.Resources": struct{}{},
"Microsoft.ServiceBus": struct{}{},
"Microsoft.KeyVault": struct{}{},
"Microsoft.EventHub": struct{}{},
}
// filter out any providers already registered
for _, p := range providerList {
if _, ok := providers[*p.Namespace]; !ok {
continue
}
if strings.ToLower(*p.RegistrationState) == "registered" {
log.Printf("[DEBUG] Skipping provider registration for namespace %s\n", *p.Namespace)
delete(providers, *p.Namespace)
}
}
var wg sync.WaitGroup
wg.Add(len(providers))
for providerName := range providers {
go func(p string) {
defer wg.Done()
log.Printf("[DEBUG] Registering provider with namespace %s\n", p)
if innerErr := registerProviderWithSubscription(p, client); err != nil {
err = innerErr
}
}(providerName)
}
wg.Wait()
})
return err
}
// armMutexKV is the instance of MutexKV for ARM resources
var armMutexKV = mutexkv.NewMutexKV()
func azureStateRefreshFunc(resourceURI string, client *ArmClient, command riviera.APICall) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
req := client.rivieraClient.NewRequestForURI(resourceURI)
req.Command = command
res, err := req.Execute()
if err != nil {
return nil, "", fmt.Errorf("Error executing %T command in azureStateRefreshFunc", req.Command)
}
var value reflect.Value
if reflect.ValueOf(res.Parsed).Kind() == reflect.Ptr {
value = reflect.ValueOf(res.Parsed).Elem()
} else {
value = reflect.ValueOf(res.Parsed)
}
for i := 0; i < value.NumField(); i++ { // iterates through every struct type field
tag := value.Type().Field(i).Tag // returns the tag string
tagValue := tag.Get("mapstructure")
if tagValue == "provisioningState" {
return res.Parsed, value.Field(i).Elem().String(), nil
}
}
panic(fmt.Errorf("azureStateRefreshFunc called on structure %T with no mapstructure:provisioningState tag. This is a bug", res.Parsed))
}
}
// Resource group names can be capitalised, but we store them in lowercase.
// Use a custom diff function to avoid creation of new resources.
func resourceAzurermResourceGroupNameDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseDiffSuppressFunc is a DiffSuppressFunc from helper/schema that is
// used to ignore any case-changes in a return value.
func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseStateFunc is a StateFunc from helper/schema that converts the
// supplied value to lower before saving to state for consistency.
func ignoreCaseStateFunc(val interface{}) string {
return strings.ToLower(val.(string))
}
func userDataStateFunc(v interface{}) string {
switch s := v.(type) {
case string:
s = base64Encode(s)
hash := sha1.Sum([]byte(s))
return hex.EncodeToString(hash[:])
default:
return ""
}
}
// Base64Encode encodes data if the input isn't already encoded using
// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
// return the original input unchanged.
func base64Encode(data string) string {
// Check whether the data is already Base64 encoded; don't double-encode
if isBase64Encoded(data) {
return data
}
// data has not been encoded encode and return
return base64.StdEncoding.EncodeToString([]byte(data))
}
func isBase64Encoded(data string) bool {
_, err := base64.StdEncoding.DecodeString(data)
return err == nil
} | random_line_split | |
provider.go | package azurerm
import (
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"reflect"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
riviera "github.com/jen20/riviera/azure"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
var p *schema.Provider
p = &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
},
"client_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
},
"client_secret": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
},
"tenant_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
},
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
},
},
DataSourcesMap: map[string]*schema.Resource{
"azurerm_client_config": dataSourceArmClientConfig(),
},
ResourcesMap: map[string]*schema.Resource{
// These resources use the Azure ARM SDK
"azurerm_availability_set": resourceArmAvailabilitySet(),
"azurerm_cdn_endpoint": resourceArmCdnEndpoint(),
"azurerm_cdn_profile": resourceArmCdnProfile(),
"azurerm_container_registry": resourceArmContainerRegistry(),
"azurerm_container_service": resourceArmContainerService(),
"azurerm_eventhub": resourceArmEventHub(),
"azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(),
"azurerm_eventhub_consumer_group": resourceArmEventHubConsumerGroup(),
"azurerm_eventhub_namespace": resourceArmEventHubNamespace(),
"azurerm_lb": resourceArmLoadBalancer(),
"azurerm_lb_backend_address_pool": resourceArmLoadBalancerBackendAddressPool(),
"azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(),
"azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(),
"azurerm_lb_probe": resourceArmLoadBalancerProbe(),
"azurerm_lb_rule": resourceArmLoadBalancerRule(),
"azurerm_managed_disk": resourceArmManagedDisk(),
"azurerm_key_vault": resourceArmKeyVault(),
"azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
"azurerm_network_interface": resourceArmNetworkInterface(),
"azurerm_network_security_group": resourceArmNetworkSecurityGroup(),
"azurerm_network_security_rule": resourceArmNetworkSecurityRule(),
"azurerm_public_ip": resourceArmPublicIp(),
"azurerm_redis_cache": resourceArmRedisCache(),
"azurerm_route": resourceArmRoute(),
"azurerm_route_table": resourceArmRouteTable(),
"azurerm_servicebus_namespace": resourceArmServiceBusNamespace(),
"azurerm_servicebus_subscription": resourceArmServiceBusSubscription(),
"azurerm_servicebus_topic": resourceArmServiceBusTopic(),
"azurerm_storage_account": resourceArmStorageAccount(),
"azurerm_storage_blob": resourceArmStorageBlob(),
"azurerm_storage_container": resourceArmStorageContainer(),
"azurerm_storage_share": resourceArmStorageShare(),
"azurerm_storage_queue": resourceArmStorageQueue(),
"azurerm_storage_table": resourceArmStorageTable(),
"azurerm_subnet": resourceArmSubnet(),
"azurerm_template_deployment": resourceArmTemplateDeployment(),
"azurerm_traffic_manager_endpoint": resourceArmTrafficManagerEndpoint(),
"azurerm_traffic_manager_profile": resourceArmTrafficManagerProfile(),
"azurerm_virtual_machine_extension": resourceArmVirtualMachineExtensions(),
"azurerm_virtual_machine": resourceArmVirtualMachine(),
"azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(),
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_virtual_network_peering": resourceArmVirtualNetworkPeering(),
// These resources use the Riviera SDK
"azurerm_dns_a_record": resourceArmDnsARecord(),
"azurerm_dns_aaaa_record": resourceArmDnsAAAARecord(),
"azurerm_dns_cname_record": resourceArmDnsCNameRecord(),
"azurerm_dns_mx_record": resourceArmDnsMxRecord(),
"azurerm_dns_ns_record": resourceArmDnsNsRecord(),
"azurerm_dns_srv_record": resourceArmDnsSrvRecord(),
"azurerm_dns_txt_record": resourceArmDnsTxtRecord(),
"azurerm_dns_zone": resourceArmDnsZone(),
"azurerm_resource_group": resourceArmResourceGroup(),
"azurerm_search_service": resourceArmSearchService(),
"azurerm_sql_database": resourceArmSqlDatabase(),
"azurerm_sql_firewall_rule": resourceArmSqlFirewallRule(),
"azurerm_sql_server": resourceArmSqlServer(),
},
}
p.ConfigureFunc = providerConfigure(p)
return p
}
// Config is the configuration structure used to instantiate a
// new Azure management client.
type Config struct {
ManagementURL string
SubscriptionID string
ClientID string
ClientSecret string
TenantID string
Environment string
SkipProviderRegistration bool
validateCredentialsOnce sync.Once
}
func (c *Config) validate() error {
var err *multierror.Error
if c.SubscriptionID == "" {
err = multierror.Append(err, fmt.Errorf("Subscription ID must be configured for the AzureRM provider"))
}
if c.ClientID == "" {
err = multierror.Append(err, fmt.Errorf("Client ID must be configured for the AzureRM provider"))
}
if c.ClientSecret == "" {
err = multierror.Append(err, fmt.Errorf("Client Secret must be configured for the AzureRM provider"))
}
if c.TenantID == "" {
err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider"))
}
if c.Environment == "" {
err = multierror.Append(err, fmt.Errorf("Environment must be configured for the AzureRM provider"))
}
return err.ErrorOrNil()
}
func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
return func(d *schema.ResourceData) (interface{}, error) {
config := &Config{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
Environment: d.Get("environment").(string),
SkipProviderRegistration: d.Get("skip_provider_registration").(bool),
}
if err := config.validate(); err != nil {
return nil, err
}
client, err := config.getArmClient()
if err != nil {
return nil, err
}
client.StopContext = p.StopContext()
// replaces the context between tests
p.MetaReset = func() error {
client.StopContext = p.StopContext()
return nil
}
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.providers.List(nil, "")
if err != nil {
return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
if !config.SkipProviderRegistration {
err = registerAzureResourceProvidersWithSubscription(*providerList.Value, client.providers)
if err != nil {
return nil, err
}
}
return client, nil
}
}
func registerProviderWithSubscription(providerName string, client resources.ProvidersClient) error {
_, err := client.Register(providerName)
if err != nil {
return fmt.Errorf("Cannot register provider %s with Azure Resource Manager: %s.", providerName, err)
}
return nil
}
var providerRegistrationOnce sync.Once
// registerAzureResourceProvidersWithSubscription uses the providers client to register
// all Azure resource providers which the Terraform provider may require (regardless of
// whether they are actually used by the configuration or not). It was confirmed by Microsoft
// that this is the approach their own internal tools also take.
func registerAzureResourceProvidersWithSubscription(providerList []resources.Provider, client resources.ProvidersClient) error {
var err error
providerRegistrationOnce.Do(func() {
providers := map[string]struct{}{
"Microsoft.Compute": struct{}{},
"Microsoft.Cache": struct{}{},
"Microsoft.ContainerRegistry": struct{}{},
"Microsoft.ContainerService": struct{}{},
"Microsoft.Network": struct{}{},
"Microsoft.Cdn": struct{}{},
"Microsoft.Storage": struct{}{},
"Microsoft.Sql": struct{}{},
"Microsoft.Search": struct{}{},
"Microsoft.Resources": struct{}{},
"Microsoft.ServiceBus": struct{}{},
"Microsoft.KeyVault": struct{}{},
"Microsoft.EventHub": struct{}{},
}
// filter out any providers already registered
for _, p := range providerList {
if _, ok := providers[*p.Namespace]; !ok {
continue
}
if strings.ToLower(*p.RegistrationState) == "registered" {
log.Printf("[DEBUG] Skipping provider registration for namespace %s\n", *p.Namespace)
delete(providers, *p.Namespace)
}
}
var wg sync.WaitGroup
wg.Add(len(providers))
for providerName := range providers {
go func(p string) {
defer wg.Done()
log.Printf("[DEBUG] Registering provider with namespace %s\n", p)
if innerErr := registerProviderWithSubscription(p, client); err != nil {
err = innerErr
}
}(providerName)
}
wg.Wait()
})
return err
}
// armMutexKV is the instance of MutexKV for ARM resources
var armMutexKV = mutexkv.NewMutexKV()
func azureStateRefreshFunc(resourceURI string, client *ArmClient, command riviera.APICall) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
req := client.rivieraClient.NewRequestForURI(resourceURI)
req.Command = command
res, err := req.Execute()
if err != nil {
return nil, "", fmt.Errorf("Error executing %T command in azureStateRefreshFunc", req.Command)
}
var value reflect.Value
if reflect.ValueOf(res.Parsed).Kind() == reflect.Ptr {
value = reflect.ValueOf(res.Parsed).Elem()
} else {
value = reflect.ValueOf(res.Parsed)
}
for i := 0; i < value.NumField(); i++ { // iterates through every struct type field
tag := value.Type().Field(i).Tag // returns the tag string
tagValue := tag.Get("mapstructure")
if tagValue == "provisioningState" {
return res.Parsed, value.Field(i).Elem().String(), nil
}
}
panic(fmt.Errorf("azureStateRefreshFunc called on structure %T with no mapstructure:provisioningState tag. This is a bug", res.Parsed))
}
}
// Resource group names can be capitalised, but we store them in lowercase.
// Use a custom diff function to avoid creation of new resources.
func resourceAzurermResourceGroupNameDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseDiffSuppressFunc is a DiffSuppressFunc from helper/schema that is
// used to ignore any case-changes in a return value.
func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseStateFunc is a StateFunc from helper/schema that converts the
// supplied value to lower before saving to state for consistency.
func ignoreCaseStateFunc(val interface{}) string {
return strings.ToLower(val.(string))
}
func userDataStateFunc(v interface{}) string |
// Base64Encode encodes data if the input isn't already encoded using
// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
// return the original input unchanged.
func base64Encode(data string) string {
// Check whether the data is already Base64 encoded; don't double-encode
if isBase64Encoded(data) {
return data
}
// data has not been encoded encode and return
return base64.StdEncoding.EncodeToString([]byte(data))
}
func isBase64Encoded(data string) bool {
_, err := base64.StdEncoding.DecodeString(data)
return err == nil
}
| {
switch s := v.(type) {
case string:
s = base64Encode(s)
hash := sha1.Sum([]byte(s))
return hex.EncodeToString(hash[:])
default:
return ""
}
} | identifier_body |
provider.go | package azurerm
import (
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"reflect"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
riviera "github.com/jen20/riviera/azure"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
var p *schema.Provider
p = &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
},
"client_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
},
"client_secret": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
},
"tenant_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
},
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
},
},
DataSourcesMap: map[string]*schema.Resource{
"azurerm_client_config": dataSourceArmClientConfig(),
},
ResourcesMap: map[string]*schema.Resource{
// These resources use the Azure ARM SDK
"azurerm_availability_set": resourceArmAvailabilitySet(),
"azurerm_cdn_endpoint": resourceArmCdnEndpoint(),
"azurerm_cdn_profile": resourceArmCdnProfile(),
"azurerm_container_registry": resourceArmContainerRegistry(),
"azurerm_container_service": resourceArmContainerService(),
"azurerm_eventhub": resourceArmEventHub(),
"azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(),
"azurerm_eventhub_consumer_group": resourceArmEventHubConsumerGroup(),
"azurerm_eventhub_namespace": resourceArmEventHubNamespace(),
"azurerm_lb": resourceArmLoadBalancer(),
"azurerm_lb_backend_address_pool": resourceArmLoadBalancerBackendAddressPool(),
"azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(),
"azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(),
"azurerm_lb_probe": resourceArmLoadBalancerProbe(),
"azurerm_lb_rule": resourceArmLoadBalancerRule(),
"azurerm_managed_disk": resourceArmManagedDisk(),
"azurerm_key_vault": resourceArmKeyVault(),
"azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
"azurerm_network_interface": resourceArmNetworkInterface(),
"azurerm_network_security_group": resourceArmNetworkSecurityGroup(),
"azurerm_network_security_rule": resourceArmNetworkSecurityRule(),
"azurerm_public_ip": resourceArmPublicIp(),
"azurerm_redis_cache": resourceArmRedisCache(),
"azurerm_route": resourceArmRoute(),
"azurerm_route_table": resourceArmRouteTable(),
"azurerm_servicebus_namespace": resourceArmServiceBusNamespace(),
"azurerm_servicebus_subscription": resourceArmServiceBusSubscription(),
"azurerm_servicebus_topic": resourceArmServiceBusTopic(),
"azurerm_storage_account": resourceArmStorageAccount(),
"azurerm_storage_blob": resourceArmStorageBlob(),
"azurerm_storage_container": resourceArmStorageContainer(),
"azurerm_storage_share": resourceArmStorageShare(),
"azurerm_storage_queue": resourceArmStorageQueue(),
"azurerm_storage_table": resourceArmStorageTable(),
"azurerm_subnet": resourceArmSubnet(),
"azurerm_template_deployment": resourceArmTemplateDeployment(),
"azurerm_traffic_manager_endpoint": resourceArmTrafficManagerEndpoint(),
"azurerm_traffic_manager_profile": resourceArmTrafficManagerProfile(),
"azurerm_virtual_machine_extension": resourceArmVirtualMachineExtensions(),
"azurerm_virtual_machine": resourceArmVirtualMachine(),
"azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(),
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_virtual_network_peering": resourceArmVirtualNetworkPeering(),
// These resources use the Riviera SDK
"azurerm_dns_a_record": resourceArmDnsARecord(),
"azurerm_dns_aaaa_record": resourceArmDnsAAAARecord(),
"azurerm_dns_cname_record": resourceArmDnsCNameRecord(),
"azurerm_dns_mx_record": resourceArmDnsMxRecord(),
"azurerm_dns_ns_record": resourceArmDnsNsRecord(),
"azurerm_dns_srv_record": resourceArmDnsSrvRecord(),
"azurerm_dns_txt_record": resourceArmDnsTxtRecord(),
"azurerm_dns_zone": resourceArmDnsZone(),
"azurerm_resource_group": resourceArmResourceGroup(),
"azurerm_search_service": resourceArmSearchService(),
"azurerm_sql_database": resourceArmSqlDatabase(),
"azurerm_sql_firewall_rule": resourceArmSqlFirewallRule(),
"azurerm_sql_server": resourceArmSqlServer(),
},
}
p.ConfigureFunc = providerConfigure(p)
return p
}
// Config is the configuration structure used to instantiate a
// new Azure management client.
type Config struct {
ManagementURL string
SubscriptionID string
ClientID string
ClientSecret string
TenantID string
Environment string
SkipProviderRegistration bool
validateCredentialsOnce sync.Once
}
func (c *Config) validate() error {
var err *multierror.Error
if c.SubscriptionID == "" {
err = multierror.Append(err, fmt.Errorf("Subscription ID must be configured for the AzureRM provider"))
}
if c.ClientID == "" {
err = multierror.Append(err, fmt.Errorf("Client ID must be configured for the AzureRM provider"))
}
if c.ClientSecret == "" {
err = multierror.Append(err, fmt.Errorf("Client Secret must be configured for the AzureRM provider"))
}
if c.TenantID == "" {
err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider"))
}
if c.Environment == "" {
err = multierror.Append(err, fmt.Errorf("Environment must be configured for the AzureRM provider"))
}
return err.ErrorOrNil()
}
func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
return func(d *schema.ResourceData) (interface{}, error) {
config := &Config{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
Environment: d.Get("environment").(string),
SkipProviderRegistration: d.Get("skip_provider_registration").(bool),
}
if err := config.validate(); err != nil {
return nil, err
}
client, err := config.getArmClient()
if err != nil {
return nil, err
}
client.StopContext = p.StopContext()
// replaces the context between tests
p.MetaReset = func() error {
client.StopContext = p.StopContext()
return nil
}
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.providers.List(nil, "")
if err != nil {
return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
if !config.SkipProviderRegistration {
err = registerAzureResourceProvidersWithSubscription(*providerList.Value, client.providers)
if err != nil {
return nil, err
}
}
return client, nil
}
}
func registerProviderWithSubscription(providerName string, client resources.ProvidersClient) error {
_, err := client.Register(providerName)
if err != nil {
return fmt.Errorf("Cannot register provider %s with Azure Resource Manager: %s.", providerName, err)
}
return nil
}
var providerRegistrationOnce sync.Once
// registerAzureResourceProvidersWithSubscription uses the providers client to register
// all Azure resource providers which the Terraform provider may require (regardless of
// whether they are actually used by the configuration or not). It was confirmed by Microsoft
// that this is the approach their own internal tools also take.
func | (providerList []resources.Provider, client resources.ProvidersClient) error {
var err error
providerRegistrationOnce.Do(func() {
providers := map[string]struct{}{
"Microsoft.Compute": struct{}{},
"Microsoft.Cache": struct{}{},
"Microsoft.ContainerRegistry": struct{}{},
"Microsoft.ContainerService": struct{}{},
"Microsoft.Network": struct{}{},
"Microsoft.Cdn": struct{}{},
"Microsoft.Storage": struct{}{},
"Microsoft.Sql": struct{}{},
"Microsoft.Search": struct{}{},
"Microsoft.Resources": struct{}{},
"Microsoft.ServiceBus": struct{}{},
"Microsoft.KeyVault": struct{}{},
"Microsoft.EventHub": struct{}{},
}
// filter out any providers already registered
for _, p := range providerList {
if _, ok := providers[*p.Namespace]; !ok {
continue
}
if strings.ToLower(*p.RegistrationState) == "registered" {
log.Printf("[DEBUG] Skipping provider registration for namespace %s\n", *p.Namespace)
delete(providers, *p.Namespace)
}
}
var wg sync.WaitGroup
wg.Add(len(providers))
for providerName := range providers {
go func(p string) {
defer wg.Done()
log.Printf("[DEBUG] Registering provider with namespace %s\n", p)
if innerErr := registerProviderWithSubscription(p, client); err != nil {
err = innerErr
}
}(providerName)
}
wg.Wait()
})
return err
}
// armMutexKV is the instance of MutexKV for ARM resources
var armMutexKV = mutexkv.NewMutexKV()
func azureStateRefreshFunc(resourceURI string, client *ArmClient, command riviera.APICall) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
req := client.rivieraClient.NewRequestForURI(resourceURI)
req.Command = command
res, err := req.Execute()
if err != nil {
return nil, "", fmt.Errorf("Error executing %T command in azureStateRefreshFunc", req.Command)
}
var value reflect.Value
if reflect.ValueOf(res.Parsed).Kind() == reflect.Ptr {
value = reflect.ValueOf(res.Parsed).Elem()
} else {
value = reflect.ValueOf(res.Parsed)
}
for i := 0; i < value.NumField(); i++ { // iterates through every struct type field
tag := value.Type().Field(i).Tag // returns the tag string
tagValue := tag.Get("mapstructure")
if tagValue == "provisioningState" {
return res.Parsed, value.Field(i).Elem().String(), nil
}
}
panic(fmt.Errorf("azureStateRefreshFunc called on structure %T with no mapstructure:provisioningState tag. This is a bug", res.Parsed))
}
}
// Resource group names can be capitalised, but we store them in lowercase.
// Use a custom diff function to avoid creation of new resources.
func resourceAzurermResourceGroupNameDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseDiffSuppressFunc is a DiffSuppressFunc from helper/schema that is
// used to ignore any case-changes in a return value.
func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseStateFunc is a StateFunc from helper/schema that converts the
// supplied value to lower before saving to state for consistency.
func ignoreCaseStateFunc(val interface{}) string {
return strings.ToLower(val.(string))
}
func userDataStateFunc(v interface{}) string {
switch s := v.(type) {
case string:
s = base64Encode(s)
hash := sha1.Sum([]byte(s))
return hex.EncodeToString(hash[:])
default:
return ""
}
}
// Base64Encode encodes data if the input isn't already encoded using
// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
// return the original input unchanged.
func base64Encode(data string) string {
// Check whether the data is already Base64 encoded; don't double-encode
if isBase64Encoded(data) {
return data
}
// data has not been encoded encode and return
return base64.StdEncoding.EncodeToString([]byte(data))
}
func isBase64Encoded(data string) bool {
_, err := base64.StdEncoding.DecodeString(data)
return err == nil
}
| registerAzureResourceProvidersWithSubscription | identifier_name |
provider.go | package azurerm
import (
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"reflect"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
riviera "github.com/jen20/riviera/azure"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
var p *schema.Provider
p = &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
},
"client_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
},
"client_secret": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
},
"tenant_id": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
},
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
},
},
DataSourcesMap: map[string]*schema.Resource{
"azurerm_client_config": dataSourceArmClientConfig(),
},
ResourcesMap: map[string]*schema.Resource{
// These resources use the Azure ARM SDK
"azurerm_availability_set": resourceArmAvailabilitySet(),
"azurerm_cdn_endpoint": resourceArmCdnEndpoint(),
"azurerm_cdn_profile": resourceArmCdnProfile(),
"azurerm_container_registry": resourceArmContainerRegistry(),
"azurerm_container_service": resourceArmContainerService(),
"azurerm_eventhub": resourceArmEventHub(),
"azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(),
"azurerm_eventhub_consumer_group": resourceArmEventHubConsumerGroup(),
"azurerm_eventhub_namespace": resourceArmEventHubNamespace(),
"azurerm_lb": resourceArmLoadBalancer(),
"azurerm_lb_backend_address_pool": resourceArmLoadBalancerBackendAddressPool(),
"azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(),
"azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(),
"azurerm_lb_probe": resourceArmLoadBalancerProbe(),
"azurerm_lb_rule": resourceArmLoadBalancerRule(),
"azurerm_managed_disk": resourceArmManagedDisk(),
"azurerm_key_vault": resourceArmKeyVault(),
"azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
"azurerm_network_interface": resourceArmNetworkInterface(),
"azurerm_network_security_group": resourceArmNetworkSecurityGroup(),
"azurerm_network_security_rule": resourceArmNetworkSecurityRule(),
"azurerm_public_ip": resourceArmPublicIp(),
"azurerm_redis_cache": resourceArmRedisCache(),
"azurerm_route": resourceArmRoute(),
"azurerm_route_table": resourceArmRouteTable(),
"azurerm_servicebus_namespace": resourceArmServiceBusNamespace(),
"azurerm_servicebus_subscription": resourceArmServiceBusSubscription(),
"azurerm_servicebus_topic": resourceArmServiceBusTopic(),
"azurerm_storage_account": resourceArmStorageAccount(),
"azurerm_storage_blob": resourceArmStorageBlob(),
"azurerm_storage_container": resourceArmStorageContainer(),
"azurerm_storage_share": resourceArmStorageShare(),
"azurerm_storage_queue": resourceArmStorageQueue(),
"azurerm_storage_table": resourceArmStorageTable(),
"azurerm_subnet": resourceArmSubnet(),
"azurerm_template_deployment": resourceArmTemplateDeployment(),
"azurerm_traffic_manager_endpoint": resourceArmTrafficManagerEndpoint(),
"azurerm_traffic_manager_profile": resourceArmTrafficManagerProfile(),
"azurerm_virtual_machine_extension": resourceArmVirtualMachineExtensions(),
"azurerm_virtual_machine": resourceArmVirtualMachine(),
"azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(),
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_virtual_network_peering": resourceArmVirtualNetworkPeering(),
// These resources use the Riviera SDK
"azurerm_dns_a_record": resourceArmDnsARecord(),
"azurerm_dns_aaaa_record": resourceArmDnsAAAARecord(),
"azurerm_dns_cname_record": resourceArmDnsCNameRecord(),
"azurerm_dns_mx_record": resourceArmDnsMxRecord(),
"azurerm_dns_ns_record": resourceArmDnsNsRecord(),
"azurerm_dns_srv_record": resourceArmDnsSrvRecord(),
"azurerm_dns_txt_record": resourceArmDnsTxtRecord(),
"azurerm_dns_zone": resourceArmDnsZone(),
"azurerm_resource_group": resourceArmResourceGroup(),
"azurerm_search_service": resourceArmSearchService(),
"azurerm_sql_database": resourceArmSqlDatabase(),
"azurerm_sql_firewall_rule": resourceArmSqlFirewallRule(),
"azurerm_sql_server": resourceArmSqlServer(),
},
}
p.ConfigureFunc = providerConfigure(p)
return p
}
// Config is the configuration structure used to instantiate a
// new Azure management client.
type Config struct {
ManagementURL string
SubscriptionID string
ClientID string
ClientSecret string
TenantID string
Environment string
SkipProviderRegistration bool
validateCredentialsOnce sync.Once
}
func (c *Config) validate() error {
var err *multierror.Error
if c.SubscriptionID == "" {
err = multierror.Append(err, fmt.Errorf("Subscription ID must be configured for the AzureRM provider"))
}
if c.ClientID == "" {
err = multierror.Append(err, fmt.Errorf("Client ID must be configured for the AzureRM provider"))
}
if c.ClientSecret == "" {
err = multierror.Append(err, fmt.Errorf("Client Secret must be configured for the AzureRM provider"))
}
if c.TenantID == "" {
err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider"))
}
if c.Environment == "" {
err = multierror.Append(err, fmt.Errorf("Environment must be configured for the AzureRM provider"))
}
return err.ErrorOrNil()
}
func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
return func(d *schema.ResourceData) (interface{}, error) {
config := &Config{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
Environment: d.Get("environment").(string),
SkipProviderRegistration: d.Get("skip_provider_registration").(bool),
}
if err := config.validate(); err != nil {
return nil, err
}
client, err := config.getArmClient()
if err != nil |
client.StopContext = p.StopContext()
// replaces the context between tests
p.MetaReset = func() error {
client.StopContext = p.StopContext()
return nil
}
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.providers.List(nil, "")
if err != nil {
return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
if !config.SkipProviderRegistration {
err = registerAzureResourceProvidersWithSubscription(*providerList.Value, client.providers)
if err != nil {
return nil, err
}
}
return client, nil
}
}
func registerProviderWithSubscription(providerName string, client resources.ProvidersClient) error {
_, err := client.Register(providerName)
if err != nil {
return fmt.Errorf("Cannot register provider %s with Azure Resource Manager: %s.", providerName, err)
}
return nil
}
var providerRegistrationOnce sync.Once
// registerAzureResourceProvidersWithSubscription uses the providers client to register
// all Azure resource providers which the Terraform provider may require (regardless of
// whether they are actually used by the configuration or not). It was confirmed by Microsoft
// that this is the approach their own internal tools also take.
func registerAzureResourceProvidersWithSubscription(providerList []resources.Provider, client resources.ProvidersClient) error {
var err error
providerRegistrationOnce.Do(func() {
providers := map[string]struct{}{
"Microsoft.Compute": struct{}{},
"Microsoft.Cache": struct{}{},
"Microsoft.ContainerRegistry": struct{}{},
"Microsoft.ContainerService": struct{}{},
"Microsoft.Network": struct{}{},
"Microsoft.Cdn": struct{}{},
"Microsoft.Storage": struct{}{},
"Microsoft.Sql": struct{}{},
"Microsoft.Search": struct{}{},
"Microsoft.Resources": struct{}{},
"Microsoft.ServiceBus": struct{}{},
"Microsoft.KeyVault": struct{}{},
"Microsoft.EventHub": struct{}{},
}
// filter out any providers already registered
for _, p := range providerList {
if _, ok := providers[*p.Namespace]; !ok {
continue
}
if strings.ToLower(*p.RegistrationState) == "registered" {
log.Printf("[DEBUG] Skipping provider registration for namespace %s\n", *p.Namespace)
delete(providers, *p.Namespace)
}
}
var wg sync.WaitGroup
wg.Add(len(providers))
for providerName := range providers {
go func(p string) {
defer wg.Done()
log.Printf("[DEBUG] Registering provider with namespace %s\n", p)
if innerErr := registerProviderWithSubscription(p, client); err != nil {
err = innerErr
}
}(providerName)
}
wg.Wait()
})
return err
}
// armMutexKV is the instance of MutexKV for ARM resources
var armMutexKV = mutexkv.NewMutexKV()
func azureStateRefreshFunc(resourceURI string, client *ArmClient, command riviera.APICall) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
req := client.rivieraClient.NewRequestForURI(resourceURI)
req.Command = command
res, err := req.Execute()
if err != nil {
return nil, "", fmt.Errorf("Error executing %T command in azureStateRefreshFunc", req.Command)
}
var value reflect.Value
if reflect.ValueOf(res.Parsed).Kind() == reflect.Ptr {
value = reflect.ValueOf(res.Parsed).Elem()
} else {
value = reflect.ValueOf(res.Parsed)
}
for i := 0; i < value.NumField(); i++ { // iterates through every struct type field
tag := value.Type().Field(i).Tag // returns the tag string
tagValue := tag.Get("mapstructure")
if tagValue == "provisioningState" {
return res.Parsed, value.Field(i).Elem().String(), nil
}
}
panic(fmt.Errorf("azureStateRefreshFunc called on structure %T with no mapstructure:provisioningState tag. This is a bug", res.Parsed))
}
}
// Resource group names can be capitalised, but we store them in lowercase.
// Use a custom diff function to avoid creation of new resources.
func resourceAzurermResourceGroupNameDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseDiffSuppressFunc is a DiffSuppressFunc from helper/schema that is
// used to ignore any case-changes in a return value.
func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool {
return strings.ToLower(old) == strings.ToLower(new)
}
// ignoreCaseStateFunc is a StateFunc from helper/schema that converts the
// supplied value to lower before saving to state for consistency.
func ignoreCaseStateFunc(val interface{}) string {
return strings.ToLower(val.(string))
}
func userDataStateFunc(v interface{}) string {
switch s := v.(type) {
case string:
s = base64Encode(s)
hash := sha1.Sum([]byte(s))
return hex.EncodeToString(hash[:])
default:
return ""
}
}
// Base64Encode encodes data if the input isn't already encoded using
// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
// return the original input unchanged.
func base64Encode(data string) string {
// Check whether the data is already Base64 encoded; don't double-encode
if isBase64Encoded(data) {
return data
}
// data has not been encoded encode and return
return base64.StdEncoding.EncodeToString([]byte(data))
}
func isBase64Encoded(data string) bool {
_, err := base64.StdEncoding.DecodeString(data)
return err == nil
}
| {
return nil, err
} | conditional_block |
saver.go | // Package saver contains all logic for writing records to files.
// 1. Sets up a channel that accepts slices of *netlink.ArchivalRecord
// 2. Maintains a map of Connections, one for each connection.
// 3. Uses several marshallers goroutines to serialize data and and write to
// zstd files.
// 4. Rotates Connection output files every 10 minutes for long lasting connections.
// 5. uses a cache to detect meaningful state changes, and avoid excessive
// writes.
package saver
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"sync"
"sync/atomic"
"time"
"github.com/m-lab/go/anonymize"
"github.com/m-lab/tcp-info/cache"
"github.com/m-lab/tcp-info/eventsocket"
"github.com/m-lab/tcp-info/inetdiag"
"github.com/m-lab/tcp-info/metrics"
"github.com/m-lab/tcp-info/netlink"
"github.com/m-lab/tcp-info/tcp"
"github.com/m-lab/tcp-info/zstd"
"github.com/m-lab/uuid"
)
// This is the maximum switch/network if speed in bits/sec. It is used to check for illogical bit rate observations.
const maxSwitchSpeed = 1e10
// We will send an entire batch of prefiltered ArchivalRecords through a channel from
// the collection loop to the top level saver. The saver will detect new connections
// and significant diffs, maintain the connection cache, determine
// how frequently to save deltas for each connection.
//
// The saver will use a small set of Marshallers to convert to protos,
// marshal the protos, and write them to files.
// Errors generated by saver functions.
var (
ErrNoMarshallers = errors.New("Saver has zero Marshallers")
)
// Task represents a single marshalling task, specifying the message and the writer.
type Task struct {
// nil message means close the writer.
Message *netlink.ArchivalRecord
Writer io.WriteCloser
}
// CacheLogger is any object with a LogCacheStats method.
type CacheLogger interface {
LogCacheStats(localCount, errCount int)
}
// MarshalChan is a channel of marshalling tasks.
type MarshalChan chan<- Task
func runMarshaller(taskChan <-chan Task, wg *sync.WaitGroup, anon anonymize.IPAnonymizer) {
for task := range taskChan {
if task.Message == nil {
task.Writer.Close()
continue
}
if task.Writer == nil {
log.Fatal("Nil writer")
}
err := task.Message.RawIDM.Anonymize(anon)
if err != nil {
log.Println("Failed to anonymize message:", err)
continue
}
b, _ := json.Marshal(task.Message) // FIXME: don't ignore error
task.Writer.Write(b)
task.Writer.Write([]byte("\n"))
}
log.Println("Marshaller Done")
wg.Done()
}
func newMarshaller(wg *sync.WaitGroup, anon anonymize.IPAnonymizer) MarshalChan {
marshChan := make(chan Task, 100)
wg.Add(1)
go runMarshaller(marshChan, wg, anon)
return marshChan
}
// Connection objects handle all output associated with a single connection.
type Connection struct {
Inode uint32 // TODO - also use the UID???
ID inetdiag.SockID
UID uint32
Slice string // 4 hex, indicating which machine segment this is on.
StartTime time.Time // Time the connection was initiated.
Sequence int // Typically zero, but increments for long running connections.
Expiration time.Time // Time we will swap files and increment Sequence.
Writer io.WriteCloser
}
func newConnection(info *inetdiag.InetDiagMsg, timestamp time.Time) *Connection {
conn := Connection{Inode: info.IDiagInode, ID: info.ID.GetSockID(), UID: info.IDiagUID, Slice: "", StartTime: timestamp, Sequence: 0,
Expiration: time.Now()}
return &conn
}
// Rotate opens the next writer for a connection.
// Note that long running connections will have data in multiple directories,
// because, for all segments after the first one, we choose the directory
// based on the time Rotate() was called, and not on the StartTime of the
// connection. Long-running connections with data on multiple days will
// therefore likely have data in multiple date directories.
// (This behavior is new as of April 2020. Prior to then, all files were
// placed in the directory corresponding to the StartTime.)
func (conn *Connection) Rotate(Host string, Pod string, FileAgeLimit time.Duration) error {
datePath := conn.StartTime.Format("2006/01/02")
// For first block, date directory is based on the connection start time.
// For all other blocks, (sequence > 0) it is based on the current time.
if conn.Sequence > 0 {
now := time.Now().UTC()
datePath = now.Format("2006/01/02")
}
err := os.MkdirAll(datePath, 0777)
if err != nil {
return err
}
id := uuid.FromCookie(conn.ID.CookieUint64())
conn.Writer, err = zstd.NewWriter(fmt.Sprintf("%s/%s.%05d.jsonl.zst", datePath, id, conn.Sequence))
if err != nil {
return err
}
conn.writeHeader()
metrics.NewFileCount.Inc()
conn.Expiration = conn.Expiration.Add(10 * time.Minute)
conn.Sequence++
return nil
}
func (conn *Connection) writeHeader() {
msg := netlink.ArchivalRecord{
Metadata: &netlink.Metadata{
UUID: uuid.FromCookie(conn.ID.CookieUint64()),
Sequence: conn.Sequence,
StartTime: conn.StartTime,
},
}
// FIXME: Error handling
bytes, _ := json.Marshal(msg)
conn.Writer.Write(bytes)
conn.Writer.Write([]byte("\n"))
}
type stats struct {
TotalCount int64
NewCount int64
DiffCount int64
ExpiredCount int64
}
func (s *stats) IncTotalCount() {
atomic.AddInt64(&s.TotalCount, 1)
}
func (s *stats) IncNewCount() {
atomic.AddInt64(&s.NewCount, 1)
}
func (s *stats) IncDiffCount() {
atomic.AddInt64(&s.DiffCount, 1)
}
func (s *stats) IncExpiredCount() {
atomic.AddInt64(&s.ExpiredCount, 1)
}
func (s *stats) Copy() stats {
result := stats{}
result.TotalCount = atomic.LoadInt64(&s.TotalCount)
result.NewCount = atomic.LoadInt64(&s.NewCount)
result.DiffCount = atomic.LoadInt64(&s.DiffCount)
result.ExpiredCount = atomic.LoadInt64(&s.ExpiredCount)
return result
}
// TcpStats is used to save the connection stats as connection is closing.
type TcpStats struct {
Sent uint64 // BytesSent
Received uint64 // BytesReceived
}
// Saver provides functionality for saving tcpinfo diffs to connection files.
// It handles arbitrary connections, and only writes to file when the
// significant fields change. (TODO - what does "significant fields" mean). | MarshalChans []MarshalChan
Done *sync.WaitGroup // All marshallers will call Done on this.
Connections map[uint64]*Connection
ClosingStats map[uint64]TcpStats // BytesReceived and BytesSent for connections that are closing.
ClosingTotals TcpStats
cache *cache.Cache
stats stats
eventServer eventsocket.Server
exclude *netlink.ExcludeConfig
}
// NewSaver creates a new Saver for the given host and pod. numMarshaller controls
// how many marshalling goroutines are used to distribute the marshalling workload.
func NewSaver(host string, pod string, numMarshaller int, srv eventsocket.Server, anon anonymize.IPAnonymizer, ex *netlink.ExcludeConfig) *Saver {
m := make([]MarshalChan, 0, numMarshaller)
c := cache.NewCache()
// We start with capacity of 500. This will be reallocated as needed, but this
// is not a performance concern.
conn := make(map[uint64]*Connection, 500)
wg := &sync.WaitGroup{}
wg.Add(1)
ageLim := 10 * time.Minute
for i := 0; i < numMarshaller; i++ {
m = append(m, newMarshaller(wg, anon))
}
return &Saver{
Host: host,
Pod: pod,
FileAgeLimit: ageLim,
MarshalChans: m,
Done: wg,
Connections: conn,
ClosingStats: make(map[uint64]TcpStats, 100),
cache: c,
eventServer: srv,
exclude: ex,
}
}
// queue queues a single ArchivalRecord to the appropriate marshalling queue, based on the
// connection Cookie.
func (svr *Saver) queue(msg *netlink.ArchivalRecord) error {
idm, err := msg.RawIDM.Parse()
if err != nil {
log.Println(err)
// TODO error metric
}
cookie := idm.ID.Cookie()
if cookie == 0 {
return errors.New("Cookie = 0")
}
if len(svr.MarshalChans) < 1 {
return ErrNoMarshallers
}
q := svr.MarshalChans[int(cookie%uint64(len(svr.MarshalChans)))]
conn, ok := svr.Connections[cookie]
if !ok {
// Create a new connection for first time cookies. For late connections already
// terminating, log some info for debugging purposes.
if idm.IDiagState >= uint8(tcp.FIN_WAIT1) {
s, r := msg.GetStats()
log.Println("Starting:", msg.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), TcpStats{s, r})
}
conn = newConnection(idm, msg.Timestamp)
svr.eventServer.FlowCreated(msg.Timestamp, uuid.FromCookie(cookie), idm.ID.GetSockID())
svr.Connections[cookie] = conn
} else {
//log.Println("Diff inode:", inode)
}
if time.Now().After(conn.Expiration) && conn.Writer != nil {
q <- Task{nil, conn.Writer} // Close the previous file.
conn.Writer = nil
}
if conn.Writer == nil {
err := conn.Rotate(svr.Host, svr.Pod, svr.FileAgeLimit)
if err != nil {
return err
}
}
q <- Task{msg, conn.Writer}
return nil
}
func (svr *Saver) endConn(cookie uint64) {
svr.eventServer.FlowDeleted(time.Now(), uuid.FromCookie(cookie))
q := svr.MarshalChans[cookie%uint64(len(svr.MarshalChans))]
conn, ok := svr.Connections[cookie]
if ok && conn.Writer != nil {
q <- Task{nil, conn.Writer}
delete(svr.Connections, cookie)
}
}
// Handle a bundle of messages.
// Returns the bytes sent and received on all non-local connections.
func (svr *Saver) handleType(t time.Time, msgs []*netlink.NetlinkMessage) (uint64, uint64) {
var liveSent, liveReceived uint64
for _, msg := range msgs {
// In swap and queue, we want to track the total speed of all connections
// every second.
if msg == nil {
log.Println("Nil message")
continue
}
ar, err := netlink.MakeArchivalRecord(msg, svr.exclude)
if ar == nil {
if err != nil {
log.Println(err)
}
continue
}
ar.Timestamp = t
// Note: If GetStats shows up in profiling, might want to move to once/second code.
s, r := ar.GetStats()
liveSent += s
liveReceived += r
svr.swapAndQueue(ar)
}
return liveSent, liveReceived
}
// MessageSaverLoop runs a loop to receive batches of ArchivalRecords. Local connections
// are filtered out upstream. For each batch it feeds records through the cache,
// finalizes connections that disappeared this cycle, and (once per second)
// reports aggregate throughput metrics. Runs until readerChannel is closed,
// then shuts the Saver down.
func (svr *Saver) MessageSaverLoop(readerChannel <-chan netlink.MessageBlock) {
	log.Println("Starting Saver")
	// reported: byte totals already sent to prometheus.
	// closed: cumulative totals from connections that have fully closed.
	var reported, closed TcpStats
	lastReportTime := time.Time{}.Unix()
	// Cap on per-connection "Closed:" log lines over the process lifetime.
	closeLogCount := 10000
	for msgs := range readerChannel {
		// Handle v4 and v6 messages, and return the total bytes sent and received.
		// TODO - we only need to collect these stats if this is a reporting cycle.
		// NOTE: Prior to April 2020, we were not using UTC here. The servers
		// are configured to use UTC time, so this should not make any difference.
		s4, r4 := svr.handleType(msgs.V4Time.UTC(), msgs.V4Messages)
		s6, r6 := svr.handleType(msgs.V6Time.UTC(), msgs.V6Messages)
		// Note that the connections that have closed may have had traffic that
		// we never see, and therefore can't account for in metrics.
		residual := svr.cache.EndCycle()
		// Remove all missing connections from the cache.
		// Also keep a metric of the total cumulative send and receive bytes.
		for cookie := range residual {
			ar := residual[cookie]
			var stats TcpStats
			var ok bool
			if !ar.HasDiagInfo() {
				// The final record carried no DiagInfo, so fall back to the
				// stats captured by swapAndQueue when the connection entered
				// its closing state.
				stats, ok = svr.ClosingStats[cookie]
				if ok {
					// Remove the stats from closing.
					svr.ClosingTotals.Sent -= stats.Sent
					svr.ClosingTotals.Received -= stats.Received
					delete(svr.ClosingStats, cookie)
				} else {
					log.Println("Missing stats for", cookie)
				}
			} else {
				stats.Sent, stats.Received = ar.GetStats()
			}
			closed.Sent += stats.Sent
			closed.Received += stats.Received
			if closeLogCount > 0 {
				idm, err := ar.RawIDM.Parse()
				if err != nil {
					log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, "idm parse error", stats)
				} else {
					log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), stats)
				}
				closeLogCount--
			}
			svr.endConn(cookie)
			svr.stats.IncExpiredCount()
		}
		// Every second, update the total throughput for the past second.
		if msgs.V4Time.Unix() > lastReportTime {
			// This is the total bytes since program start.
			totalSent := closed.Sent + svr.ClosingTotals.Sent + s4 + s6
			totalReceived := closed.Received + svr.ClosingTotals.Received + r4 + r6
			// NOTE: We are seeing occasions when total < reported. This messes up prometheus, so
			// we detect that and skip reporting.
			// This seems to be persistent, not just a momentary glitch. The total may drop by 500KB,
			// and only recover after many seconds of gradual increases (on idle workstation).
			// This workaround seems to also cure the 2<<67 reports.
			// We also check for increments larger than 10x the maxSwitchSpeed.
			// TODO: This can all be discarded when we are confident the bug has been fixed.
			if totalSent > 10*maxSwitchSpeed/8+reported.Sent || totalSent < reported.Sent {
				// Some bug in the accounting!!
				log.Println("Skipping BytesSent report due to bad accounting", totalSent, reported.Sent, closed.Sent, svr.ClosingTotals.Sent, s4, s6)
				if totalSent < reported.Sent {
					metrics.ErrorCount.WithLabelValues("totalSent < reportedSent").Inc()
				} else {
					metrics.ErrorCount.WithLabelValues("totalSent-reportedSent exceeds network capacity").Inc()
				}
			} else {
				// Factor of 8 converts bytes to bits (maxSwitchSpeed is bits/sec).
				metrics.SendRateHistogram.Observe(8 * float64(totalSent-reported.Sent))
				reported.Sent = totalSent // the total bytes reported to prometheus.
			}
			if totalReceived > 10*maxSwitchSpeed/8+reported.Received || totalReceived < reported.Received {
				// Some bug in the accounting!!
				log.Println("Skipping BytesReceived report due to bad accounting", totalReceived, reported.Received, closed.Received, svr.ClosingTotals.Received, r4, r6)
				if totalReceived < reported.Received {
					metrics.ErrorCount.WithLabelValues("totalReceived < reportedReceived").Inc()
				} else {
					metrics.ErrorCount.WithLabelValues("totalReceived-reportedReceived exceeds network capacity").Inc()
				}
			} else {
				metrics.ReceiveRateHistogram.Observe(8 * float64(totalReceived-reported.Received))
				reported.Received = totalReceived // the total bytes reported to prometheus.
			}
			lastReportTime = msgs.V4Time.Unix()
		}
	}
	svr.Close()
}
// swapAndQueue updates the cache with pm and queues the record for writing
// when it is either a brand new connection or differs significantly from the
// cached snapshot. For connections entering a closing state, it captures the
// last known send/receive stats for later accounting.
func (svr *Saver) swapAndQueue(pm *netlink.ArchivalRecord) {
	svr.stats.IncTotalCount() // TODO fix race
	// Update returns the previously cached record; nil means a new connection.
	old, err := svr.cache.Update(pm)
	if err != nil {
		// TODO metric
		log.Println(err)
		return
	}
	if old == nil {
		// First sighting of this connection; always persist it.
		svr.stats.IncNewCount()
		metrics.SnapshotCount.Inc()
		err := svr.queue(pm)
		if err != nil {
			log.Println(err, "Connections", len(svr.Connections))
		}
	} else {
		pmIDM, err := pm.RawIDM.Parse()
		if err != nil {
			// TODO metric
			log.Println(err)
			return
		}
		if !pm.HasDiagInfo() {
			// If the previous record has DiagInfo, store the send/receive stats.
			// We will use them when we close the connection.
			if old.HasDiagInfo() {
				sOld, rOld := old.GetStats()
				svr.ClosingStats[pmIDM.ID.Cookie()] = TcpStats{Sent: sOld, Received: rOld}
				svr.ClosingTotals.Sent += sOld
				svr.ClosingTotals.Received += rOld
				log.Println("Closing:", pm.Timestamp.Format("15:04:05.000"), pmIDM.ID.Cookie(), tcp.State(pmIDM.IDiagState), TcpStats{sOld, rOld})
			}
		}
		change, err := pm.Compare(old)
		if err != nil {
			// TODO metric
			log.Println(err)
			return
		}
		if change > netlink.NoMajorChange {
			// Only persist snapshots with a meaningful diff from the last one.
			svr.stats.IncDiffCount()
			metrics.SnapshotCount.Inc()
			err := svr.queue(pm)
			if err != nil {
				// TODO metric
				log.Println(err)
			}
		}
	}
}
// Close shuts down all the marshallers, and waits for all files to be closed.
func (svr *Saver) Close() {
	log.Println("Terminating Saver")
	log.Println("Total of", len(svr.Connections), "connections active.")
	// Finalize every tracked connection so their files are queued for closing.
	for cookie := range svr.Connections {
		svr.endConn(cookie)
	}
	log.Println("Closing Marshallers")
	// Closing each channel lets its marshaller goroutine drain and exit.
	for _, ch := range svr.MarshalChans {
		close(ch)
	}
	svr.Done.Done()
}
// LogCacheStats prints out some basic cache stats.
// TODO(https://github.com/m-lab/tcp-info/issues/32) - should also export all of these as Prometheus metrics.
func (svr *Saver) LogCacheStats(localCount, errCount int) {
stats := svr.stats.Copy() // Get a copy
log.Printf("Cache info total %d local %d same %d diff %d new %d err %d\n",
stats.TotalCount+(int64)(localCount), localCount,
stats.TotalCount-((int64)(errCount)+stats.NewCount+stats.DiffCount+(int64)(localCount)),
stats.DiffCount, stats.NewCount, errCount)
} | // TODO - just export an interface, instead of the implementation.
type Saver struct {
Host string // mlabN
Pod string // 3 alpha + 2 decimal
FileAgeLimit time.Duration | random_line_split |
saver.go | // Package saver contains all logic for writing records to files.
// 1. Sets up a channel that accepts slices of *netlink.ArchivalRecord
// 2. Maintains a map of Connections, one for each connection.
// 3. Uses several marshallers goroutines to serialize data and and write to
// zstd files.
// 4. Rotates Connection output files every 10 minutes for long lasting connections.
// 5. uses a cache to detect meaningful state changes, and avoid excessive
// writes.
package saver
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"sync"
"sync/atomic"
"time"
"github.com/m-lab/go/anonymize"
"github.com/m-lab/tcp-info/cache"
"github.com/m-lab/tcp-info/eventsocket"
"github.com/m-lab/tcp-info/inetdiag"
"github.com/m-lab/tcp-info/metrics"
"github.com/m-lab/tcp-info/netlink"
"github.com/m-lab/tcp-info/tcp"
"github.com/m-lab/tcp-info/zstd"
"github.com/m-lab/uuid"
)
// This is the maximum switch/network if speed in bits/sec. It is used to check for illogical bit rate observations.
const maxSwitchSpeed = 1e10
// We will send an entire batch of prefiltered ArchivalRecords through a channel from
// the collection loop to the top level saver. The saver will detect new connections
// and significant diffs, maintain the connection cache, determine
// how frequently to save deltas for each connection.
//
// The saver will use a small set of Marshallers to convert to protos,
// marshal the protos, and write them to files.
// Errors generated by saver functions.
var (
ErrNoMarshallers = errors.New("Saver has zero Marshallers")
)
// Task represents a single marshalling task, specifying the message and the writer.
type Task struct {
// nil message means close the writer.
Message *netlink.ArchivalRecord
Writer io.WriteCloser
}
// CacheLogger is any object with a LogCacheStats method.
type CacheLogger interface {
LogCacheStats(localCount, errCount int)
}
// MarshalChan is a channel of marshalling tasks.
type MarshalChan chan<- Task
// runMarshaller drains taskChan, anonymizing and JSON-marshalling each record
// and writing it newline-delimited to the task's writer. A nil Message is a
// sentinel meaning "close the writer". Calls wg.Done when taskChan is closed
// and fully drained.
func runMarshaller(taskChan <-chan Task, wg *sync.WaitGroup, anon anonymize.IPAnonymizer) {
	for task := range taskChan {
		if task.Message == nil {
			// Sentinel: close the output file.
			task.Writer.Close()
			continue
		}
		if task.Writer == nil {
			log.Fatal("Nil writer")
		}
		err := task.Message.RawIDM.Anonymize(anon)
		if err != nil {
			log.Println("Failed to anonymize message:", err)
			continue
		}
		b, err := json.Marshal(task.Message)
		if err != nil {
			// Previously the error was silently ignored (see old FIXME);
			// skip unserializable records rather than writing garbage.
			log.Println("Failed to marshal message:", err)
			continue
		}
		if _, err := task.Writer.Write(b); err != nil {
			log.Println("Write failed:", err)
			continue
		}
		if _, err := task.Writer.Write([]byte("\n")); err != nil {
			log.Println("Write failed:", err)
		}
	}
	log.Println("Marshaller Done")
	wg.Done()
}
// newMarshaller starts one marshalling goroutine and returns the channel
// used to feed it tasks. The goroutine runs until the channel is closed,
// then calls wg.Done.
func newMarshaller(wg *sync.WaitGroup, anon anonymize.IPAnonymizer) MarshalChan {
	tasks := make(chan Task, 100)
	wg.Add(1)
	go runMarshaller(tasks, wg, anon)
	return tasks
}
// Connection objects handle all output associated with a single connection.
type Connection struct {
	Inode      uint32 // TODO - also use the UID???
	ID         inetdiag.SockID
	UID        uint32
	Slice      string         // 4 hex, indicating which machine segment this is on.
	StartTime  time.Time      // Time the connection was initiated.
	Sequence   int            // Typically zero, but increments for long running connections.
	Expiration time.Time      // Time we will swap files and increment Sequence.
	Writer     io.WriteCloser // Current output file; nil when no file is open.
}
func newConnection(info *inetdiag.InetDiagMsg, timestamp time.Time) *Connection |
// Rotate opens the next writer for a connection.
// Note that long running connections will have data in multiple directories,
// because, for all segments after the first one, we choose the directory
// based on the time Rotate() was called, and not on the StartTime of the
// connection. Long-running connections with data on multiple days will
// therefore likely have data in multiple date directories.
// (This behavior is new as of April 2020. Prior to then, all files were
// placed in the directory corresponding to the StartTime.)
// NOTE(review): Host and Pod are currently unused here.
func (conn *Connection) Rotate(Host string, Pod string, FileAgeLimit time.Duration) error {
	datePath := conn.StartTime.Format("2006/01/02")
	// For first block, date directory is based on the connection start time.
	// For all other blocks, (sequence > 0) it is based on the current time.
	if conn.Sequence > 0 {
		datePath = time.Now().UTC().Format("2006/01/02")
	}
	if err := os.MkdirAll(datePath, 0777); err != nil {
		return err
	}
	id := uuid.FromCookie(conn.ID.CookieUint64())
	var err error
	conn.Writer, err = zstd.NewWriter(fmt.Sprintf("%s/%s.%05d.jsonl.zst", datePath, id, conn.Sequence))
	if err != nil {
		return err
	}
	conn.writeHeader()
	metrics.NewFileCount.Inc()
	// Honor the caller-supplied age limit. The previous code hard-coded
	// 10 * time.Minute here and silently ignored FileAgeLimit (the only
	// caller passes 10 minutes, so behavior is unchanged today).
	conn.Expiration = conn.Expiration.Add(FileAgeLimit)
	conn.Sequence++
	return nil
}
// writeHeader writes the metadata record (UUID, sequence, start time) as the
// first line of a newly opened connection file.
func (conn *Connection) writeHeader() {
	msg := netlink.ArchivalRecord{
		Metadata: &netlink.Metadata{
			UUID:      uuid.FromCookie(conn.ID.CookieUint64()),
			Sequence:  conn.Sequence,
			StartTime: conn.StartTime,
		},
	}
	bytes, err := json.Marshal(msg)
	if err != nil {
		// Previously ignored (see old FIXME). A metadata record that fails to
		// serialize indicates a programming error; log and skip the header
		// rather than writing garbage.
		log.Println("Failed to marshal header:", err)
		return
	}
	conn.Writer.Write(bytes)
	conn.Writer.Write([]byte("\n"))
}
// stats tracks cache activity counters. All fields are updated with
// sync/atomic operations, so the struct is safe for concurrent increments.
type stats struct {
	TotalCount   int64 // Total records processed.
	NewCount     int64 // Records for previously unseen connections.
	DiffCount    int64 // Records with a major change vs the cached snapshot.
	ExpiredCount int64 // Connections removed after vanishing from a cycle.
}

// IncTotalCount atomically increments TotalCount.
func (s *stats) IncTotalCount() {
	atomic.AddInt64(&s.TotalCount, 1)
}

// IncNewCount atomically increments NewCount.
func (s *stats) IncNewCount() {
	atomic.AddInt64(&s.NewCount, 1)
}

// IncDiffCount atomically increments DiffCount.
func (s *stats) IncDiffCount() {
	atomic.AddInt64(&s.DiffCount, 1)
}

// IncExpiredCount atomically increments ExpiredCount.
func (s *stats) IncExpiredCount() {
	atomic.AddInt64(&s.ExpiredCount, 1)
}

// Copy returns a snapshot of the counters, each read atomically.
func (s *stats) Copy() stats {
	return stats{
		TotalCount:   atomic.LoadInt64(&s.TotalCount),
		NewCount:     atomic.LoadInt64(&s.NewCount),
		DiffCount:    atomic.LoadInt64(&s.DiffCount),
		ExpiredCount: atomic.LoadInt64(&s.ExpiredCount),
	}
}
// TcpStats is used to save the connection stats as connection is closing.
// It also serves as a simple (sent, received) byte-count pair for totals.
type TcpStats struct {
	Sent     uint64 // BytesSent
	Received uint64 // BytesReceived
}
// Saver provides functionality for saving tcpinfo diffs to connection files.
// It handles arbitrary connections, and only writes to file when the
// significant fields change. (TODO - what does "significant fields" mean).
// TODO - just export an interface, instead of the implementation.
type Saver struct {
	Host          string                 // mlabN
	Pod           string                 // 3 alpha + 2 decimal
	FileAgeLimit  time.Duration          // Rotation interval for per-connection files.
	MarshalChans  []MarshalChan          // One task channel per marshalling goroutine.
	Done          *sync.WaitGroup        // All marshallers will call Done on this.
	Connections   map[uint64]*Connection // Active connections, keyed by cookie.
	ClosingStats  map[uint64]TcpStats    // BytesReceived and BytesSent for connections that are closing.
	ClosingTotals TcpStats               // Running totals across all ClosingStats entries.
	cache         *cache.Cache           // Detects new connections and meaningful diffs.
	stats         stats                  // Internal activity counters; see LogCacheStats.
	eventServer   eventsocket.Server     // Notified of flow creation/deletion.
	exclude       *netlink.ExcludeConfig // Filter for excluded connections.
}
// NewSaver creates a new Saver for the given host and pod. numMarshaller controls
// how many marshalling goroutines are used to distribute the marshalling workload.
func NewSaver(host string, pod string, numMarshaller int, srv eventsocket.Server, anon anonymize.IPAnonymizer, ex *netlink.ExcludeConfig) *Saver {
	wg := &sync.WaitGroup{}
	// The extra Add is matched by Done.Done() in Close, so callers waiting on
	// the group observe complete shutdown.
	wg.Add(1)
	marshallers := make([]MarshalChan, numMarshaller)
	for i := range marshallers {
		marshallers[i] = newMarshaller(wg, anon)
	}
	return &Saver{
		Host:         host,
		Pod:          pod,
		FileAgeLimit: 10 * time.Minute,
		MarshalChans: marshallers,
		Done:         wg,
		// Start with capacity of 500; the map grows as needed, and this is
		// not a performance concern.
		Connections:  make(map[uint64]*Connection, 500),
		ClosingStats: make(map[uint64]TcpStats, 100),
		cache:        cache.NewCache(),
		eventServer:  srv,
		exclude:      ex,
	}
}
// queue queues a single ArchivalRecord to the appropriate marshalling queue, based on the
// connection Cookie. It creates Connection state for first-time cookies and
// rotates the output file when the current one has expired.
func (svr *Saver) queue(msg *netlink.ArchivalRecord) error {
	idm, err := msg.RawIDM.Parse()
	if err != nil {
		// TODO error metric
		log.Println(err)
		// BUGFIX: previously execution fell through here and dereferenced
		// idm, which may be nil after a parse failure.
		return err
	}
	cookie := idm.ID.Cookie()
	if cookie == 0 {
		return errors.New("Cookie = 0")
	}
	if len(svr.MarshalChans) < 1 {
		return ErrNoMarshallers
	}
	q := svr.MarshalChans[int(cookie%uint64(len(svr.MarshalChans)))]
	conn, ok := svr.Connections[cookie]
	if !ok {
		// Create a new connection for first time cookies. For late connections already
		// terminating, log some info for debugging purposes.
		if idm.IDiagState >= uint8(tcp.FIN_WAIT1) {
			s, r := msg.GetStats()
			log.Println("Starting:", msg.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), TcpStats{s, r})
		}
		conn = newConnection(idm, msg.Timestamp)
		svr.eventServer.FlowCreated(msg.Timestamp, uuid.FromCookie(cookie), idm.ID.GetSockID())
		svr.Connections[cookie] = conn
	}
	// Close and rotate the output file when its age limit has passed.
	if time.Now().After(conn.Expiration) && conn.Writer != nil {
		q <- Task{nil, conn.Writer} // Close the previous file.
		conn.Writer = nil
	}
	if conn.Writer == nil {
		if err := conn.Rotate(svr.Host, svr.Pod, svr.FileAgeLimit); err != nil {
			return err
		}
	}
	q <- Task{msg, conn.Writer}
	return nil
}
// endConn finalizes a tracked connection: it notifies the event server,
// queues a close task for any open output file, and removes the connection
// from the map.
func (svr *Saver) endConn(cookie uint64) {
	svr.eventServer.FlowDeleted(time.Now(), uuid.FromCookie(cookie))
	q := svr.MarshalChans[cookie%uint64(len(svr.MarshalChans))]
	conn, ok := svr.Connections[cookie]
	if ok {
		if conn.Writer != nil {
			// A nil Message tells the marshaller to close the writer.
			q <- Task{nil, conn.Writer}
		}
		// Delete the map entry whenever the connection exists. Previously the
		// delete only happened when Writer was non-nil, which leaked entries
		// whose file had already been closed (e.g. after a failed Rotate).
		delete(svr.Connections, cookie)
	}
}
// Handle a bundle of messages.
// Returns the bytes sent and received on all non-local connections.
func (svr *Saver) handleType(t time.Time, msgs []*netlink.NetlinkMessage) (uint64, uint64) {
	var sent, received uint64
	for _, raw := range msgs {
		if raw == nil {
			log.Println("Nil message")
			continue
		}
		record, err := netlink.MakeArchivalRecord(raw, svr.exclude)
		if record == nil {
			// nil record + non-nil error: parse problem worth logging;
			// nil record + nil error: excluded connection, skip silently.
			if err != nil {
				log.Println(err)
			}
			continue
		}
		record.Timestamp = t
		// We track the total speed of all connections every second.
		// Note: If GetStats shows up in profiling, might want to move to once/second code.
		s, r := record.GetStats()
		sent += s
		received += r
		svr.swapAndQueue(record)
	}
	return sent, received
}
// MessageSaverLoop runs a loop to receive batches of ArchivalRecords. Local connections
// are filtered out upstream. For each batch it feeds records through the cache,
// finalizes connections that disappeared this cycle, and (once per second)
// reports aggregate throughput metrics. Runs until readerChannel is closed,
// then shuts the Saver down.
func (svr *Saver) MessageSaverLoop(readerChannel <-chan netlink.MessageBlock) {
	log.Println("Starting Saver")
	// reported: byte totals already sent to prometheus.
	// closed: cumulative totals from connections that have fully closed.
	var reported, closed TcpStats
	lastReportTime := time.Time{}.Unix()
	// Cap on per-connection "Closed:" log lines over the process lifetime.
	closeLogCount := 10000
	for msgs := range readerChannel {
		// Handle v4 and v6 messages, and return the total bytes sent and received.
		// TODO - we only need to collect these stats if this is a reporting cycle.
		// NOTE: Prior to April 2020, we were not using UTC here. The servers
		// are configured to use UTC time, so this should not make any difference.
		s4, r4 := svr.handleType(msgs.V4Time.UTC(), msgs.V4Messages)
		s6, r6 := svr.handleType(msgs.V6Time.UTC(), msgs.V6Messages)
		// Note that the connections that have closed may have had traffic that
		// we never see, and therefore can't account for in metrics.
		residual := svr.cache.EndCycle()
		// Remove all missing connections from the cache.
		// Also keep a metric of the total cumulative send and receive bytes.
		for cookie := range residual {
			ar := residual[cookie]
			var stats TcpStats
			var ok bool
			if !ar.HasDiagInfo() {
				// The final record carried no DiagInfo, so fall back to the
				// stats captured by swapAndQueue when the connection entered
				// its closing state.
				stats, ok = svr.ClosingStats[cookie]
				if ok {
					// Remove the stats from closing.
					svr.ClosingTotals.Sent -= stats.Sent
					svr.ClosingTotals.Received -= stats.Received
					delete(svr.ClosingStats, cookie)
				} else {
					log.Println("Missing stats for", cookie)
				}
			} else {
				stats.Sent, stats.Received = ar.GetStats()
			}
			closed.Sent += stats.Sent
			closed.Received += stats.Received
			if closeLogCount > 0 {
				idm, err := ar.RawIDM.Parse()
				if err != nil {
					log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, "idm parse error", stats)
				} else {
					log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), stats)
				}
				closeLogCount--
			}
			svr.endConn(cookie)
			svr.stats.IncExpiredCount()
		}
		// Every second, update the total throughput for the past second.
		if msgs.V4Time.Unix() > lastReportTime {
			// This is the total bytes since program start.
			totalSent := closed.Sent + svr.ClosingTotals.Sent + s4 + s6
			totalReceived := closed.Received + svr.ClosingTotals.Received + r4 + r6
			// NOTE: We are seeing occasions when total < reported. This messes up prometheus, so
			// we detect that and skip reporting.
			// This seems to be persistent, not just a momentary glitch. The total may drop by 500KB,
			// and only recover after many seconds of gradual increases (on idle workstation).
			// This workaround seems to also cure the 2<<67 reports.
			// We also check for increments larger than 10x the maxSwitchSpeed.
			// TODO: This can all be discarded when we are confident the bug has been fixed.
			if totalSent > 10*maxSwitchSpeed/8+reported.Sent || totalSent < reported.Sent {
				// Some bug in the accounting!!
				log.Println("Skipping BytesSent report due to bad accounting", totalSent, reported.Sent, closed.Sent, svr.ClosingTotals.Sent, s4, s6)
				if totalSent < reported.Sent {
					metrics.ErrorCount.WithLabelValues("totalSent < reportedSent").Inc()
				} else {
					metrics.ErrorCount.WithLabelValues("totalSent-reportedSent exceeds network capacity").Inc()
				}
			} else {
				// Factor of 8 converts bytes to bits (maxSwitchSpeed is bits/sec).
				metrics.SendRateHistogram.Observe(8 * float64(totalSent-reported.Sent))
				reported.Sent = totalSent // the total bytes reported to prometheus.
			}
			if totalReceived > 10*maxSwitchSpeed/8+reported.Received || totalReceived < reported.Received {
				// Some bug in the accounting!!
				log.Println("Skipping BytesReceived report due to bad accounting", totalReceived, reported.Received, closed.Received, svr.ClosingTotals.Received, r4, r6)
				if totalReceived < reported.Received {
					metrics.ErrorCount.WithLabelValues("totalReceived < reportedReceived").Inc()
				} else {
					metrics.ErrorCount.WithLabelValues("totalReceived-reportedReceived exceeds network capacity").Inc()
				}
			} else {
				metrics.ReceiveRateHistogram.Observe(8 * float64(totalReceived-reported.Received))
				reported.Received = totalReceived // the total bytes reported to prometheus.
			}
			lastReportTime = msgs.V4Time.Unix()
		}
	}
	svr.Close()
}
// swapAndQueue updates the cache with pm and queues the record for writing
// when it is either a brand new connection or differs significantly from the
// cached snapshot. For connections entering a closing state, it captures the
// last known send/receive stats for later accounting.
func (svr *Saver) swapAndQueue(pm *netlink.ArchivalRecord) {
	svr.stats.IncTotalCount() // TODO fix race
	// Update returns the previously cached record; nil means a new connection.
	old, err := svr.cache.Update(pm)
	if err != nil {
		// TODO metric
		log.Println(err)
		return
	}
	if old == nil {
		// First sighting of this connection; always persist it.
		svr.stats.IncNewCount()
		metrics.SnapshotCount.Inc()
		err := svr.queue(pm)
		if err != nil {
			log.Println(err, "Connections", len(svr.Connections))
		}
	} else {
		pmIDM, err := pm.RawIDM.Parse()
		if err != nil {
			// TODO metric
			log.Println(err)
			return
		}
		if !pm.HasDiagInfo() {
			// If the previous record has DiagInfo, store the send/receive stats.
			// We will use them when we close the connection.
			if old.HasDiagInfo() {
				sOld, rOld := old.GetStats()
				svr.ClosingStats[pmIDM.ID.Cookie()] = TcpStats{Sent: sOld, Received: rOld}
				svr.ClosingTotals.Sent += sOld
				svr.ClosingTotals.Received += rOld
				log.Println("Closing:", pm.Timestamp.Format("15:04:05.000"), pmIDM.ID.Cookie(), tcp.State(pmIDM.IDiagState), TcpStats{sOld, rOld})
			}
		}
		change, err := pm.Compare(old)
		if err != nil {
			// TODO metric
			log.Println(err)
			return
		}
		if change > netlink.NoMajorChange {
			// Only persist snapshots with a meaningful diff from the last one.
			svr.stats.IncDiffCount()
			metrics.SnapshotCount.Inc()
			err := svr.queue(pm)
			if err != nil {
				// TODO metric
				log.Println(err)
			}
		}
	}
}
// Close shuts down all the marshallers, and waits for all files to be closed.
func (svr *Saver) Close() {
	log.Println("Terminating Saver")
	log.Println("Total of", len(svr.Connections), "connections active.")
	// Finalize every tracked connection so their files are queued for closing.
	for cookie := range svr.Connections {
		svr.endConn(cookie)
	}
	log.Println("Closing Marshallers")
	// Closing each channel lets its marshaller goroutine drain and exit.
	for _, ch := range svr.MarshalChans {
		close(ch)
	}
	svr.Done.Done()
}
// LogCacheStats prints out some basic cache stats.
// TODO(https://github.com/m-lab/tcp-info/issues/32) - should also export all of these as Prometheus metrics.
func (svr *Saver) LogCacheStats(localCount, errCount int) {
	snapshot := svr.stats.Copy() // Get a copy
	total := snapshot.TotalCount + int64(localCount)
	// "same" = records that were neither errors, new, diffs, nor local.
	same := snapshot.TotalCount - (int64(errCount) + snapshot.NewCount + snapshot.DiffCount + int64(localCount))
	log.Printf("Cache info total %d local %d same %d diff %d new %d err %d\n",
		total, localCount, same, snapshot.DiffCount, snapshot.NewCount, errCount)
}
| {
conn := Connection{Inode: info.IDiagInode, ID: info.ID.GetSockID(), UID: info.IDiagUID, Slice: "", StartTime: timestamp, Sequence: 0,
Expiration: time.Now()}
return &conn
} | identifier_body |
saver.go | // Package saver contains all logic for writing records to files.
// 1. Sets up a channel that accepts slices of *netlink.ArchivalRecord
// 2. Maintains a map of Connections, one for each connection.
// 3. Uses several marshallers goroutines to serialize data and and write to
// zstd files.
// 4. Rotates Connection output files every 10 minutes for long lasting connections.
// 5. uses a cache to detect meaningful state changes, and avoid excessive
// writes.
package saver
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"sync"
"sync/atomic"
"time"
"github.com/m-lab/go/anonymize"
"github.com/m-lab/tcp-info/cache"
"github.com/m-lab/tcp-info/eventsocket"
"github.com/m-lab/tcp-info/inetdiag"
"github.com/m-lab/tcp-info/metrics"
"github.com/m-lab/tcp-info/netlink"
"github.com/m-lab/tcp-info/tcp"
"github.com/m-lab/tcp-info/zstd"
"github.com/m-lab/uuid"
)
// This is the maximum switch/network if speed in bits/sec. It is used to check for illogical bit rate observations.
const maxSwitchSpeed = 1e10
// We will send an entire batch of prefiltered ArchivalRecords through a channel from
// the collection loop to the top level saver. The saver will detect new connections
// and significant diffs, maintain the connection cache, determine
// how frequently to save deltas for each connection.
//
// The saver will use a small set of Marshallers to convert to protos,
// marshal the protos, and write them to files.
// Errors generated by saver functions.
var (
ErrNoMarshallers = errors.New("Saver has zero Marshallers")
)
// Task represents a single marshalling task, specifying the message and the writer.
type Task struct {
// nil message means close the writer.
Message *netlink.ArchivalRecord
Writer io.WriteCloser
}
// CacheLogger is any object with a LogCacheStats method.
type CacheLogger interface {
LogCacheStats(localCount, errCount int)
}
// MarshalChan is a channel of marshalling tasks.
type MarshalChan chan<- Task
// runMarshaller drains taskChan, anonymizing and JSON-marshalling each record
// and writing it newline-delimited to the task's writer. A nil Message is a
// sentinel meaning "close the writer". Calls wg.Done when taskChan is closed
// and fully drained.
func runMarshaller(taskChan <-chan Task, wg *sync.WaitGroup, anon anonymize.IPAnonymizer) {
	for task := range taskChan {
		if task.Message == nil {
			// Sentinel: close the output file.
			task.Writer.Close()
			continue
		}
		if task.Writer == nil {
			log.Fatal("Nil writer")
		}
		err := task.Message.RawIDM.Anonymize(anon)
		if err != nil {
			log.Println("Failed to anonymize message:", err)
			continue
		}
		b, err := json.Marshal(task.Message)
		if err != nil {
			// Previously the error was silently ignored (see old FIXME);
			// skip unserializable records rather than writing garbage.
			log.Println("Failed to marshal message:", err)
			continue
		}
		if _, err := task.Writer.Write(b); err != nil {
			log.Println("Write failed:", err)
			continue
		}
		if _, err := task.Writer.Write([]byte("\n")); err != nil {
			log.Println("Write failed:", err)
		}
	}
	log.Println("Marshaller Done")
	wg.Done()
}
// newMarshaller starts one marshalling goroutine and returns the channel used
// to feed it tasks. The goroutine exits (calling wg.Done) when the channel is
// closed and drained.
func newMarshaller(wg *sync.WaitGroup, anon anonymize.IPAnonymizer) MarshalChan {
	marshChan := make(chan Task, 100)
	wg.Add(1)
	go runMarshaller(marshChan, wg, anon)
	return marshChan
}
// Connection objects handle all output associated with a single connection.
type Connection struct {
	Inode      uint32 // TODO - also use the UID???
	ID         inetdiag.SockID
	UID        uint32
	Slice      string         // 4 hex, indicating which machine segment this is on.
	StartTime  time.Time      // Time the connection was initiated.
	Sequence   int            // Typically zero, but increments for long running connections.
	Expiration time.Time      // Time we will swap files and increment Sequence.
	Writer     io.WriteCloser // Current output file; nil when no file is open.
}
// newConnection creates tracking state for a newly observed connection.
// Expiration is initialized to the current time, so the first record queued
// for this connection immediately triggers a Rotate.
func newConnection(info *inetdiag.InetDiagMsg, timestamp time.Time) *Connection {
	return &Connection{
		Inode:      info.IDiagInode,
		ID:         info.ID.GetSockID(),
		UID:        info.IDiagUID,
		Slice:      "",
		StartTime:  timestamp,
		Sequence:   0,
		Expiration: time.Now(),
	}
}
// Rotate opens the next writer for a connection.
// Note that long running connections will have data in multiple directories,
// because, for all segments after the first one, we choose the directory
// based on the time Rotate() was called, and not on the StartTime of the
// connection. Long-running connections with data on multiple days will
// therefore likely have data in multiple date directories.
// (This behavior is new as of April 2020. Prior to then, all files were
// placed in the directory corresponding to the StartTime.)
// NOTE(review): Host and Pod are currently unused here.
func (conn *Connection) Rotate(Host string, Pod string, FileAgeLimit time.Duration) error {
	datePath := conn.StartTime.Format("2006/01/02")
	// For first block, date directory is based on the connection start time.
	// For all other blocks, (sequence > 0) it is based on the current time.
	if conn.Sequence > 0 {
		datePath = time.Now().UTC().Format("2006/01/02")
	}
	if err := os.MkdirAll(datePath, 0777); err != nil {
		return err
	}
	id := uuid.FromCookie(conn.ID.CookieUint64())
	var err error
	conn.Writer, err = zstd.NewWriter(fmt.Sprintf("%s/%s.%05d.jsonl.zst", datePath, id, conn.Sequence))
	if err != nil {
		return err
	}
	conn.writeHeader()
	metrics.NewFileCount.Inc()
	// Honor the caller-supplied age limit. The previous code hard-coded
	// 10 * time.Minute here and silently ignored FileAgeLimit (the only
	// caller passes 10 minutes, so behavior is unchanged today).
	conn.Expiration = conn.Expiration.Add(FileAgeLimit)
	conn.Sequence++
	return nil
}
// writeHeader writes the metadata record (UUID, sequence, start time) as the
// first line of a newly opened connection file.
func (conn *Connection) writeHeader() {
	msg := netlink.ArchivalRecord{
		Metadata: &netlink.Metadata{
			UUID:      uuid.FromCookie(conn.ID.CookieUint64()),
			Sequence:  conn.Sequence,
			StartTime: conn.StartTime,
		},
	}
	bytes, err := json.Marshal(msg)
	if err != nil {
		// Previously ignored (see old FIXME). A metadata record that fails to
		// serialize indicates a programming error; log and skip the header
		// rather than writing garbage.
		log.Println("Failed to marshal header:", err)
		return
	}
	conn.Writer.Write(bytes)
	conn.Writer.Write([]byte("\n"))
}
// stats tracks cache activity counters. All fields are updated with
// sync/atomic operations, so the struct is safe for concurrent increments.
type stats struct {
	TotalCount   int64 // Total records processed.
	NewCount     int64 // Records for previously unseen connections.
	DiffCount    int64 // Records with a major change vs the cached snapshot.
	ExpiredCount int64 // Connections removed after vanishing from a cycle.
}

// IncTotalCount atomically increments TotalCount.
func (s *stats) IncTotalCount() {
	atomic.AddInt64(&s.TotalCount, 1)
}

// IncNewCount atomically increments NewCount.
func (s *stats) IncNewCount() {
	atomic.AddInt64(&s.NewCount, 1)
}

// IncDiffCount atomically increments DiffCount.
func (s *stats) IncDiffCount() {
	atomic.AddInt64(&s.DiffCount, 1)
}

// IncExpiredCount atomically increments ExpiredCount.
func (s *stats) IncExpiredCount() {
	atomic.AddInt64(&s.ExpiredCount, 1)
}

// Copy returns a snapshot of the counters, each field read atomically.
func (s *stats) Copy() stats {
	result := stats{}
	result.TotalCount = atomic.LoadInt64(&s.TotalCount)
	result.NewCount = atomic.LoadInt64(&s.NewCount)
	result.DiffCount = atomic.LoadInt64(&s.DiffCount)
	result.ExpiredCount = atomic.LoadInt64(&s.ExpiredCount)
	return result
}
// TcpStats is used to save the connection stats as connection is closing.
// It also serves as a simple (sent, received) byte-count pair for totals.
type TcpStats struct {
	Sent     uint64 // BytesSent
	Received uint64 // BytesReceived
}
// Saver provides functionality for saving tcpinfo diffs to connection files.
// It handles arbitrary connections, and only writes to file when the
// significant fields change. (TODO - what does "significant fields" mean).
// TODO - just export an interface, instead of the implementation.
type Saver struct {
	Host          string                 // mlabN
	Pod           string                 // 3 alpha + 2 decimal
	FileAgeLimit  time.Duration          // Rotation interval for per-connection files.
	MarshalChans  []MarshalChan          // One task channel per marshalling goroutine.
	Done          *sync.WaitGroup        // All marshallers will call Done on this.
	Connections   map[uint64]*Connection // Active connections, keyed by cookie.
	ClosingStats  map[uint64]TcpStats    // BytesReceived and BytesSent for connections that are closing.
	ClosingTotals TcpStats               // Running totals across all ClosingStats entries.
	cache         *cache.Cache           // Detects new connections and meaningful diffs.
	stats         stats                  // Internal activity counters; see LogCacheStats.
	eventServer   eventsocket.Server     // Notified of flow creation/deletion.
	exclude       *netlink.ExcludeConfig // Filter for excluded connections.
}
// NewSaver creates a new Saver for the given host and pod. numMarshaller controls
// how many marshalling goroutines are used to distribute the marshalling workload.
func NewSaver(host string, pod string, numMarshaller int, srv eventsocket.Server, anon anonymize.IPAnonymizer, ex *netlink.ExcludeConfig) *Saver {
	m := make([]MarshalChan, 0, numMarshaller)
	c := cache.NewCache()
	// We start with capacity of 500. This will be reallocated as needed, but this
	// is not a performance concern.
	conn := make(map[uint64]*Connection, 500)
	wg := &sync.WaitGroup{}
	// This extra Add is matched by Done.Done() in Close, so callers waiting
	// on the group observe complete shutdown.
	wg.Add(1)
	ageLim := 10 * time.Minute
	for i := 0; i < numMarshaller; i++ {
		m = append(m, newMarshaller(wg, anon))
	}
	return &Saver{
		Host:         host,
		Pod:          pod,
		FileAgeLimit: ageLim,
		MarshalChans: m,
		Done:         wg,
		Connections:  conn,
		ClosingStats: make(map[uint64]TcpStats, 100),
		cache:        c,
		eventServer:  srv,
		exclude:      ex,
	}
}
// queue queues a single ArchivalRecord to the appropriate marshalling queue, based on the
// connection Cookie. It creates Connection state for first-time cookies and
// rotates the output file when the current one has expired.
func (svr *Saver) queue(msg *netlink.ArchivalRecord) error {
	idm, err := msg.RawIDM.Parse()
	if err != nil {
		// TODO error metric
		log.Println(err)
		// BUGFIX: previously execution fell through here and dereferenced
		// idm, which may be nil after a parse failure.
		return err
	}
	cookie := idm.ID.Cookie()
	if cookie == 0 {
		return errors.New("Cookie = 0")
	}
	if len(svr.MarshalChans) < 1 {
		return ErrNoMarshallers
	}
	q := svr.MarshalChans[int(cookie%uint64(len(svr.MarshalChans)))]
	conn, ok := svr.Connections[cookie]
	if !ok {
		// Create a new connection for first time cookies. For late connections already
		// terminating, log some info for debugging purposes.
		if idm.IDiagState >= uint8(tcp.FIN_WAIT1) {
			s, r := msg.GetStats()
			log.Println("Starting:", msg.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), TcpStats{s, r})
		}
		conn = newConnection(idm, msg.Timestamp)
		svr.eventServer.FlowCreated(msg.Timestamp, uuid.FromCookie(cookie), idm.ID.GetSockID())
		svr.Connections[cookie] = conn
	}
	// Close and rotate the output file when its age limit has passed.
	if time.Now().After(conn.Expiration) && conn.Writer != nil {
		q <- Task{nil, conn.Writer} // Close the previous file.
		conn.Writer = nil
	}
	if conn.Writer == nil {
		if err := conn.Rotate(svr.Host, svr.Pod, svr.FileAgeLimit); err != nil {
			return err
		}
	}
	q <- Task{msg, conn.Writer}
	return nil
}
// endConn finalizes a tracked connection: it notifies the event server,
// queues a close task for any open output file, and removes the connection
// from the map.
func (svr *Saver) endConn(cookie uint64) {
	svr.eventServer.FlowDeleted(time.Now(), uuid.FromCookie(cookie))
	q := svr.MarshalChans[cookie%uint64(len(svr.MarshalChans))]
	conn, ok := svr.Connections[cookie]
	if ok {
		if conn.Writer != nil {
			// A nil Message tells the marshaller to close the writer.
			q <- Task{nil, conn.Writer}
		}
		// Delete the map entry whenever the connection exists. Previously the
		// delete only happened when Writer was non-nil, which leaked entries
		// whose file had already been closed (e.g. after a failed Rotate).
		delete(svr.Connections, cookie)
	}
}
// Handle a bundle of messages.
// Returns the bytes sent and received on all non-local connections.
// t is the timestamp applied to every record in the bundle.
func (svr *Saver) handleType(t time.Time, msgs []*netlink.NetlinkMessage) (uint64, uint64) {
	var liveSent, liveReceived uint64
	for _, msg := range msgs {
		// In swap and queue, we want to track the total speed of all connections
		// every second.
		if msg == nil {
			log.Println("Nil message")
			continue
		}
		ar, err := netlink.MakeArchivalRecord(msg, svr.exclude)
		if ar == nil {
			// A nil record with a non-nil error is a parse problem; a nil
			// record with a nil error is a filtered/excluded connection.
			if err != nil {
				log.Println(err)
			}
			continue
		}
		ar.Timestamp = t
		// Note: If GetStats shows up in profiling, might want to move to once/second code.
		s, r := ar.GetStats()
		liveSent += s
		liveReceived += r
		svr.swapAndQueue(ar)
	}
	return liveSent, liveReceived
}
// MessageSaverLoop runs a loop to receive batches of ArchivalRecords. Local connections
func (svr *Saver) MessageSaverLoop(readerChannel <-chan netlink.MessageBlock) {
log.Println("Starting Saver")
var reported, closed TcpStats
lastReportTime := time.Time{}.Unix()
closeLogCount := 10000
for msgs := range readerChannel {
// Handle v4 and v6 messages, and return the total bytes sent and received.
// TODO - we only need to collect these stats if this is a reporting cycle.
// NOTE: Prior to April 2020, we were not using UTC here. The servers
// are configured to use UTC time, so this should not make any difference.
s4, r4 := svr.handleType(msgs.V4Time.UTC(), msgs.V4Messages)
s6, r6 := svr.handleType(msgs.V6Time.UTC(), msgs.V6Messages)
// Note that the connections that have closed may have had traffic that
// we never see, and therefore can't account for in metrics.
residual := svr.cache.EndCycle()
// Remove all missing connections from the cache.
// Also keep a metric of the total cumulative send and receive bytes.
for cookie := range residual {
ar := residual[cookie]
var stats TcpStats
var ok bool
if !ar.HasDiagInfo() {
stats, ok = svr.ClosingStats[cookie]
if ok {
// Remove the stats from closing.
svr.ClosingTotals.Sent -= stats.Sent
svr.ClosingTotals.Received -= stats.Received
delete(svr.ClosingStats, cookie)
} else {
log.Println("Missing stats for", cookie)
}
} else {
stats.Sent, stats.Received = ar.GetStats()
}
closed.Sent += stats.Sent
closed.Received += stats.Received
if closeLogCount > 0 |
svr.endConn(cookie)
svr.stats.IncExpiredCount()
}
// Every second, update the total throughput for the past second.
if msgs.V4Time.Unix() > lastReportTime {
// This is the total bytes since program start.
totalSent := closed.Sent + svr.ClosingTotals.Sent + s4 + s6
totalReceived := closed.Received + svr.ClosingTotals.Received + r4 + r6
// NOTE: We are seeing occasions when total < reported. This messes up prometheus, so
// we detect that and skip reporting.
// This seems to be persistent, not just a momentary glitch. The total may drop by 500KB,
// and only recover after many seconds of gradual increases (on idle workstation).
// This workaround seems to also cure the 2<<67 reports.
// We also check for increments larger than 10x the maxSwitchSpeed.
// TODO: This can all be discarded when we are confident the bug has been fixed.
if totalSent > 10*maxSwitchSpeed/8+reported.Sent || totalSent < reported.Sent {
// Some bug in the accounting!!
log.Println("Skipping BytesSent report due to bad accounting", totalSent, reported.Sent, closed.Sent, svr.ClosingTotals.Sent, s4, s6)
if totalSent < reported.Sent {
metrics.ErrorCount.WithLabelValues("totalSent < reportedSent").Inc()
} else {
metrics.ErrorCount.WithLabelValues("totalSent-reportedSent exceeds network capacity").Inc()
}
} else {
metrics.SendRateHistogram.Observe(8 * float64(totalSent-reported.Sent))
reported.Sent = totalSent // the total bytes reported to prometheus.
}
if totalReceived > 10*maxSwitchSpeed/8+reported.Received || totalReceived < reported.Received {
// Some bug in the accounting!!
log.Println("Skipping BytesReceived report due to bad accounting", totalReceived, reported.Received, closed.Received, svr.ClosingTotals.Received, r4, r6)
if totalReceived < reported.Received {
metrics.ErrorCount.WithLabelValues("totalReceived < reportedReceived").Inc()
} else {
metrics.ErrorCount.WithLabelValues("totalReceived-reportedReceived exceeds network capacity").Inc()
}
} else {
metrics.ReceiveRateHistogram.Observe(8 * float64(totalReceived-reported.Received))
reported.Received = totalReceived // the total bytes reported to prometheus.
}
lastReportTime = msgs.V4Time.Unix()
}
}
svr.Close()
}
func (svr *Saver) swapAndQueue(pm *netlink.ArchivalRecord) {
svr.stats.IncTotalCount() // TODO fix race
old, err := svr.cache.Update(pm)
if err != nil {
// TODO metric
log.Println(err)
return
}
if old == nil {
svr.stats.IncNewCount()
metrics.SnapshotCount.Inc()
err := svr.queue(pm)
if err != nil {
log.Println(err, "Connections", len(svr.Connections))
}
} else {
pmIDM, err := pm.RawIDM.Parse()
if err != nil {
// TODO metric
log.Println(err)
return
}
if !pm.HasDiagInfo() {
// If the previous record has DiagInfo, store the send/receive stats.
// We will use them when we close the connection.
if old.HasDiagInfo() {
sOld, rOld := old.GetStats()
svr.ClosingStats[pmIDM.ID.Cookie()] = TcpStats{Sent: sOld, Received: rOld}
svr.ClosingTotals.Sent += sOld
svr.ClosingTotals.Received += rOld
log.Println("Closing:", pm.Timestamp.Format("15:04:05.000"), pmIDM.ID.Cookie(), tcp.State(pmIDM.IDiagState), TcpStats{sOld, rOld})
}
}
change, err := pm.Compare(old)
if err != nil {
// TODO metric
log.Println(err)
return
}
if change > netlink.NoMajorChange {
svr.stats.IncDiffCount()
metrics.SnapshotCount.Inc()
err := svr.queue(pm)
if err != nil {
// TODO metric
log.Println(err)
}
}
}
}
// Close shuts down all the marshallers, and waits for all files to be closed.
func (svr *Saver) Close() {
log.Println("Terminating Saver")
log.Println("Total of", len(svr.Connections), "connections active.")
for i := range svr.Connections {
svr.endConn(i)
}
log.Println("Closing Marshallers")
for i := range svr.MarshalChans {
close(svr.MarshalChans[i])
}
svr.Done.Done()
}
// LogCacheStats prints out some basic cache stats.
// TODO(https://github.com/m-lab/tcp-info/issues/32) - should also export all of these as Prometheus metrics.
func (svr *Saver) LogCacheStats(localCount, errCount int) {
stats := svr.stats.Copy() // Get a copy
log.Printf("Cache info total %d local %d same %d diff %d new %d err %d\n",
stats.TotalCount+(int64)(localCount), localCount,
stats.TotalCount-((int64)(errCount)+stats.NewCount+stats.DiffCount+(int64)(localCount)),
stats.DiffCount, stats.NewCount, errCount)
}
| {
idm, err := ar.RawIDM.Parse()
if err != nil {
log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, "idm parse error", stats)
} else {
log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), stats)
}
closeLogCount--
} | conditional_block |
saver.go | // Package saver contains all logic for writing records to files.
// 1. Sets up a channel that accepts slices of *netlink.ArchivalRecord
// 2. Maintains a map of Connections, one for each connection.
// 3. Uses several marshallers goroutines to serialize data and and write to
// zstd files.
// 4. Rotates Connection output files every 10 minutes for long lasting connections.
// 5. uses a cache to detect meaningful state changes, and avoid excessive
// writes.
package saver
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"sync"
"sync/atomic"
"time"
"github.com/m-lab/go/anonymize"
"github.com/m-lab/tcp-info/cache"
"github.com/m-lab/tcp-info/eventsocket"
"github.com/m-lab/tcp-info/inetdiag"
"github.com/m-lab/tcp-info/metrics"
"github.com/m-lab/tcp-info/netlink"
"github.com/m-lab/tcp-info/tcp"
"github.com/m-lab/tcp-info/zstd"
"github.com/m-lab/uuid"
)
// This is the maximum switch/network if speed in bits/sec. It is used to check for illogical bit rate observations.
const maxSwitchSpeed = 1e10
// We will send an entire batch of prefiltered ArchivalRecords through a channel from
// the collection loop to the top level saver. The saver will detect new connections
// and significant diffs, maintain the connection cache, determine
// how frequently to save deltas for each connection.
//
// The saver will use a small set of Marshallers to convert to protos,
// marshal the protos, and write them to files.
// Errors generated by saver functions.
var (
ErrNoMarshallers = errors.New("Saver has zero Marshallers")
)
// Task represents a single marshalling task, specifying the message and the writer.
type Task struct {
// nil message means close the writer.
Message *netlink.ArchivalRecord
Writer io.WriteCloser
}
// CacheLogger is any object with a LogCacheStats method.
type CacheLogger interface {
LogCacheStats(localCount, errCount int)
}
// MarshalChan is a channel of marshalling tasks.
type MarshalChan chan<- Task
func runMarshaller(taskChan <-chan Task, wg *sync.WaitGroup, anon anonymize.IPAnonymizer) {
for task := range taskChan {
if task.Message == nil {
task.Writer.Close()
continue
}
if task.Writer == nil {
log.Fatal("Nil writer")
}
err := task.Message.RawIDM.Anonymize(anon)
if err != nil {
log.Println("Failed to anonymize message:", err)
continue
}
b, _ := json.Marshal(task.Message) // FIXME: don't ignore error
task.Writer.Write(b)
task.Writer.Write([]byte("\n"))
}
log.Println("Marshaller Done")
wg.Done()
}
func newMarshaller(wg *sync.WaitGroup, anon anonymize.IPAnonymizer) MarshalChan {
marshChan := make(chan Task, 100)
wg.Add(1)
go runMarshaller(marshChan, wg, anon)
return marshChan
}
// Connection objects handle all output associated with a single connection.
type Connection struct {
Inode uint32 // TODO - also use the UID???
ID inetdiag.SockID
UID uint32
Slice string // 4 hex, indicating which machine segment this is on.
StartTime time.Time // Time the connection was initiated.
Sequence int // Typically zero, but increments for long running connections.
Expiration time.Time // Time we will swap files and increment Sequence.
Writer io.WriteCloser
}
func newConnection(info *inetdiag.InetDiagMsg, timestamp time.Time) *Connection {
conn := Connection{Inode: info.IDiagInode, ID: info.ID.GetSockID(), UID: info.IDiagUID, Slice: "", StartTime: timestamp, Sequence: 0,
Expiration: time.Now()}
return &conn
}
// Rotate opens the next writer for a connection.
// Note that long running connections will have data in multiple directories,
// because, for all segments after the first one, we choose the directory
// based on the time Rotate() was called, and not on the StartTime of the
// connection. Long-running connections with data on multiple days will
// therefore likely have data in multiple date directories.
// (This behavior is new as of April 2020. Prior to then, all files were
// placed in the directory corresponding to the StartTime.)
func (conn *Connection) Rotate(Host string, Pod string, FileAgeLimit time.Duration) error {
datePath := conn.StartTime.Format("2006/01/02")
// For first block, date directory is based on the connection start time.
// For all other blocks, (sequence > 0) it is based on the current time.
if conn.Sequence > 0 {
now := time.Now().UTC()
datePath = now.Format("2006/01/02")
}
err := os.MkdirAll(datePath, 0777)
if err != nil {
return err
}
id := uuid.FromCookie(conn.ID.CookieUint64())
conn.Writer, err = zstd.NewWriter(fmt.Sprintf("%s/%s.%05d.jsonl.zst", datePath, id, conn.Sequence))
if err != nil {
return err
}
conn.writeHeader()
metrics.NewFileCount.Inc()
conn.Expiration = conn.Expiration.Add(10 * time.Minute)
conn.Sequence++
return nil
}
func (conn *Connection) writeHeader() {
msg := netlink.ArchivalRecord{
Metadata: &netlink.Metadata{
UUID: uuid.FromCookie(conn.ID.CookieUint64()),
Sequence: conn.Sequence,
StartTime: conn.StartTime,
},
}
// FIXME: Error handling
bytes, _ := json.Marshal(msg)
conn.Writer.Write(bytes)
conn.Writer.Write([]byte("\n"))
}
type stats struct {
TotalCount int64
NewCount int64
DiffCount int64
ExpiredCount int64
}
func (s *stats) IncTotalCount() {
atomic.AddInt64(&s.TotalCount, 1)
}
func (s *stats) IncNewCount() {
atomic.AddInt64(&s.NewCount, 1)
}
func (s *stats) IncDiffCount() {
atomic.AddInt64(&s.DiffCount, 1)
}
func (s *stats) IncExpiredCount() {
atomic.AddInt64(&s.ExpiredCount, 1)
}
func (s *stats) Copy() stats {
result := stats{}
result.TotalCount = atomic.LoadInt64(&s.TotalCount)
result.NewCount = atomic.LoadInt64(&s.NewCount)
result.DiffCount = atomic.LoadInt64(&s.DiffCount)
result.ExpiredCount = atomic.LoadInt64(&s.ExpiredCount)
return result
}
// TcpStats is used to save the connection stats as connection is closing.
type TcpStats struct {
Sent uint64 // BytesSent
Received uint64 // BytesReceived
}
// Saver provides functionality for saving tcpinfo diffs to connection files.
// It handles arbitrary connections, and only writes to file when the
// significant fields change. (TODO - what does "significant fields" mean).
// TODO - just export an interface, instead of the implementation.
type Saver struct {
Host string // mlabN
Pod string // 3 alpha + 2 decimal
FileAgeLimit time.Duration
MarshalChans []MarshalChan
Done *sync.WaitGroup // All marshallers will call Done on this.
Connections map[uint64]*Connection
ClosingStats map[uint64]TcpStats // BytesReceived and BytesSent for connections that are closing.
ClosingTotals TcpStats
cache *cache.Cache
stats stats
eventServer eventsocket.Server
exclude *netlink.ExcludeConfig
}
// NewSaver creates a new Saver for the given host and pod. numMarshaller controls
// how many marshalling goroutines are used to distribute the marshalling workload.
func | (host string, pod string, numMarshaller int, srv eventsocket.Server, anon anonymize.IPAnonymizer, ex *netlink.ExcludeConfig) *Saver {
m := make([]MarshalChan, 0, numMarshaller)
c := cache.NewCache()
// We start with capacity of 500. This will be reallocated as needed, but this
// is not a performance concern.
conn := make(map[uint64]*Connection, 500)
wg := &sync.WaitGroup{}
wg.Add(1)
ageLim := 10 * time.Minute
for i := 0; i < numMarshaller; i++ {
m = append(m, newMarshaller(wg, anon))
}
return &Saver{
Host: host,
Pod: pod,
FileAgeLimit: ageLim,
MarshalChans: m,
Done: wg,
Connections: conn,
ClosingStats: make(map[uint64]TcpStats, 100),
cache: c,
eventServer: srv,
exclude: ex,
}
}
// queue queues a single ArchivalRecord to the appropriate marshalling queue, based on the
// connection Cookie.
func (svr *Saver) queue(msg *netlink.ArchivalRecord) error {
idm, err := msg.RawIDM.Parse()
if err != nil {
log.Println(err)
// TODO error metric
}
cookie := idm.ID.Cookie()
if cookie == 0 {
return errors.New("Cookie = 0")
}
if len(svr.MarshalChans) < 1 {
return ErrNoMarshallers
}
q := svr.MarshalChans[int(cookie%uint64(len(svr.MarshalChans)))]
conn, ok := svr.Connections[cookie]
if !ok {
// Create a new connection for first time cookies. For late connections already
// terminating, log some info for debugging purposes.
if idm.IDiagState >= uint8(tcp.FIN_WAIT1) {
s, r := msg.GetStats()
log.Println("Starting:", msg.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), TcpStats{s, r})
}
conn = newConnection(idm, msg.Timestamp)
svr.eventServer.FlowCreated(msg.Timestamp, uuid.FromCookie(cookie), idm.ID.GetSockID())
svr.Connections[cookie] = conn
} else {
//log.Println("Diff inode:", inode)
}
if time.Now().After(conn.Expiration) && conn.Writer != nil {
q <- Task{nil, conn.Writer} // Close the previous file.
conn.Writer = nil
}
if conn.Writer == nil {
err := conn.Rotate(svr.Host, svr.Pod, svr.FileAgeLimit)
if err != nil {
return err
}
}
q <- Task{msg, conn.Writer}
return nil
}
func (svr *Saver) endConn(cookie uint64) {
svr.eventServer.FlowDeleted(time.Now(), uuid.FromCookie(cookie))
q := svr.MarshalChans[cookie%uint64(len(svr.MarshalChans))]
conn, ok := svr.Connections[cookie]
if ok && conn.Writer != nil {
q <- Task{nil, conn.Writer}
delete(svr.Connections, cookie)
}
}
// Handle a bundle of messages.
// Returns the bytes sent and received on all non-local connections.
func (svr *Saver) handleType(t time.Time, msgs []*netlink.NetlinkMessage) (uint64, uint64) {
var liveSent, liveReceived uint64
for _, msg := range msgs {
// In swap and queue, we want to track the total speed of all connections
// every second.
if msg == nil {
log.Println("Nil message")
continue
}
ar, err := netlink.MakeArchivalRecord(msg, svr.exclude)
if ar == nil {
if err != nil {
log.Println(err)
}
continue
}
ar.Timestamp = t
// Note: If GetStats shows up in profiling, might want to move to once/second code.
s, r := ar.GetStats()
liveSent += s
liveReceived += r
svr.swapAndQueue(ar)
}
return liveSent, liveReceived
}
// MessageSaverLoop runs a loop to receive batches of ArchivalRecords. Local connections
func (svr *Saver) MessageSaverLoop(readerChannel <-chan netlink.MessageBlock) {
log.Println("Starting Saver")
var reported, closed TcpStats
lastReportTime := time.Time{}.Unix()
closeLogCount := 10000
for msgs := range readerChannel {
// Handle v4 and v6 messages, and return the total bytes sent and received.
// TODO - we only need to collect these stats if this is a reporting cycle.
// NOTE: Prior to April 2020, we were not using UTC here. The servers
// are configured to use UTC time, so this should not make any difference.
s4, r4 := svr.handleType(msgs.V4Time.UTC(), msgs.V4Messages)
s6, r6 := svr.handleType(msgs.V6Time.UTC(), msgs.V6Messages)
// Note that the connections that have closed may have had traffic that
// we never see, and therefore can't account for in metrics.
residual := svr.cache.EndCycle()
// Remove all missing connections from the cache.
// Also keep a metric of the total cumulative send and receive bytes.
for cookie := range residual {
ar := residual[cookie]
var stats TcpStats
var ok bool
if !ar.HasDiagInfo() {
stats, ok = svr.ClosingStats[cookie]
if ok {
// Remove the stats from closing.
svr.ClosingTotals.Sent -= stats.Sent
svr.ClosingTotals.Received -= stats.Received
delete(svr.ClosingStats, cookie)
} else {
log.Println("Missing stats for", cookie)
}
} else {
stats.Sent, stats.Received = ar.GetStats()
}
closed.Sent += stats.Sent
closed.Received += stats.Received
if closeLogCount > 0 {
idm, err := ar.RawIDM.Parse()
if err != nil {
log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, "idm parse error", stats)
} else {
log.Println("Closed:", ar.Timestamp.Format("15:04:05.000"), cookie, tcp.State(idm.IDiagState), stats)
}
closeLogCount--
}
svr.endConn(cookie)
svr.stats.IncExpiredCount()
}
// Every second, update the total throughput for the past second.
if msgs.V4Time.Unix() > lastReportTime {
// This is the total bytes since program start.
totalSent := closed.Sent + svr.ClosingTotals.Sent + s4 + s6
totalReceived := closed.Received + svr.ClosingTotals.Received + r4 + r6
// NOTE: We are seeing occasions when total < reported. This messes up prometheus, so
// we detect that and skip reporting.
// This seems to be persistent, not just a momentary glitch. The total may drop by 500KB,
// and only recover after many seconds of gradual increases (on idle workstation).
// This workaround seems to also cure the 2<<67 reports.
// We also check for increments larger than 10x the maxSwitchSpeed.
// TODO: This can all be discarded when we are confident the bug has been fixed.
if totalSent > 10*maxSwitchSpeed/8+reported.Sent || totalSent < reported.Sent {
// Some bug in the accounting!!
log.Println("Skipping BytesSent report due to bad accounting", totalSent, reported.Sent, closed.Sent, svr.ClosingTotals.Sent, s4, s6)
if totalSent < reported.Sent {
metrics.ErrorCount.WithLabelValues("totalSent < reportedSent").Inc()
} else {
metrics.ErrorCount.WithLabelValues("totalSent-reportedSent exceeds network capacity").Inc()
}
} else {
metrics.SendRateHistogram.Observe(8 * float64(totalSent-reported.Sent))
reported.Sent = totalSent // the total bytes reported to prometheus.
}
if totalReceived > 10*maxSwitchSpeed/8+reported.Received || totalReceived < reported.Received {
// Some bug in the accounting!!
log.Println("Skipping BytesReceived report due to bad accounting", totalReceived, reported.Received, closed.Received, svr.ClosingTotals.Received, r4, r6)
if totalReceived < reported.Received {
metrics.ErrorCount.WithLabelValues("totalReceived < reportedReceived").Inc()
} else {
metrics.ErrorCount.WithLabelValues("totalReceived-reportedReceived exceeds network capacity").Inc()
}
} else {
metrics.ReceiveRateHistogram.Observe(8 * float64(totalReceived-reported.Received))
reported.Received = totalReceived // the total bytes reported to prometheus.
}
lastReportTime = msgs.V4Time.Unix()
}
}
svr.Close()
}
func (svr *Saver) swapAndQueue(pm *netlink.ArchivalRecord) {
svr.stats.IncTotalCount() // TODO fix race
old, err := svr.cache.Update(pm)
if err != nil {
// TODO metric
log.Println(err)
return
}
if old == nil {
svr.stats.IncNewCount()
metrics.SnapshotCount.Inc()
err := svr.queue(pm)
if err != nil {
log.Println(err, "Connections", len(svr.Connections))
}
} else {
pmIDM, err := pm.RawIDM.Parse()
if err != nil {
// TODO metric
log.Println(err)
return
}
if !pm.HasDiagInfo() {
// If the previous record has DiagInfo, store the send/receive stats.
// We will use them when we close the connection.
if old.HasDiagInfo() {
sOld, rOld := old.GetStats()
svr.ClosingStats[pmIDM.ID.Cookie()] = TcpStats{Sent: sOld, Received: rOld}
svr.ClosingTotals.Sent += sOld
svr.ClosingTotals.Received += rOld
log.Println("Closing:", pm.Timestamp.Format("15:04:05.000"), pmIDM.ID.Cookie(), tcp.State(pmIDM.IDiagState), TcpStats{sOld, rOld})
}
}
change, err := pm.Compare(old)
if err != nil {
// TODO metric
log.Println(err)
return
}
if change > netlink.NoMajorChange {
svr.stats.IncDiffCount()
metrics.SnapshotCount.Inc()
err := svr.queue(pm)
if err != nil {
// TODO metric
log.Println(err)
}
}
}
}
// Close shuts down all the marshallers, and waits for all files to be closed.
func (svr *Saver) Close() {
log.Println("Terminating Saver")
log.Println("Total of", len(svr.Connections), "connections active.")
for i := range svr.Connections {
svr.endConn(i)
}
log.Println("Closing Marshallers")
for i := range svr.MarshalChans {
close(svr.MarshalChans[i])
}
svr.Done.Done()
}
// LogCacheStats prints out some basic cache stats.
// TODO(https://github.com/m-lab/tcp-info/issues/32) - should also export all of these as Prometheus metrics.
func (svr *Saver) LogCacheStats(localCount, errCount int) {
stats := svr.stats.Copy() // Get a copy
log.Printf("Cache info total %d local %d same %d diff %d new %d err %d\n",
stats.TotalCount+(int64)(localCount), localCount,
stats.TotalCount-((int64)(errCount)+stats.NewCount+stats.DiffCount+(int64)(localCount)),
stats.DiffCount, stats.NewCount, errCount)
}
| NewSaver | identifier_name |
physical_plan.rs | // Copyright 2020 Andy Grove
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Ballista Physical Plan (Experimental).
//!
//! The physical plan is a serializable data structure describing how the plan will be executed.
//!
//! It differs from the logical plan in that it deals with specific implementations of operators
//! (e.g. SortMergeJoin versus BroadcastHashJoin) whereas the logical plan just deals with an
//! abstract concept of a join.
//!
//! The physical plan also accounts for partitioning and ordering of data between operators.
use std::collections::HashMap;
use std::fmt::{self, Debug};
use std::sync::Arc;
use crate::arrow::array::{
ArrayRef, Float32Builder, Float64Builder, Int16Builder, Int32Builder, Int64Builder,
Int8Builder, StringBuilder, UInt16Builder, UInt32Builder, UInt64Builder, UInt8Builder,
};
use crate::arrow::datatypes::{DataType, Schema};
use crate::arrow::record_batch::RecordBatch;
use crate::datafusion::logicalplan::Expr;
use crate::datafusion::logicalplan::LogicalPlan;
use crate::datafusion::logicalplan::Operator;
use crate::datafusion::logicalplan::ScalarValue;
use crate::distributed::scheduler::ExecutionTask;
use crate::error::{ballista_error, Result};
use crate::execution::expressions::{
add, alias, aliased_aggr, avg, col, compare, count, div, lit, max, min, mult, subtract, sum,
};
use crate::execution::operators::{
CsvScanExec, FilterExec, HashAggregateExec, InMemoryTableScanExec, ParquetScanExec,
ProjectionExec, ShuffleExchangeExec, ShuffleReaderExec,
};
use crate::distributed::executor::ExecutorConfig;
use async_trait::async_trait;
use uuid::Uuid;
/// Stream of columnar batches using futures
pub type ColumnarBatchStream = Arc<dyn ColumnarBatchIter>;
#[derive(Debug, Clone)]
pub struct ExecutorMeta {
pub id: String,
pub host: String,
pub port: usize,
}
/// Async iterator over a stream of columnar batches
pub trait ColumnarBatchIter {
/// The schema of the iterator's batches
// In principle, this should not be needed as `ColumnarBatch` has a schema.
// However, the stream may be empty
fn schema(&self) -> Arc<Schema>;
/// Get the next batch from the stream, or None if the stream has ended
fn next(&self) -> Result<Option<ColumnarBatch>>;
/// Notify the iterator that no more results will be fetched, so that resources
/// can be freed immediately.
fn close(&self) {}
}
#[async_trait]
pub trait ExecutionContext: Send + Sync {
async fn get_executor_ids(&self) -> Result<Vec<ExecutorMeta>>;
async fn execute_task(
&self,
executor_id: ExecutorMeta,
task: ExecutionTask,
) -> Result<ShuffleId>;
async fn read_shuffle(&self, shuffle_id: &ShuffleId) -> Result<Vec<ColumnarBatch>>;
fn config(&self) -> ExecutorConfig;
}
/// Base trait for all operators
#[async_trait]
pub trait ExecutionPlan: Send + Sync {
/// Specified the output schema of this operator.
fn schema(&self) -> Arc<Schema>;
/// Specifies how data is partitioned across different nodes in the cluster
fn output_partitioning(&self) -> Partitioning {
Partitioning::UnknownPartitioning(0)
}
/// Specifies the data distribution requirements of all the children for this operator
fn required_child_distribution(&self) -> Distribution {
Distribution::UnspecifiedDistribution
}
/// Specifies how data is ordered in each partition
fn output_ordering(&self) -> Option<Vec<SortOrder>> {
None
}
/// Specifies the data distribution requirements of all the children for this operator
fn required_child_ordering(&self) -> Option<Vec<Vec<SortOrder>>> {
None
}
/// Get the children of this plan. Leaf nodes have no children. Unary nodes have a single
/// child. Binary nodes have two children.
fn children(&self) -> Vec<Arc<PhysicalPlan>> {
vec![]
}
/// Runs this query against one partition returning a stream of columnar batches
async fn execute(
&self,
ctx: Arc<dyn ExecutionContext>,
partition_index: usize,
) -> Result<ColumnarBatchStream>;
}
pub trait Expression: Send + Sync + Debug {
/// Get the data type of this expression, given the schema of the input
fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
/// Decide whether this expression is nullable, given the schema of the input
fn nullable(&self, input_schema: &Schema) -> Result<bool>;
/// Evaluate an expression against a ColumnarBatch to produce a scalar or columnar result.
fn evaluate(&self, input: &ColumnarBatch) -> Result<ColumnarValue>;
}
/// Aggregate expression that can be evaluated against a RecordBatch
pub trait AggregateExpr: Send + Sync + Debug {
/// Get the data type of this expression, given the schema of the input
fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
/// Decide whether this expression is nullable, given the schema of the input
fn nullable(&self, input_schema: &Schema) -> Result<bool>;
/// Evaluate the expression being aggregated
fn evaluate_input(&self, batch: &ColumnarBatch) -> Result<ColumnarValue>;
/// Create an accumulator for this aggregate expression
fn create_accumulator(&self, mode: &AggregateMode) -> Box<dyn Accumulator>;
}
/// Aggregate accumulator
pub trait Accumulator: Send + Sync {
/// Update the accumulator based on a columnar value
fn accumulate(&mut self, value: &ColumnarValue) -> Result<()>;
/// Get the final value for the accumulator
fn get_value(&self) -> Result<Option<ScalarValue>>;
}
/// Action that can be sent to an executor
#[derive(Debug, Clone)]
pub enum Action {
/// Execute the query with DataFusion and return the results
InteractiveQuery {
plan: LogicalPlan,
settings: HashMap<String, String>,
},
/// Execute a query and store the results in memory
Execute(ExecutionTask),
/// Collect a shuffle
FetchShuffle(ShuffleId),
}
pub type MaybeColumnarBatch = Result<Option<ColumnarBatch>>;
/// Batch of columnar data.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ColumnarBatch {
schema: Arc<Schema>,
columns: HashMap<String, ColumnarValue>,
}
impl ColumnarBatch {
pub fn from_arrow(batch: &RecordBatch) -> Self {
let columns = batch
.columns()
.iter()
.enumerate()
.map(|(i, array)| {
(
batch.schema().field(i).name().clone(),
ColumnarValue::Columnar(array.clone()),
)
})
.collect();
Self {
schema: batch.schema(),
columns,
}
}
pub fn from_values(values: &[ColumnarValue], schema: &Schema) -> Self {
let columns = schema
.fields()
.iter()
.enumerate()
.map(|(i, f)| (f.name().clone(), values[i].clone()))
.collect();
Self {
schema: Arc::new(schema.clone()),
columns,
}
}
pub fn to_arrow(&self) -> Result<RecordBatch> {
let arrays = self
.schema
.fields()
.iter()
.map(|c| {
match self.column(c.name())? {
ColumnarValue::Columnar(array) => Ok(array.clone()),
ColumnarValue::Scalar(_, _) => {
// note that this can be implemented easily if needed
Err(ballista_error("Cannot convert scalar value to Arrow array"))
}
}
})
.collect::<Result<Vec<_>>>()?;
Ok(RecordBatch::try_new(self.schema.clone(), arrays)?)
}
pub fn schema(&self) -> Arc<Schema> {
self.schema.clone()
}
pub fn num_columns(&self) -> usize {
self.columns.len()
}
pub fn num_rows(&self) -> usize {
self.columns[self.schema.field(0).name()].len()
}
pub fn column(&self, name: &str) -> Result<&ColumnarValue> {
Ok(&self.columns[name]) |
pub fn memory_size(&self) -> usize {
self.columns.values().map(|c| c.memory_size()).sum()
}
}
macro_rules! build_literal_array {
($LEN:expr, $BUILDER:ident, $VALUE:expr) => {{
let mut builder = $BUILDER::new($LEN);
for _ in 0..$LEN {
builder.append_value($VALUE)?;
}
Ok(Arc::new(builder.finish()))
}};
}
/// A columnar value can either be a scalar value or an Arrow array.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum ColumnarValue {
Scalar(ScalarValue, usize),
Columnar(ArrayRef),
}
impl ColumnarValue {
pub fn len(&self) -> usize {
match self {
ColumnarValue::Scalar(_, n) => *n,
ColumnarValue::Columnar(array) => array.len(),
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn data_type(&self) -> &DataType {
match self {
ColumnarValue::Columnar(array) => array.data_type(),
ColumnarValue::Scalar(value, _) => match value {
ScalarValue::UInt8(_) => &DataType::UInt8,
ScalarValue::UInt16(_) => &DataType::UInt16,
ScalarValue::UInt32(_) => &DataType::UInt32,
ScalarValue::UInt64(_) => &DataType::UInt64,
ScalarValue::Int8(_) => &DataType::Int8,
ScalarValue::Int16(_) => &DataType::Int16,
ScalarValue::Int32(_) => &DataType::Int32,
ScalarValue::Int64(_) => &DataType::Int64,
ScalarValue::Float32(_) => &DataType::Float32,
ScalarValue::Float64(_) => &DataType::Float64,
_ => unimplemented!(),
},
}
}
pub fn to_arrow(&self) -> Result<ArrayRef> {
match self {
ColumnarValue::Columnar(array) => Ok(array.clone()),
ColumnarValue::Scalar(value, n) => match value {
ScalarValue::Int8(value) => build_literal_array!(*n, Int8Builder, *value),
ScalarValue::Int16(value) => build_literal_array!(*n, Int16Builder, *value),
ScalarValue::Int32(value) => build_literal_array!(*n, Int32Builder, *value),
ScalarValue::Int64(value) => build_literal_array!(*n, Int64Builder, *value),
ScalarValue::UInt8(value) => build_literal_array!(*n, UInt8Builder, *value),
ScalarValue::UInt16(value) => build_literal_array!(*n, UInt16Builder, *value),
ScalarValue::UInt32(value) => build_literal_array!(*n, UInt32Builder, *value),
ScalarValue::UInt64(value) => build_literal_array!(*n, UInt64Builder, *value),
ScalarValue::Float32(value) => build_literal_array!(*n, Float32Builder, *value),
ScalarValue::Float64(value) => build_literal_array!(*n, Float64Builder, *value),
ScalarValue::Utf8(value) => build_literal_array!(*n, StringBuilder, value),
other => Err(ballista_error(&format!(
"Unsupported literal type {:?}",
other
))),
},
}
}
pub fn memory_size(&self) -> usize {
//TODO delegate to Arrow once https://issues.apache.org/jira/browse/ARROW-9582 is
// implemented
match self {
ColumnarValue::Columnar(array) => {
let mut size = 0;
for buffer in array.data().buffers() {
size += buffer.capacity();
}
size
}
_ => 0,
}
}
}
/// Enumeration wrapping physical plan structs so that they can be represented in a tree easily
/// and processed using pattern matching
///
/// Each variant holds its operator behind an `Arc` so plan nodes can be
/// shared cheaply between the plan tree and executing tasks.
#[derive(Clone)]
pub enum PhysicalPlan {
    /// Projection.
    Projection(Arc<ProjectionExec>),
    /// Filter a.k.a predicate.
    Filter(Arc<FilterExec>),
    /// Hash aggregate
    HashAggregate(Arc<HashAggregateExec>),
    /// Performs a shuffle that will result in the desired partitioning.
    ShuffleExchange(Arc<ShuffleExchangeExec>),
    /// Reads results from a ShuffleExchange
    ShuffleReader(Arc<ShuffleReaderExec>),
    /// Scans a partitioned Parquet data source
    ParquetScan(Arc<ParquetScanExec>),
    /// Scans a partitioned CSV data source
    CsvScan(Arc<CsvScanExec>),
    /// Scans an in-memory table
    InMemoryTableScan(Arc<InMemoryTableScanExec>),
}
impl PhysicalPlan {
    /// Returns the wrapped operator as a dynamically-typed `ExecutionPlan`.
    pub fn as_execution_plan(&self) -> Arc<dyn ExecutionPlan> {
        match self {
            Self::Projection(exec) => exec.clone(),
            Self::Filter(exec) => exec.clone(),
            Self::HashAggregate(exec) => exec.clone(),
            Self::ParquetScan(exec) => exec.clone(),
            Self::CsvScan(exec) => exec.clone(),
            Self::ShuffleExchange(exec) => exec.clone(),
            Self::ShuffleReader(exec) => exec.clone(),
            Self::InMemoryTableScan(exec) => exec.clone(),
        }
    }
    /// Rebuilds this node with the given children.
    ///
    /// NOTE(review): only implemented for `HashAggregate`; every other
    /// variant currently panics via `unimplemented!`.
    pub fn with_new_children(&self, new_children: Vec<Arc<PhysicalPlan>>) -> PhysicalPlan {
        match self {
            Self::HashAggregate(exec) => {
                Self::HashAggregate(Arc::new(exec.with_new_children(new_children)))
            }
            _ => unimplemented!(),
        }
    }
    /// Writes a one-line description of this node at the given indent level,
    /// then recurses into children with `indent + 1` for tree-shaped output.
    fn fmt_with_indent(&self, f: &mut fmt::Formatter, indent: usize) -> fmt::Result {
        if indent > 0 {
            writeln!(f)?;
            for _ in 0..indent {
                write!(f, " ")?;
            }
        }
        match self {
            PhysicalPlan::CsvScan(exec) => write!(
                f,
                "CsvScan: {:?}, partitions={}; projection={:?}",
                exec.path,
                exec.filenames.len(),
                exec.projection
            ),
            PhysicalPlan::ParquetScan(exec) => write!(
                f,
                "ParquetScan: {:?}, partitions={}; projection={:?}",
                exec.path,
                exec.filenames.len(),
                exec.projection
            ),
            PhysicalPlan::HashAggregate(exec) => {
                write!(
                    f,
                    "HashAggregate: mode={:?}, groupExpr={:?}, aggrExpr={:?}",
                    exec.mode, exec.group_expr, exec.aggr_expr
                )?;
                exec.child.fmt_with_indent(f, indent + 1)
            }
            PhysicalPlan::ShuffleExchange(exec) => {
                write!(f, "Shuffle: {:?}", exec.as_ref().output_partitioning())?;
                exec.as_ref().child.fmt_with_indent(f, indent + 1)
            }
            PhysicalPlan::ShuffleReader(exec) => {
                write!(f, "ShuffleReader: shuffle_id={:?}", exec.shuffle_id)
            }
            // NOTE(review): Projection/Filter do not recurse into children here.
            PhysicalPlan::Projection(_exec) => write!(f, "Projection:"),
            PhysicalPlan::Filter(_exec) => write!(f, "Filter:"),
            // NOTE(review): InMemoryTableScan falls through to this placeholder.
            _ => write!(f, "???"),
        }
    }
}
impl fmt::Debug for PhysicalPlan {
    /// Debug output renders the whole plan tree, one indented line per node.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.fmt_with_indent(f, 0)
    }
}
/// Data distribution an operator may require of its children's output.
#[derive(Debug, Clone)]
pub enum Distribution {
    UnspecifiedDistribution,
    SinglePartition,
    BroadcastDistribution,
    ClusteredDistribution {
        required_num_partitions: usize,
        clustering: Vec<Expr>,
    },
    HashClusteredDistribution {
        required_num_partitions: usize,
        clustering: Vec<Expr>,
    },
    OrderedDistribution(Vec<SortOrder>),
}
/// Supported join types (inner joins only, so far).
#[derive(Debug, Clone)]
pub enum JoinType {
    Inner,
}
/// Which side of a join the build (hash) table is constructed from.
#[derive(Debug, Clone)]
pub enum BuildSide {
    BuildLeft,
    BuildRight,
}
/// Sort direction for an ordering expression.
#[derive(Debug, Clone)]
pub enum SortDirection {
    Ascending,
    Descending,
}
/// Aggregate operator modes.
#[derive(Debug, Clone)]
pub enum AggregateMode {
    /// Partial aggregation that can run in parallel per partition
    Partial,
    /// Perform final aggregation on results of partial aggregation. For example, this would
    /// produce the SUM of SUMs, or the SUMs of COUNTs.
    Final,
    /// Perform complete aggregation in one pass. This is used when there is only a single
    /// partition to operate on.
    Complete,
}
/// A single ordering term: expression, direction and NULL placement.
#[derive(Debug, Clone)]
pub struct SortOrder {
    child: Arc<Expr>,
    direction: SortDirection,
    null_ordering: NullOrdering,
}
/// Placement of NULL values relative to non-NULL values in a sort.
#[derive(Debug, Clone)]
pub enum NullOrdering {
    NullsFirst,
    NullsLast,
}
/// Partitioning schemes supported by operators.
#[derive(Debug, Clone)]
pub enum Partitioning {
    UnknownPartitioning(usize),
    HashPartitioning(usize, Vec<Arc<Expr>>),
}
impl Partitioning {
    /// Number of partitions produced under this scheme.
    pub fn partition_count(&self) -> usize {
        use Partitioning::*;
        match self {
            UnknownPartitioning(n) => *n,
            HashPartitioning(n, _) => *n,
        }
    }
}
/// Unique identifier for the output shuffle partition of an operator.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ShuffleId {
    pub(crate) job_uuid: Uuid,
    pub(crate) stage_id: usize,
    pub(crate) partition_id: usize,
}
impl ShuffleId {
    /// Creates an identifier for `(job, stage, partition)`.
    pub fn new(job_uuid: Uuid, stage_id: usize, partition_id: usize) -> Self {
        Self {
            job_uuid,
            stage_id,
            partition_id,
        }
    }
}
/// Placeholder for the physical location of a shuffle partition (not yet implemented).
pub struct ShuffleLocation {}
/// Translate a logical expression into a physical expression that can be evaluated against
/// input data.
///
/// Supports aliases, column references, literals, and binary arithmetic /
/// comparison operators; anything else yields an error.
pub fn compile_expression(expr: &Expr, input: &Schema) -> Result<Arc<dyn Expression>> {
    match expr {
        Expr::Alias(expr, name) => Ok(alias(compile_expression(expr, input)?, name)),
        Expr::Column(name) => Ok(col(name)),
        Expr::Literal(value) => Ok(lit(value.to_owned())),
        Expr::BinaryExpr { left, op, right } => {
            // Compile both operands first, then dispatch on the operator.
            let l = compile_expression(left, input)?;
            let r = compile_expression(right, input)?;
            match op {
                Operator::Plus => Ok(add(l, r)),
                Operator::Minus => Ok(subtract(l, r)),
                Operator::Multiply => Ok(mult(l, r)),
                Operator::Divide => Ok(div(l, r)),
                Operator::Lt
                | Operator::LtEq
                | Operator::Gt
                | Operator::GtEq
                | Operator::Eq
                | Operator::NotEq => Ok(compare(l, op, r)),
                other => Err(ballista_error(&format!(
                    "Unsupported binary operator in compile_expression {:?}",
                    other
                ))),
            }
        }
        other => Err(ballista_error(&format!(
            "Unsupported expression in compile_expression {:?}",
            other
        ))),
    }
}
/// Translate one or more logical expressions into physical expressions that can be evaluated
/// against input data.
///
/// Stops at the first expression that fails to compile and returns its error.
pub fn compile_expressions(expr: &[Expr], input: &Schema) -> Result<Vec<Arc<dyn Expression>>> {
    let mut compiled = Vec::with_capacity(expr.len());
    for e in expr {
        compiled.push(compile_expression(e, input)?);
    }
    Ok(compiled)
}
/// Translate a logical aggregate expression into a physical expression that can be evaluated
/// against input data.
///
/// Recognized functions (case-insensitive): avg, count, max, min, sum.
/// Only the first argument of the aggregate function is used.
pub fn compile_aggregate_expression(
    expr: &Expr,
    input_schema: &Schema,
) -> Result<Arc<dyn AggregateExpr>> {
    match expr {
        Expr::Alias(expr, alias) => Ok(aliased_aggr(
            compile_aggregate_expression(expr, input_schema)?,
            alias,
        )),
        Expr::AggregateFunction { name, args, .. } => match name.to_lowercase().as_ref() {
            "avg" => Ok(avg(compile_expression(&args[0], input_schema)?)),
            "count" => Ok(count(compile_expression(&args[0], input_schema)?)),
            "max" => Ok(max(compile_expression(&args[0], input_schema)?)),
            "min" => Ok(min(compile_expression(&args[0], input_schema)?)),
            "sum" => Ok(sum(compile_expression(&args[0], input_schema)?)),
            other => Err(ballista_error(&format!(
                "Unsupported aggregate function in compile_aggregate_expression '{}'",
                other
            ))),
        },
        other => Err(ballista_error(&format!(
            "Unsupported aggregate expression in compile_aggregate_expression {:?}",
            other
        ))),
    }
}
/// Translate one or more logical aggregate expressions into physical expressions that can be evaluated
/// against input data.
///
/// Short-circuits on the first expression that fails to compile.
pub fn compile_aggregate_expressions(
    expr: &[Expr],
    input: &Schema,
) -> Result<Vec<Arc<dyn AggregateExpr>>> {
    expr.iter()
        .map(|e| compile_aggregate_expression(e, input))
        .collect()
} | } | random_line_split |
physical_plan.rs | // Copyright 2020 Andy Grove
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Ballista Physical Plan (Experimental).
//!
//! The physical plan is a serializable data structure describing how the plan will be executed.
//!
//! It differs from the logical plan in that it deals with specific implementations of operators
//! (e.g. SortMergeJoin versus BroadcastHashJoin) whereas the logical plan just deals with an
//! abstract concept of a join.
//!
//! The physical plan also accounts for partitioning and ordering of data between operators.
use std::collections::HashMap;
use std::fmt::{self, Debug};
use std::sync::Arc;
use crate::arrow::array::{
ArrayRef, Float32Builder, Float64Builder, Int16Builder, Int32Builder, Int64Builder,
Int8Builder, StringBuilder, UInt16Builder, UInt32Builder, UInt64Builder, UInt8Builder,
};
use crate::arrow::datatypes::{DataType, Schema};
use crate::arrow::record_batch::RecordBatch;
use crate::datafusion::logicalplan::Expr;
use crate::datafusion::logicalplan::LogicalPlan;
use crate::datafusion::logicalplan::Operator;
use crate::datafusion::logicalplan::ScalarValue;
use crate::distributed::scheduler::ExecutionTask;
use crate::error::{ballista_error, Result};
use crate::execution::expressions::{
add, alias, aliased_aggr, avg, col, compare, count, div, lit, max, min, mult, subtract, sum,
};
use crate::execution::operators::{
CsvScanExec, FilterExec, HashAggregateExec, InMemoryTableScanExec, ParquetScanExec,
ProjectionExec, ShuffleExchangeExec, ShuffleReaderExec,
};
use crate::distributed::executor::ExecutorConfig;
use async_trait::async_trait;
use uuid::Uuid;
/// Stream of columnar batches using futures
pub type ColumnarBatchStream = Arc<dyn ColumnarBatchIter>;
/// Identity and network location of an executor in the cluster.
#[derive(Debug, Clone)]
pub struct ExecutorMeta {
    pub id: String,
    pub host: String,
    pub port: usize,
}
/// Async iterator over a stream of columnar batches
pub trait ColumnarBatchIter {
    /// The schema of the iterator's batches
    // In principle, this should not be needed as `ColumnarBatch` has a schema.
    // However, the stream may be empty
    fn schema(&self) -> Arc<Schema>;
    /// Get the next batch from the stream, or None if the stream has ended
    fn next(&self) -> Result<Option<ColumnarBatch>>;
    /// Notify the iterator that no more results will be fetched, so that resources
    /// can be freed immediately.
    fn close(&self) {}
}
/// Cluster services available to operators at execution time: executor
/// discovery, remote task execution, and shuffle reads.
#[async_trait]
pub trait ExecutionContext: Send + Sync {
    async fn get_executor_ids(&self) -> Result<Vec<ExecutorMeta>>;
    async fn execute_task(
        &self,
        executor_id: ExecutorMeta,
        task: ExecutionTask,
    ) -> Result<ShuffleId>;
    async fn read_shuffle(&self, shuffle_id: &ShuffleId) -> Result<Vec<ColumnarBatch>>;
    fn config(&self) -> ExecutorConfig;
}
/// Base trait for all operators
#[async_trait]
pub trait ExecutionPlan: Send + Sync {
    /// Specified the output schema of this operator.
    fn schema(&self) -> Arc<Schema>;
    /// Specifies how data is partitioned across different nodes in the cluster
    fn output_partitioning(&self) -> Partitioning {
        Partitioning::UnknownPartitioning(0)
    }
    /// Specifies the data distribution requirements of all the children for this operator
    fn required_child_distribution(&self) -> Distribution {
        Distribution::UnspecifiedDistribution
    }
    /// Specifies how data is ordered in each partition
    fn output_ordering(&self) -> Option<Vec<SortOrder>> {
        None
    }
    /// Specifies the data distribution requirements of all the children for this operator
    fn required_child_ordering(&self) -> Option<Vec<Vec<SortOrder>>> {
        None
    }
    /// Get the children of this plan. Leaf nodes have no children. Unary nodes have a single
    /// child. Binary nodes have two children.
    fn children(&self) -> Vec<Arc<PhysicalPlan>> {
        vec![]
    }
    /// Runs this query against one partition returning a stream of columnar batches
    async fn execute(
        &self,
        ctx: Arc<dyn ExecutionContext>,
        partition_index: usize,
    ) -> Result<ColumnarBatchStream>;
}
/// A compiled scalar expression evaluated row-wise against a batch.
pub trait Expression: Send + Sync + Debug {
    /// Get the data type of this expression, given the schema of the input
    fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
    /// Decide whether this expression is nullable, given the schema of the input
    fn nullable(&self, input_schema: &Schema) -> Result<bool>;
    /// Evaluate an expression against a ColumnarBatch to produce a scalar or columnar result.
    fn evaluate(&self, input: &ColumnarBatch) -> Result<ColumnarValue>;
}
/// Aggregate expression that can be evaluated against a RecordBatch
pub trait AggregateExpr: Send + Sync + Debug {
    /// Get the data type of this expression, given the schema of the input
    fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
    /// Decide whether this expression is nullable, given the schema of the input
    fn nullable(&self, input_schema: &Schema) -> Result<bool>;
    /// Evaluate the expression being aggregated
    fn evaluate_input(&self, batch: &ColumnarBatch) -> Result<ColumnarValue>;
    /// Create an accumulator for this aggregate expression
    fn create_accumulator(&self, mode: &AggregateMode) -> Box<dyn Accumulator>;
}
/// Aggregate accumulator
pub trait Accumulator: Send + Sync {
    /// Update the accumulator based on a columnar value
    fn accumulate(&mut self, value: &ColumnarValue) -> Result<()>;
    /// Get the final value for the accumulator
    fn get_value(&self) -> Result<Option<ScalarValue>>;
}
/// Action that can be sent to an executor
#[derive(Debug, Clone)]
pub enum Action {
    /// Execute the query with DataFusion and return the results
    InteractiveQuery {
        plan: LogicalPlan,
        settings: HashMap<String, String>,
    },
    /// Execute a query and store the results in memory
    Execute(ExecutionTask),
    /// Collect a shuffle
    FetchShuffle(ShuffleId),
}
/// Result type for "next batch or end of stream" style APIs.
pub type MaybeColumnarBatch = Result<Option<ColumnarBatch>>;
/// Batch of columnar data.
///
/// Columns are keyed by field name; `schema` defines their order and types.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ColumnarBatch {
    schema: Arc<Schema>,
    columns: HashMap<String, ColumnarValue>,
}
impl ColumnarBatch {
    /// Builds a batch from an Arrow `RecordBatch`, keying each column by its
    /// field name.
    pub fn from_arrow(batch: &RecordBatch) -> Self {
        let columns = batch
            .columns()
            .iter()
            .enumerate()
            .map(|(i, array)| {
                (
                    batch.schema().field(i).name().clone(),
                    ColumnarValue::Columnar(array.clone()),
                )
            })
            .collect();
        Self {
            schema: batch.schema(),
            columns,
        }
    }
    /// Builds a batch from pre-computed columnar values; `values` must be
    /// positionally aligned with `schema.fields()`.
    pub fn from_values(values: &[ColumnarValue], schema: &Schema) -> Self {
        let columns = schema
            .fields()
            .iter()
            .enumerate()
            .map(|(i, f)| (f.name().clone(), values[i].clone()))
            .collect();
        Self {
            schema: Arc::new(schema.clone()),
            columns,
        }
    }
    /// Converts the batch back into an Arrow `RecordBatch`. Errors if a column
    /// is missing or is a scalar (scalar expansion is not implemented here).
    pub fn to_arrow(&self) -> Result<RecordBatch> {
        let arrays = self
            .schema
            .fields()
            .iter()
            .map(|c| {
                match self.column(c.name())? {
                    ColumnarValue::Columnar(array) => Ok(array.clone()),
                    ColumnarValue::Scalar(_, _) => {
                        // note that this can be implemented easily if needed
                        Err(ballista_error("Cannot convert scalar value to Arrow array"))
                    }
                }
            })
            .collect::<Result<Vec<_>>>()?;
        Ok(RecordBatch::try_new(self.schema.clone(), arrays)?)
    }
    /// Returns the schema shared by all columns of this batch.
    pub fn schema(&self) -> Arc<Schema> {
        self.schema.clone()
    }
    /// Number of columns in the batch.
    pub fn num_columns(&self) -> usize {
        self.columns.len()
    }
    /// Number of rows, taken from the first column.
    ///
    /// NOTE(review): panics if the schema has no fields — assumed non-empty;
    /// confirm against callers.
    pub fn num_rows(&self) -> usize {
        self.columns[self.schema.field(0).name()].len()
    }
    /// Looks up a column by name.
    ///
    /// Fix: previously indexed the `HashMap` directly (`&self.columns[name]`),
    /// which panics on a missing key even though the signature promises a
    /// `Result`; now returns a descriptive error instead.
    pub fn column(&self, name: &str) -> Result<&ColumnarValue> {
        self.columns
            .get(name)
            .ok_or_else(|| ballista_error(&format!("No column named '{}'", name)))
    }
    /// Approximate memory footprint: sum of all column sizes in bytes.
    pub fn memory_size(&self) -> usize {
        self.columns.values().map(|c| c.memory_size()).sum()
    }
}
/// Expands a scalar into an Arrow array of `$LEN` repeated `$VALUE` elements
/// using the given Arrow `$BUILDER` type; yields `Result<ArrayRef>`.
macro_rules! build_literal_array {
    ($LEN:expr, $BUILDER:ident, $VALUE:expr) => {{
        let mut builder = $BUILDER::new($LEN);
        for _ in 0..$LEN {
            builder.append_value($VALUE)?;
        }
        Ok(Arc::new(builder.finish()))
    }};
}
/// A columnar value can either be a scalar value or an Arrow array.
///
/// The `usize` on `Scalar` is the logical row count the scalar stands for.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum ColumnarValue {
    Scalar(ScalarValue, usize),
    Columnar(ArrayRef),
}
impl ColumnarValue {
    /// Logical row count: the array length, or the scalar's repeat count.
    pub fn len(&self) -> usize {
        match self {
            ColumnarValue::Scalar(_, n) => *n,
            ColumnarValue::Columnar(array) => array.len(),
        }
    }
    /// True when this value represents zero rows.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Arrow data type of the value.
    ///
    /// Fix: `Utf8` scalars previously fell into the `unimplemented!` arm even
    /// though `to_arrow` supports them (via `StringBuilder`); they now report
    /// `DataType::Utf8`. Other unsupported scalar types still panic.
    pub fn data_type(&self) -> &DataType {
        match self {
            ColumnarValue::Columnar(array) => array.data_type(),
            ColumnarValue::Scalar(value, _) => match value {
                ScalarValue::UInt8(_) => &DataType::UInt8,
                ScalarValue::UInt16(_) => &DataType::UInt16,
                ScalarValue::UInt32(_) => &DataType::UInt32,
                ScalarValue::UInt64(_) => &DataType::UInt64,
                ScalarValue::Int8(_) => &DataType::Int8,
                ScalarValue::Int16(_) => &DataType::Int16,
                ScalarValue::Int32(_) => &DataType::Int32,
                ScalarValue::Int64(_) => &DataType::Int64,
                ScalarValue::Float32(_) => &DataType::Float32,
                ScalarValue::Float64(_) => &DataType::Float64,
                ScalarValue::Utf8(_) => &DataType::Utf8,
                _ => unimplemented!(),
            },
        }
    }
    /// Materializes this value as an Arrow array. A columnar value is cloned
    /// cheaply; a scalar is expanded to `n` repeated elements.
    pub fn to_arrow(&self) -> Result<ArrayRef> {
        match self {
            ColumnarValue::Columnar(array) => Ok(array.clone()),
            ColumnarValue::Scalar(value, n) => match value {
                ScalarValue::Int8(value) => build_literal_array!(*n, Int8Builder, *value),
                ScalarValue::Int16(value) => build_literal_array!(*n, Int16Builder, *value),
                ScalarValue::Int32(value) => build_literal_array!(*n, Int32Builder, *value),
                ScalarValue::Int64(value) => build_literal_array!(*n, Int64Builder, *value),
                ScalarValue::UInt8(value) => build_literal_array!(*n, UInt8Builder, *value),
                ScalarValue::UInt16(value) => build_literal_array!(*n, UInt16Builder, *value),
                ScalarValue::UInt32(value) => build_literal_array!(*n, UInt32Builder, *value),
                ScalarValue::UInt64(value) => build_literal_array!(*n, UInt64Builder, *value),
                ScalarValue::Float32(value) => build_literal_array!(*n, Float32Builder, *value),
                ScalarValue::Float64(value) => build_literal_array!(*n, Float64Builder, *value),
                ScalarValue::Utf8(value) => build_literal_array!(*n, StringBuilder, value),
                other => Err(ballista_error(&format!(
                    "Unsupported literal type {:?}",
                    other
                ))),
            },
        }
    }
    /// Approximate memory footprint in bytes: the sum of the capacities of
    /// the underlying Arrow buffers. Scalars report 0.
    pub fn memory_size(&self) -> usize {
        //TODO delegate to Arrow once https://issues.apache.org/jira/browse/ARROW-9582 is
        // implemented
        match self {
            ColumnarValue::Columnar(array) => {
                let mut size = 0;
                for buffer in array.data().buffers() {
                    size += buffer.capacity();
                }
                size
            }
            _ => 0,
        }
    }
}
/// Enumeration wrapping physical plan structs so that they can be represented in a tree easily
/// and processed using pattern matching
///
/// Each variant holds its operator behind an `Arc` so plan nodes can be
/// shared cheaply between the plan tree and executing tasks.
#[derive(Clone)]
pub enum PhysicalPlan {
    /// Projection.
    Projection(Arc<ProjectionExec>),
    /// Filter a.k.a predicate.
    Filter(Arc<FilterExec>),
    /// Hash aggregate
    HashAggregate(Arc<HashAggregateExec>),
    /// Performs a shuffle that will result in the desired partitioning.
    ShuffleExchange(Arc<ShuffleExchangeExec>),
    /// Reads results from a ShuffleExchange
    ShuffleReader(Arc<ShuffleReaderExec>),
    /// Scans a partitioned Parquet data source
    ParquetScan(Arc<ParquetScanExec>),
    /// Scans a partitioned CSV data source
    CsvScan(Arc<CsvScanExec>),
    /// Scans an in-memory table
    InMemoryTableScan(Arc<InMemoryTableScanExec>),
}
impl PhysicalPlan {
    /// Returns the wrapped operator as a dynamically-typed `ExecutionPlan`.
    pub fn as_execution_plan(&self) -> Arc<dyn ExecutionPlan> {
        match self {
            Self::Projection(exec) => exec.clone(),
            Self::Filter(exec) => exec.clone(),
            Self::HashAggregate(exec) => exec.clone(),
            Self::ParquetScan(exec) => exec.clone(),
            Self::CsvScan(exec) => exec.clone(),
            Self::ShuffleExchange(exec) => exec.clone(),
            Self::ShuffleReader(exec) => exec.clone(),
            Self::InMemoryTableScan(exec) => exec.clone(),
        }
    }
    /// Rebuilds this node with the given children.
    ///
    /// NOTE(review): only implemented for `HashAggregate`; every other
    /// variant currently panics via `unimplemented!`.
    pub fn with_new_children(&self, new_children: Vec<Arc<PhysicalPlan>>) -> PhysicalPlan {
        match self {
            Self::HashAggregate(exec) => {
                Self::HashAggregate(Arc::new(exec.with_new_children(new_children)))
            }
            _ => unimplemented!(),
        }
    }
    /// Writes a one-line description of this node at the given indent level,
    /// then recurses into children with `indent + 1` for tree-shaped output.
    ///
    /// Fix: `InMemoryTableScan` previously fell through to a `_ => "???"`
    /// catch-all; it now has an explicit arm and the match is exhaustive, so
    /// the compiler flags any future variant that is forgotten here.
    fn fmt_with_indent(&self, f: &mut fmt::Formatter, indent: usize) -> fmt::Result {
        if indent > 0 {
            writeln!(f)?;
            for _ in 0..indent {
                write!(f, " ")?;
            }
        }
        match self {
            PhysicalPlan::CsvScan(exec) => write!(
                f,
                "CsvScan: {:?}, partitions={}; projection={:?}",
                exec.path,
                exec.filenames.len(),
                exec.projection
            ),
            PhysicalPlan::ParquetScan(exec) => write!(
                f,
                "ParquetScan: {:?}, partitions={}; projection={:?}",
                exec.path,
                exec.filenames.len(),
                exec.projection
            ),
            PhysicalPlan::HashAggregate(exec) => {
                write!(
                    f,
                    "HashAggregate: mode={:?}, groupExpr={:?}, aggrExpr={:?}",
                    exec.mode, exec.group_expr, exec.aggr_expr
                )?;
                exec.child.fmt_with_indent(f, indent + 1)
            }
            PhysicalPlan::ShuffleExchange(exec) => {
                write!(f, "Shuffle: {:?}", exec.as_ref().output_partitioning())?;
                exec.as_ref().child.fmt_with_indent(f, indent + 1)
            }
            PhysicalPlan::ShuffleReader(exec) => {
                write!(f, "ShuffleReader: shuffle_id={:?}", exec.shuffle_id)
            }
            // NOTE(review): Projection/Filter do not recurse into children here.
            PhysicalPlan::Projection(_exec) => write!(f, "Projection:"),
            PhysicalPlan::Filter(_exec) => write!(f, "Filter:"),
            PhysicalPlan::InMemoryTableScan(_exec) => write!(f, "InMemoryTableScan:"),
        }
    }
}
impl fmt::Debug for PhysicalPlan {
    /// Debug output renders the whole plan tree, one indented line per node.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.fmt_with_indent(f, 0)
    }
}
/// Data distribution an operator may require of its children's output.
#[derive(Debug, Clone)]
pub enum Distribution {
    UnspecifiedDistribution,
    SinglePartition,
    BroadcastDistribution,
    ClusteredDistribution {
        required_num_partitions: usize,
        clustering: Vec<Expr>,
    },
    HashClusteredDistribution {
        required_num_partitions: usize,
        clustering: Vec<Expr>,
    },
    OrderedDistribution(Vec<SortOrder>),
}
/// Supported join types (inner joins only, so far).
#[derive(Debug, Clone)]
pub enum JoinType {
    Inner,
}
/// Which side of a join the build (hash) table is constructed from.
#[derive(Debug, Clone)]
pub enum BuildSide {
    BuildLeft,
    BuildRight,
}
/// Sort direction for an ordering expression.
#[derive(Debug, Clone)]
pub enum SortDirection {
    Ascending,
    Descending,
}
/// Aggregate operator modes.
#[derive(Debug, Clone)]
pub enum AggregateMode {
    /// Partial aggregation that can run in parallel per partition
    Partial,
    /// Perform final aggregation on results of partial aggregation. For example, this would
    /// produce the SUM of SUMs, or the SUMs of COUNTs.
    Final,
    /// Perform complete aggregation in one pass. This is used when there is only a single
    /// partition to operate on.
    Complete,
}
/// A single ordering term: expression, direction and NULL placement.
#[derive(Debug, Clone)]
pub struct SortOrder {
    child: Arc<Expr>,
    direction: SortDirection,
    null_ordering: NullOrdering,
}
/// Placement of NULL values relative to non-NULL values in a sort.
#[derive(Debug, Clone)]
pub enum NullOrdering {
    NullsFirst,
    NullsLast,
}
/// Partitioning schemes supported by operators.
#[derive(Debug, Clone)]
pub enum Partitioning {
    UnknownPartitioning(usize),
    HashPartitioning(usize, Vec<Arc<Expr>>),
}
impl Partitioning {
    /// Number of partitions produced under this scheme.
    pub fn partition_count(&self) -> usize {
        use Partitioning::*;
        match self {
            UnknownPartitioning(n) => *n,
            HashPartitioning(n, _) => *n,
        }
    }
}
/// Unique identifier for the output shuffle partition of an operator.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ShuffleId {
    pub(crate) job_uuid: Uuid,
    pub(crate) stage_id: usize,
    pub(crate) partition_id: usize,
}
impl ShuffleId {
    /// Creates an identifier for `(job, stage, partition)`.
    pub fn new(job_uuid: Uuid, stage_id: usize, partition_id: usize) -> Self {
        Self {
            job_uuid,
            stage_id,
            partition_id,
        }
    }
}
/// Placeholder for the physical location of a shuffle partition (not yet implemented).
pub struct ShuffleLocation {}
/// Translate a logical expression into a physical expression that can be evaluated against
/// input data.
///
/// Supports aliases, column references, literals, and binary arithmetic /
/// comparison operators; anything else yields an error.
pub fn compile_expression(expr: &Expr, input: &Schema) -> Result<Arc<dyn Expression>> {
    match expr {
        Expr::Alias(expr, name) => Ok(alias(compile_expression(expr, input)?, name)),
        Expr::Column(name) => Ok(col(name)),
        Expr::Literal(value) => Ok(lit(value.to_owned())),
        Expr::BinaryExpr { left, op, right } => {
            // Compile both operands first, then dispatch on the operator.
            let l = compile_expression(left, input)?;
            let r = compile_expression(right, input)?;
            match op {
                Operator::Plus => Ok(add(l, r)),
                Operator::Minus => Ok(subtract(l, r)),
                Operator::Multiply => Ok(mult(l, r)),
                Operator::Divide => Ok(div(l, r)),
                Operator::Lt
                | Operator::LtEq
                | Operator::Gt
                | Operator::GtEq
                | Operator::Eq
                | Operator::NotEq => Ok(compare(l, op, r)),
                other => Err(ballista_error(&format!(
                    "Unsupported binary operator in compile_expression {:?}",
                    other
                ))),
            }
        }
        other => Err(ballista_error(&format!(
            "Unsupported expression in compile_expression {:?}",
            other
        ))),
    }
}
/// Translate one or more logical expressions into physical expressions that can be evaluated
/// against input data.
///
/// Returns the error of the first expression that fails to compile.
pub fn compile_expressions(expr: &[Expr], input: &Schema) -> Result<Vec<Arc<dyn Expression>>> {
    let mut out = Vec::with_capacity(expr.len());
    for e in expr.iter() {
        out.push(compile_expression(e, input)?);
    }
    Ok(out)
}
/// Translate a logical aggregate expression into a physical expression that can be evaluated
/// against input data.
///
/// Recognized functions (case-insensitive): avg, count, max, min, sum.
/// Only the first argument of the aggregate function is used.
pub fn compile_aggregate_expression(
    expr: &Expr,
    input_schema: &Schema,
) -> Result<Arc<dyn AggregateExpr>> {
    match expr {
        Expr::Alias(expr, alias) => Ok(aliased_aggr(
            compile_aggregate_expression(expr, input_schema)?,
            alias,
        )),
        Expr::AggregateFunction { name, args, .. } => match name.to_lowercase().as_ref() {
            "avg" => Ok(avg(compile_expression(&args[0], input_schema)?)),
            "count" => Ok(count(compile_expression(&args[0], input_schema)?)),
            "max" => Ok(max(compile_expression(&args[0], input_schema)?)),
            "min" => Ok(min(compile_expression(&args[0], input_schema)?)),
            "sum" => Ok(sum(compile_expression(&args[0], input_schema)?)),
            other => Err(ballista_error(&format!(
                "Unsupported aggregate function in compile_aggregate_expression '{}'",
                other
            ))),
        },
        other => Err(ballista_error(&format!(
            "Unsupported aggregate expression in compile_aggregate_expression {:?}",
            other
        ))),
    }
}
/// Translate one or more logical aggregate expressions into physical expressions that can be evaluated
/// against input data.
pub fn | (
expr: &[Expr],
input: &Schema,
) -> Result<Vec<Arc<dyn AggregateExpr>>> {
expr.iter()
.map(|e| compile_aggregate_expression(e, input))
.collect()
}
| compile_aggregate_expressions | identifier_name |
transaction.go | package transaction
import (
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/util"
)
const (
	// MaxScriptLength is the limit for transaction's script length.
	MaxScriptLength = math.MaxUint16
	// MaxTransactionSize is the upper limit size in bytes that a transaction can reach. It is
	// set to be 102400.
	MaxTransactionSize = 102400
	// MaxAttributes is maximum number of attributes including signers that can be contained
	// within a transaction. It is set to be 16.
	// Decoding enforces this as a shared budget: nattrs <= MaxAttributes - nsigners.
	MaxAttributes = 16
	// DummyVersion represents reserved transaction version for trimmed transactions.
	DummyVersion = 255
)

// ErrInvalidWitnessNum returns when the number of witnesses does not match signers.
var ErrInvalidWitnessNum = errors.New("number of signers doesn't match witnesses")
// Transaction is a process recorded in the NEO blockchain.
type Transaction struct {
	// The trading version which is currently 0.
	Version uint8
	// Random number to avoid hash collision.
	Nonce uint32
	// Fee to be burned.
	SystemFee int64
	// Fee to be distributed to consensus nodes.
	NetworkFee int64
	// Maximum blockchain height exceeding which
	// transaction should fail verification.
	ValidUntilBlock uint32
	// Code to run in NeoVM for this transaction.
	Script []byte
	// Transaction attributes.
	Attributes []Attribute
	// Transaction signers list (starts with Sender).
	Signers []Signer
	// The scripts that comes with this transaction.
	// Scripts exist out of the verification script
	// and invocation script.
	Scripts []Witness
	// size is transaction's serialized size, computed lazily by Size().
	size int
	// Hash of the transaction: SHA256 of the signed (hashable) fields.
	// See createHash and decodeHashableFields.
	hash util.Uint256
	// Whether hash is correct.
	hashed bool
	// Trimmed indicates this is a transaction from trimmed
	// data.
	Trimmed bool
}
// NewTrimmedTX returns a transaction stub carrying only the given hash,
// with Trimmed set to mark it as restored from trimmed data.
func NewTrimmedTX(hash util.Uint256) *Transaction {
	tx := new(Transaction)
	tx.hash = hash
	tx.hashed = true
	tx.Trimmed = true
	return tx
}
// New returns a new transaction that executes the given script and pays the
// given system fee. The nonce is randomized and all slices start empty.
func New(script []byte, gas int64) *Transaction {
	tx := &Transaction{
		Version:    0,
		Nonce:      rand.Uint32(),
		Script:     script,
		SystemFee:  gas,
		Attributes: make([]Attribute, 0),
		Signers:    make([]Signer, 0),
		Scripts:    make([]Witness, 0),
	}
	return tx
}
// Hash returns the hash of the transaction.
// The hash is computed lazily and cached; panics if the hashable fields
// cannot be serialized (e.g. empty script).
func (t *Transaction) Hash() util.Uint256 {
	if !t.hashed {
		if t.createHash() != nil {
			panic("failed to compute hash!")
		}
	}
	return t.hash
}
// HasAttribute returns true iff t has an attribute of type typ.
func (t *Transaction) HasAttribute(typ AttrType) bool {
	for _, attr := range t.Attributes {
		if attr.Type == typ {
			return true
		}
	}
	return false
}
// GetAttributes returns the list of transaction's attributes of the given type.
// Returns nil in case if attributes not found.
func (t *Transaction) GetAttributes(typ AttrType) []Attribute {
	var result []Attribute
	for i := range t.Attributes {
		if t.Attributes[i].Type == typ {
			result = append(result, t.Attributes[i])
		}
	}
	return result
}
// decodeHashableFields decodes the fields that are used for signing the
// transaction, which are all fields except the scripts.
// When buf is non-nil it must be the buffer backing br; the byte span
// consumed here is hashed directly (SHA256), so the hash needn't be
// recomputed by re-serializing later.
func (t *Transaction) decodeHashableFields(br *io.BinReader, buf []byte) {
	var start, end int
	if buf != nil {
		// br.Len() is the number of unread bytes, so len(buf)-br.Len()
		// is the current offset into buf.
		start = len(buf) - br.Len()
	}
	t.Version = uint8(br.ReadB())
	t.Nonce = br.ReadU32LE()
	t.SystemFee = int64(br.ReadU64LE())
	t.NetworkFee = int64(br.ReadU64LE())
	t.ValidUntilBlock = br.ReadU32LE()
	nsigners := br.ReadVarUint()
	if br.Err != nil {
		return
	}
	if nsigners > MaxAttributes {
		br.Err = errors.New("too many signers")
		return
	} else if nsigners == 0 {
		br.Err = errors.New("missing signers")
		return
	}
	t.Signers = make([]Signer, nsigners)
	for i := 0; i < int(nsigners); i++ {
		t.Signers[i].DecodeBinary(br)
	}
	nattrs := br.ReadVarUint()
	// Signers and attributes share the MaxAttributes budget.
	if nattrs > MaxAttributes-nsigners {
		br.Err = errors.New("too many attributes")
		return
	}
	t.Attributes = make([]Attribute, nattrs)
	for i := 0; i < int(nattrs); i++ {
		t.Attributes[i].DecodeBinary(br)
	}
	t.Script = br.ReadVarBytes(MaxScriptLength)
	if br.Err == nil {
		br.Err = t.isValid()
	}
	if buf != nil {
		end = len(buf) - br.Len()
		t.hash = hash.Sha256(buf[start:end])
		t.hashed = true
	}
}
// decodeBinaryNoSize decodes the whole transaction (hashable fields plus
// witnesses) without setting the cached size. When buf is non-nil the hash is
// taken directly from the consumed bytes; otherwise it is computed afterwards.
func (t *Transaction) decodeBinaryNoSize(br *io.BinReader, buf []byte) {
	t.decodeHashableFields(br, buf)
	if br.Err != nil {
		return
	}
	nscripts := br.ReadVarUint()
	if nscripts > MaxAttributes {
		br.Err = errors.New("too many witnesses")
		return
	} else if int(nscripts) != len(t.Signers) {
		// Fix: the error previously reported len(t.Scripts), which is not
		// populated until below and so always printed stale data (typically
		// 0); report the decoded witness count instead.
		br.Err = fmt.Errorf("%w: %d vs %d", ErrInvalidWitnessNum, len(t.Signers), nscripts)
		return
	}
	t.Scripts = make([]Witness, nscripts)
	for i := 0; i < int(nscripts); i++ {
		t.Scripts[i].DecodeBinary(br)
	}
	// Create the hash of the transaction at decode, so we dont need
	// to do it anymore.
	if br.Err == nil && buf == nil {
		br.Err = t.createHash()
	}
}
// DecodeBinary implements Serializable interface.
// It also eagerly computes and caches the serialized size.
func (t *Transaction) DecodeBinary(br *io.BinReader) {
	t.decodeBinaryNoSize(br, nil)
	if br.Err == nil {
		_ = t.Size()
	}
}

// EncodeBinary implements Serializable interface.
// Witnesses are appended after the signed (hashable) fields.
func (t *Transaction) EncodeBinary(bw *io.BinWriter) {
	t.encodeHashableFields(bw)
	bw.WriteVarUint(uint64(len(t.Scripts)))
	for i := range t.Scripts {
		t.Scripts[i].EncodeBinary(bw)
	}
}
// encodeHashableFields encodes the fields that are not used for
// signing the transaction, which are all fields except the scripts.
// NOTE(review): the first sentence above contradicts the code — these are
// exactly the SIGNED fields (everything except witness scripts); confirm and
// align with decodeHashableFields' doc.
func (t *Transaction) encodeHashableFields(bw *io.BinWriter) {
	// An empty script can't be hashed or executed; refuse to serialize it.
	if len(t.Script) == 0 {
		bw.Err = errors.New("transaction has no script")
		return
	}
	bw.WriteB(byte(t.Version))
	bw.WriteU32LE(t.Nonce)
	bw.WriteU64LE(uint64(t.SystemFee))
	bw.WriteU64LE(uint64(t.NetworkFee))
	bw.WriteU32LE(t.ValidUntilBlock)
	bw.WriteVarUint(uint64(len(t.Signers)))
	for i := range t.Signers {
		t.Signers[i].EncodeBinary(bw)
	}
	bw.WriteVarUint(uint64(len(t.Attributes)))
	for i := range t.Attributes {
		t.Attributes[i].EncodeBinary(bw)
	}
	bw.WriteVarBytes(t.Script)
}

// EncodeHashableFields returns serialized transaction's fields which are hashed.
func (t *Transaction) EncodeHashableFields() ([]byte, error) {
	bw := io.NewBufBinWriter()
	t.encodeHashableFields(bw.BinWriter)
	if bw.Err != nil {
		return nil, bw.Err
	}
	return bw.Bytes(), nil
}
// createHash creates the hash of the transaction: SHA256 over the serialized
// hashable fields, streamed directly into the digest without an intermediate
// buffer. The result is cached in t.hash.
func (t *Transaction) createHash() error {
	shaHash := sha256.New()
	bw := io.NewBinWriterFromIO(shaHash)
	t.encodeHashableFields(bw)
	if bw.Err != nil {
		return bw.Err
	}
	// Sum writes the digest into t.hash's backing array in place.
	shaHash.Sum(t.hash[:0])
	t.hashed = true
	return nil
}

// DecodeHashableFields decodes a part of transaction which should be hashed.
// The buffer must contain exactly the signed fields and nothing else;
// witnesses are reset to an empty slice.
func (t *Transaction) DecodeHashableFields(buf []byte) error {
	r := io.NewBinReaderFromBuf(buf)
	t.decodeHashableFields(r, buf)
	if r.Err != nil {
		return r.Err
	}
	// Ensure all the data was read.
	if r.Len() != 0 {
		return errors.New("additional data after the signed part")
	}
	t.Scripts = make([]Witness, 0)
	return nil
}
// Bytes converts the transaction to []byte.
// Returns nil if serialization fails (e.g. the transaction has no script).
func (t *Transaction) Bytes() []byte {
	buf := io.NewBufBinWriter()
	t.EncodeBinary(buf.BinWriter)
	if buf.Err != nil {
		return nil
	}
	return buf.Bytes()
}

// NewTransactionFromBytes decodes byte array into *Transaction.
// The input must contain exactly one transaction; trailing bytes are an
// error. The cached size is set from the input length.
func NewTransactionFromBytes(b []byte) (*Transaction, error) {
	tx := &Transaction{}
	r := io.NewBinReaderFromBuf(b)
	// Passing b lets the decoder hash the signed span without re-serializing.
	tx.decodeBinaryNoSize(r, b)
	if r.Err != nil {
		return nil, r.Err
	}
	if r.Len() != 0 {
		return nil, errors.New("additional data after the transaction")
	}
	tx.size = len(b)
	return tx, nil
}
// FeePerByte returns NetworkFee of the transaction divided by
// its size.
// NOTE(review): divides by Size() without a zero check — assumes a
// serializable transaction (GetVarSize > 0); confirm against callers.
func (t *Transaction) FeePerByte() int64 {
	return t.NetworkFee / int64(t.Size())
}

// Size returns size of the serialized transaction.
// The value is computed once and cached in t.size.
func (t *Transaction) Size() int {
	if t.size == 0 {
		t.size = io.GetVarSize(t)
	}
	return t.size
}

// Sender returns the sender of the transaction which is always on the first place
// in the transaction's signers list.
// Panics if the signers list is empty.
func (t *Transaction) Sender() util.Uint160 {
	if len(t.Signers) == 0 {
		panic("transaction does not have signers")
	}
	return t.Signers[0].Account
}
// transactionJSON is a wrapper for Transaction and
// used for correct marhalling of transaction.Data.
// Fees are serialized as strings (`,string` tag); the hash and size are
// included so they can be cross-checked on unmarshalling.
type transactionJSON struct {
	TxID            util.Uint256 `json:"hash"`
	Size            int          `json:"size"`
	Version         uint8        `json:"version"`
	Nonce           uint32       `json:"nonce"`
	Sender          string       `json:"sender"`
	SystemFee       int64        `json:"sysfee,string"`
	NetworkFee      int64        `json:"netfee,string"`
	ValidUntilBlock uint32       `json:"validuntilblock"`
	Attributes      []Attribute  `json:"attributes"`
	Signers         []Signer     `json:"signers"`
	Script          []byte       `json:"script"`
	Scripts         []Witness    `json:"witnesses"`
}
// MarshalJSON implements json.Marshaler interface.
func (t *Transaction) MarshalJSON() ([]byte, error) {
tx := transactionJSON{
TxID: t.Hash(),
Size: t.Size(),
Version: t.Version,
Nonce: t.Nonce,
Sender: address.Uint160ToString(t.Sender()),
ValidUntilBlock: t.ValidUntilBlock,
Attributes: t.Attributes,
Signers: t.Signers,
Script: t.Script,
Scripts: t.Scripts,
SystemFee: t.SystemFee,
NetworkFee: t.NetworkFee,
}
return json.Marshal(tx)
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (t *Transaction) UnmarshalJSON(data []byte) error {
tx := new(transactionJSON)
if err := json.Unmarshal(data, tx); err != nil {
return err
}
t.Version = tx.Version
t.Nonce = tx.Nonce
t.ValidUntilBlock = tx.ValidUntilBlock
t.Attributes = tx.Attributes
t.Signers = tx.Signers
t.Scripts = tx.Scripts
t.SystemFee = tx.SystemFee
t.NetworkFee = tx.NetworkFee
t.Script = tx.Script
if t.Hash() != tx.TxID {
return errors.New("txid doesn't match transaction hash")
}
if t.Size() != tx.Size {
return errors.New("'size' doesn't match transaction size")
}
return t.isValid()
}
// Various errors for transaction validation.
var (
ErrInvalidVersion = errors.New("only version 0 is supported")
ErrNegativeSystemFee = errors.New("negative system fee")
ErrNegativeNetworkFee = errors.New("negative network fee")
ErrTooBigFees = errors.New("too big fees: int64 overflow")
ErrEmptySigners = errors.New("signers array should contain sender")
ErrNonUniqueSigners = errors.New("transaction signers should be unique")
ErrInvalidAttribute = errors.New("invalid attribute")
ErrEmptyScript = errors.New("no script")
)
// isValid checks whether decoded/unmarshalled transaction has all fields valid.
func (t *Transaction) | () error {
if t.Version > 0 && t.Version != DummyVersion {
return ErrInvalidVersion
}
if t.SystemFee < 0 {
return ErrNegativeSystemFee
}
if t.NetworkFee < 0 {
return ErrNegativeNetworkFee
}
if t.NetworkFee+t.SystemFee < t.SystemFee {
return ErrTooBigFees
}
if len(t.Signers) == 0 {
return ErrEmptySigners
}
for i := 0; i < len(t.Signers); i++ {
for j := i + 1; j < len(t.Signers); j++ {
if t.Signers[i].Account.Equals(t.Signers[j].Account) {
return ErrNonUniqueSigners
}
}
}
attrs := map[AttrType]bool{}
for i := range t.Attributes {
typ := t.Attributes[i].Type
if !typ.allowMultiple() {
if attrs[typ] {
return fmt.Errorf("%w: multiple '%s' attributes", ErrInvalidAttribute, typ.String())
}
attrs[typ] = true
}
}
if len(t.Script) == 0 {
return ErrEmptyScript
}
return nil
}
// HasSigner returns true in case if hash is present in the list of signers.
func (t *Transaction) HasSigner(hash util.Uint160) bool {
for _, h := range t.Signers {
if h.Account.Equals(hash) {
return true
}
}
return false
}
| isValid | identifier_name |
transaction.go | package transaction
import (
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/util"
)
const (
// MaxScriptLength is the limit for transaction's script length.
MaxScriptLength = math.MaxUint16
// MaxTransactionSize is the upper limit size in bytes that a transaction can reach. It is
// set to be 102400.
MaxTransactionSize = 102400
// MaxAttributes is maximum number of attributes including signers that can be contained
// within a transaction. It is set to be 16.
MaxAttributes = 16
// DummyVersion represents reserved transaction version for trimmed transactions.
DummyVersion = 255
)
// ErrInvalidWitnessNum returns when the number of witnesses does not match signers.
var ErrInvalidWitnessNum = errors.New("number of signers doesn't match witnesses")
// Transaction is a process recorded in the NEO blockchain.
type Transaction struct {
// The trading version which is currently 0.
Version uint8
// Random number to avoid hash collision.
Nonce uint32
// Fee to be burned.
SystemFee int64
// Fee to be distributed to consensus nodes.
NetworkFee int64
// Maximum blockchain height exceeding which
// transaction should fail verification.
ValidUntilBlock uint32
// Code to run in NeoVM for this transaction.
Script []byte
// Transaction attributes.
Attributes []Attribute
// Transaction signers list (starts with Sender).
Signers []Signer
// The scripts that comes with this transaction.
// Scripts exist out of the verification script
// and invocation script.
Scripts []Witness
// size is transaction's serialized size.
size int
// Hash of the transaction (double SHA256).
hash util.Uint256
// Whether hash is correct.
hashed bool
// Trimmed indicates this is a transaction from trimmed
// data.
Trimmed bool
}
// NewTrimmedTX returns a trimmed transaction with only its hash
// and Trimmed to true.
func NewTrimmedTX(hash util.Uint256) *Transaction {
return &Transaction{
hash: hash,
hashed: true,
Trimmed: true,
}
}
// New returns a new transaction to execute given script and pay given system
// fee.
func New(script []byte, gas int64) *Transaction {
return &Transaction{
Version: 0,
Nonce: rand.Uint32(),
Script: script,
SystemFee: gas,
Attributes: []Attribute{},
Signers: []Signer{},
Scripts: []Witness{},
}
}
// Hash returns the hash of the transaction.
func (t *Transaction) Hash() util.Uint256 {
if !t.hashed {
if t.createHash() != nil {
panic("failed to compute hash!")
}
}
return t.hash
}
// HasAttribute returns true iff t has an attribute of type typ.
func (t *Transaction) HasAttribute(typ AttrType) bool {
for i := range t.Attributes {
if t.Attributes[i].Type == typ {
return true
}
}
return false
}
// GetAttributes returns the list of transaction's attributes of the given type.
// Returns nil in case if attributes not found.
func (t *Transaction) GetAttributes(typ AttrType) []Attribute {
var result []Attribute
for _, attr := range t.Attributes {
if attr.Type == typ {
result = append(result, attr)
}
}
return result
}
// decodeHashableFields decodes the fields that are used for signing the
// transaction, which are all fields except the scripts.
func (t *Transaction) decodeHashableFields(br *io.BinReader, buf []byte) {
var start, end int
if buf != nil {
start = len(buf) - br.Len()
}
t.Version = uint8(br.ReadB())
t.Nonce = br.ReadU32LE()
t.SystemFee = int64(br.ReadU64LE())
t.NetworkFee = int64(br.ReadU64LE())
t.ValidUntilBlock = br.ReadU32LE()
nsigners := br.ReadVarUint()
if br.Err != nil {
return
}
if nsigners > MaxAttributes {
br.Err = errors.New("too many signers")
return
} else if nsigners == 0 {
br.Err = errors.New("missing signers")
return
}
t.Signers = make([]Signer, nsigners)
for i := 0; i < int(nsigners); i++ {
t.Signers[i].DecodeBinary(br)
}
nattrs := br.ReadVarUint()
if nattrs > MaxAttributes-nsigners {
br.Err = errors.New("too many attributes")
return
}
t.Attributes = make([]Attribute, nattrs)
for i := 0; i < int(nattrs); i++ {
t.Attributes[i].DecodeBinary(br)
}
t.Script = br.ReadVarBytes(MaxScriptLength)
if br.Err == nil {
br.Err = t.isValid()
}
if buf != nil {
end = len(buf) - br.Len()
t.hash = hash.Sha256(buf[start:end])
t.hashed = true
}
}
func (t *Transaction) decodeBinaryNoSize(br *io.BinReader, buf []byte) {
t.decodeHashableFields(br, buf)
if br.Err != nil {
return
}
nscripts := br.ReadVarUint()
if nscripts > MaxAttributes {
br.Err = errors.New("too many witnesses")
return
} else if int(nscripts) != len(t.Signers) {
br.Err = fmt.Errorf("%w: %d vs %d", ErrInvalidWitnessNum, len(t.Signers), len(t.Scripts))
return
}
t.Scripts = make([]Witness, nscripts)
for i := 0; i < int(nscripts); i++ {
t.Scripts[i].DecodeBinary(br)
}
// Create the hash of the transaction at decode, so we dont need
// to do it anymore.
if br.Err == nil && buf == nil {
br.Err = t.createHash()
}
}
// DecodeBinary implements Serializable interface.
func (t *Transaction) DecodeBinary(br *io.BinReader) {
t.decodeBinaryNoSize(br, nil)
if br.Err == nil {
_ = t.Size()
}
}
// EncodeBinary implements Serializable interface.
func (t *Transaction) EncodeBinary(bw *io.BinWriter) {
t.encodeHashableFields(bw)
bw.WriteVarUint(uint64(len(t.Scripts)))
for i := range t.Scripts {
t.Scripts[i].EncodeBinary(bw)
}
}
// encodeHashableFields encodes the fields that are not used for
// signing the transaction, which are all fields except the scripts.
func (t *Transaction) encodeHashableFields(bw *io.BinWriter) {
if len(t.Script) == 0 {
bw.Err = errors.New("transaction has no script")
return
}
bw.WriteB(byte(t.Version))
bw.WriteU32LE(t.Nonce)
bw.WriteU64LE(uint64(t.SystemFee))
bw.WriteU64LE(uint64(t.NetworkFee))
bw.WriteU32LE(t.ValidUntilBlock)
bw.WriteVarUint(uint64(len(t.Signers)))
for i := range t.Signers {
t.Signers[i].EncodeBinary(bw)
}
bw.WriteVarUint(uint64(len(t.Attributes)))
for i := range t.Attributes {
t.Attributes[i].EncodeBinary(bw)
}
bw.WriteVarBytes(t.Script)
}
// EncodeHashableFields returns serialized transaction's fields which are hashed.
func (t *Transaction) EncodeHashableFields() ([]byte, error) {
bw := io.NewBufBinWriter()
t.encodeHashableFields(bw.BinWriter)
if bw.Err != nil {
return nil, bw.Err
}
return bw.Bytes(), nil
}
// createHash creates the hash of the transaction.
func (t *Transaction) createHash() error {
shaHash := sha256.New()
bw := io.NewBinWriterFromIO(shaHash)
t.encodeHashableFields(bw)
if bw.Err != nil {
return bw.Err
}
shaHash.Sum(t.hash[:0])
t.hashed = true
return nil
}
// DecodeHashableFields decodes a part of transaction which should be hashed.
func (t *Transaction) DecodeHashableFields(buf []byte) error {
r := io.NewBinReaderFromBuf(buf)
t.decodeHashableFields(r, buf)
if r.Err != nil {
return r.Err
}
// Ensure all the data was read.
if r.Len() != 0 {
return errors.New("additional data after the signed part")
}
t.Scripts = make([]Witness, 0)
return nil
}
// Bytes converts the transaction to []byte.
func (t *Transaction) Bytes() []byte {
buf := io.NewBufBinWriter()
t.EncodeBinary(buf.BinWriter)
if buf.Err != nil {
return nil
}
return buf.Bytes()
}
// NewTransactionFromBytes decodes byte array into *Transaction.
func NewTransactionFromBytes(b []byte) (*Transaction, error) {
tx := &Transaction{}
r := io.NewBinReaderFromBuf(b)
tx.decodeBinaryNoSize(r, b)
if r.Err != nil {
return nil, r.Err
}
if r.Len() != 0 {
return nil, errors.New("additional data after the transaction")
}
tx.size = len(b)
return tx, nil
}
// FeePerByte returns NetworkFee of the transaction divided by
// its size.
func (t *Transaction) FeePerByte() int64 {
return t.NetworkFee / int64(t.Size())
}
// Size returns size of the serialized transaction.
func (t *Transaction) Size() int {
if t.size == 0 {
t.size = io.GetVarSize(t)
}
return t.size
}
// Sender returns the sender of the transaction which is always on the first place
// in the transaction's signers list.
func (t *Transaction) Sender() util.Uint160 {
if len(t.Signers) == 0 {
panic("transaction does not have signers")
}
return t.Signers[0].Account
}
// transactionJSON is a wrapper for Transaction and
// used for correct marhalling of transaction.Data.
type transactionJSON struct {
TxID util.Uint256 `json:"hash"`
Size int `json:"size"`
Version uint8 `json:"version"`
Nonce uint32 `json:"nonce"`
Sender string `json:"sender"`
SystemFee int64 `json:"sysfee,string"`
NetworkFee int64 `json:"netfee,string"`
ValidUntilBlock uint32 `json:"validuntilblock"`
Attributes []Attribute `json:"attributes"`
Signers []Signer `json:"signers"`
Script []byte `json:"script"`
Scripts []Witness `json:"witnesses"`
}
// MarshalJSON implements json.Marshaler interface.
func (t *Transaction) MarshalJSON() ([]byte, error) {
tx := transactionJSON{
TxID: t.Hash(),
Size: t.Size(),
Version: t.Version,
Nonce: t.Nonce,
Sender: address.Uint160ToString(t.Sender()),
ValidUntilBlock: t.ValidUntilBlock,
Attributes: t.Attributes,
Signers: t.Signers,
Script: t.Script,
Scripts: t.Scripts,
SystemFee: t.SystemFee,
NetworkFee: t.NetworkFee,
}
return json.Marshal(tx)
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (t *Transaction) UnmarshalJSON(data []byte) error {
tx := new(transactionJSON)
if err := json.Unmarshal(data, tx); err != nil {
return err
}
t.Version = tx.Version
t.Nonce = tx.Nonce
t.ValidUntilBlock = tx.ValidUntilBlock
t.Attributes = tx.Attributes
t.Signers = tx.Signers
t.Scripts = tx.Scripts
t.SystemFee = tx.SystemFee
t.NetworkFee = tx.NetworkFee
t.Script = tx.Script
if t.Hash() != tx.TxID {
return errors.New("txid doesn't match transaction hash")
}
if t.Size() != tx.Size {
return errors.New("'size' doesn't match transaction size")
}
return t.isValid()
}
// Various errors for transaction validation.
var (
ErrInvalidVersion = errors.New("only version 0 is supported")
ErrNegativeSystemFee = errors.New("negative system fee")
ErrNegativeNetworkFee = errors.New("negative network fee")
ErrTooBigFees = errors.New("too big fees: int64 overflow")
ErrEmptySigners = errors.New("signers array should contain sender")
ErrNonUniqueSigners = errors.New("transaction signers should be unique")
ErrInvalidAttribute = errors.New("invalid attribute")
ErrEmptyScript = errors.New("no script")
)
// isValid checks whether decoded/unmarshalled transaction has all fields valid.
func (t *Transaction) isValid() error {
if t.Version > 0 && t.Version != DummyVersion {
return ErrInvalidVersion
}
if t.SystemFee < 0 {
return ErrNegativeSystemFee
}
if t.NetworkFee < 0 {
return ErrNegativeNetworkFee
}
if t.NetworkFee+t.SystemFee < t.SystemFee {
return ErrTooBigFees
}
if len(t.Signers) == 0 {
return ErrEmptySigners
}
for i := 0; i < len(t.Signers); i++ {
for j := i + 1; j < len(t.Signers); j++ |
}
attrs := map[AttrType]bool{}
for i := range t.Attributes {
typ := t.Attributes[i].Type
if !typ.allowMultiple() {
if attrs[typ] {
return fmt.Errorf("%w: multiple '%s' attributes", ErrInvalidAttribute, typ.String())
}
attrs[typ] = true
}
}
if len(t.Script) == 0 {
return ErrEmptyScript
}
return nil
}
// HasSigner returns true in case if hash is present in the list of signers.
func (t *Transaction) HasSigner(hash util.Uint160) bool {
for _, h := range t.Signers {
if h.Account.Equals(hash) {
return true
}
}
return false
}
| {
if t.Signers[i].Account.Equals(t.Signers[j].Account) {
return ErrNonUniqueSigners
}
} | conditional_block |
transaction.go | package transaction
import (
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/util"
)
const (
// MaxScriptLength is the limit for transaction's script length.
MaxScriptLength = math.MaxUint16
// MaxTransactionSize is the upper limit size in bytes that a transaction can reach. It is
// set to be 102400.
MaxTransactionSize = 102400
// MaxAttributes is maximum number of attributes including signers that can be contained
// within a transaction. It is set to be 16.
MaxAttributes = 16
// DummyVersion represents reserved transaction version for trimmed transactions.
DummyVersion = 255
)
// ErrInvalidWitnessNum returns when the number of witnesses does not match signers.
var ErrInvalidWitnessNum = errors.New("number of signers doesn't match witnesses")
// Transaction is a process recorded in the NEO blockchain.
type Transaction struct { | Version uint8
// Random number to avoid hash collision.
Nonce uint32
// Fee to be burned.
SystemFee int64
// Fee to be distributed to consensus nodes.
NetworkFee int64
// Maximum blockchain height exceeding which
// transaction should fail verification.
ValidUntilBlock uint32
// Code to run in NeoVM for this transaction.
Script []byte
// Transaction attributes.
Attributes []Attribute
// Transaction signers list (starts with Sender).
Signers []Signer
// The scripts that comes with this transaction.
// Scripts exist out of the verification script
// and invocation script.
Scripts []Witness
// size is transaction's serialized size.
size int
// Hash of the transaction (double SHA256).
hash util.Uint256
// Whether hash is correct.
hashed bool
// Trimmed indicates this is a transaction from trimmed
// data.
Trimmed bool
}
// NewTrimmedTX returns a trimmed transaction with only its hash
// and Trimmed to true.
func NewTrimmedTX(hash util.Uint256) *Transaction {
return &Transaction{
hash: hash,
hashed: true,
Trimmed: true,
}
}
// New returns a new transaction to execute given script and pay given system
// fee.
func New(script []byte, gas int64) *Transaction {
return &Transaction{
Version: 0,
Nonce: rand.Uint32(),
Script: script,
SystemFee: gas,
Attributes: []Attribute{},
Signers: []Signer{},
Scripts: []Witness{},
}
}
// Hash returns the hash of the transaction.
func (t *Transaction) Hash() util.Uint256 {
if !t.hashed {
if t.createHash() != nil {
panic("failed to compute hash!")
}
}
return t.hash
}
// HasAttribute returns true iff t has an attribute of type typ.
func (t *Transaction) HasAttribute(typ AttrType) bool {
for i := range t.Attributes {
if t.Attributes[i].Type == typ {
return true
}
}
return false
}
// GetAttributes returns the list of transaction's attributes of the given type.
// Returns nil in case if attributes not found.
func (t *Transaction) GetAttributes(typ AttrType) []Attribute {
var result []Attribute
for _, attr := range t.Attributes {
if attr.Type == typ {
result = append(result, attr)
}
}
return result
}
// decodeHashableFields decodes the fields that are used for signing the
// transaction, which are all fields except the scripts.
func (t *Transaction) decodeHashableFields(br *io.BinReader, buf []byte) {
var start, end int
if buf != nil {
start = len(buf) - br.Len()
}
t.Version = uint8(br.ReadB())
t.Nonce = br.ReadU32LE()
t.SystemFee = int64(br.ReadU64LE())
t.NetworkFee = int64(br.ReadU64LE())
t.ValidUntilBlock = br.ReadU32LE()
nsigners := br.ReadVarUint()
if br.Err != nil {
return
}
if nsigners > MaxAttributes {
br.Err = errors.New("too many signers")
return
} else if nsigners == 0 {
br.Err = errors.New("missing signers")
return
}
t.Signers = make([]Signer, nsigners)
for i := 0; i < int(nsigners); i++ {
t.Signers[i].DecodeBinary(br)
}
nattrs := br.ReadVarUint()
if nattrs > MaxAttributes-nsigners {
br.Err = errors.New("too many attributes")
return
}
t.Attributes = make([]Attribute, nattrs)
for i := 0; i < int(nattrs); i++ {
t.Attributes[i].DecodeBinary(br)
}
t.Script = br.ReadVarBytes(MaxScriptLength)
if br.Err == nil {
br.Err = t.isValid()
}
if buf != nil {
end = len(buf) - br.Len()
t.hash = hash.Sha256(buf[start:end])
t.hashed = true
}
}
func (t *Transaction) decodeBinaryNoSize(br *io.BinReader, buf []byte) {
t.decodeHashableFields(br, buf)
if br.Err != nil {
return
}
nscripts := br.ReadVarUint()
if nscripts > MaxAttributes {
br.Err = errors.New("too many witnesses")
return
} else if int(nscripts) != len(t.Signers) {
br.Err = fmt.Errorf("%w: %d vs %d", ErrInvalidWitnessNum, len(t.Signers), len(t.Scripts))
return
}
t.Scripts = make([]Witness, nscripts)
for i := 0; i < int(nscripts); i++ {
t.Scripts[i].DecodeBinary(br)
}
// Create the hash of the transaction at decode, so we dont need
// to do it anymore.
if br.Err == nil && buf == nil {
br.Err = t.createHash()
}
}
// DecodeBinary implements Serializable interface.
func (t *Transaction) DecodeBinary(br *io.BinReader) {
t.decodeBinaryNoSize(br, nil)
if br.Err == nil {
_ = t.Size()
}
}
// EncodeBinary implements Serializable interface.
func (t *Transaction) EncodeBinary(bw *io.BinWriter) {
t.encodeHashableFields(bw)
bw.WriteVarUint(uint64(len(t.Scripts)))
for i := range t.Scripts {
t.Scripts[i].EncodeBinary(bw)
}
}
// encodeHashableFields encodes the fields that are not used for
// signing the transaction, which are all fields except the scripts.
func (t *Transaction) encodeHashableFields(bw *io.BinWriter) {
if len(t.Script) == 0 {
bw.Err = errors.New("transaction has no script")
return
}
bw.WriteB(byte(t.Version))
bw.WriteU32LE(t.Nonce)
bw.WriteU64LE(uint64(t.SystemFee))
bw.WriteU64LE(uint64(t.NetworkFee))
bw.WriteU32LE(t.ValidUntilBlock)
bw.WriteVarUint(uint64(len(t.Signers)))
for i := range t.Signers {
t.Signers[i].EncodeBinary(bw)
}
bw.WriteVarUint(uint64(len(t.Attributes)))
for i := range t.Attributes {
t.Attributes[i].EncodeBinary(bw)
}
bw.WriteVarBytes(t.Script)
}
// EncodeHashableFields returns serialized transaction's fields which are hashed.
func (t *Transaction) EncodeHashableFields() ([]byte, error) {
bw := io.NewBufBinWriter()
t.encodeHashableFields(bw.BinWriter)
if bw.Err != nil {
return nil, bw.Err
}
return bw.Bytes(), nil
}
// createHash creates the hash of the transaction.
func (t *Transaction) createHash() error {
shaHash := sha256.New()
bw := io.NewBinWriterFromIO(shaHash)
t.encodeHashableFields(bw)
if bw.Err != nil {
return bw.Err
}
shaHash.Sum(t.hash[:0])
t.hashed = true
return nil
}
// DecodeHashableFields decodes a part of transaction which should be hashed.
func (t *Transaction) DecodeHashableFields(buf []byte) error {
r := io.NewBinReaderFromBuf(buf)
t.decodeHashableFields(r, buf)
if r.Err != nil {
return r.Err
}
// Ensure all the data was read.
if r.Len() != 0 {
return errors.New("additional data after the signed part")
}
t.Scripts = make([]Witness, 0)
return nil
}
// Bytes converts the transaction to []byte.
func (t *Transaction) Bytes() []byte {
buf := io.NewBufBinWriter()
t.EncodeBinary(buf.BinWriter)
if buf.Err != nil {
return nil
}
return buf.Bytes()
}
// NewTransactionFromBytes decodes byte array into *Transaction.
func NewTransactionFromBytes(b []byte) (*Transaction, error) {
tx := &Transaction{}
r := io.NewBinReaderFromBuf(b)
tx.decodeBinaryNoSize(r, b)
if r.Err != nil {
return nil, r.Err
}
if r.Len() != 0 {
return nil, errors.New("additional data after the transaction")
}
tx.size = len(b)
return tx, nil
}
// FeePerByte returns NetworkFee of the transaction divided by
// its size.
func (t *Transaction) FeePerByte() int64 {
return t.NetworkFee / int64(t.Size())
}
// Size returns size of the serialized transaction.
func (t *Transaction) Size() int {
if t.size == 0 {
t.size = io.GetVarSize(t)
}
return t.size
}
// Sender returns the sender of the transaction which is always on the first place
// in the transaction's signers list.
func (t *Transaction) Sender() util.Uint160 {
if len(t.Signers) == 0 {
panic("transaction does not have signers")
}
return t.Signers[0].Account
}
// transactionJSON is a wrapper for Transaction and
// used for correct marhalling of transaction.Data.
type transactionJSON struct {
TxID util.Uint256 `json:"hash"`
Size int `json:"size"`
Version uint8 `json:"version"`
Nonce uint32 `json:"nonce"`
Sender string `json:"sender"`
SystemFee int64 `json:"sysfee,string"`
NetworkFee int64 `json:"netfee,string"`
ValidUntilBlock uint32 `json:"validuntilblock"`
Attributes []Attribute `json:"attributes"`
Signers []Signer `json:"signers"`
Script []byte `json:"script"`
Scripts []Witness `json:"witnesses"`
}
// MarshalJSON implements json.Marshaler interface.
func (t *Transaction) MarshalJSON() ([]byte, error) {
tx := transactionJSON{
TxID: t.Hash(),
Size: t.Size(),
Version: t.Version,
Nonce: t.Nonce,
Sender: address.Uint160ToString(t.Sender()),
ValidUntilBlock: t.ValidUntilBlock,
Attributes: t.Attributes,
Signers: t.Signers,
Script: t.Script,
Scripts: t.Scripts,
SystemFee: t.SystemFee,
NetworkFee: t.NetworkFee,
}
return json.Marshal(tx)
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (t *Transaction) UnmarshalJSON(data []byte) error {
tx := new(transactionJSON)
if err := json.Unmarshal(data, tx); err != nil {
return err
}
t.Version = tx.Version
t.Nonce = tx.Nonce
t.ValidUntilBlock = tx.ValidUntilBlock
t.Attributes = tx.Attributes
t.Signers = tx.Signers
t.Scripts = tx.Scripts
t.SystemFee = tx.SystemFee
t.NetworkFee = tx.NetworkFee
t.Script = tx.Script
if t.Hash() != tx.TxID {
return errors.New("txid doesn't match transaction hash")
}
if t.Size() != tx.Size {
return errors.New("'size' doesn't match transaction size")
}
return t.isValid()
}
// Various errors for transaction validation.
var (
ErrInvalidVersion = errors.New("only version 0 is supported")
ErrNegativeSystemFee = errors.New("negative system fee")
ErrNegativeNetworkFee = errors.New("negative network fee")
ErrTooBigFees = errors.New("too big fees: int64 overflow")
ErrEmptySigners = errors.New("signers array should contain sender")
ErrNonUniqueSigners = errors.New("transaction signers should be unique")
ErrInvalidAttribute = errors.New("invalid attribute")
ErrEmptyScript = errors.New("no script")
)
// isValid checks whether decoded/unmarshalled transaction has all fields valid.
func (t *Transaction) isValid() error {
if t.Version > 0 && t.Version != DummyVersion {
return ErrInvalidVersion
}
if t.SystemFee < 0 {
return ErrNegativeSystemFee
}
if t.NetworkFee < 0 {
return ErrNegativeNetworkFee
}
if t.NetworkFee+t.SystemFee < t.SystemFee {
return ErrTooBigFees
}
if len(t.Signers) == 0 {
return ErrEmptySigners
}
for i := 0; i < len(t.Signers); i++ {
for j := i + 1; j < len(t.Signers); j++ {
if t.Signers[i].Account.Equals(t.Signers[j].Account) {
return ErrNonUniqueSigners
}
}
}
attrs := map[AttrType]bool{}
for i := range t.Attributes {
typ := t.Attributes[i].Type
if !typ.allowMultiple() {
if attrs[typ] {
return fmt.Errorf("%w: multiple '%s' attributes", ErrInvalidAttribute, typ.String())
}
attrs[typ] = true
}
}
if len(t.Script) == 0 {
return ErrEmptyScript
}
return nil
}
// HasSigner returns true in case if hash is present in the list of signers.
func (t *Transaction) HasSigner(hash util.Uint160) bool {
for _, h := range t.Signers {
if h.Account.Equals(hash) {
return true
}
}
return false
} | // The trading version which is currently 0. | random_line_split |
transaction.go | package transaction
import (
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/util"
)
const (
// MaxScriptLength is the limit for transaction's script length.
MaxScriptLength = math.MaxUint16
// MaxTransactionSize is the upper limit size in bytes that a transaction can reach. It is
// set to be 102400.
MaxTransactionSize = 102400
// MaxAttributes is maximum number of attributes including signers that can be contained
// within a transaction. It is set to be 16.
MaxAttributes = 16
// DummyVersion represents reserved transaction version for trimmed transactions.
DummyVersion = 255
)
// ErrInvalidWitnessNum returns when the number of witnesses does not match signers.
var ErrInvalidWitnessNum = errors.New("number of signers doesn't match witnesses")
// Transaction is a process recorded in the NEO blockchain.
type Transaction struct {
// The trading version which is currently 0.
Version uint8
// Random number to avoid hash collision.
Nonce uint32
// Fee to be burned.
SystemFee int64
// Fee to be distributed to consensus nodes.
NetworkFee int64
// Maximum blockchain height exceeding which
// transaction should fail verification.
ValidUntilBlock uint32
// Code to run in NeoVM for this transaction.
Script []byte
// Transaction attributes.
Attributes []Attribute
// Transaction signers list (starts with Sender).
Signers []Signer
// The scripts that comes with this transaction.
// Scripts exist out of the verification script
// and invocation script.
Scripts []Witness
// size is transaction's serialized size.
size int
// Hash of the transaction (double SHA256).
hash util.Uint256
// Whether hash is correct.
hashed bool
// Trimmed indicates this is a transaction from trimmed
// data.
Trimmed bool
}
// NewTrimmedTX returns a trimmed transaction with only its hash
// and Trimmed to true.
func NewTrimmedTX(hash util.Uint256) *Transaction {
return &Transaction{
hash: hash,
hashed: true,
Trimmed: true,
}
}
// New returns a new transaction to execute given script and pay given system
// fee.
func New(script []byte, gas int64) *Transaction {
return &Transaction{
Version: 0,
Nonce: rand.Uint32(),
Script: script,
SystemFee: gas,
Attributes: []Attribute{},
Signers: []Signer{},
Scripts: []Witness{},
}
}
// Hash returns the hash of the transaction.
func (t *Transaction) Hash() util.Uint256 {
if !t.hashed {
if t.createHash() != nil {
panic("failed to compute hash!")
}
}
return t.hash
}
// HasAttribute returns true iff t has an attribute of type typ.
func (t *Transaction) HasAttribute(typ AttrType) bool |
// GetAttributes returns the list of transaction's attributes of the given type.
// Returns nil in case if attributes not found.
func (t *Transaction) GetAttributes(typ AttrType) []Attribute {
var result []Attribute
for _, attr := range t.Attributes {
if attr.Type == typ {
result = append(result, attr)
}
}
return result
}
// decodeHashableFields decodes the fields that are used for signing the
// transaction, which are all fields except the scripts.
func (t *Transaction) decodeHashableFields(br *io.BinReader, buf []byte) {
var start, end int
if buf != nil {
start = len(buf) - br.Len()
}
t.Version = uint8(br.ReadB())
t.Nonce = br.ReadU32LE()
t.SystemFee = int64(br.ReadU64LE())
t.NetworkFee = int64(br.ReadU64LE())
t.ValidUntilBlock = br.ReadU32LE()
nsigners := br.ReadVarUint()
if br.Err != nil {
return
}
if nsigners > MaxAttributes {
br.Err = errors.New("too many signers")
return
} else if nsigners == 0 {
br.Err = errors.New("missing signers")
return
}
t.Signers = make([]Signer, nsigners)
for i := 0; i < int(nsigners); i++ {
t.Signers[i].DecodeBinary(br)
}
nattrs := br.ReadVarUint()
if nattrs > MaxAttributes-nsigners {
br.Err = errors.New("too many attributes")
return
}
t.Attributes = make([]Attribute, nattrs)
for i := 0; i < int(nattrs); i++ {
t.Attributes[i].DecodeBinary(br)
}
t.Script = br.ReadVarBytes(MaxScriptLength)
if br.Err == nil {
br.Err = t.isValid()
}
if buf != nil {
end = len(buf) - br.Len()
t.hash = hash.Sha256(buf[start:end])
t.hashed = true
}
}
func (t *Transaction) decodeBinaryNoSize(br *io.BinReader, buf []byte) {
t.decodeHashableFields(br, buf)
if br.Err != nil {
return
}
nscripts := br.ReadVarUint()
if nscripts > MaxAttributes {
br.Err = errors.New("too many witnesses")
return
} else if int(nscripts) != len(t.Signers) {
br.Err = fmt.Errorf("%w: %d vs %d", ErrInvalidWitnessNum, len(t.Signers), len(t.Scripts))
return
}
t.Scripts = make([]Witness, nscripts)
for i := 0; i < int(nscripts); i++ {
t.Scripts[i].DecodeBinary(br)
}
// Create the hash of the transaction at decode, so we dont need
// to do it anymore.
if br.Err == nil && buf == nil {
br.Err = t.createHash()
}
}
// DecodeBinary implements Serializable interface.
func (t *Transaction) DecodeBinary(br *io.BinReader) {
t.decodeBinaryNoSize(br, nil)
if br.Err == nil {
_ = t.Size()
}
}
// EncodeBinary implements Serializable interface.
func (t *Transaction) EncodeBinary(bw *io.BinWriter) {
t.encodeHashableFields(bw)
bw.WriteVarUint(uint64(len(t.Scripts)))
for i := range t.Scripts {
t.Scripts[i].EncodeBinary(bw)
}
}
// encodeHashableFields encodes the fields that are not used for
// signing the transaction, which are all fields except the scripts.
func (t *Transaction) encodeHashableFields(bw *io.BinWriter) {
if len(t.Script) == 0 {
bw.Err = errors.New("transaction has no script")
return
}
bw.WriteB(byte(t.Version))
bw.WriteU32LE(t.Nonce)
bw.WriteU64LE(uint64(t.SystemFee))
bw.WriteU64LE(uint64(t.NetworkFee))
bw.WriteU32LE(t.ValidUntilBlock)
bw.WriteVarUint(uint64(len(t.Signers)))
for i := range t.Signers {
t.Signers[i].EncodeBinary(bw)
}
bw.WriteVarUint(uint64(len(t.Attributes)))
for i := range t.Attributes {
t.Attributes[i].EncodeBinary(bw)
}
bw.WriteVarBytes(t.Script)
}
// EncodeHashableFields returns serialized transaction's fields which are hashed.
func (t *Transaction) EncodeHashableFields() ([]byte, error) {
bw := io.NewBufBinWriter()
t.encodeHashableFields(bw.BinWriter)
if bw.Err != nil {
return nil, bw.Err
}
return bw.Bytes(), nil
}
// createHash creates the hash of the transaction.
func (t *Transaction) createHash() error {
shaHash := sha256.New()
bw := io.NewBinWriterFromIO(shaHash)
t.encodeHashableFields(bw)
if bw.Err != nil {
return bw.Err
}
shaHash.Sum(t.hash[:0])
t.hashed = true
return nil
}
// DecodeHashableFields decodes a part of transaction which should be hashed.
func (t *Transaction) DecodeHashableFields(buf []byte) error {
r := io.NewBinReaderFromBuf(buf)
t.decodeHashableFields(r, buf)
if r.Err != nil {
return r.Err
}
// Ensure all the data was read.
if r.Len() != 0 {
return errors.New("additional data after the signed part")
}
t.Scripts = make([]Witness, 0)
return nil
}
// Bytes converts the transaction to []byte.
func (t *Transaction) Bytes() []byte {
buf := io.NewBufBinWriter()
t.EncodeBinary(buf.BinWriter)
if buf.Err != nil {
return nil
}
return buf.Bytes()
}
// NewTransactionFromBytes decodes byte array into *Transaction.
func NewTransactionFromBytes(b []byte) (*Transaction, error) {
tx := &Transaction{}
r := io.NewBinReaderFromBuf(b)
tx.decodeBinaryNoSize(r, b)
if r.Err != nil {
return nil, r.Err
}
if r.Len() != 0 {
return nil, errors.New("additional data after the transaction")
}
tx.size = len(b)
return tx, nil
}
// FeePerByte returns NetworkFee of the transaction divided by
// its size.
func (t *Transaction) FeePerByte() int64 {
return t.NetworkFee / int64(t.Size())
}
// Size returns size of the serialized transaction.
func (t *Transaction) Size() int {
if t.size == 0 {
t.size = io.GetVarSize(t)
}
return t.size
}
// Sender returns the sender of the transaction which is always on the first place
// in the transaction's signers list.
func (t *Transaction) Sender() util.Uint160 {
if len(t.Signers) == 0 {
panic("transaction does not have signers")
}
return t.Signers[0].Account
}
// transactionJSON is a wrapper for Transaction and
// used for correct marhalling of transaction.Data.
type transactionJSON struct {
TxID util.Uint256 `json:"hash"`
Size int `json:"size"`
Version uint8 `json:"version"`
Nonce uint32 `json:"nonce"`
Sender string `json:"sender"`
SystemFee int64 `json:"sysfee,string"`
NetworkFee int64 `json:"netfee,string"`
ValidUntilBlock uint32 `json:"validuntilblock"`
Attributes []Attribute `json:"attributes"`
Signers []Signer `json:"signers"`
Script []byte `json:"script"`
Scripts []Witness `json:"witnesses"`
}
// MarshalJSON implements json.Marshaler interface.
func (t *Transaction) MarshalJSON() ([]byte, error) {
tx := transactionJSON{
TxID: t.Hash(),
Size: t.Size(),
Version: t.Version,
Nonce: t.Nonce,
Sender: address.Uint160ToString(t.Sender()),
ValidUntilBlock: t.ValidUntilBlock,
Attributes: t.Attributes,
Signers: t.Signers,
Script: t.Script,
Scripts: t.Scripts,
SystemFee: t.SystemFee,
NetworkFee: t.NetworkFee,
}
return json.Marshal(tx)
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (t *Transaction) UnmarshalJSON(data []byte) error {
tx := new(transactionJSON)
if err := json.Unmarshal(data, tx); err != nil {
return err
}
t.Version = tx.Version
t.Nonce = tx.Nonce
t.ValidUntilBlock = tx.ValidUntilBlock
t.Attributes = tx.Attributes
t.Signers = tx.Signers
t.Scripts = tx.Scripts
t.SystemFee = tx.SystemFee
t.NetworkFee = tx.NetworkFee
t.Script = tx.Script
if t.Hash() != tx.TxID {
return errors.New("txid doesn't match transaction hash")
}
if t.Size() != tx.Size {
return errors.New("'size' doesn't match transaction size")
}
return t.isValid()
}
// Various errors for transaction validation.
var (
ErrInvalidVersion = errors.New("only version 0 is supported")
ErrNegativeSystemFee = errors.New("negative system fee")
ErrNegativeNetworkFee = errors.New("negative network fee")
ErrTooBigFees = errors.New("too big fees: int64 overflow")
ErrEmptySigners = errors.New("signers array should contain sender")
ErrNonUniqueSigners = errors.New("transaction signers should be unique")
ErrInvalidAttribute = errors.New("invalid attribute")
ErrEmptyScript = errors.New("no script")
)
// isValid checks whether decoded/unmarshalled transaction has all fields valid.
func (t *Transaction) isValid() error {
if t.Version > 0 && t.Version != DummyVersion {
return ErrInvalidVersion
}
if t.SystemFee < 0 {
return ErrNegativeSystemFee
}
if t.NetworkFee < 0 {
return ErrNegativeNetworkFee
}
if t.NetworkFee+t.SystemFee < t.SystemFee {
return ErrTooBigFees
}
if len(t.Signers) == 0 {
return ErrEmptySigners
}
for i := 0; i < len(t.Signers); i++ {
for j := i + 1; j < len(t.Signers); j++ {
if t.Signers[i].Account.Equals(t.Signers[j].Account) {
return ErrNonUniqueSigners
}
}
}
attrs := map[AttrType]bool{}
for i := range t.Attributes {
typ := t.Attributes[i].Type
if !typ.allowMultiple() {
if attrs[typ] {
return fmt.Errorf("%w: multiple '%s' attributes", ErrInvalidAttribute, typ.String())
}
attrs[typ] = true
}
}
if len(t.Script) == 0 {
return ErrEmptyScript
}
return nil
}
// HasSigner returns true in case if hash is present in the list of signers.
func (t *Transaction) HasSigner(hash util.Uint160) bool {
for _, h := range t.Signers {
if h.Account.Equals(hash) {
return true
}
}
return false
}
| {
for i := range t.Attributes {
if t.Attributes[i].Type == typ {
return true
}
}
return false
} | identifier_body |
time_zones.rs | use core::fmt;
use super::{NaiveDateTime, DateTime, UnixTimestamp, Month, DayOfTheWeek};
use num::{div_floor, positive_rem};
pub trait TimeZone {
fn from_timestamp(&self, t: UnixTimestamp) -> NaiveDateTime;
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError>;
}
/// When a time zone makes clock jump forward or back at any instant in time
/// (for example twice a year with daylight-saving time, a.k.a. summer-time period)
/// This error is returned when either:
///
/// * Clocks went back and this local time occurred at multiple instants in time,
/// making its interpretation or conversion ambiguous.
///
/// * Clocks jumped forward and this local time did not occur.
/// It does not represent any real instant in time.
/// It could be argued that a range of local times all represent the same instant,
/// but this library does not implement the conversion that way.
#[derive(Eq, PartialEq)]
pub struct LocalTimeConversionError {
/// Make the type opaque to allow for future extensions
_private: (),
}
impl fmt::Debug for LocalTimeConversionError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "LocalTimeConversionError")
}
}
/// Implemented for time zones where `LocalTimeConversionError` never occurs,
/// namely for `Utc` and `FixedOffsetFromUtc`.
///
/// Any UTC-offset change in a time zone creates local times that either don’t occur or occur twice.
/// `TimeZone::to_timestamp` returns `Err(LocalTimeConversionError)` for such local times.
pub trait UnambiguousTimeZone: TimeZone {
fn to_unambiguous_timestamp(&self, d: &NaiveDateTime) -> UnixTimestamp {
self.to_timestamp(d).unwrap()
}
}
/// The *Coordinated Universal Time* time time zone.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct Utc;
impl UnambiguousTimeZone for Utc {}
impl TimeZone for Utc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let days_since_unix = div_floor(u.0, SECONDS_PER_DAY) as i32;
let days = days_since_unix + days_since_d0(1970);
let year = div_floor(days * 400, DAYS_PER_400YEARS) as i32;
let day_of_the_year = days - days_since_d0(year);
let (month, day) = Month::from_day_of_the_year(day_of_the_year, year.into());
let hour = positive_rem(div_floor(u.0, SECONDS_PER_HOUR), 24) as u8;
let minute = positive_rem(div_floor(u.0, SECONDS_PER_MINUTE), 60) as u8;
let second = positive_rem(u.0, 60) as u8;
NaiveDateTime::new(year, month, day, hour, minute, second)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
Ok(UnixTimestamp(
i64::from(days_since_unix(d)) * SECONDS_PER_DAY
+ i64::from(d.hour) * SECONDS_PER_HOUR
+ i64::from(d.minute) * SECONDS_PER_MINUTE
+ i64::from(d.second)
))
}
}
/// The offset is typically positive east of Greenwich (longitude 0°), negative west.
///
/// For example, Japan Standard Time is UTC+09:00:
///
/// ```rust
/// use gregor::FixedOffsetFromUtc;
/// let jst = FixedOffsetFromUtc::from_hours_and_minutes(9, 0);
/// ```
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct FixedOffsetFromUtc {
seconds_ahead_of_utc: i32,
}
impl FixedOffsetFromUtc {
pub fn from_hours_and_minutes(hours: i32, minutes: i32) -> Self {
FixedOffsetFromUtc {
seconds_ahead_of_utc: (hours * 60 + minutes) * 60,
}
}
}
impl UnambiguousTimeZone for FixedOffsetFromUtc {}
impl TimeZone for FixedOffsetFromUtc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
// When local time is ahead of UTC (positive offset)
// that instant happened before midnight UTC
// so there are more seconds since then.
// (Add the offset rather than subtract it.)
// Seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = u.0 + i64::from(self.seconds_ahead_of_utc);
// This is not really a Unix timestamp or a UTC date-time,
// but the two errors compensate to give a date-time in this time zone.
Utc.from_timestamp(UnixTimestamp(seconds))
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// Pretend this is UTC to obtain seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = Utc.to_unambiguous_timestamp(d).0;
// For positives offsets (ahead of UTC) this is earlier in time than UTC midnight
// (with more seconds), so *subtract* the offset to make a Unix timestamp.
Ok(UnixTimestamp(seconds - i64::from(self.seconds_ahead_of_utc)))
}
}
pub trait DaylightSaving {
fn offset_outside_dst(&self) -> FixedOffsetFromUtc;
fn offset_during_dst(&self) -> FixedOffsetFromUtc;
fn is_in_dst(&self, t: UnixTimestamp) -> bool;
}
impl<Tz: DaylightSaving> TimeZone for Tz {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let offset = if self.is_in_dst(u) {
self.offset_during_dst()
} else {
self.offset_outside_dst()
};
offset.from_timestamp(u)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// The actual timestamp/instant is one of these two:
let assuming_outside = self.offset_outside_dst().to_unambiguous_timestamp(d);
let assuming_during = self.offset_during_dst().to_unambiguous_timestamp(d);
// Let’s take Central Europe for example.
// When converted to UTC, `assuming_outside` and `assuming_during` respectively
// represent date-times one hour and two hours before `d`.
// They are one hour apart.
//
// If both timestamps are in the same DST period (during DST or outside)
// then we know for sure which of `assuming_outside` or `assuming_during` is correct.
//
// If they disagree, that means their one hour span contains a DST change:
//
// * 1 am UTC is between `d - 2 hours` and `d - 1 hour`
// * `d - 2 hours` < 1am UTC, and 1am UTC <= `d - 1 hour`
// * `d` < 3 am local time, and 2 am local time <= `d`
// * `d` is between 2 am and 3 am local time.
//
// * In October when clocks go "back", this kind of local time happens twice the same day:
// it’s ambiguous.
// * In March when clocks go "forward", that hour is skipped entirely.
// This kind of local time does not exist. This `d` value might come from buggy code.
match (self.is_in_dst(assuming_outside), self.is_in_dst(assuming_during)) {
(true, true) => Ok(assuming_during),
(false, false) => Ok(assuming_outside),
_ => Err(LocalTimeConversionError { _private: () }),
}
}
}
/// CET (Central European Time) / CEST (Central European Summer Time)
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct CentralEurope;
impl DaylightSaving for CentralEurope {
fn offset_outs | FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(1, 0)
}
fn offset_during_dst(&self) -> FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(2, 0)
}
fn is_in_dst(&self, t: UnixTimestamp) -> bool {
use Month::*;
let d = DateTime::from_timestamp(t, Utc);
// Directive 2000/84/EC of the European Parliament and of the Council
// of 19 January 2001 on summer-time arrangements
// http://eur-lex.europa.eu/legal-content/EN/ALL/?uri=CELEX:32000L0084
//
// > Article 1
//
// > For the purposes of this Directive "summer-time period"
// > shall mean the period of the year
// > during which clocks are put forward by 60 minutes compared with the rest of the year.
// >
// > Article 2
// >
// > From 2002 onwards, the summer-time period shall begin, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in March.
// >
// > Article 3
// >
// > From 2002 onwards, the summer-time period shall end, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in October.
if d.month() < March || d.month() > October {
false
} else if d.month() > March && d.month() < October {
true
} else if d.month() == March {
!before_last_sunday_1_am(&d)
} else if d.month() == October {
before_last_sunday_1_am(&d)
} else {
unreachable!()
}
}
}
fn before_last_sunday_1_am(d: &DateTime<Utc>) -> bool {
let last_sunday = last_of_the_month(d, DayOfTheWeek::Sunday);
d.day() < last_sunday || (
d.day() == last_sunday &&
(d.hour(), d.minute(), d.second()) < (1, 0, 0)
)
}
fn last_of_the_month(d: &DateTime<Utc>, requested_dow: DayOfTheWeek) -> u8 {
let last_day = d.month().length(d.year().into());
let last_dow = NaiveDateTime::new(d.year(), d.month(), last_day, 0, 0, 0).day_of_the_week();
let difference = i32::from(last_dow.to_iso_number()) - i32::from(requested_dow.to_iso_number());
last_day - (positive_rem(difference, 7) as u8)
}
pub fn days_since_unix(d: &NaiveDateTime) -> i32 {
(d.year - 1970) * DAYS_PER_COMMON_YEAR
+ leap_days_since_y0(d.year) - leap_days_since_y0(1970)
+ d.month.days_since_january_1st(d.year.into())
+ i32::from(d.day - 1)
}
/// How many leap days occurred between January of year 0 and January of the given year
/// (in Gregorian calendar).
pub fn leap_days_since_y0(year: i32) -> i32 {
if year > 0 {
let year = year - 1; // Don’t include Feb 29 of the given year, if any.
// +1 because year 0 is a leap year.
((year / 4) - (year / 100) + (year / 400)) + 1
} else {
let year = -year;
-((year / 4) - (year / 100) + (year / 400))
}
}
/// Days between January 1st of year 0 and January 1st of the given year.
fn days_since_d0(year: i32) -> i32 {
year * DAYS_PER_COMMON_YEAR + leap_days_since_y0(year)
}
const SECONDS_PER_MINUTE: i64 = 60;
const SECONDS_PER_HOUR: i64 = SECONDS_PER_MINUTE * 60;
const SECONDS_PER_DAY: i64 = SECONDS_PER_HOUR * 24;
/// The leap year schedule of the Gregorian calendar cycles every 400 years.
/// In one cycle, there are:
///
/// * 100 years multiple of 4
/// * 4 years multiple of 100
/// * 1 year multiple of 400
const LEAP_DAYS_PER_400YEARS: i32 = 100 - 4 + 1;
const DAYS_PER_COMMON_YEAR: i32 = 365;
const DAYS_PER_400YEARS: i32 = DAYS_PER_COMMON_YEAR * 400 + LEAP_DAYS_PER_400YEARS;
| ide_dst(&self) -> | identifier_name |
time_zones.rs | use core::fmt;
use super::{NaiveDateTime, DateTime, UnixTimestamp, Month, DayOfTheWeek};
use num::{div_floor, positive_rem}; | fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError>;
}
/// When a time zone makes clock jump forward or back at any instant in time
/// (for example twice a year with daylight-saving time, a.k.a. summer-time period)
/// This error is returned when either:
///
/// * Clocks went back and this local time occurred at multiple instants in time,
/// making its interpretation or conversion ambiguous.
///
/// * Clocks jumped forward and this local time did not occur.
/// It does not represent any real instant in time.
/// It could be argued that a range of local times all represent the same instant,
/// but this library does not implement the conversion that way.
#[derive(Eq, PartialEq)]
pub struct LocalTimeConversionError {
/// Make the type opaque to allow for future extensions
_private: (),
}
impl fmt::Debug for LocalTimeConversionError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "LocalTimeConversionError")
}
}
/// Implemented for time zones where `LocalTimeConversionError` never occurs,
/// namely for `Utc` and `FixedOffsetFromUtc`.
///
/// Any UTC-offset change in a time zone creates local times that either don’t occur or occur twice.
/// `TimeZone::to_timestamp` returns `Err(LocalTimeConversionError)` for such local times.
pub trait UnambiguousTimeZone: TimeZone {
fn to_unambiguous_timestamp(&self, d: &NaiveDateTime) -> UnixTimestamp {
self.to_timestamp(d).unwrap()
}
}
/// The *Coordinated Universal Time* time time zone.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct Utc;
impl UnambiguousTimeZone for Utc {}
impl TimeZone for Utc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let days_since_unix = div_floor(u.0, SECONDS_PER_DAY) as i32;
let days = days_since_unix + days_since_d0(1970);
let year = div_floor(days * 400, DAYS_PER_400YEARS) as i32;
let day_of_the_year = days - days_since_d0(year);
let (month, day) = Month::from_day_of_the_year(day_of_the_year, year.into());
let hour = positive_rem(div_floor(u.0, SECONDS_PER_HOUR), 24) as u8;
let minute = positive_rem(div_floor(u.0, SECONDS_PER_MINUTE), 60) as u8;
let second = positive_rem(u.0, 60) as u8;
NaiveDateTime::new(year, month, day, hour, minute, second)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
Ok(UnixTimestamp(
i64::from(days_since_unix(d)) * SECONDS_PER_DAY
+ i64::from(d.hour) * SECONDS_PER_HOUR
+ i64::from(d.minute) * SECONDS_PER_MINUTE
+ i64::from(d.second)
))
}
}
/// The offset is typically positive east of Greenwich (longitude 0°), negative west.
///
/// For example, Japan Standard Time is UTC+09:00:
///
/// ```rust
/// use gregor::FixedOffsetFromUtc;
/// let jst = FixedOffsetFromUtc::from_hours_and_minutes(9, 0);
/// ```
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct FixedOffsetFromUtc {
seconds_ahead_of_utc: i32,
}
impl FixedOffsetFromUtc {
pub fn from_hours_and_minutes(hours: i32, minutes: i32) -> Self {
FixedOffsetFromUtc {
seconds_ahead_of_utc: (hours * 60 + minutes) * 60,
}
}
}
impl UnambiguousTimeZone for FixedOffsetFromUtc {}
impl TimeZone for FixedOffsetFromUtc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
// When local time is ahead of UTC (positive offset)
// that instant happened before midnight UTC
// so there are more seconds since then.
// (Add the offset rather than subtract it.)
// Seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = u.0 + i64::from(self.seconds_ahead_of_utc);
// This is not really a Unix timestamp or a UTC date-time,
// but the two errors compensate to give a date-time in this time zone.
Utc.from_timestamp(UnixTimestamp(seconds))
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// Pretend this is UTC to obtain seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = Utc.to_unambiguous_timestamp(d).0;
// For positives offsets (ahead of UTC) this is earlier in time than UTC midnight
// (with more seconds), so *subtract* the offset to make a Unix timestamp.
Ok(UnixTimestamp(seconds - i64::from(self.seconds_ahead_of_utc)))
}
}
pub trait DaylightSaving {
fn offset_outside_dst(&self) -> FixedOffsetFromUtc;
fn offset_during_dst(&self) -> FixedOffsetFromUtc;
fn is_in_dst(&self, t: UnixTimestamp) -> bool;
}
impl<Tz: DaylightSaving> TimeZone for Tz {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let offset = if self.is_in_dst(u) {
self.offset_during_dst()
} else {
self.offset_outside_dst()
};
offset.from_timestamp(u)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// The actual timestamp/instant is one of these two:
let assuming_outside = self.offset_outside_dst().to_unambiguous_timestamp(d);
let assuming_during = self.offset_during_dst().to_unambiguous_timestamp(d);
// Let’s take Central Europe for example.
// When converted to UTC, `assuming_outside` and `assuming_during` respectively
// represent date-times one hour and two hours before `d`.
// They are one hour apart.
//
// If both timestamps are in the same DST period (during DST or outside)
// then we know for sure which of `assuming_outside` or `assuming_during` is correct.
//
// If they disagree, that means their one hour span contains a DST change:
//
// * 1 am UTC is between `d - 2 hours` and `d - 1 hour`
// * `d - 2 hours` < 1am UTC, and 1am UTC <= `d - 1 hour`
// * `d` < 3 am local time, and 2 am local time <= `d`
// * `d` is between 2 am and 3 am local time.
//
// * In October when clocks go "back", this kind of local time happens twice the same day:
// it’s ambiguous.
// * In March when clocks go "forward", that hour is skipped entirely.
// This kind of local time does not exist. This `d` value might come from buggy code.
match (self.is_in_dst(assuming_outside), self.is_in_dst(assuming_during)) {
(true, true) => Ok(assuming_during),
(false, false) => Ok(assuming_outside),
_ => Err(LocalTimeConversionError { _private: () }),
}
}
}
/// CET (Central European Time) / CEST (Central European Summer Time)
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct CentralEurope;
impl DaylightSaving for CentralEurope {
fn offset_outside_dst(&self) -> FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(1, 0)
}
fn offset_during_dst(&self) -> FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(2, 0)
}
fn is_in_dst(&self, t: UnixTimestamp) -> bool {
use Month::*;
let d = DateTime::from_timestamp(t, Utc);
// Directive 2000/84/EC of the European Parliament and of the Council
// of 19 January 2001 on summer-time arrangements
// http://eur-lex.europa.eu/legal-content/EN/ALL/?uri=CELEX:32000L0084
//
// > Article 1
//
// > For the purposes of this Directive "summer-time period"
// > shall mean the period of the year
// > during which clocks are put forward by 60 minutes compared with the rest of the year.
// >
// > Article 2
// >
// > From 2002 onwards, the summer-time period shall begin, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in March.
// >
// > Article 3
// >
// > From 2002 onwards, the summer-time period shall end, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in October.
if d.month() < March || d.month() > October {
false
} else if d.month() > March && d.month() < October {
true
} else if d.month() == March {
!before_last_sunday_1_am(&d)
} else if d.month() == October {
before_last_sunday_1_am(&d)
} else {
unreachable!()
}
}
}
fn before_last_sunday_1_am(d: &DateTime<Utc>) -> bool {
let last_sunday = last_of_the_month(d, DayOfTheWeek::Sunday);
d.day() < last_sunday || (
d.day() == last_sunday &&
(d.hour(), d.minute(), d.second()) < (1, 0, 0)
)
}
fn last_of_the_month(d: &DateTime<Utc>, requested_dow: DayOfTheWeek) -> u8 {
let last_day = d.month().length(d.year().into());
let last_dow = NaiveDateTime::new(d.year(), d.month(), last_day, 0, 0, 0).day_of_the_week();
let difference = i32::from(last_dow.to_iso_number()) - i32::from(requested_dow.to_iso_number());
last_day - (positive_rem(difference, 7) as u8)
}
pub fn days_since_unix(d: &NaiveDateTime) -> i32 {
(d.year - 1970) * DAYS_PER_COMMON_YEAR
+ leap_days_since_y0(d.year) - leap_days_since_y0(1970)
+ d.month.days_since_january_1st(d.year.into())
+ i32::from(d.day - 1)
}
/// How many leap days occurred between January of year 0 and January of the given year
/// (in Gregorian calendar).
pub fn leap_days_since_y0(year: i32) -> i32 {
if year > 0 {
let year = year - 1; // Don’t include Feb 29 of the given year, if any.
// +1 because year 0 is a leap year.
((year / 4) - (year / 100) + (year / 400)) + 1
} else {
let year = -year;
-((year / 4) - (year / 100) + (year / 400))
}
}
/// Days between January 1st of year 0 and January 1st of the given year.
fn days_since_d0(year: i32) -> i32 {
year * DAYS_PER_COMMON_YEAR + leap_days_since_y0(year)
}
const SECONDS_PER_MINUTE: i64 = 60;
const SECONDS_PER_HOUR: i64 = SECONDS_PER_MINUTE * 60;
const SECONDS_PER_DAY: i64 = SECONDS_PER_HOUR * 24;
/// The leap year schedule of the Gregorian calendar cycles every 400 years.
/// In one cycle, there are:
///
/// * 100 years multiple of 4
/// * 4 years multiple of 100
/// * 1 year multiple of 400
const LEAP_DAYS_PER_400YEARS: i32 = 100 - 4 + 1;
const DAYS_PER_COMMON_YEAR: i32 = 365;
const DAYS_PER_400YEARS: i32 = DAYS_PER_COMMON_YEAR * 400 + LEAP_DAYS_PER_400YEARS; |
pub trait TimeZone {
fn from_timestamp(&self, t: UnixTimestamp) -> NaiveDateTime; | random_line_split |
selector.rs | use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
if (key & msb) != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
Token((key & !msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
pub fn port(&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
let deadline = match timeout {
Some(duration) => {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
}
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
let mut events: u32 = mem::uninitialized();
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
evts.events | .push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
}
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
if poll_opts.is_level() && !poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
self.events.events.drain(0..);
}
}
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
} | random_line_split | |
selector.rs | use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
if (key & msb) != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
Token((key & !msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
pub fn | (&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
let deadline = match timeout {
Some(duration) => {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
}
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
let mut events: u32 = mem::uninitialized();
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
evts.events
.push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
}
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
if poll_opts.is_level() && !poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
self.events.events.drain(0..);
}
}
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
}
| port | identifier_name |
selector.rs | use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
if (key & msb) != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
Token((key & !msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
pub fn port(&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
let deadline = match timeout {
Some(duration) => |
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
let mut events: u32 = mem::uninitialized();
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
evts.events
.push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
}
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
if poll_opts.is_level() && !poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
self.events.events.drain(0..);
}
}
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
}
| {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
} | conditional_block |
selector.rs | use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
if (key & msb) != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
Token((key & !msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
pub fn port(&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
let deadline = match timeout {
Some(duration) => {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
}
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
let mut events: u32 = mem::uninitialized();
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
evts.events
.push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> |
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
if poll_opts.is_level() && !poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
self.events.events.drain(0..);
}
}
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
}
| {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
} | identifier_body |
policy_distillation.py |
import numpy as np
import pickle
import torch as th
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
from rl_baselines.base_classes import BaseRLObject
from srl_zoo.models.base_models import CustomCNN
from srl_zoo.preprocessing.data_loader import SupervisedDataLoader, DataLoader
from srl_zoo.utils import loadData, loadDataCVAE
from state_representation.models import loadSRLModel, getSRLDim
from srl_zoo.preprocessing.utils import one_hot
N_WORKERS = 4
BATCH_SIZE = 8
TEST_BATCH_SIZE = 8
VALIDATION_SIZE = 0.2 # 20% of training data for validation
MAX_BATCH_SIZE_GPU = 256 # For plotting, max batch_size before having memory issues
RENDER_HEIGHT = 224
RENDER_WIDTH = 224
FINE_TUNING = False
CL_LABEL_KEY = "continual_learning_label"
USE_ADAPTIVE_TEMPERATURE = True
TEMPERATURES = {'CC': 0.1, 'SC': 0.1, 'EC': 0.1, 'SQC': 0.1, "default": 0.1}
# run with 0.1 to have good results!
# 0.01 worse reward for CC, better SC
class MLPPolicy(nn.Module):
def __init__(self, output_size, input_size, hidden_size=16):
super(MLPPolicy, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.fc = nn.Sequential(nn.Linear(self.input_size, self.hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self.hidden_size, self.output_size)
)
def forward(self, input):
input = input.view(-1, self.input_size)
return F.softmax(self.fc(input), dim=1)
class CNNPolicy(nn.Module):
def __init__(self, output_size, img_shape):
super(CNNPolicy, self).__init__()
self.model = CustomCNN(state_dim=output_size, img_shape=img_shape)
def forward(self, input):
return F.softmax(self.model(input), dim=1)
class PolicyDistillationModel(BaseRLObject):
"""
Implementation of PolicyDistillation
"""
def __init__(self):
super(PolicyDistillationModel, self).__init__()
def | (self, save_path, _locals=None):
assert self.model is not None, "Error: must train or load model before use"
with open(save_path, "wb") as f:
pickle.dump(self.__dict__, f)
@classmethod
def load(cls, load_path, args=None):
with open(load_path, "rb") as f:
class_dict = pickle.load(f)
loaded_model = PolicyDistillationModel()
loaded_model.__dict__ = class_dict
return loaded_model
def customArguments(self, parser):
parser.add_argument('--nothing4instance', help='Number of population (each one has 2 threads)', type=bool,
default=True)
return parser
def getActionProba(self, observation, dones=None, delta=0):
"""
returns the action probability distribution, from a given observation.
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
if len(observation.shape) > 2:
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
action = self.model.forward(observation).detach().cpu().numpy()
return action
def getAction(self, observation, dones=None, delta=0, sample=False):
"""
From an observation returns the associated action
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
self.model.eval()
if len(observation.shape) > 2:
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
if sample:
proba_actions = self.model.forward(observation).detach().cpu().numpy().flatten()
return np.random.choice(range(len(proba_actions)), 1, p=proba_actions)
else:
return [np.argmax(self.model.forward(observation).detach().cpu().numpy())]
def loss_fn_kd(self, outputs, teacher_outputs, labels=None, adaptive_temperature=False):
"""
Hyperparameters: temperature and alpha
:param outputs: output from the student model
:param teacher_outputs: output from the teacher_outputs model
:return: loss
"""
if labels is not None and adaptive_temperature:
T = th.from_numpy(np.array([TEMPERATURES[labels[idx_elm]] for idx_elm in range(BATCH_SIZE)])).cuda().float()
KD_loss = F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) * \
th.log((F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) / F.softmax(outputs.transpose(1,0), dim=1)))
else:
T = TEMPERATURES["default"]
print('1',teacher_outputs.size())
print('2', outputs.size())
KD_loss = F.softmax(teacher_outputs/T, dim=1) * \
th.log((F.softmax(teacher_outputs/T, dim=1) / F.softmax(outputs, dim=1)))
print(KD_loss.size())
return KD_loss.mean()
def loss_mse(self, outputs, teacher_outputs):
return (outputs - teacher_outputs).pow(2).sum(1).mean()
def train(self, args, callback, env_kwargs=None, train_kwargs=None):
N_EPOCHS = args.epochs_distillation
self.seed = args.seed
self.batch_size = BATCH_SIZE
print("We assumed SRL training already done")
print('Loading data for distillation ')
# training_data, ground_truth, true_states, _ = loadData(args.teacher_data_folder)
training_data, ground_truth, _, _ = loadData(args.teacher_data_folder, with_env=False)
images_path = ground_truth['images_path']
episode_starts = training_data['episode_starts']
actions = training_data['actions']
actions_proba = training_data['actions_proba']
if USE_ADAPTIVE_TEMPERATURE:
cl_labels = training_data[CL_LABEL_KEY]
else:
cl_labels_st = None
if args.distillation_training_set_size > 0:
limit = args.distillation_training_set_size
actions = actions[:limit]
images_path = images_path[:limit]
episode_starts = episode_starts[:limit]
images_path_copy = [images_path[k] for k in range(images_path.shape[0])]
images_path = np.array(images_path_copy)
num_samples = images_path.shape[0] - 1 # number of samples
if args.img_shape is None:
self.img_shape = None #(3,224,224)
else:
self.img_shape = tuple(map(int, args.img_shape[1:-1].split(",")))
# indices for all time steps where the episode continues
indices = np.array([i for i in range(num_samples) if not episode_starts[i + 1]], dtype='int64')
np.random.shuffle(indices)
# split indices into minibatches. minibatchlist is a list of lists; each
# list is the id of the observation preserved through the training
minibatchlist = [np.array(sorted(indices[start_idx:start_idx + self.batch_size]))
for start_idx in range(0, len(indices) - self.batch_size + 1, self.batch_size)]
data_loader = DataLoader(minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, is_training=True,absolute_path=False)
test_minibatchlist = DataLoader.createTestMinibatchList(len(images_path), MAX_BATCH_SIZE_GPU)
test_data_loader = DataLoader(test_minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, max_queue_len=1, is_training=False,absolute_path=False)
# Number of minibatches used for validation:
n_val_batches = np.round(VALIDATION_SIZE * len(minibatchlist)).astype(np.int64)
val_indices = np.random.permutation(len(minibatchlist))[:n_val_batches]
# Print some info
print("{} minibatches for training, {} samples".format(len(minibatchlist) - n_val_batches,
(len(minibatchlist) - n_val_batches) * BATCH_SIZE))
print("{} minibatches for validation, {} samples".format(n_val_batches, n_val_batches * BATCH_SIZE))
assert n_val_batches > 0, "Not enough sample to create a validation set"
# Stats about actions
if not args.continuous_actions:
print('Discrete action space:')
action_set = set(actions)
n_actions = int(np.max(actions) + 1)
print("{} unique actions / {} actions".format(len(action_set), n_actions))
n_obs_per_action = np.zeros(n_actions, dtype=np.int64)
for i in range(n_actions):
n_obs_per_action[i] = np.sum(actions == i)
print("Number of observations per action")
print(n_obs_per_action)
else:
print('Continuous action space:')
print('Action dimension: {}'.format(self.dim_action))
# Here the default SRL model is assumed to be raw_pixels
self.state_dim = self.img_shape[0] * self.img_shape[1] * self.img_shape[2] # why
self.srl_model = None
print("env_kwargs[srl_model] ",env_kwargs["srl_model"])
# TODO: add sanity checks & test for all possible SRL for distillation
if env_kwargs["srl_model"] == "raw_pixels":
# if the pilicy distillation is used with raw pixel
self.model = CNNPolicy(n_actions, self.img_shape)
learnable_params = self.model.parameters()
learning_rate = 1e-3
else:
self.state_dim = getSRLDim(env_kwargs.get("srl_model_path", None))
self.srl_model = loadSRLModel(env_kwargs.get("srl_model_path", None),
th.cuda.is_available(), self.state_dim, env_object=None)
self.model = MLPPolicy(output_size=n_actions, input_size=self.state_dim)
for param in self.model.parameters():
param.requires_grad = True
learnable_params = [param for param in self.model.parameters()]
if FINE_TUNING and self.srl_model is not None:
for param in self.srl_model.model.parameters():
param.requires_grad = True
learnable_params += [param for param in self.srl_model.model.parameters()]
learning_rate = 1e-3
self.device = th.device("cuda" if th.cuda.is_available() else "cpu")
if th.cuda.is_available():
self.model.cuda()
self.optimizer = th.optim.Adam(learnable_params, lr=learning_rate)
best_error = np.inf
best_model_path = "{}/{}_model.pkl".format(args.log_dir, args.algo)
for epoch in range(N_EPOCHS):
# In each epoch, we do a full pass over the training data:
epoch_loss, epoch_batches = 0, 0
val_loss = 0
pbar = tqdm(total=len(minibatchlist))
for minibatch_num, (minibatch_idx, obs, _, _, _) in enumerate(data_loader):
self.optimizer.zero_grad()
obs = obs.to(self.device)
validation_mode = minibatch_idx in val_indices
if validation_mode:
self.model.eval()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.eval()
else:
self.model.train()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.train()
# Actions associated to the observations of the current minibatch
actions_st = actions[minibatchlist[minibatch_idx]]
actions_proba_st = actions_proba[minibatchlist[minibatch_idx]]
if USE_ADAPTIVE_TEMPERATURE:
cl_labels_st = cl_labels[minibatchlist[minibatch_idx]]
if not args.continuous_actions:
# Discrete actions, rearrange action to have n_minibatch ligns and one column,
# containing the int action
actions_st = one_hot(th.from_numpy(actions_st)).requires_grad_(False).to(self.device)
actions_proba_st = th.from_numpy(actions_proba_st).requires_grad_(False).to(self.device)
else:
a = 0
# Continuous actions, rearrange action to have n_minibatch ligns and dim_action columns
actions_st = th.from_numpy(actions_st).view(-1, self.dim_action).requires_grad_(False).to(
self.device)
if self.srl_model is not None:
state = self.srl_model.model.getStates(obs).to(self.device).detach()
if "autoencoder" in self.srl_model.model.losses:
use_ae = True
decoded_obs = self.srl_model.model.model.decode(state).to(self.device).detach()
else:
state = obs.detach()
pred_action = self.model.forward(state)
loss = self.loss_fn_kd(pred_action,
actions_proba_st.float(),
labels=cl_labels_st, adaptive_temperature=USE_ADAPTIVE_TEMPERATURE)
#loss = self.loss_mse(pred_action, actions_proba_st.float())
if validation_mode:
val_loss += loss.item()
# We do not optimize on validation data
# so optimizer.step() is not called
else:
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
epoch_batches += 1
pbar.update(1)
train_loss = epoch_loss / float(epoch_batches)
val_loss /= float(n_val_batches)
pbar.close()
print("Epoch {:3}/{}, train_loss:{:.6f} val_loss:{:.6f}".format(epoch + 1, N_EPOCHS, train_loss, val_loss))
# Save best model
if val_loss < best_error:
best_error = val_loss
self.save(best_model_path)
| save | identifier_name |
policy_distillation.py | import numpy as np
import pickle
import torch as th
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
from rl_baselines.base_classes import BaseRLObject
from srl_zoo.models.base_models import CustomCNN
from srl_zoo.preprocessing.data_loader import SupervisedDataLoader, DataLoader
from srl_zoo.utils import loadData, loadDataCVAE
from state_representation.models import loadSRLModel, getSRLDim
from srl_zoo.preprocessing.utils import one_hot
N_WORKERS = 4
BATCH_SIZE = 8
TEST_BATCH_SIZE = 8
VALIDATION_SIZE = 0.2 # 20% of training data for validation
MAX_BATCH_SIZE_GPU = 256 # For plotting, max batch_size before having memory issues
RENDER_HEIGHT = 224
RENDER_WIDTH = 224
FINE_TUNING = False
CL_LABEL_KEY = "continual_learning_label"
USE_ADAPTIVE_TEMPERATURE = True
TEMPERATURES = {'CC': 0.1, 'SC': 0.1, 'EC': 0.1, 'SQC': 0.1, "default": 0.1}
# run with 0.1 to have good results!
# 0.01 worse reward for CC, better SC
class MLPPolicy(nn.Module):
def __init__(self, output_size, input_size, hidden_size=16):
super(MLPPolicy, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.fc = nn.Sequential(nn.Linear(self.input_size, self.hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self.hidden_size, self.output_size)
)
def forward(self, input):
input = input.view(-1, self.input_size)
return F.softmax(self.fc(input), dim=1)
class CNNPolicy(nn.Module):
def __init__(self, output_size, img_shape):
super(CNNPolicy, self).__init__()
self.model = CustomCNN(state_dim=output_size, img_shape=img_shape)
def forward(self, input):
return F.softmax(self.model(input), dim=1)
class PolicyDistillationModel(BaseRLObject):
"""
Implementation of PolicyDistillation
"""
def __init__(self):
super(PolicyDistillationModel, self).__init__()
def save(self, save_path, _locals=None):
assert self.model is not None, "Error: must train or load model before use"
with open(save_path, "wb") as f:
pickle.dump(self.__dict__, f)
@classmethod
def load(cls, load_path, args=None):
with open(load_path, "rb") as f:
class_dict = pickle.load(f)
loaded_model = PolicyDistillationModel()
loaded_model.__dict__ = class_dict
return loaded_model
def customArguments(self, parser):
parser.add_argument('--nothing4instance', help='Number of population (each one has 2 threads)', type=bool,
default=True)
return parser
def getActionProba(self, observation, dones=None, delta=0):
"""
returns the action probability distribution, from a given observation.
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
if len(observation.shape) > 2:
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
action = self.model.forward(observation).detach().cpu().numpy()
return action
def getAction(self, observation, dones=None, delta=0, sample=False):
"""
From an observation returns the associated action
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
self.model.eval()
if len(observation.shape) > 2:
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
if sample:
proba_actions = self.model.forward(observation).detach().cpu().numpy().flatten()
return np.random.choice(range(len(proba_actions)), 1, p=proba_actions)
else:
return [np.argmax(self.model.forward(observation).detach().cpu().numpy())]
def loss_fn_kd(self, outputs, teacher_outputs, labels=None, adaptive_temperature=False):
"""
Hyperparameters: temperature and alpha
:param outputs: output from the student model
:param teacher_outputs: output from the teacher_outputs model
:return: loss
"""
if labels is not None and adaptive_temperature:
T = th.from_numpy(np.array([TEMPERATURES[labels[idx_elm]] for idx_elm in range(BATCH_SIZE)])).cuda().float()
KD_loss = F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) * \
th.log((F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) / F.softmax(outputs.transpose(1,0), dim=1)))
else:
T = TEMPERATURES["default"]
print('1',teacher_outputs.size())
print('2', outputs.size())
KD_loss = F.softmax(teacher_outputs/T, dim=1) * \
th.log((F.softmax(teacher_outputs/T, dim=1) / F.softmax(outputs, dim=1)))
print(KD_loss.size())
return KD_loss.mean()
def loss_mse(self, outputs, teacher_outputs):
return (outputs - teacher_outputs).pow(2).sum(1).mean()
def train(self, args, callback, env_kwargs=None, train_kwargs=None):
N_EPOCHS = args.epochs_distillation
self.seed = args.seed
self.batch_size = BATCH_SIZE
print("We assumed SRL training already done")
print('Loading data for distillation ')
# training_data, ground_truth, true_states, _ = loadData(args.teacher_data_folder)
training_data, ground_truth, _, _ = loadData(args.teacher_data_folder, with_env=False)
images_path = ground_truth['images_path']
episode_starts = training_data['episode_starts']
actions = training_data['actions']
actions_proba = training_data['actions_proba']
if USE_ADAPTIVE_TEMPERATURE:
cl_labels = training_data[CL_LABEL_KEY]
else:
cl_labels_st = None
if args.distillation_training_set_size > 0:
limit = args.distillation_training_set_size
actions = actions[:limit]
images_path = images_path[:limit]
episode_starts = episode_starts[:limit]
images_path_copy = [images_path[k] for k in range(images_path.shape[0])]
images_path = np.array(images_path_copy)
num_samples = images_path.shape[0] - 1 # number of samples
if args.img_shape is None:
self.img_shape = None #(3,224,224)
else:
self.img_shape = tuple(map(int, args.img_shape[1:-1].split(",")))
# indices for all time steps where the episode continues
indices = np.array([i for i in range(num_samples) if not episode_starts[i + 1]], dtype='int64')
np.random.shuffle(indices)
# split indices into minibatches. minibatchlist is a list of lists; each
# list is the id of the observation preserved through the training
minibatchlist = [np.array(sorted(indices[start_idx:start_idx + self.batch_size]))
for start_idx in range(0, len(indices) - self.batch_size + 1, self.batch_size)]
data_loader = DataLoader(minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, is_training=True,absolute_path=False)
test_minibatchlist = DataLoader.createTestMinibatchList(len(images_path), MAX_BATCH_SIZE_GPU)
test_data_loader = DataLoader(test_minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, max_queue_len=1, is_training=False,absolute_path=False)
# Number of minibatches used for validation:
n_val_batches = np.round(VALIDATION_SIZE * len(minibatchlist)).astype(np.int64)
val_indices = np.random.permutation(len(minibatchlist))[:n_val_batches]
# Print some info
print("{} minibatches for training, {} samples".format(len(minibatchlist) - n_val_batches,
(len(minibatchlist) - n_val_batches) * BATCH_SIZE))
print("{} minibatches for validation, {} samples".format(n_val_batches, n_val_batches * BATCH_SIZE))
assert n_val_batches > 0, "Not enough sample to create a validation set"
# Stats about actions
if not args.continuous_actions:
print('Discrete action space:')
action_set = set(actions)
n_actions = int(np.max(actions) + 1)
print("{} unique actions / {} actions".format(len(action_set), n_actions))
n_obs_per_action = np.zeros(n_actions, dtype=np.int64)
for i in range(n_actions):
n_obs_per_action[i] = np.sum(actions == i)
print("Number of observations per action")
print(n_obs_per_action)
else:
print('Continuous action space:')
print('Action dimension: {}'.format(self.dim_action))
# Here the default SRL model is assumed to be raw_pixels
self.state_dim = self.img_shape[0] * self.img_shape[1] * self.img_shape[2] # why
self.srl_model = None
print("env_kwargs[srl_model] ",env_kwargs["srl_model"])
# TODO: add sanity checks & test for all possible SRL for distillation
if env_kwargs["srl_model"] == "raw_pixels":
# if the pilicy distillation is used with raw pixel
self.model = CNNPolicy(n_actions, self.img_shape)
learnable_params = self.model.parameters()
learning_rate = 1e-3
else:
self.state_dim = getSRLDim(env_kwargs.get("srl_model_path", None))
self.srl_model = loadSRLModel(env_kwargs.get("srl_model_path", None),
th.cuda.is_available(), self.state_dim, env_object=None)
self.model = MLPPolicy(output_size=n_actions, input_size=self.state_dim)
for param in self.model.parameters():
param.requires_grad = True
learnable_params = [param for param in self.model.parameters()]
if FINE_TUNING and self.srl_model is not None:
for param in self.srl_model.model.parameters(): | self.device = th.device("cuda" if th.cuda.is_available() else "cpu")
if th.cuda.is_available():
self.model.cuda()
self.optimizer = th.optim.Adam(learnable_params, lr=learning_rate)
best_error = np.inf
best_model_path = "{}/{}_model.pkl".format(args.log_dir, args.algo)
for epoch in range(N_EPOCHS):
# In each epoch, we do a full pass over the training data:
epoch_loss, epoch_batches = 0, 0
val_loss = 0
pbar = tqdm(total=len(minibatchlist))
for minibatch_num, (minibatch_idx, obs, _, _, _) in enumerate(data_loader):
self.optimizer.zero_grad()
obs = obs.to(self.device)
validation_mode = minibatch_idx in val_indices
if validation_mode:
self.model.eval()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.eval()
else:
self.model.train()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.train()
# Actions associated to the observations of the current minibatch
actions_st = actions[minibatchlist[minibatch_idx]]
actions_proba_st = actions_proba[minibatchlist[minibatch_idx]]
if USE_ADAPTIVE_TEMPERATURE:
cl_labels_st = cl_labels[minibatchlist[minibatch_idx]]
if not args.continuous_actions:
# Discrete actions, rearrange action to have n_minibatch ligns and one column,
# containing the int action
actions_st = one_hot(th.from_numpy(actions_st)).requires_grad_(False).to(self.device)
actions_proba_st = th.from_numpy(actions_proba_st).requires_grad_(False).to(self.device)
else:
a = 0
# Continuous actions, rearrange action to have n_minibatch ligns and dim_action columns
actions_st = th.from_numpy(actions_st).view(-1, self.dim_action).requires_grad_(False).to(
self.device)
if self.srl_model is not None:
state = self.srl_model.model.getStates(obs).to(self.device).detach()
if "autoencoder" in self.srl_model.model.losses:
use_ae = True
decoded_obs = self.srl_model.model.model.decode(state).to(self.device).detach()
else:
state = obs.detach()
pred_action = self.model.forward(state)
loss = self.loss_fn_kd(pred_action,
actions_proba_st.float(),
labels=cl_labels_st, adaptive_temperature=USE_ADAPTIVE_TEMPERATURE)
#loss = self.loss_mse(pred_action, actions_proba_st.float())
if validation_mode:
val_loss += loss.item()
# We do not optimize on validation data
# so optimizer.step() is not called
else:
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
epoch_batches += 1
pbar.update(1)
train_loss = epoch_loss / float(epoch_batches)
val_loss /= float(n_val_batches)
pbar.close()
print("Epoch {:3}/{}, train_loss:{:.6f} val_loss:{:.6f}".format(epoch + 1, N_EPOCHS, train_loss, val_loss))
# Save best model
if val_loss < best_error:
best_error = val_loss
self.save(best_model_path) | param.requires_grad = True
learnable_params += [param for param in self.srl_model.model.parameters()]
learning_rate = 1e-3 | random_line_split |
policy_distillation.py |
import numpy as np
import pickle
import torch as th
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
from rl_baselines.base_classes import BaseRLObject
from srl_zoo.models.base_models import CustomCNN
from srl_zoo.preprocessing.data_loader import SupervisedDataLoader, DataLoader
from srl_zoo.utils import loadData, loadDataCVAE
from state_representation.models import loadSRLModel, getSRLDim
from srl_zoo.preprocessing.utils import one_hot
N_WORKERS = 4
BATCH_SIZE = 8
TEST_BATCH_SIZE = 8
VALIDATION_SIZE = 0.2 # 20% of training data for validation
MAX_BATCH_SIZE_GPU = 256 # For plotting, max batch_size before having memory issues
RENDER_HEIGHT = 224
RENDER_WIDTH = 224
FINE_TUNING = False
CL_LABEL_KEY = "continual_learning_label"
USE_ADAPTIVE_TEMPERATURE = True
TEMPERATURES = {'CC': 0.1, 'SC': 0.1, 'EC': 0.1, 'SQC': 0.1, "default": 0.1}
# run with 0.1 to have good results!
# 0.01 worse reward for CC, better SC
class MLPPolicy(nn.Module):
def __init__(self, output_size, input_size, hidden_size=16):
super(MLPPolicy, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.fc = nn.Sequential(nn.Linear(self.input_size, self.hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self.hidden_size, self.output_size)
)
def forward(self, input):
input = input.view(-1, self.input_size)
return F.softmax(self.fc(input), dim=1)
class CNNPolicy(nn.Module):
def __init__(self, output_size, img_shape):
super(CNNPolicy, self).__init__()
self.model = CustomCNN(state_dim=output_size, img_shape=img_shape)
def forward(self, input):
return F.softmax(self.model(input), dim=1)
class PolicyDistillationModel(BaseRLObject):
"""
Implementation of PolicyDistillation
"""
def __init__(self):
super(PolicyDistillationModel, self).__init__()
def save(self, save_path, _locals=None):
assert self.model is not None, "Error: must train or load model before use"
with open(save_path, "wb") as f:
pickle.dump(self.__dict__, f)
@classmethod
def load(cls, load_path, args=None):
with open(load_path, "rb") as f:
class_dict = pickle.load(f)
loaded_model = PolicyDistillationModel()
loaded_model.__dict__ = class_dict
return loaded_model
def customArguments(self, parser):
|
def getActionProba(self, observation, dones=None, delta=0):
"""
returns the action probability distribution, from a given observation.
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
if len(observation.shape) > 2:
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
action = self.model.forward(observation).detach().cpu().numpy()
return action
def getAction(self, observation, dones=None, delta=0, sample=False):
"""
From an observation returns the associated action
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
self.model.eval()
if len(observation.shape) > 2:
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
if sample:
proba_actions = self.model.forward(observation).detach().cpu().numpy().flatten()
return np.random.choice(range(len(proba_actions)), 1, p=proba_actions)
else:
return [np.argmax(self.model.forward(observation).detach().cpu().numpy())]
def loss_fn_kd(self, outputs, teacher_outputs, labels=None, adaptive_temperature=False):
"""
Hyperparameters: temperature and alpha
:param outputs: output from the student model
:param teacher_outputs: output from the teacher_outputs model
:return: loss
"""
if labels is not None and adaptive_temperature:
T = th.from_numpy(np.array([TEMPERATURES[labels[idx_elm]] for idx_elm in range(BATCH_SIZE)])).cuda().float()
KD_loss = F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) * \
th.log((F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) / F.softmax(outputs.transpose(1,0), dim=1)))
else:
T = TEMPERATURES["default"]
print('1',teacher_outputs.size())
print('2', outputs.size())
KD_loss = F.softmax(teacher_outputs/T, dim=1) * \
th.log((F.softmax(teacher_outputs/T, dim=1) / F.softmax(outputs, dim=1)))
print(KD_loss.size())
return KD_loss.mean()
def loss_mse(self, outputs, teacher_outputs):
return (outputs - teacher_outputs).pow(2).sum(1).mean()
def train(self, args, callback, env_kwargs=None, train_kwargs=None):
N_EPOCHS = args.epochs_distillation
self.seed = args.seed
self.batch_size = BATCH_SIZE
print("We assumed SRL training already done")
print('Loading data for distillation ')
# training_data, ground_truth, true_states, _ = loadData(args.teacher_data_folder)
training_data, ground_truth, _, _ = loadData(args.teacher_data_folder, with_env=False)
images_path = ground_truth['images_path']
episode_starts = training_data['episode_starts']
actions = training_data['actions']
actions_proba = training_data['actions_proba']
if USE_ADAPTIVE_TEMPERATURE:
cl_labels = training_data[CL_LABEL_KEY]
else:
cl_labels_st = None
if args.distillation_training_set_size > 0:
limit = args.distillation_training_set_size
actions = actions[:limit]
images_path = images_path[:limit]
episode_starts = episode_starts[:limit]
images_path_copy = [images_path[k] for k in range(images_path.shape[0])]
images_path = np.array(images_path_copy)
num_samples = images_path.shape[0] - 1 # number of samples
if args.img_shape is None:
self.img_shape = None #(3,224,224)
else:
self.img_shape = tuple(map(int, args.img_shape[1:-1].split(",")))
# indices for all time steps where the episode continues
indices = np.array([i for i in range(num_samples) if not episode_starts[i + 1]], dtype='int64')
np.random.shuffle(indices)
# split indices into minibatches. minibatchlist is a list of lists; each
# list is the id of the observation preserved through the training
minibatchlist = [np.array(sorted(indices[start_idx:start_idx + self.batch_size]))
for start_idx in range(0, len(indices) - self.batch_size + 1, self.batch_size)]
data_loader = DataLoader(minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, is_training=True,absolute_path=False)
test_minibatchlist = DataLoader.createTestMinibatchList(len(images_path), MAX_BATCH_SIZE_GPU)
test_data_loader = DataLoader(test_minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, max_queue_len=1, is_training=False,absolute_path=False)
# Number of minibatches used for validation:
n_val_batches = np.round(VALIDATION_SIZE * len(minibatchlist)).astype(np.int64)
val_indices = np.random.permutation(len(minibatchlist))[:n_val_batches]
# Print some info
print("{} minibatches for training, {} samples".format(len(minibatchlist) - n_val_batches,
(len(minibatchlist) - n_val_batches) * BATCH_SIZE))
print("{} minibatches for validation, {} samples".format(n_val_batches, n_val_batches * BATCH_SIZE))
assert n_val_batches > 0, "Not enough sample to create a validation set"
# Stats about actions
if not args.continuous_actions:
print('Discrete action space:')
action_set = set(actions)
n_actions = int(np.max(actions) + 1)
print("{} unique actions / {} actions".format(len(action_set), n_actions))
n_obs_per_action = np.zeros(n_actions, dtype=np.int64)
for i in range(n_actions):
n_obs_per_action[i] = np.sum(actions == i)
print("Number of observations per action")
print(n_obs_per_action)
else:
print('Continuous action space:')
print('Action dimension: {}'.format(self.dim_action))
# Here the default SRL model is assumed to be raw_pixels
self.state_dim = self.img_shape[0] * self.img_shape[1] * self.img_shape[2] # why
self.srl_model = None
print("env_kwargs[srl_model] ",env_kwargs["srl_model"])
# TODO: add sanity checks & test for all possible SRL for distillation
if env_kwargs["srl_model"] == "raw_pixels":
# if the pilicy distillation is used with raw pixel
self.model = CNNPolicy(n_actions, self.img_shape)
learnable_params = self.model.parameters()
learning_rate = 1e-3
else:
self.state_dim = getSRLDim(env_kwargs.get("srl_model_path", None))
self.srl_model = loadSRLModel(env_kwargs.get("srl_model_path", None),
th.cuda.is_available(), self.state_dim, env_object=None)
self.model = MLPPolicy(output_size=n_actions, input_size=self.state_dim)
for param in self.model.parameters():
param.requires_grad = True
learnable_params = [param for param in self.model.parameters()]
if FINE_TUNING and self.srl_model is not None:
for param in self.srl_model.model.parameters():
param.requires_grad = True
learnable_params += [param for param in self.srl_model.model.parameters()]
learning_rate = 1e-3
self.device = th.device("cuda" if th.cuda.is_available() else "cpu")
if th.cuda.is_available():
self.model.cuda()
self.optimizer = th.optim.Adam(learnable_params, lr=learning_rate)
best_error = np.inf
best_model_path = "{}/{}_model.pkl".format(args.log_dir, args.algo)
for epoch in range(N_EPOCHS):
# In each epoch, we do a full pass over the training data:
epoch_loss, epoch_batches = 0, 0
val_loss = 0
pbar = tqdm(total=len(minibatchlist))
for minibatch_num, (minibatch_idx, obs, _, _, _) in enumerate(data_loader):
self.optimizer.zero_grad()
obs = obs.to(self.device)
validation_mode = minibatch_idx in val_indices
if validation_mode:
self.model.eval()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.eval()
else:
self.model.train()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.train()
# Actions associated to the observations of the current minibatch
actions_st = actions[minibatchlist[minibatch_idx]]
actions_proba_st = actions_proba[minibatchlist[minibatch_idx]]
if USE_ADAPTIVE_TEMPERATURE:
cl_labels_st = cl_labels[minibatchlist[minibatch_idx]]
if not args.continuous_actions:
# Discrete actions, rearrange action to have n_minibatch ligns and one column,
# containing the int action
actions_st = one_hot(th.from_numpy(actions_st)).requires_grad_(False).to(self.device)
actions_proba_st = th.from_numpy(actions_proba_st).requires_grad_(False).to(self.device)
else:
a = 0
# Continuous actions, rearrange action to have n_minibatch ligns and dim_action columns
actions_st = th.from_numpy(actions_st).view(-1, self.dim_action).requires_grad_(False).to(
self.device)
if self.srl_model is not None:
state = self.srl_model.model.getStates(obs).to(self.device).detach()
if "autoencoder" in self.srl_model.model.losses:
use_ae = True
decoded_obs = self.srl_model.model.model.decode(state).to(self.device).detach()
else:
state = obs.detach()
pred_action = self.model.forward(state)
loss = self.loss_fn_kd(pred_action,
actions_proba_st.float(),
labels=cl_labels_st, adaptive_temperature=USE_ADAPTIVE_TEMPERATURE)
#loss = self.loss_mse(pred_action, actions_proba_st.float())
if validation_mode:
val_loss += loss.item()
# We do not optimize on validation data
# so optimizer.step() is not called
else:
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
epoch_batches += 1
pbar.update(1)
train_loss = epoch_loss / float(epoch_batches)
val_loss /= float(n_val_batches)
pbar.close()
print("Epoch {:3}/{}, train_loss:{:.6f} val_loss:{:.6f}".format(epoch + 1, N_EPOCHS, train_loss, val_loss))
# Save best model
if val_loss < best_error:
best_error = val_loss
self.save(best_model_path)
| parser.add_argument('--nothing4instance', help='Number of population (each one has 2 threads)', type=bool,
default=True)
return parser | identifier_body |
policy_distillation.py |
import numpy as np
import pickle
import torch as th
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
from rl_baselines.base_classes import BaseRLObject
from srl_zoo.models.base_models import CustomCNN
from srl_zoo.preprocessing.data_loader import SupervisedDataLoader, DataLoader
from srl_zoo.utils import loadData, loadDataCVAE
from state_representation.models import loadSRLModel, getSRLDim
from srl_zoo.preprocessing.utils import one_hot
N_WORKERS = 4
BATCH_SIZE = 8
TEST_BATCH_SIZE = 8
VALIDATION_SIZE = 0.2 # 20% of training data for validation
MAX_BATCH_SIZE_GPU = 256 # For plotting, max batch_size before having memory issues
RENDER_HEIGHT = 224
RENDER_WIDTH = 224
FINE_TUNING = False
CL_LABEL_KEY = "continual_learning_label"
USE_ADAPTIVE_TEMPERATURE = True
TEMPERATURES = {'CC': 0.1, 'SC': 0.1, 'EC': 0.1, 'SQC': 0.1, "default": 0.1}
# run with 0.1 to have good results!
# 0.01 worse reward for CC, better SC
class MLPPolicy(nn.Module):
def __init__(self, output_size, input_size, hidden_size=16):
super(MLPPolicy, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.fc = nn.Sequential(nn.Linear(self.input_size, self.hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self.hidden_size, self.output_size)
)
def forward(self, input):
input = input.view(-1, self.input_size)
return F.softmax(self.fc(input), dim=1)
class CNNPolicy(nn.Module):
def __init__(self, output_size, img_shape):
super(CNNPolicy, self).__init__()
self.model = CustomCNN(state_dim=output_size, img_shape=img_shape)
def forward(self, input):
return F.softmax(self.model(input), dim=1)
class PolicyDistillationModel(BaseRLObject):
"""
Implementation of PolicyDistillation
"""
def __init__(self):
super(PolicyDistillationModel, self).__init__()
def save(self, save_path, _locals=None):
assert self.model is not None, "Error: must train or load model before use"
with open(save_path, "wb") as f:
pickle.dump(self.__dict__, f)
@classmethod
def load(cls, load_path, args=None):
with open(load_path, "rb") as f:
class_dict = pickle.load(f)
loaded_model = PolicyDistillationModel()
loaded_model.__dict__ = class_dict
return loaded_model
def customArguments(self, parser):
parser.add_argument('--nothing4instance', help='Number of population (each one has 2 threads)', type=bool,
default=True)
return parser
def getActionProba(self, observation, dones=None, delta=0):
"""
returns the action probability distribution, from a given observation.
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
if len(observation.shape) > 2:
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
action = self.model.forward(observation).detach().cpu().numpy()
return action
def getAction(self, observation, dones=None, delta=0, sample=False):
"""
From an observation returns the associated action
:param observation: (numpy int or numpy float)
:param dones: ([bool])
:param delta: (numpy float or float) The exploration noise applied to the policy, set to 0 for no noise.
:return: (numpy float)
"""
assert self.model is not None, "Error: must train or load model before use"
self.model.eval()
if len(observation.shape) > 2:
|
observation = th.from_numpy(observation).float().requires_grad_(False).to(self.device)
if sample:
proba_actions = self.model.forward(observation).detach().cpu().numpy().flatten()
return np.random.choice(range(len(proba_actions)), 1, p=proba_actions)
else:
return [np.argmax(self.model.forward(observation).detach().cpu().numpy())]
def loss_fn_kd(self, outputs, teacher_outputs, labels=None, adaptive_temperature=False):
"""
Hyperparameters: temperature and alpha
:param outputs: output from the student model
:param teacher_outputs: output from the teacher_outputs model
:return: loss
"""
if labels is not None and adaptive_temperature:
T = th.from_numpy(np.array([TEMPERATURES[labels[idx_elm]] for idx_elm in range(BATCH_SIZE)])).cuda().float()
KD_loss = F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) * \
th.log((F.softmax(th.div(teacher_outputs.transpose(1, 0), T), dim=1) / F.softmax(outputs.transpose(1,0), dim=1)))
else:
T = TEMPERATURES["default"]
print('1',teacher_outputs.size())
print('2', outputs.size())
KD_loss = F.softmax(teacher_outputs/T, dim=1) * \
th.log((F.softmax(teacher_outputs/T, dim=1) / F.softmax(outputs, dim=1)))
print(KD_loss.size())
return KD_loss.mean()
def loss_mse(self, outputs, teacher_outputs):
return (outputs - teacher_outputs).pow(2).sum(1).mean()
def train(self, args, callback, env_kwargs=None, train_kwargs=None):
N_EPOCHS = args.epochs_distillation
self.seed = args.seed
self.batch_size = BATCH_SIZE
print("We assumed SRL training already done")
print('Loading data for distillation ')
# training_data, ground_truth, true_states, _ = loadData(args.teacher_data_folder)
training_data, ground_truth, _, _ = loadData(args.teacher_data_folder, with_env=False)
images_path = ground_truth['images_path']
episode_starts = training_data['episode_starts']
actions = training_data['actions']
actions_proba = training_data['actions_proba']
if USE_ADAPTIVE_TEMPERATURE:
cl_labels = training_data[CL_LABEL_KEY]
else:
cl_labels_st = None
if args.distillation_training_set_size > 0:
limit = args.distillation_training_set_size
actions = actions[:limit]
images_path = images_path[:limit]
episode_starts = episode_starts[:limit]
images_path_copy = [images_path[k] for k in range(images_path.shape[0])]
images_path = np.array(images_path_copy)
num_samples = images_path.shape[0] - 1 # number of samples
if args.img_shape is None:
self.img_shape = None #(3,224,224)
else:
self.img_shape = tuple(map(int, args.img_shape[1:-1].split(",")))
# indices for all time steps where the episode continues
indices = np.array([i for i in range(num_samples) if not episode_starts[i + 1]], dtype='int64')
np.random.shuffle(indices)
# split indices into minibatches. minibatchlist is a list of lists; each
# list is the id of the observation preserved through the training
minibatchlist = [np.array(sorted(indices[start_idx:start_idx + self.batch_size]))
for start_idx in range(0, len(indices) - self.batch_size + 1, self.batch_size)]
data_loader = DataLoader(minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, is_training=True,absolute_path=False)
test_minibatchlist = DataLoader.createTestMinibatchList(len(images_path), MAX_BATCH_SIZE_GPU)
test_data_loader = DataLoader(test_minibatchlist, images_path, self.img_shape, n_workers=N_WORKERS, multi_view=False,
use_triplets=False, max_queue_len=1, is_training=False,absolute_path=False)
# Number of minibatches used for validation:
n_val_batches = np.round(VALIDATION_SIZE * len(minibatchlist)).astype(np.int64)
val_indices = np.random.permutation(len(minibatchlist))[:n_val_batches]
# Print some info
print("{} minibatches for training, {} samples".format(len(minibatchlist) - n_val_batches,
(len(minibatchlist) - n_val_batches) * BATCH_SIZE))
print("{} minibatches for validation, {} samples".format(n_val_batches, n_val_batches * BATCH_SIZE))
assert n_val_batches > 0, "Not enough sample to create a validation set"
# Stats about actions
if not args.continuous_actions:
print('Discrete action space:')
action_set = set(actions)
n_actions = int(np.max(actions) + 1)
print("{} unique actions / {} actions".format(len(action_set), n_actions))
n_obs_per_action = np.zeros(n_actions, dtype=np.int64)
for i in range(n_actions):
n_obs_per_action[i] = np.sum(actions == i)
print("Number of observations per action")
print(n_obs_per_action)
else:
print('Continuous action space:')
print('Action dimension: {}'.format(self.dim_action))
# Here the default SRL model is assumed to be raw_pixels
self.state_dim = self.img_shape[0] * self.img_shape[1] * self.img_shape[2] # why
self.srl_model = None
print("env_kwargs[srl_model] ",env_kwargs["srl_model"])
# TODO: add sanity checks & test for all possible SRL for distillation
if env_kwargs["srl_model"] == "raw_pixels":
# if the pilicy distillation is used with raw pixel
self.model = CNNPolicy(n_actions, self.img_shape)
learnable_params = self.model.parameters()
learning_rate = 1e-3
else:
self.state_dim = getSRLDim(env_kwargs.get("srl_model_path", None))
self.srl_model = loadSRLModel(env_kwargs.get("srl_model_path", None),
th.cuda.is_available(), self.state_dim, env_object=None)
self.model = MLPPolicy(output_size=n_actions, input_size=self.state_dim)
for param in self.model.parameters():
param.requires_grad = True
learnable_params = [param for param in self.model.parameters()]
if FINE_TUNING and self.srl_model is not None:
for param in self.srl_model.model.parameters():
param.requires_grad = True
learnable_params += [param for param in self.srl_model.model.parameters()]
learning_rate = 1e-3
self.device = th.device("cuda" if th.cuda.is_available() else "cpu")
if th.cuda.is_available():
self.model.cuda()
self.optimizer = th.optim.Adam(learnable_params, lr=learning_rate)
best_error = np.inf
best_model_path = "{}/{}_model.pkl".format(args.log_dir, args.algo)
for epoch in range(N_EPOCHS):
# In each epoch, we do a full pass over the training data:
epoch_loss, epoch_batches = 0, 0
val_loss = 0
pbar = tqdm(total=len(minibatchlist))
for minibatch_num, (minibatch_idx, obs, _, _, _) in enumerate(data_loader):
self.optimizer.zero_grad()
obs = obs.to(self.device)
validation_mode = minibatch_idx in val_indices
if validation_mode:
self.model.eval()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.eval()
else:
self.model.train()
if FINE_TUNING and self.srl_model is not None:
self.srl_model.model.train()
# Actions associated to the observations of the current minibatch
actions_st = actions[minibatchlist[minibatch_idx]]
actions_proba_st = actions_proba[minibatchlist[minibatch_idx]]
if USE_ADAPTIVE_TEMPERATURE:
cl_labels_st = cl_labels[minibatchlist[minibatch_idx]]
if not args.continuous_actions:
# Discrete actions, rearrange action to have n_minibatch ligns and one column,
# containing the int action
actions_st = one_hot(th.from_numpy(actions_st)).requires_grad_(False).to(self.device)
actions_proba_st = th.from_numpy(actions_proba_st).requires_grad_(False).to(self.device)
else:
a = 0
# Continuous actions, rearrange action to have n_minibatch ligns and dim_action columns
actions_st = th.from_numpy(actions_st).view(-1, self.dim_action).requires_grad_(False).to(
self.device)
if self.srl_model is not None:
state = self.srl_model.model.getStates(obs).to(self.device).detach()
if "autoencoder" in self.srl_model.model.losses:
use_ae = True
decoded_obs = self.srl_model.model.model.decode(state).to(self.device).detach()
else:
state = obs.detach()
pred_action = self.model.forward(state)
loss = self.loss_fn_kd(pred_action,
actions_proba_st.float(),
labels=cl_labels_st, adaptive_temperature=USE_ADAPTIVE_TEMPERATURE)
#loss = self.loss_mse(pred_action, actions_proba_st.float())
if validation_mode:
val_loss += loss.item()
# We do not optimize on validation data
# so optimizer.step() is not called
else:
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
epoch_batches += 1
pbar.update(1)
train_loss = epoch_loss / float(epoch_batches)
val_loss /= float(n_val_batches)
pbar.close()
print("Epoch {:3}/{}, train_loss:{:.6f} val_loss:{:.6f}".format(epoch + 1, N_EPOCHS, train_loss, val_loss))
# Save best model
if val_loss < best_error:
best_error = val_loss
self.save(best_model_path)
| observation = np.transpose(observation, (0, 3, 2, 1)) | conditional_block |
WarpController.ts | import * as CIDTool from 'cid-tool';
import {
Log,
MarketReportingState,
NULL_ADDRESS,
SubscriptionEventName,
} from '@augurproject/sdk-lite';
import { Log as SerializedLog } from '@augurproject/types';
import { IPFSEndpointInfo, IPFSHashVersion, logger } from '@augurproject/utils';
import Dexie from 'dexie';
import { Block } from 'ethers/providers';
import { BigNumber } from 'ethers/utils';
import * as IPFS from 'ipfs';
import * as Unixfs from 'ipfs-unixfs';
import { DAGNode } from 'ipld-dag-pb';
import _ from 'lodash';
import LZString from 'lz-string';
import fetch from 'cross-fetch';
import { Augur, Provider } from '..';
import { DB } from '../state/db/DB';
import { IpfsInfo } from '../state/db/WarpSyncCheckpointsDB';
import { Markets } from '../state/getter/Markets';
import { Checkpoints } from './Checkpoints';
export const WARPSYNC_VERSION = '1';
const FILE_FETCH_TIMEOUT = 30000; // 10 seconds
type NameOfType<T, R> = {
[P in keyof T]: T[P] extends R ? P : never;
}[keyof T];
type AllDBNames = NameOfType<DB, Dexie.Table<Log, unknown>>;
type AllDbs = {
[P in AllDBNames]: DB[P] extends Dexie.Table<infer R, unknown> ? R : never;
};
// Assuming indexes we need are simple ones e.g. 'market'.
// Will need to rethink this for something like '[universe+reporter]'.
type DbExpander<P, G extends keyof AllDbs> = P extends keyof AllDbs
? {
databaseName: P;
indexes?: Readonly<Array<keyof AllDbs[P]>>;
join?: G extends keyof AllDbs
? Readonly<{
// Indexes to query source db on.
indexes: Readonly<Array<keyof AllDbs[G]>>;
// The common index between the two DBs.
// length 2 or more is treated
on: Readonly<Array<keyof AllDbs[P] & keyof AllDbs[G]>>;
// This is the source of the criteria to filter the `dataBaseName` db with.
source: G;
}>
: never;
}
: never;
type Db = DbExpander<keyof AllDbs, keyof AllDbs>;
export type RollupDescription = Readonly<Db[]>;
interface IPFSObject {
Hash: string;
Name?: string;
Size: number;
}
export const databasesToSync: RollupDescription = [
{ databaseName: 'CompleteSetsPurchased' },
{ databaseName: 'CompleteSetsSold' },
{ databaseName: 'DisputeCrowdsourcerContribution' },
{ databaseName: 'DisputeCrowdsourcerCompleted' },
{ databaseName: 'DisputeCrowdsourcerCreated' },
{ databaseName: 'DisputeCrowdsourcerRedeemed' },
{ databaseName: 'DisputeWindowCreated' },
{ databaseName: 'InitialReporterRedeemed' },
{ databaseName: 'InitialReportSubmitted' },
{ databaseName: 'InitialReporterTransferred' },
{ databaseName: 'MarketCreated' },
{ databaseName: 'MarketFinalized' },
{ databaseName: 'MarketMigrated' },
{ databaseName: 'MarketParticipantsDisavowed' },
{ databaseName: 'MarketTransferred' },
{ databaseName: 'MarketVolumeChanged' },
{ databaseName: 'MarketOIChanged' },
{ databaseName: 'OrderEvent' },
{ databaseName: 'ParticipationTokensRedeemed' },
{ databaseName: 'ProfitLossChanged' },
{ databaseName: 'ReportingParticipantDisavowed' },
{ databaseName: 'TimestampSet' },
{ databaseName: 'TokenBalanceChanged' },
{ databaseName: 'TokensMinted' },
{ databaseName: 'TokensTransferred' },
{ databaseName: 'TradingProceedsClaimed' },
{ databaseName: 'UniverseCreated' },
{ databaseName: 'UniverseForked' },
{ databaseName: 'TransferSingle' },
{ databaseName: 'TransferBatch' },
{ databaseName: 'ShareTokenBalanceChanged' },
];
export interface CheckpointInterface {
startBlockNumber: number;
endBlockNumber: number;
logs: SerializedLog[];
}
export class WarpController {
private static DEFAULT_NODE_TYPE = { format: 'dag-pb', hashAlg: 'sha2-256' };
checkpoints: Checkpoints;
ipfs: Promise<IPFS>;
constructor(
private db: DB,
private augur: Augur<Provider>,
private provider: Provider,
private uploadBlockNumber: number,
private ipfsEndpointInfo:IPFSEndpointInfo,
ipfs?: Promise<IPFS>,
) {
this.checkpoints = new Checkpoints(provider);
if (ipfs) {
this.ipfs = ipfs;
} else {
this.ipfs = IPFS.create({
repo: './data',
});
}
}
async getIpfs(): Promise<IPFS> {
return this.ipfs;
}
onNewBlock = async (newBlock: Block): Promise<string | void> => {
await this.createInitialCheckpoint();
/*
0. Base case: need to have created initial warp checkpoint.
1. Check if we need to create warp sync
1. This will happen if the active market endTime has elapsed
2. Check if we have a market awaiting finalization
1. If so, do we dispute?
3. If market is finalized
1. If no dispute we make note of new market end time
*/
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
// Universe not initialized.
if (!mostRecentCheckpoint) {
return;
}
// Warp sync has been created. Need to report, dispute or create next unfinished checkpoint record.
if (mostRecentCheckpoint.end) {
const [marketRecord] = await Markets.getMarketsInfo(this.augur, this.db, {
marketIds: [mostRecentCheckpoint.market],
});
switch (marketRecord.reportingState) {
case MarketReportingState.OpenReporting:
// Emit event to notify UI to report.
break;
case MarketReportingState.AwaitingFinalization:
// confirm hash matches and emit dispute event if needed.
break;
case MarketReportingState.Finalized:
const endBlock = Object.assign({}, mostRecentCheckpoint.end, {
gasLimit: new BigNumber(mostRecentCheckpoint.end.gasLimit),
gasUsed: new BigNumber(mostRecentCheckpoint.end.gasUsed),
})
const [begin, end] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
endBlock
);
const newWarpSyncMarket = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
await this.db.warpCheckpoints.createInitialCheckpoint(
end,
newWarpSyncMarket
);
break;
default:
}
return;
}
// WarpSync Market has ended. Need to create checkpoint.
if (mostRecentCheckpoint.endTimestamp < newBlock.timestamp) {
const [
newEndBlock,
newBeginBlock,
] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
await this.provider.getBlock(this.uploadBlockNumber),
newBlock
);
// Market has finished and now we need to wait 30 blocks.
if (newBlock.number - newEndBlock.number < 30) return;
await this.db.prune(newEndBlock.timestamp);
// This version of the client will no longer generate a
// warp sync because it does not know about the para deploy logs.
}
// nothing left to do.
};
async createInitialCheckpoint() {
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
if (!mostRecentCheckpoint) {
const market = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
if (market.address === NULL_ADDRESS) {
console.log(
`Warp sync market not initialized for current universe ${this.augur.contracts.universe.address}.`
);
return;
}
await this.db.warpCheckpoints.createInitialCheckpoint(
await this.provider.getBlock(this.uploadBlockNumber),
market
);
}
}
async destroyAndRecreateDB() {
await this.db.delete();
await this.db.initializeDB();
}
async createCheckpoint(endBlock: Block): Promise<IpfsInfo> {
const logs = [];
for (const { databaseName } of databasesToSync) {
// Awaiting here to reduce load on db.
logs.push(
await this.db[databaseName]
.where('blockNumber')
.between(this.uploadBlockNumber, endBlock.number, true, true)
.toArray()
);
}
const sortedLogs = _.orderBy(
_.flatten(logs),
['blockNumber', 'logIndex'],
['asc', 'asc']
);
const body = JSON.stringify({
startBlockNumber: this.uploadBlockNumber,
endBlockNumber: endBlock.number,
logs: sortedLogs,
} as CheckpointInterface);
const content = LZString.compressToUint8Array(body);
const [result] = await (await this.ipfs).add({
content,
});
const topLevelDirectory = new DAGNode(
Unixfs.default('directory').marshal()
);
const versionFile = await (await this.ipfs).add({
content: Buffer.from(WARPSYNC_VERSION),
});
topLevelDirectory.addLink({
Name: 'VERSION',
Hash: versionFile[0].hash,
Size: 1,
});
topLevelDirectory.addLink({
Name: 'index',
Hash: result.hash,
Size: 0,
});
const hash = (await (await this.ipfs).dag.put(
topLevelDirectory,
WarpController.DEFAULT_NODE_TYPE
)).toString();
await this.db.warpCheckpoints.createCheckpoint(endBlock, hash);
return hash;
}
getFile(ipfsHash: string, ipfsPath: string) {
return new Promise<CheckpointInterface>(async (resolve, reject) => {
const timeout = setTimeout(() => {reject(new Error('Request timed out'));}, FILE_FETCH_TIMEOUT);
let fileResult;
switch (this.ipfsEndpointInfo.version) {
case IPFSHashVersion.CIDv0:
case IPFSHashVersion.CIDv1:
fileResult = await fetch(`${this.ipfsEndpointInfo.url}/${ipfsHash}${ipfsPath}`)
.then(item => item.arrayBuffer())
.then(item => new Uint8Array(item))
break;
case IPFSHashVersion.IPFS:
try {
fileResult = await (await this.ipfs).cat(`${ipfsHash}${ipfsPath}`);
} catch(err) {
if (err.message === 'this dag node is a directory') {
throw Error(`IPFS: tried to read directory as if it were a file: hash=${ipfsHash} path=${ipfsPath}`)
}
}
break;
default:
throw new Error('No IPFS gateway configured');
}
clearTimeout(timeout);
const decompressedResult = await LZString.decompressFromUint8Array(fileResult);
resolve(JSON.parse(decompressedResult));
});
}
async getCheckpointFile(ipfsRootHash: string): Promise<CheckpointInterface> {
return this.getFile(ipfsRootHash, '/index');
}
async pinHashByGatewayUrl(urlString: string) {
const url = new URL(urlString);
try {
const matches = /^(\w+)\.ipfs\..+$/.exec(url.hostname);
const thingToPin = (matches) ? matches[1] : url.pathname;
await (await this.ipfs).pin.add(thingToPin);
logger.info(`Client pinned with ipfs hash: ${thingToPin}`)
return true;
} catch (e) {
return false;
}
}
async getMostRecentWarpSync() {
return this.db.warpCheckpoints.getMostRecentWarpSync();
}
async getMostRecentCheckpoint() |
async hasMostRecentCheckpoint() {
return (await this.getMostRecentCheckpoint()) !== undefined;
}
}
| {
return this.db.warpCheckpoints.getMostRecentCheckpoint();
} | identifier_body |
WarpController.ts | import * as CIDTool from 'cid-tool';
import {
Log,
MarketReportingState,
NULL_ADDRESS,
SubscriptionEventName,
} from '@augurproject/sdk-lite';
import { Log as SerializedLog } from '@augurproject/types';
import { IPFSEndpointInfo, IPFSHashVersion, logger } from '@augurproject/utils';
import Dexie from 'dexie';
import { Block } from 'ethers/providers';
import { BigNumber } from 'ethers/utils';
import * as IPFS from 'ipfs';
import * as Unixfs from 'ipfs-unixfs';
import { DAGNode } from 'ipld-dag-pb';
import _ from 'lodash';
import LZString from 'lz-string';
import fetch from 'cross-fetch';
import { Augur, Provider } from '..';
import { DB } from '../state/db/DB';
import { IpfsInfo } from '../state/db/WarpSyncCheckpointsDB';
import { Markets } from '../state/getter/Markets';
import { Checkpoints } from './Checkpoints';
export const WARPSYNC_VERSION = '1';
const FILE_FETCH_TIMEOUT = 30000; // 10 seconds
type NameOfType<T, R> = {
[P in keyof T]: T[P] extends R ? P : never;
}[keyof T];
type AllDBNames = NameOfType<DB, Dexie.Table<Log, unknown>>;
type AllDbs = {
[P in AllDBNames]: DB[P] extends Dexie.Table<infer R, unknown> ? R : never;
};
// Assuming indexes we need are simple ones e.g. 'market'.
// Will need to rethink this for something like '[universe+reporter]'.
type DbExpander<P, G extends keyof AllDbs> = P extends keyof AllDbs
? {
databaseName: P;
indexes?: Readonly<Array<keyof AllDbs[P]>>;
join?: G extends keyof AllDbs
? Readonly<{
// Indexes to query source db on.
indexes: Readonly<Array<keyof AllDbs[G]>>;
// The common index between the two DBs.
// length 2 or more is treated
on: Readonly<Array<keyof AllDbs[P] & keyof AllDbs[G]>>;
// This is the source of the criteria to filter the `dataBaseName` db with.
source: G;
}>
: never;
}
: never;
type Db = DbExpander<keyof AllDbs, keyof AllDbs>;
export type RollupDescription = Readonly<Db[]>;
interface IPFSObject {
Hash: string;
Name?: string;
Size: number;
}
export const databasesToSync: RollupDescription = [
{ databaseName: 'CompleteSetsPurchased' },
{ databaseName: 'CompleteSetsSold' },
{ databaseName: 'DisputeCrowdsourcerContribution' },
{ databaseName: 'DisputeCrowdsourcerCompleted' },
{ databaseName: 'DisputeCrowdsourcerCreated' },
{ databaseName: 'DisputeCrowdsourcerRedeemed' },
{ databaseName: 'DisputeWindowCreated' },
{ databaseName: 'InitialReporterRedeemed' },
{ databaseName: 'InitialReportSubmitted' },
{ databaseName: 'InitialReporterTransferred' },
{ databaseName: 'MarketCreated' },
{ databaseName: 'MarketFinalized' },
{ databaseName: 'MarketMigrated' },
{ databaseName: 'MarketParticipantsDisavowed' },
{ databaseName: 'MarketTransferred' },
{ databaseName: 'MarketVolumeChanged' },
{ databaseName: 'MarketOIChanged' },
{ databaseName: 'OrderEvent' },
{ databaseName: 'ParticipationTokensRedeemed' },
{ databaseName: 'ProfitLossChanged' },
{ databaseName: 'ReportingParticipantDisavowed' },
{ databaseName: 'TimestampSet' },
{ databaseName: 'TokenBalanceChanged' },
{ databaseName: 'TokensMinted' },
{ databaseName: 'TokensTransferred' },
{ databaseName: 'TradingProceedsClaimed' },
{ databaseName: 'UniverseCreated' },
{ databaseName: 'UniverseForked' },
{ databaseName: 'TransferSingle' },
{ databaseName: 'TransferBatch' },
{ databaseName: 'ShareTokenBalanceChanged' },
];
export interface CheckpointInterface {
startBlockNumber: number;
endBlockNumber: number;
logs: SerializedLog[];
}
export class WarpController {
private static DEFAULT_NODE_TYPE = { format: 'dag-pb', hashAlg: 'sha2-256' };
checkpoints: Checkpoints;
ipfs: Promise<IPFS>;
constructor(
private db: DB,
private augur: Augur<Provider>,
private provider: Provider,
private uploadBlockNumber: number,
private ipfsEndpointInfo:IPFSEndpointInfo,
ipfs?: Promise<IPFS>,
) {
this.checkpoints = new Checkpoints(provider);
if (ipfs) {
this.ipfs = ipfs;
} else {
this.ipfs = IPFS.create({
repo: './data',
});
}
}
async getIpfs(): Promise<IPFS> {
return this.ipfs;
}
onNewBlock = async (newBlock: Block): Promise<string | void> => {
await this.createInitialCheckpoint();
/*
0. Base case: need to have created initial warp checkpoint.
1. Check if we need to create warp sync
1. This will happen if the active market endTime has elapsed
2. Check if we have a market awaiting finalization
1. If so, do we dispute?
3. If market is finalized
1. If no dispute we make note of new market end time
*/
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
// Universe not initialized.
if (!mostRecentCheckpoint) {
return;
}
// Warp sync has been created. Need to report, dispute or create next unfinished checkpoint record.
if (mostRecentCheckpoint.end) {
const [marketRecord] = await Markets.getMarketsInfo(this.augur, this.db, {
marketIds: [mostRecentCheckpoint.market],
});
switch (marketRecord.reportingState) {
case MarketReportingState.OpenReporting:
// Emit event to notify UI to report.
break;
case MarketReportingState.AwaitingFinalization:
// confirm hash matches and emit dispute event if needed.
break;
case MarketReportingState.Finalized:
const endBlock = Object.assign({}, mostRecentCheckpoint.end, {
gasLimit: new BigNumber(mostRecentCheckpoint.end.gasLimit),
gasUsed: new BigNumber(mostRecentCheckpoint.end.gasUsed),
})
const [begin, end] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
endBlock
);
const newWarpSyncMarket = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
await this.db.warpCheckpoints.createInitialCheckpoint(
end,
newWarpSyncMarket
);
break;
default:
}
return;
}
// WarpSync Market has ended. Need to create checkpoint.
if (mostRecentCheckpoint.endTimestamp < newBlock.timestamp) {
const [
newEndBlock,
newBeginBlock,
] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
await this.provider.getBlock(this.uploadBlockNumber),
newBlock
);
// Market has finished and now we need to wait 30 blocks.
if (newBlock.number - newEndBlock.number < 30) return;
await this.db.prune(newEndBlock.timestamp);
// This version of the client will no longer generate a
// warp sync because it does not know about the para deploy logs.
}
// nothing left to do.
};
async createInitialCheckpoint() {
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
if (!mostRecentCheckpoint) {
const market = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
if (market.address === NULL_ADDRESS) |
await this.db.warpCheckpoints.createInitialCheckpoint(
await this.provider.getBlock(this.uploadBlockNumber),
market
);
}
}
async destroyAndRecreateDB() {
await this.db.delete();
await this.db.initializeDB();
}
async createCheckpoint(endBlock: Block): Promise<IpfsInfo> {
const logs = [];
for (const { databaseName } of databasesToSync) {
// Awaiting here to reduce load on db.
logs.push(
await this.db[databaseName]
.where('blockNumber')
.between(this.uploadBlockNumber, endBlock.number, true, true)
.toArray()
);
}
const sortedLogs = _.orderBy(
_.flatten(logs),
['blockNumber', 'logIndex'],
['asc', 'asc']
);
const body = JSON.stringify({
startBlockNumber: this.uploadBlockNumber,
endBlockNumber: endBlock.number,
logs: sortedLogs,
} as CheckpointInterface);
const content = LZString.compressToUint8Array(body);
const [result] = await (await this.ipfs).add({
content,
});
const topLevelDirectory = new DAGNode(
Unixfs.default('directory').marshal()
);
const versionFile = await (await this.ipfs).add({
content: Buffer.from(WARPSYNC_VERSION),
});
topLevelDirectory.addLink({
Name: 'VERSION',
Hash: versionFile[0].hash,
Size: 1,
});
topLevelDirectory.addLink({
Name: 'index',
Hash: result.hash,
Size: 0,
});
const hash = (await (await this.ipfs).dag.put(
topLevelDirectory,
WarpController.DEFAULT_NODE_TYPE
)).toString();
await this.db.warpCheckpoints.createCheckpoint(endBlock, hash);
return hash;
}
getFile(ipfsHash: string, ipfsPath: string) {
return new Promise<CheckpointInterface>(async (resolve, reject) => {
const timeout = setTimeout(() => {reject(new Error('Request timed out'));}, FILE_FETCH_TIMEOUT);
let fileResult;
switch (this.ipfsEndpointInfo.version) {
case IPFSHashVersion.CIDv0:
case IPFSHashVersion.CIDv1:
fileResult = await fetch(`${this.ipfsEndpointInfo.url}/${ipfsHash}${ipfsPath}`)
.then(item => item.arrayBuffer())
.then(item => new Uint8Array(item))
break;
case IPFSHashVersion.IPFS:
try {
fileResult = await (await this.ipfs).cat(`${ipfsHash}${ipfsPath}`);
} catch(err) {
if (err.message === 'this dag node is a directory') {
throw Error(`IPFS: tried to read directory as if it were a file: hash=${ipfsHash} path=${ipfsPath}`)
}
}
break;
default:
throw new Error('No IPFS gateway configured');
}
clearTimeout(timeout);
const decompressedResult = await LZString.decompressFromUint8Array(fileResult);
resolve(JSON.parse(decompressedResult));
});
}
async getCheckpointFile(ipfsRootHash: string): Promise<CheckpointInterface> {
return this.getFile(ipfsRootHash, '/index');
}
async pinHashByGatewayUrl(urlString: string) {
const url = new URL(urlString);
try {
const matches = /^(\w+)\.ipfs\..+$/.exec(url.hostname);
const thingToPin = (matches) ? matches[1] : url.pathname;
await (await this.ipfs).pin.add(thingToPin);
logger.info(`Client pinned with ipfs hash: ${thingToPin}`)
return true;
} catch (e) {
return false;
}
}
async getMostRecentWarpSync() {
return this.db.warpCheckpoints.getMostRecentWarpSync();
}
async getMostRecentCheckpoint() {
return this.db.warpCheckpoints.getMostRecentCheckpoint();
}
async hasMostRecentCheckpoint() {
return (await this.getMostRecentCheckpoint()) !== undefined;
}
}
| {
console.log(
`Warp sync market not initialized for current universe ${this.augur.contracts.universe.address}.`
);
return;
} | conditional_block |
WarpController.ts | import * as CIDTool from 'cid-tool';
import {
Log,
MarketReportingState,
NULL_ADDRESS,
SubscriptionEventName,
} from '@augurproject/sdk-lite';
import { Log as SerializedLog } from '@augurproject/types';
import { IPFSEndpointInfo, IPFSHashVersion, logger } from '@augurproject/utils';
import Dexie from 'dexie';
import { Block } from 'ethers/providers';
import { BigNumber } from 'ethers/utils';
import * as IPFS from 'ipfs';
import * as Unixfs from 'ipfs-unixfs';
import { DAGNode } from 'ipld-dag-pb';
import _ from 'lodash';
import LZString from 'lz-string';
import fetch from 'cross-fetch';
import { Augur, Provider } from '..';
import { DB } from '../state/db/DB';
import { IpfsInfo } from '../state/db/WarpSyncCheckpointsDB';
import { Markets } from '../state/getter/Markets';
import { Checkpoints } from './Checkpoints';
export const WARPSYNC_VERSION = '1';
const FILE_FETCH_TIMEOUT = 30000; // 10 seconds
type NameOfType<T, R> = {
[P in keyof T]: T[P] extends R ? P : never;
}[keyof T];
type AllDBNames = NameOfType<DB, Dexie.Table<Log, unknown>>;
type AllDbs = {
[P in AllDBNames]: DB[P] extends Dexie.Table<infer R, unknown> ? R : never;
};
// Assuming indexes we need are simple ones e.g. 'market'.
// Will need to rethink this for something like '[universe+reporter]'.
type DbExpander<P, G extends keyof AllDbs> = P extends keyof AllDbs
? {
databaseName: P;
indexes?: Readonly<Array<keyof AllDbs[P]>>;
join?: G extends keyof AllDbs
? Readonly<{
// Indexes to query source db on.
indexes: Readonly<Array<keyof AllDbs[G]>>;
// The common index between the two DBs.
// length 2 or more is treated
on: Readonly<Array<keyof AllDbs[P] & keyof AllDbs[G]>>;
// This is the source of the criteria to filter the `dataBaseName` db with.
source: G;
}>
: never;
}
: never;
type Db = DbExpander<keyof AllDbs, keyof AllDbs>;
export type RollupDescription = Readonly<Db[]>;
interface IPFSObject {
Hash: string;
Name?: string;
Size: number;
}
export const databasesToSync: RollupDescription = [
{ databaseName: 'CompleteSetsPurchased' },
{ databaseName: 'CompleteSetsSold' },
{ databaseName: 'DisputeCrowdsourcerContribution' },
{ databaseName: 'DisputeCrowdsourcerCompleted' },
{ databaseName: 'DisputeCrowdsourcerCreated' },
{ databaseName: 'DisputeCrowdsourcerRedeemed' },
{ databaseName: 'DisputeWindowCreated' },
{ databaseName: 'InitialReporterRedeemed' },
{ databaseName: 'InitialReportSubmitted' },
{ databaseName: 'InitialReporterTransferred' },
{ databaseName: 'MarketCreated' },
{ databaseName: 'MarketFinalized' },
{ databaseName: 'MarketMigrated' },
{ databaseName: 'MarketParticipantsDisavowed' },
{ databaseName: 'MarketTransferred' },
{ databaseName: 'MarketVolumeChanged' },
{ databaseName: 'MarketOIChanged' },
{ databaseName: 'OrderEvent' },
{ databaseName: 'ParticipationTokensRedeemed' },
{ databaseName: 'ProfitLossChanged' },
{ databaseName: 'ReportingParticipantDisavowed' },
{ databaseName: 'TimestampSet' },
{ databaseName: 'TokenBalanceChanged' },
{ databaseName: 'TokensMinted' },
{ databaseName: 'TokensTransferred' },
{ databaseName: 'TradingProceedsClaimed' },
{ databaseName: 'UniverseCreated' },
{ databaseName: 'UniverseForked' },
{ databaseName: 'TransferSingle' },
{ databaseName: 'TransferBatch' },
{ databaseName: 'ShareTokenBalanceChanged' },
];
export interface CheckpointInterface {
startBlockNumber: number;
endBlockNumber: number;
logs: SerializedLog[];
}
export class WarpController {
private static DEFAULT_NODE_TYPE = { format: 'dag-pb', hashAlg: 'sha2-256' };
checkpoints: Checkpoints;
ipfs: Promise<IPFS>;
constructor(
private db: DB,
private augur: Augur<Provider>,
private provider: Provider,
private uploadBlockNumber: number,
private ipfsEndpointInfo:IPFSEndpointInfo,
ipfs?: Promise<IPFS>,
) {
this.checkpoints = new Checkpoints(provider);
if (ipfs) {
this.ipfs = ipfs;
} else {
this.ipfs = IPFS.create({
repo: './data',
});
}
} | }
onNewBlock = async (newBlock: Block): Promise<string | void> => {
await this.createInitialCheckpoint();
/*
0. Base case: need to have created initial warp checkpoint.
1. Check if we need to create warp sync
1. This will happen if the active market endTime has elapsed
2. Check if we have a market awaiting finalization
1. If so, do we dispute?
3. If market is finalized
1. If no dispute we make note of new market end time
*/
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
// Universe not initialized.
if (!mostRecentCheckpoint) {
return;
}
// Warp sync has been created. Need to report, dispute or create next unfinished checkpoint record.
if (mostRecentCheckpoint.end) {
const [marketRecord] = await Markets.getMarketsInfo(this.augur, this.db, {
marketIds: [mostRecentCheckpoint.market],
});
switch (marketRecord.reportingState) {
case MarketReportingState.OpenReporting:
// Emit event to notify UI to report.
break;
case MarketReportingState.AwaitingFinalization:
// confirm hash matches and emit dispute event if needed.
break;
case MarketReportingState.Finalized:
const endBlock = Object.assign({}, mostRecentCheckpoint.end, {
gasLimit: new BigNumber(mostRecentCheckpoint.end.gasLimit),
gasUsed: new BigNumber(mostRecentCheckpoint.end.gasUsed),
})
const [begin, end] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
endBlock
);
const newWarpSyncMarket = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
await this.db.warpCheckpoints.createInitialCheckpoint(
end,
newWarpSyncMarket
);
break;
default:
}
return;
}
// WarpSync Market has ended. Need to create checkpoint.
if (mostRecentCheckpoint.endTimestamp < newBlock.timestamp) {
const [
newEndBlock,
newBeginBlock,
] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
await this.provider.getBlock(this.uploadBlockNumber),
newBlock
);
// Market has finished and now we need to wait 30 blocks.
if (newBlock.number - newEndBlock.number < 30) return;
await this.db.prune(newEndBlock.timestamp);
// This version of the client will no longer generate a
// warp sync because it does not know about the para deploy logs.
}
// nothing left to do.
};
async createInitialCheckpoint() {
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
if (!mostRecentCheckpoint) {
const market = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
if (market.address === NULL_ADDRESS) {
console.log(
`Warp sync market not initialized for current universe ${this.augur.contracts.universe.address}.`
);
return;
}
await this.db.warpCheckpoints.createInitialCheckpoint(
await this.provider.getBlock(this.uploadBlockNumber),
market
);
}
}
async destroyAndRecreateDB() {
await this.db.delete();
await this.db.initializeDB();
}
async createCheckpoint(endBlock: Block): Promise<IpfsInfo> {
const logs = [];
for (const { databaseName } of databasesToSync) {
// Awaiting here to reduce load on db.
logs.push(
await this.db[databaseName]
.where('blockNumber')
.between(this.uploadBlockNumber, endBlock.number, true, true)
.toArray()
);
}
const sortedLogs = _.orderBy(
_.flatten(logs),
['blockNumber', 'logIndex'],
['asc', 'asc']
);
const body = JSON.stringify({
startBlockNumber: this.uploadBlockNumber,
endBlockNumber: endBlock.number,
logs: sortedLogs,
} as CheckpointInterface);
const content = LZString.compressToUint8Array(body);
const [result] = await (await this.ipfs).add({
content,
});
const topLevelDirectory = new DAGNode(
Unixfs.default('directory').marshal()
);
const versionFile = await (await this.ipfs).add({
content: Buffer.from(WARPSYNC_VERSION),
});
topLevelDirectory.addLink({
Name: 'VERSION',
Hash: versionFile[0].hash,
Size: 1,
});
topLevelDirectory.addLink({
Name: 'index',
Hash: result.hash,
Size: 0,
});
const hash = (await (await this.ipfs).dag.put(
topLevelDirectory,
WarpController.DEFAULT_NODE_TYPE
)).toString();
await this.db.warpCheckpoints.createCheckpoint(endBlock, hash);
return hash;
}
getFile(ipfsHash: string, ipfsPath: string) {
return new Promise<CheckpointInterface>(async (resolve, reject) => {
const timeout = setTimeout(() => {reject(new Error('Request timed out'));}, FILE_FETCH_TIMEOUT);
let fileResult;
switch (this.ipfsEndpointInfo.version) {
case IPFSHashVersion.CIDv0:
case IPFSHashVersion.CIDv1:
fileResult = await fetch(`${this.ipfsEndpointInfo.url}/${ipfsHash}${ipfsPath}`)
.then(item => item.arrayBuffer())
.then(item => new Uint8Array(item))
break;
case IPFSHashVersion.IPFS:
try {
fileResult = await (await this.ipfs).cat(`${ipfsHash}${ipfsPath}`);
} catch(err) {
if (err.message === 'this dag node is a directory') {
throw Error(`IPFS: tried to read directory as if it were a file: hash=${ipfsHash} path=${ipfsPath}`)
}
}
break;
default:
throw new Error('No IPFS gateway configured');
}
clearTimeout(timeout);
const decompressedResult = await LZString.decompressFromUint8Array(fileResult);
resolve(JSON.parse(decompressedResult));
});
}
async getCheckpointFile(ipfsRootHash: string): Promise<CheckpointInterface> {
return this.getFile(ipfsRootHash, '/index');
}
async pinHashByGatewayUrl(urlString: string) {
const url = new URL(urlString);
try {
const matches = /^(\w+)\.ipfs\..+$/.exec(url.hostname);
const thingToPin = (matches) ? matches[1] : url.pathname;
await (await this.ipfs).pin.add(thingToPin);
logger.info(`Client pinned with ipfs hash: ${thingToPin}`)
return true;
} catch (e) {
return false;
}
}
async getMostRecentWarpSync() {
return this.db.warpCheckpoints.getMostRecentWarpSync();
}
async getMostRecentCheckpoint() {
return this.db.warpCheckpoints.getMostRecentCheckpoint();
}
async hasMostRecentCheckpoint() {
return (await this.getMostRecentCheckpoint()) !== undefined;
}
} |
async getIpfs(): Promise<IPFS> {
return this.ipfs; | random_line_split |
WarpController.ts | import * as CIDTool from 'cid-tool';
import {
Log,
MarketReportingState,
NULL_ADDRESS,
SubscriptionEventName,
} from '@augurproject/sdk-lite';
import { Log as SerializedLog } from '@augurproject/types';
import { IPFSEndpointInfo, IPFSHashVersion, logger } from '@augurproject/utils';
import Dexie from 'dexie';
import { Block } from 'ethers/providers';
import { BigNumber } from 'ethers/utils';
import * as IPFS from 'ipfs';
import * as Unixfs from 'ipfs-unixfs';
import { DAGNode } from 'ipld-dag-pb';
import _ from 'lodash';
import LZString from 'lz-string';
import fetch from 'cross-fetch';
import { Augur, Provider } from '..';
import { DB } from '../state/db/DB';
import { IpfsInfo } from '../state/db/WarpSyncCheckpointsDB';
import { Markets } from '../state/getter/Markets';
import { Checkpoints } from './Checkpoints';
export const WARPSYNC_VERSION = '1';
const FILE_FETCH_TIMEOUT = 30000; // 10 seconds
type NameOfType<T, R> = {
[P in keyof T]: T[P] extends R ? P : never;
}[keyof T];
type AllDBNames = NameOfType<DB, Dexie.Table<Log, unknown>>;
type AllDbs = {
[P in AllDBNames]: DB[P] extends Dexie.Table<infer R, unknown> ? R : never;
};
// Assuming indexes we need are simple ones e.g. 'market'.
// Will need to rethink this for something like '[universe+reporter]'.
type DbExpander<P, G extends keyof AllDbs> = P extends keyof AllDbs
? {
databaseName: P;
indexes?: Readonly<Array<keyof AllDbs[P]>>;
join?: G extends keyof AllDbs
? Readonly<{
// Indexes to query source db on.
indexes: Readonly<Array<keyof AllDbs[G]>>;
// The common index between the two DBs.
// length 2 or more is treated
on: Readonly<Array<keyof AllDbs[P] & keyof AllDbs[G]>>;
// This is the source of the criteria to filter the `dataBaseName` db with.
source: G;
}>
: never;
}
: never;
type Db = DbExpander<keyof AllDbs, keyof AllDbs>;
export type RollupDescription = Readonly<Db[]>;
interface IPFSObject {
Hash: string;
Name?: string;
Size: number;
}
export const databasesToSync: RollupDescription = [
{ databaseName: 'CompleteSetsPurchased' },
{ databaseName: 'CompleteSetsSold' },
{ databaseName: 'DisputeCrowdsourcerContribution' },
{ databaseName: 'DisputeCrowdsourcerCompleted' },
{ databaseName: 'DisputeCrowdsourcerCreated' },
{ databaseName: 'DisputeCrowdsourcerRedeemed' },
{ databaseName: 'DisputeWindowCreated' },
{ databaseName: 'InitialReporterRedeemed' },
{ databaseName: 'InitialReportSubmitted' },
{ databaseName: 'InitialReporterTransferred' },
{ databaseName: 'MarketCreated' },
{ databaseName: 'MarketFinalized' },
{ databaseName: 'MarketMigrated' },
{ databaseName: 'MarketParticipantsDisavowed' },
{ databaseName: 'MarketTransferred' },
{ databaseName: 'MarketVolumeChanged' },
{ databaseName: 'MarketOIChanged' },
{ databaseName: 'OrderEvent' },
{ databaseName: 'ParticipationTokensRedeemed' },
{ databaseName: 'ProfitLossChanged' },
{ databaseName: 'ReportingParticipantDisavowed' },
{ databaseName: 'TimestampSet' },
{ databaseName: 'TokenBalanceChanged' },
{ databaseName: 'TokensMinted' },
{ databaseName: 'TokensTransferred' },
{ databaseName: 'TradingProceedsClaimed' },
{ databaseName: 'UniverseCreated' },
{ databaseName: 'UniverseForked' },
{ databaseName: 'TransferSingle' },
{ databaseName: 'TransferBatch' },
{ databaseName: 'ShareTokenBalanceChanged' },
];
export interface CheckpointInterface {
startBlockNumber: number;
endBlockNumber: number;
logs: SerializedLog[];
}
export class WarpController {
private static DEFAULT_NODE_TYPE = { format: 'dag-pb', hashAlg: 'sha2-256' };
checkpoints: Checkpoints;
ipfs: Promise<IPFS>;
constructor(
private db: DB,
private augur: Augur<Provider>,
private provider: Provider,
private uploadBlockNumber: number,
private ipfsEndpointInfo:IPFSEndpointInfo,
ipfs?: Promise<IPFS>,
) {
this.checkpoints = new Checkpoints(provider);
if (ipfs) {
this.ipfs = ipfs;
} else {
this.ipfs = IPFS.create({
repo: './data',
});
}
}
async getIpfs(): Promise<IPFS> {
return this.ipfs;
}
onNewBlock = async (newBlock: Block): Promise<string | void> => {
await this.createInitialCheckpoint();
/*
0. Base case: need to have created initial warp checkpoint.
1. Check if we need to create warp sync
1. This will happen if the active market endTime has elapsed
2. Check if we have a market awaiting finalization
1. If so, do we dispute?
3. If market is finalized
1. If no dispute we make note of new market end time
*/
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
// Universe not initialized.
if (!mostRecentCheckpoint) {
return;
}
// Warp sync has been created. Need to report, dispute or create next unfinished checkpoint record.
if (mostRecentCheckpoint.end) {
const [marketRecord] = await Markets.getMarketsInfo(this.augur, this.db, {
marketIds: [mostRecentCheckpoint.market],
});
switch (marketRecord.reportingState) {
case MarketReportingState.OpenReporting:
// Emit event to notify UI to report.
break;
case MarketReportingState.AwaitingFinalization:
// confirm hash matches and emit dispute event if needed.
break;
case MarketReportingState.Finalized:
const endBlock = Object.assign({}, mostRecentCheckpoint.end, {
gasLimit: new BigNumber(mostRecentCheckpoint.end.gasLimit),
gasUsed: new BigNumber(mostRecentCheckpoint.end.gasUsed),
})
const [begin, end] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
endBlock
);
const newWarpSyncMarket = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
await this.db.warpCheckpoints.createInitialCheckpoint(
end,
newWarpSyncMarket
);
break;
default:
}
return;
}
// WarpSync Market has ended. Need to create checkpoint.
if (mostRecentCheckpoint.endTimestamp < newBlock.timestamp) {
const [
newEndBlock,
newBeginBlock,
] = await this.checkpoints.calculateBoundary(
mostRecentCheckpoint.endTimestamp,
await this.provider.getBlock(this.uploadBlockNumber),
newBlock
);
// Market has finished and now we need to wait 30 blocks.
if (newBlock.number - newEndBlock.number < 30) return;
await this.db.prune(newEndBlock.timestamp);
// This version of the client will no longer generate a
// warp sync because it does not know about the para deploy logs.
}
// nothing left to do.
};
async createInitialCheckpoint() {
const mostRecentCheckpoint = await this.db.warpCheckpoints.getMostRecentCheckpoint();
if (!mostRecentCheckpoint) {
const market = await this.augur.warpSync.getWarpSyncMarket(
this.augur.contracts.universe.address
);
if (market.address === NULL_ADDRESS) {
console.log(
`Warp sync market not initialized for current universe ${this.augur.contracts.universe.address}.`
);
return;
}
await this.db.warpCheckpoints.createInitialCheckpoint(
await this.provider.getBlock(this.uploadBlockNumber),
market
);
}
}
async | () {
await this.db.delete();
await this.db.initializeDB();
}
async createCheckpoint(endBlock: Block): Promise<IpfsInfo> {
const logs = [];
for (const { databaseName } of databasesToSync) {
// Awaiting here to reduce load on db.
logs.push(
await this.db[databaseName]
.where('blockNumber')
.between(this.uploadBlockNumber, endBlock.number, true, true)
.toArray()
);
}
const sortedLogs = _.orderBy(
_.flatten(logs),
['blockNumber', 'logIndex'],
['asc', 'asc']
);
const body = JSON.stringify({
startBlockNumber: this.uploadBlockNumber,
endBlockNumber: endBlock.number,
logs: sortedLogs,
} as CheckpointInterface);
const content = LZString.compressToUint8Array(body);
const [result] = await (await this.ipfs).add({
content,
});
const topLevelDirectory = new DAGNode(
Unixfs.default('directory').marshal()
);
const versionFile = await (await this.ipfs).add({
content: Buffer.from(WARPSYNC_VERSION),
});
topLevelDirectory.addLink({
Name: 'VERSION',
Hash: versionFile[0].hash,
Size: 1,
});
topLevelDirectory.addLink({
Name: 'index',
Hash: result.hash,
Size: 0,
});
const hash = (await (await this.ipfs).dag.put(
topLevelDirectory,
WarpController.DEFAULT_NODE_TYPE
)).toString();
await this.db.warpCheckpoints.createCheckpoint(endBlock, hash);
return hash;
}
getFile(ipfsHash: string, ipfsPath: string) {
return new Promise<CheckpointInterface>(async (resolve, reject) => {
const timeout = setTimeout(() => {reject(new Error('Request timed out'));}, FILE_FETCH_TIMEOUT);
let fileResult;
switch (this.ipfsEndpointInfo.version) {
case IPFSHashVersion.CIDv0:
case IPFSHashVersion.CIDv1:
fileResult = await fetch(`${this.ipfsEndpointInfo.url}/${ipfsHash}${ipfsPath}`)
.then(item => item.arrayBuffer())
.then(item => new Uint8Array(item))
break;
case IPFSHashVersion.IPFS:
try {
fileResult = await (await this.ipfs).cat(`${ipfsHash}${ipfsPath}`);
} catch(err) {
if (err.message === 'this dag node is a directory') {
throw Error(`IPFS: tried to read directory as if it were a file: hash=${ipfsHash} path=${ipfsPath}`)
}
}
break;
default:
throw new Error('No IPFS gateway configured');
}
clearTimeout(timeout);
const decompressedResult = await LZString.decompressFromUint8Array(fileResult);
resolve(JSON.parse(decompressedResult));
});
}
async getCheckpointFile(ipfsRootHash: string): Promise<CheckpointInterface> {
return this.getFile(ipfsRootHash, '/index');
}
async pinHashByGatewayUrl(urlString: string) {
const url = new URL(urlString);
try {
const matches = /^(\w+)\.ipfs\..+$/.exec(url.hostname);
const thingToPin = (matches) ? matches[1] : url.pathname;
await (await this.ipfs).pin.add(thingToPin);
logger.info(`Client pinned with ipfs hash: ${thingToPin}`)
return true;
} catch (e) {
return false;
}
}
async getMostRecentWarpSync() {
return this.db.warpCheckpoints.getMostRecentWarpSync();
}
async getMostRecentCheckpoint() {
return this.db.warpCheckpoints.getMostRecentCheckpoint();
}
async hasMostRecentCheckpoint() {
return (await this.getMostRecentCheckpoint()) !== undefined;
}
}
| destroyAndRecreateDB | identifier_name |
worker.go | package nameresolver
import (
"fmt"
"github.com/miekg/dns"
"net"
"github.com/ANSSI-FR/transdep/messages/zonecut"
"github.com/ANSSI-FR/transdep/tools"
"github.com/ANSSI-FR/transdep/messages/nameresolver"
"github.com/ANSSI-FR/transdep/errors"
"strings"
)
// WORKER_CHAN_CAPACITY indicates the maximum number of request unhandled by the start() goroutine can be spooled before
// the call to Handle() becomes blocking.
const WORKER_CHAN_CAPACITY = 10
// MAX_CNAME_CHAIN indicates the longest chain of CNAME that is acceptable to be followed a name is considered a
// dead-end (i.e. unfit for name resolution)
const MAX_CNAME_CHAIN = 10
// worker represents a request handler for a specific request target domain name for which name resolution is sought.
type worker struct {
// req is the request topic for which this worker was started in the first place.
req *nameresolver.Request
// reqs is the channel by which subsequent requests for the same topic as for "req" are received.
reqs chan *nameresolver.Request
// closedReqChan helps prevent double-close issue on reqs channel, when the worker is stopping.
closedReqChan bool
// joinChan is used by stop() to wait for the completion of the start() goroutine
joinChan chan bool
// zcHandler is used to submit new zone cut requests. This is most notably used to get the delegation information of
// the parent zone of the requested name, in order to query its name servers for the requested name delegation
// information.
zcHandler func(*zonecut.Request) *errors.ErrorStack
// nrHandler is used to submit new name resolution requests. This is used, for instance, to get the IP addresses
// associated to nameservers that are out-of-bailiwick and for which we don't have acceptable glues or IP addresses.
nrHandler func(*nameresolver.Request) *errors.ErrorStack
// config is the configuration of the current Transdep run
config *tools.TransdepConfig
}
// initNewWorker builds a new worker instance and returns it.
// It DOES NOT start the new worker, and should not be called directly by the finder.
func initNewWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker {
w := new(worker)
w.req = req
w.zcHandler = zcHandler
w.nrHandler = nrHandler
w.config = conf
w.reqs = make(chan *nameresolver.Request, WORKER_CHAN_CAPACITY)
w.closedReqChan = false
w.joinChan = make(chan bool, 1)
return w
}
// newWorker builds a new worker instance and returns it.
// The worker is started and will resolve the request from the network.
func newWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker {
w := initNewWorker(req, nrHandler, zcHandler, conf)
w.start()
return w
}
// newWorker builds a new worker instance and returns it.
// The worker is started and will resolve the request from a cache file.
func newWorkerWithCachedResult(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, cf *nameresolver.CacheFile, conf *tools.TransdepConfig) *worker {
w := initNewWorker(req, nrHandler, zcHandler, conf)
w.startWithCachedResult(cf)
return w
}
// handle allows the submission of new requests to this worker.
// This method returns an error if the worker is stopped or if the submitted request does not match the request usually
// handled by this worker.
func (w *worker) handle(req *nameresolver.Request) *errors.ErrorStack {
if w.closedReqChan {
return errors.NewErrorStack(fmt.Errorf("handle: worker channel for name resolution of %s is already closed", w.req.Name()))
} else if !w.req.Equal(req) {
return errors.NewErrorStack(fmt.Errorf("handle: invalid request; the submitted request (%s) does not match the requests handled by this worker (%s)", req.Name(), w.req.Name()))
}
w.reqs <- req
return nil
}
// resolveFromWith resolves the topic of the requests associated with this worker by querying the "ip" IP address and
// using the "proto" protocol (either "" for UDP or "tcp"). It returns an entry corresponding to the requested topic, or an
// definitive error that happened during the resolution.
func (w *worker) resolveFromWith(ip net.IP, proto string) (*nameresolver.Entry, *errors.ErrorStack) {
var ipList []net.IP
// We first query about the IPv4 addresses associated to the request topic.
clnt := new(dns.Client)
clnt.Net = proto
ma := new(dns.Msg)
ma.SetEdns0(4096, false)
ma.SetQuestion(w.req.Name(), dns.TypeA)
ma.RecursionDesired = false
ans, _, err := clnt.Exchange(ma, net.JoinHostPort(ip.String(), "53"))
if err != nil {
errStack := errors.NewErrorStack(err)
errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
return nil, errStack
}
if ans == nil {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
if ans.Rcode != dns.RcodeSuccess {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
if !ans.Authoritative {
// We expect an non-empty answer from the server, with a positive answer (no NXDOMAIN (lame delegation),
// no SERVFAIL (broken server)). We also expect the server to be authoritative; if it is not, it is not clear
// why, because the name is delegated to this server according to the parent zone, so we assume that this server
// is broken, but there might be other reasons for this that I can't think off from the top of my head.
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
// If the answer is truncated, we might want to retry over TCP... except of course if the truncated answer is
// already provided over TCP (see Spotify blog post about when it happened to them :))
if ans.Truncated {
if proto == "tcp" {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
return w.resolveFromWith(ip, "tcp")
}
for _, grr := range ans.Answer {
// We only consider records from the answer section that have a owner name equal to the qname.
if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()){
// We may receive either A or CNAME records with matching owner name. We dismiss all other cases
// (which are probably constituted of NSEC and DNAME and similar stuff. NSEC is of no value here, and DNAME
// are not supported by this tool.
switch rr := grr.(type) {
case *dns.A:
// We stack IPv4 addresses because the RRSet might be composed of multiple A records
ipList = append(ipList, rr.A)
case *dns.CNAME:
// A CNAME is supposed to be the only record at a given domain name. Thus, we return this alias marker
// and forget about all other records that might resides here.
return nameresolver.NewAliasEntry(w.req.Name(), rr.Target), nil
}
}
}
// We now query for the AAAA records to also get the IPv6 addresses
clnt = new(dns.Client)
clnt.Net = proto
maaaa := new(dns.Msg)
maaaa.SetEdns0(4096, false)
maaaa.SetQuestion(w.req.Name(), dns.TypeAAAA)
maaaa.RecursionDesired = false
ans, _, err = clnt.Exchange(maaaa, net.JoinHostPort(ip.String(), "53"))
if err != nil {
errStack := errors.NewErrorStack(err)
errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
return nil, errStack
}
if ans == nil {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
if ans.Rcode != dns.RcodeSuccess {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
if !ans.Authoritative {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
if ans.Truncated {
if proto == "tcp" {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
return w.resolveFromWith(ip, "tcp")
}
for _, grr := range ans.Answer {
if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()){
switch rr := grr.(type) {
case *dns.AAAA:
ipList = append(ipList, rr.AAAA)
case *dns.CNAME:
// We should have a CNAME here because the CNAME was not returned when asked for A records, and if we
// had received a CNAME, we would already have returned.
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a CNAME that was not provided for the A query from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
}
}
return nameresolver.NewIPEntry(w.req.Name(), ipList), nil
}
// resolveFrom resolves the request associated to this worker. It returns the entry generated from a successful
// resolution or the error that occurred.
func (w *worker) resolveFrom(ip net.IP) (*nameresolver.Entry, *errors.ErrorStack) {
// (proto == "" means UDP)
return w.resolveFromWith(ip, "")
}
// resolveFromGlues tries to resolve the request associated to this worker using the list of servers provided as
// parameters, assuming their are all delegation with glues (i.e. IP addresses of nameservers are already known).
func (w *worker) resolveFromGlues(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
var errList []string
for _, ns := range nameSrvs {
for _, ip := range ns.Addrs() {
// Tries every IP address of every name server. If an error occurs, the next IP, then server is tried.
entry, err := w.resolveFrom(ip)
if err == nil {
return entry, nil
}
errList = append(errList, fmt.Sprintf("resolveFromGlues: error from %s(%s): %s", ns.Name(), ip.String(), err.Error()))
}
}
// No IP address of any server returned a positive result.
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGlues: no valid glued delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolveFromGluelessNameSrvs resolves the request associated to this worker using name servers whose IP address is not
// known thanks to glues and in-bailiwick address records. It returns the answer to that request or an error no server
// returned an acceptable response.
func (w *worker) resolveFromGluelessNameSrvs(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
var errList []string
Outerloop:
for _, ns := range nameSrvs {
var addrs []net.IP
// requestedName is the nameserver name, by default. It may evolve, as aliases/CNAME are met along the resolution
requestedName := ns.Name()
// We limit to MAX_CNAME_CHAIN the number of CNAME that we are willing to follow
Innerloop:
for i := 0; i < MAX_CNAME_CHAIN && len(addrs) == 0; i++ {
// Start up the resolution of the name of the nameserver into IP addresses so that we can query these IP
// addresses for the request topic of this worker.
req := nameresolver.NewRequestWithContext(requestedName, w.req.Exceptions(), w.req)
w.nrHandler(req)
ne, err := req.Result()
if err != nil || ne == nil {
// if an error occurred, we just try with the next nameserver until we get an answer or all servers have
// been tried.
continue Outerloop
}
if ne.CNAMETarget() == "" {
// We got some IP addresses ; we store them away and go to the next step
addrs = ne.Addrs()
break Innerloop
}
// If the answer is an alias, we retry with the new target name
requestedName = ne.CNAMETarget()
}
if len(addrs) == 0 {
// We hit a very long CNAME Chain or the name cannot be resolved for some reason
continue
}
// Try to query every IP that we found, until we get a valid answer
for _, addr := range addrs {
entry, err := w.resolveFrom(addr)
if err == nil {
return entry, nil | }
errList = append(errList, fmt.Sprintf("resolveFromGluelessNameSrvs: error from %s(%s): %s", ns.Name(), addr.String(), err.Error()))
}
}
// We tried every IP address of every name server to no avail. Return an error
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGluelessNameSrvs: no valid glueless delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolve is in charge of orchestrating the resolution of the request that is associated with this worker.
// It first locates the zone apex covering the requested name, then queries the delegated servers
// (glued first, glueless as a fallback) for the requested address records.
func (w *worker) resolve() (*nameresolver.Entry, *errors.ErrorStack) {
    // First, we search the list of name servers to which the requested domain name is delegated. This is obtained by
    // submitting delegation info request, removing a label each time, until a non-null response is provided (meaning we
    // reached the apex of the zone containing the requested name).
    var entry *zonecut.Entry
    reqName := w.req.Name()
    for entry == nil {
        var err *errors.ErrorStack
        // Get the servers for this zonecut
        req := zonecut.NewRequest(reqName, w.req.Exceptions())
        w.zcHandler(req)
        entry, err = req.Result()
        if err != nil {
            // Classify the zone-cut error: some kinds abort the whole resolution,
            // others just mean "no delegation at this label, climb up one level".
            var returnErr bool
            switch typedErr := err.OriginalError().(type) {
            case *errors.TimeoutError:
                returnErr = true
            case *errors.NXDomainError:
                // NXDOMAIN is fatal only when the caller requested RFC 8020 semantics
                // (NXDOMAIN cuts off the entire subtree).
                returnErr = w.req.Exceptions().RFC8020
            case *errors.ServfailError:
                // SERVFAIL may be tolerated as "no data" when the caller opted in.
                returnErr = !w.req.Exceptions().AcceptServFailAsNoData
            case *errors.NoNameServerError:
                returnErr = false
            default:
                _ = typedErr // binding unused; the switch only inspects the dynamic type
                returnErr = true
            }
            // If we receive an error while searching for the delegation info, we will not be able to perform the
            // subsequent queries, so we bail out on this request.
            if returnErr {
                err.Push(fmt.Errorf("resolve: error while getting zone cut info of %s for %s", reqName, w.req.Name()))
                return nil, err
            }
            // Non-fatal error: clear it and fall through to the label-stripping step below.
            err = nil
            entry = nil
        }
        if entry == nil {
            // If no entry was provided, reqName is not the zone apex, so we remove a label and retry.
            pos, end := dns.NextLabel(reqName, 1)
            if end {
                reqName = "."
            } else {
                reqName = reqName[pos:]
            }
        }
    }
    // Setting apart glueless delegations and glued delegations
    var nameSrvsWithGlues []*zonecut.NameSrvInfo
    var gluelessNameSrvs []*zonecut.NameSrvInfo
    for _, nameSrv := range entry.NameServers() {
        if len(nameSrv.Addrs()) == 0 {
            gluelessNameSrvs = append(gluelessNameSrvs, nameSrv)
        } else {
            nameSrvsWithGlues = append(nameSrvsWithGlues, nameSrv)
        }
    }
    // Try to resolve first using glues to go faster
    r, gluedErr := w.resolveFromGlues(nameSrvsWithGlues)
    if gluedErr != nil {
        // An NXDomain from a glued (authoritative) server is treated as definitive:
        // no fallback to glueless servers is attempted.
        if _, ok := gluedErr.OriginalError().(*errors.NXDomainError); ok {
            gluedErr.Push(fmt.Errorf("resolve: got NXDomain while resolving %s from glued servers", w.req.Name()))
            return nil, gluedErr
        }
        // No glued servers returned an answer, so we now try with the glueless delegations.
        var gluelessErr *errors.ErrorStack
        r, gluelessErr = w.resolveFromGluelessNameSrvs(gluelessNameSrvs)
        if gluelessErr != nil {
            gluelessErr.Push(fmt.Errorf("resolve: unable to resolve %s: glued errors: [%s]", w.req.Name(), gluedErr.Error()))
            return nil, gluelessErr
        }
    }
    return r, nil
}
// start prepares the worker for handling new requests.
// It launches a goroutine that performs the resolution exactly once, then replays that
// single outcome to every request read from the reqs channel. When the channel is closed
// and drained, the goroutine signals its completion on joinChan.
func (w *worker) start() {
    go func() {
        answer, resolveErr := w.resolve()
        // Serve the cached resolution outcome to every pending and future request.
        for pending := range w.reqs {
            pending.SetResult(answer, resolveErr)
        }
        // reqs was closed by stop(); report termination so stop() can return.
        w.joinChan <- true
    }()
}
// startWithCachedResult performs the same kind of operations that start(), except that the response is not obtained
// from the network, but by loading it from a cache file.
func (w *worker) startWithCachedResult(cf *nameresolver.CacheFile) {
    go func() {
        result, resultErr, loadErr := cf.Result()
        if loadErr != nil {
            // The cache file could not be loaded: discard any partial result and stack the
            // loading failure on top of whatever error the cache entry already carried.
            result = nil
            cacheErr := fmt.Errorf("startWithCachedResult: error while loading cache of %s: %s", w.req.Name(), loadErr.Error())
            if resultErr == nil {
                resultErr = errors.NewErrorStack(cacheErr)
            } else {
                resultErr.Push(cacheErr)
            }
        }
        // Replay the cached outcome to every request until the channel is closed.
        for pending := range w.reqs {
            pending.SetResult(result, resultErr)
        }
        w.joinChan <- true
    }()
}
// stop is to be called during the cleanup of the worker. It shuts down the goroutine started by start() and waits for
// it to actually end. stop returns true if it is the first time it is called and the start() routine was stopped, or
// else it returns false.
func (w *worker) stop() bool {
if w.closedReqChan {
return false
}
close (w.reqs)
w.closedReqChan = true
_ = <-w.joinChan
close(w.joinChan)
return true
} | random_line_split | |
worker.go | package nameresolver
import (
"fmt"
"github.com/miekg/dns"
"net"
"github.com/ANSSI-FR/transdep/messages/zonecut"
"github.com/ANSSI-FR/transdep/tools"
"github.com/ANSSI-FR/transdep/messages/nameresolver"
"github.com/ANSSI-FR/transdep/errors"
"strings"
)
// WORKER_CHAN_CAPACITY indicates the maximum number of request unhandled by the start() goroutine can be spooled before
// the call to Handle() becomes blocking.
const WORKER_CHAN_CAPACITY = 10
// MAX_CNAME_CHAIN indicates the longest chain of CNAME that is acceptable to be followed a name is considered a
// dead-end (i.e. unfit for name resolution)
const MAX_CNAME_CHAIN = 10
// worker represents a request handler for a specific request target domain name for which name resolution is sought.
type worker struct {
// req is the request topic for which this worker was started in the first place.
req *nameresolver.Request
// reqs is the channel by which subsequent requests for the same topic as for "req" are received.
reqs chan *nameresolver.Request
// closedReqChan helps prevent double-close issue on reqs channel, when the worker is stopping.
closedReqChan bool
// joinChan is used by stop() to wait for the completion of the start() goroutine
joinChan chan bool
// zcHandler is used to submit new zone cut requests. This is most notably used to get the delegation information of
// the parent zone of the requested name, in order to query its name servers for the requested name delegation
// information.
zcHandler func(*zonecut.Request) *errors.ErrorStack
// nrHandler is used to submit new name resolution requests. This is used, for instance, to get the IP addresses
// associated to nameservers that are out-of-bailiwick and for which we don't have acceptable glues or IP addresses.
nrHandler func(*nameresolver.Request) *errors.ErrorStack
// config is the configuration of the current Transdep run
config *tools.TransdepConfig
}
// initNewWorker builds a new worker instance and returns it.
// It DOES NOT start the new worker, and should not be called directly by the finder.
func initNewWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker {
    // All channels are created here so that start()/startWithCachedResult() can assume
    // a fully wired worker.
    return &worker{
        req:           req,
        reqs:          make(chan *nameresolver.Request, WORKER_CHAN_CAPACITY),
        closedReqChan: false,
        joinChan:      make(chan bool, 1),
        zcHandler:     zcHandler,
        nrHandler:     nrHandler,
        config:        conf,
    }
}
// newWorker builds a new worker instance and returns it.
// The worker is started and will resolve the request from the network.
func newWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker {
    wrk := initNewWorker(req, nrHandler, zcHandler, conf)
    wrk.start()
    return wrk
}
// newWorkerWithCachedResult builds a new worker instance and returns it.
// The worker is started and will resolve the request from a cache file instead of the network.
func newWorkerWithCachedResult(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, cf *nameresolver.CacheFile, conf *tools.TransdepConfig) *worker {
    w := initNewWorker(req, nrHandler, zcHandler, conf)
    w.startWithCachedResult(cf)
    return w
}
// handle allows the submission of new requests to this worker.
// This method returns an error if the worker is stopped or if the submitted request does not match the request usually
// handled by this worker.
func (w *worker) handle(req *nameresolver.Request) *errors.ErrorStack {
    // Reject submissions once stop() has closed the request channel.
    if w.closedReqChan {
        return errors.NewErrorStack(fmt.Errorf("handle: worker channel for name resolution of %s is already closed", w.req.Name()))
    }
    // A worker only serves requests equal to the one it was created for.
    if !w.req.Equal(req) {
        return errors.NewErrorStack(fmt.Errorf("handle: invalid request; the submitted request (%s) does not match the requests handled by this worker (%s)", req.Name(), w.req.Name()))
    }
    w.reqs <- req
    return nil
}
// resolveFromWith resolves the topic of the requests associated with this worker by querying the "ip" IP address and
// using the "proto" protocol (either "" for UDP or "tcp"). It returns an entry corresponding to the requested topic,
// or a definitive error that happened during the resolution.
//
// The resolution consists of two non-recursive queries to the authoritative server: A first, then AAAA. A CNAME in
// the A answer short-circuits the whole procedure and is returned as an alias entry. A truncated UDP answer triggers
// a single retry of the entire procedure over TCP.
func (w *worker) resolveFromWith(ip net.IP, proto string) (*nameresolver.Entry, *errors.ErrorStack) {
    var ipList []net.IP
    // A single client is reused for both the A and the AAAA exchanges; it carries no
    // per-query state besides the transport protocol.
    clnt := new(dns.Client)
    clnt.Net = proto
    // We first query about the IPv4 addresses associated to the request topic.
    ma := new(dns.Msg)
    ma.SetEdns0(4096, false)
    ma.SetQuestion(w.req.Name(), dns.TypeA)
    // RD is cleared on purpose: we query authoritative servers, not recursive resolvers.
    ma.RecursionDesired = false
    ans, _, err := clnt.Exchange(ma, net.JoinHostPort(ip.String(), "53"))
    if err != nil {
        errStack := errors.NewErrorStack(err)
        errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
        return nil, errStack
    }
    if ans == nil {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
    }
    if ans.Rcode != dns.RcodeSuccess {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
    }
    if !ans.Authoritative {
        // We expect a non-empty answer from the server, with a positive answer (no NXDOMAIN (lame delegation),
        // no SERVFAIL (broken server)). We also expect the server to be authoritative; if it is not, it is not clear
        // why, because the name is delegated to this server according to the parent zone, so we assume that this
        // server is broken, but there might be other reasons for this.
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
    }
    // If the answer is truncated, we might want to retry over TCP... except of course if the truncated answer is
    // already provided over TCP (see Spotify blog post about when it happened to them :))
    if ans.Truncated {
        if proto == "tcp" {
            return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
        }
        return w.resolveFromWith(ip, "tcp")
    }
    for _, grr := range ans.Answer {
        // We only consider records from the answer section that have an owner name equal to the qname.
        if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()) {
            // We may receive either A or CNAME records with matching owner name. We dismiss all other cases
            // (which are probably constituted of NSEC and DNAME and similar stuff); NSEC is of no value here, and
            // DNAME are not supported by this tool.
            switch rr := grr.(type) {
            case *dns.A:
                // We stack IPv4 addresses because the RRSet might be composed of multiple A records
                ipList = append(ipList, rr.A)
            case *dns.CNAME:
                // A CNAME is supposed to be the only record at a given domain name. Thus, we return this alias marker
                // and forget about all other records that might reside here.
                return nameresolver.NewAliasEntry(w.req.Name(), rr.Target), nil
            }
        }
    }
    // We now query for the AAAA records to also get the IPv6 addresses. The client is reused;
    // only a fresh message is needed.
    maaaa := new(dns.Msg)
    maaaa.SetEdns0(4096, false)
    maaaa.SetQuestion(w.req.Name(), dns.TypeAAAA)
    maaaa.RecursionDesired = false
    ans, _, err = clnt.Exchange(maaaa, net.JoinHostPort(ip.String(), "53"))
    if err != nil {
        errStack := errors.NewErrorStack(err)
        errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
        return nil, errStack
    }
    if ans == nil {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
    }
    if ans.Rcode != dns.RcodeSuccess {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
    }
    if !ans.Authoritative {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
    }
    if ans.Truncated {
        if proto == "tcp" {
            return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
        }
        return w.resolveFromWith(ip, "tcp")
    }
    for _, grr := range ans.Answer {
        if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()) {
            switch rr := grr.(type) {
            case *dns.AAAA:
                ipList = append(ipList, rr.AAAA)
            case *dns.CNAME:
                // We should NOT see a CNAME here: a CNAME for this owner name would already have been returned
                // while processing the A answer above. Receiving one only now means the server answered the two
                // queries inconsistently, so this is reported as an error.
                return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a CNAME that was not provided for the A query from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
            }
        }
    }
    return nameresolver.NewIPEntry(w.req.Name(), ipList), nil
}
// resolveFrom resolves the request associated to this worker by querying the given server IP address.
// It returns the entry generated from a successful resolution or the error that occurred.
func (w *worker) resolveFrom(ip net.IP) (*nameresolver.Entry, *errors.ErrorStack) {
    // (proto == "" means UDP); resolveFromWith retries over TCP by itself on truncation.
    return w.resolveFromWith(ip, "")
}
// resolveFromGlues tries to resolve the request associated to this worker using the list of servers provided as
// parameters, assuming they are all delegations with glues (i.e. IP addresses of nameservers are already known).
func (w *worker) resolveFromGlues(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
    var errList []string
    for _, ns := range nameSrvs {
        for _, ip := range ns.Addrs() {
            // Tries every IP address of every name server. If an error occurs, the next IP, then server is tried.
            entry, err := w.resolveFrom(ip)
            if err == nil {
                return entry, nil
            }
            // Keep each failure so the final error reports every attempt.
            errList = append(errList, fmt.Sprintf("resolveFromGlues: error from %s(%s): %s", ns.Name(), ip.String(), err.Error()))
        }
    }
    // No IP address of any server returned a positive result.
    return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGlues: no valid glued delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolveFromGluelessNameSrvs resolves the request associated to this worker using name servers whose IP address is
// not known thanks to glues and in-bailiwick address records. It returns the answer to that request or an error if
// no server returned an acceptable response.
func (w *worker) resolveFromGluelessNameSrvs(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
    var errList []string
Outerloop:
    for _, ns := range nameSrvs {
        var addrs []net.IP
        // requestedName is the nameserver name, by default. It may evolve, as aliases/CNAME are met along the resolution
        requestedName := ns.Name()
        // We limit to MAX_CNAME_CHAIN the number of CNAME that we are willing to follow
    Innerloop:
        for i := 0; i < MAX_CNAME_CHAIN && len(addrs) == 0; i++ {
            // Start up the resolution of the name of the nameserver into IP addresses so that we can query these IP
            // addresses for the request topic of this worker.
            req := nameresolver.NewRequestWithContext(requestedName, w.req.Exceptions(), w.req)
            // NOTE(review): the ErrorStack returned by nrHandler is discarded here; the outcome is read back from
            // req.Result() below — confirm the handler also reports submission failures through the request itself.
            w.nrHandler(req)
            ne, err := req.Result()
            if err != nil || ne == nil {
                // if an error occurred, we just try with the next nameserver until we get an answer or all servers have
                // been tried.
                continue Outerloop
            }
            if ne.CNAMETarget() == "" {
                // We got some IP addresses ; we store them away and go to the next step
                addrs = ne.Addrs()
                break Innerloop
            }
            // If the answer is an alias, we retry with the new target name
            requestedName = ne.CNAMETarget()
        }
        if len(addrs) == 0 {
            // We hit a very long CNAME Chain or the name cannot be resolved for some reason
            continue
        }
        // Try to query every IP that we found, until we get a valid answer
        for _, addr := range addrs {
            entry, err := w.resolveFrom(addr)
            if err == nil {
                return entry, nil
            }
            errList = append(errList, fmt.Sprintf("resolveFromGluelessNameSrvs: error from %s(%s): %s", ns.Name(), addr.String(), err.Error()))
        }
    }
    // We tried every IP address of every name server to no avail. Return an error
    return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGluelessNameSrvs: no valid glueless delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolve is in charge of orchestrating the resolution of the request that is associated with this worker
func (w *worker) resolve() (*nameresolver.Entry, *errors.ErrorStack) {
// First, we search the list of name servers to which the requested domain name is delegated. This is obtained by
// submitting delegation info request, removing a label each time, until a non-null response is provided (meaning we
// reached the apex of the zone containing the requested name).
var entry *zonecut.Entry
reqName := w.req.Name()
for entry == nil {
var err *errors.ErrorStack
// Get the servers for this zonecut
req := zonecut.NewRequest(reqName, w.req.Exceptions())
w.zcHandler(req)
entry, err = req.Result()
if err != nil {
var returnErr bool
switch typedErr := err.OriginalError().(type) {
case *errors.TimeoutError:
returnErr = true
case *errors.NXDomainError:
returnErr = w.req.Exceptions().RFC8020
case *errors.ServfailError:
returnErr = !w.req.Exceptions().AcceptServFailAsNoData
case *errors.NoNameServerError:
returnErr = false
default:
_ = typedErr
returnErr = true
}
// If we receive an error while searching for the delegation info, we will not be able to perform the
// subsequent queries, so we bail out on this request.
if returnErr |
err = nil
entry = nil
}
if entry == nil {
// If no entry was provided, reqName is not the zone apex, so we remove a label and retry.
pos, end := dns.NextLabel(reqName, 1)
if end {
reqName = "."
} else {
reqName = reqName[pos:]
}
}
}
// Setting apart glueless delegations and glued delegations
var nameSrvsWithGlues []*zonecut.NameSrvInfo
var gluelessNameSrvs []*zonecut.NameSrvInfo
for _, nameSrv := range entry.NameServers() {
if len(nameSrv.Addrs()) == 0 {
gluelessNameSrvs = append(gluelessNameSrvs, nameSrv)
} else {
nameSrvsWithGlues = append(nameSrvsWithGlues, nameSrv)
}
}
// Try to resolve first using glues to go faster
r, gluedErr := w.resolveFromGlues(nameSrvsWithGlues)
if gluedErr != nil {
if _, ok := gluedErr.OriginalError().(*errors.NXDomainError) ; ok {
gluedErr.Push(fmt.Errorf("resolve: got NXDomain while resolving %s from glued servers", w.req.Name()))
return nil, gluedErr
}
// No glued servers returned an answer, so we now try with the glueless delegations.
var gluelessErr *errors.ErrorStack
r, gluelessErr = w.resolveFromGluelessNameSrvs(gluelessNameSrvs)
if gluelessErr != nil {
gluelessErr.Push(fmt.Errorf("resolve: unable to resolve %s: glued errors: [%s]", w.req.Name(), gluedErr.Error()))
return nil, gluelessErr
}
}
return r, nil
}
// start prepares the worker for handling new requests.
// The current implementation is to launch a goroutine that will read from the reqs channel attribute new requests and
// will try to answer them. When stopped, it will immediately send the join signal.
func (w *worker) start() {
    go func() {
        // The resolution is performed exactly once; its outcome is then replayed to every
        // request submitted for this worker's topic.
        result, err := w.resolve()
        for req := range w.reqs {
            req.SetResult(result, err)
        }
        // reqs was closed by stop(); signal completion so stop() can return.
        w.joinChan <- true
    }()
}
// startWithCachedResult performs the same kind of operations that start(), except that the response is not obtained
// from the network, but by loading it from a cache file.
func (w *worker) startWithCachedResult(cf *nameresolver.CacheFile) {
    go func() {
        var result *nameresolver.Entry
        var resultErr *errors.ErrorStack
        var err error
        result, resultErr, err = cf.Result()
        if err != nil {
            // The cache file could not be loaded: discard any partial result and stack the
            // loading failure on top of whatever error the cache entry already carried.
            result = nil
            cacheErr := fmt.Errorf("startWithCachedResult: error while loading cache of %s: %s", w.req.Name(), err.Error())
            if resultErr != nil {
                resultErr.Push(cacheErr)
            } else {
                resultErr = errors.NewErrorStack(cacheErr)
            }
        }
        // Replay the cached outcome to every request until the channel is closed.
        for req := range w.reqs {
            req.SetResult(result, resultErr)
        }
        w.joinChan <- true
    }()
}
// stop is to be called during the cleanup of the worker. It shuts down the goroutine started by start() and waits for
// it to actually end. stop returns true if it is the first time it is called and the start() routine was stopped, or
// else it returns false.
func (w *worker) stop() bool {
if w.closedReqChan {
return false
}
close (w.reqs)
w.closedReqChan = true
_ = <-w.joinChan
close(w.joinChan)
return true
} | {
err.Push(fmt.Errorf("resolve: error while getting zone cut info of %s for %s", reqName, w.req.Name()))
return nil, err
} | conditional_block |
worker.go | package nameresolver
import (
"fmt"
"github.com/miekg/dns"
"net"
"github.com/ANSSI-FR/transdep/messages/zonecut"
"github.com/ANSSI-FR/transdep/tools"
"github.com/ANSSI-FR/transdep/messages/nameresolver"
"github.com/ANSSI-FR/transdep/errors"
"strings"
)
// WORKER_CHAN_CAPACITY indicates the maximum number of request unhandled by the start() goroutine can be spooled before
// the call to Handle() becomes blocking.
const WORKER_CHAN_CAPACITY = 10
// MAX_CNAME_CHAIN indicates the longest chain of CNAME that is acceptable to be followed a name is considered a
// dead-end (i.e. unfit for name resolution)
const MAX_CNAME_CHAIN = 10
// worker represents a request handler for a specific request target domain name for which name resolution is sought.
type worker struct {
// req is the request topic for which this worker was started in the first place.
req *nameresolver.Request
// reqs is the channel by which subsequent requests for the same topic as for "req" are received.
reqs chan *nameresolver.Request
// closedReqChan helps prevent double-close issue on reqs channel, when the worker is stopping.
closedReqChan bool
// joinChan is used by stop() to wait for the completion of the start() goroutine
joinChan chan bool
// zcHandler is used to submit new zone cut requests. This is most notably used to get the delegation information of
// the parent zone of the requested name, in order to query its name servers for the requested name delegation
// information.
zcHandler func(*zonecut.Request) *errors.ErrorStack
// nrHandler is used to submit new name resolution requests. This is used, for instance, to get the IP addresses
// associated to nameservers that are out-of-bailiwick and for which we don't have acceptable glues or IP addresses.
nrHandler func(*nameresolver.Request) *errors.ErrorStack
// config is the configuration of the current Transdep run
config *tools.TransdepConfig
}
// initNewWorker builds a new worker instance and returns it.
// It DOES NOT start the new worker, and should not be called directly by the finder.
func initNewWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker {
    w := new(worker)
    w.req = req
    w.zcHandler = zcHandler
    w.nrHandler = nrHandler
    w.config = conf
    // Buffered so that up to WORKER_CHAN_CAPACITY submissions do not block the caller.
    w.reqs = make(chan *nameresolver.Request, WORKER_CHAN_CAPACITY)
    w.closedReqChan = false
    // Capacity 1 so the start() goroutine never blocks on its final join signal.
    w.joinChan = make(chan bool, 1)
    return w
}
// newWorker builds a new worker instance and returns it.
// The worker is started and will resolve the request from the network.
func newWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker |
// newWorkerWithCachedResult builds a new worker instance and returns it.
// The worker is started and will resolve the request from a cache file instead of the network.
func newWorkerWithCachedResult(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, cf *nameresolver.CacheFile, conf *tools.TransdepConfig) *worker {
    w := initNewWorker(req, nrHandler, zcHandler, conf)
    w.startWithCachedResult(cf)
    return w
}
// handle allows the submission of new requests to this worker.
// This method returns an error if the worker is stopped or if the submitted request does not match the request usually
// handled by this worker.
func (w *worker) handle(req *nameresolver.Request) *errors.ErrorStack {
    // Reject submissions once stop() has closed the request channel.
    if w.closedReqChan {
        return errors.NewErrorStack(fmt.Errorf("handle: worker channel for name resolution of %s is already closed", w.req.Name()))
    } else if !w.req.Equal(req) {
        // A worker only serves requests equal to the one it was created for.
        return errors.NewErrorStack(fmt.Errorf("handle: invalid request; the submitted request (%s) does not match the requests handled by this worker (%s)", req.Name(), w.req.Name()))
    }
    w.reqs <- req
    return nil
}
// resolveFromWith resolves the topic of the requests associated with this worker by querying the "ip" IP address and
// using the "proto" protocol (either "" for UDP or "tcp"). It returns an entry corresponding to the requested topic,
// or a definitive error that happened during the resolution.
//
// The resolution consists of two non-recursive queries to the authoritative server: A first, then AAAA. A CNAME in
// the A answer short-circuits the whole procedure and is returned as an alias entry. A truncated UDP answer triggers
// a single retry of the entire procedure over TCP.
func (w *worker) resolveFromWith(ip net.IP, proto string) (*nameresolver.Entry, *errors.ErrorStack) {
    var ipList []net.IP
    // A single client is reused for both the A and the AAAA exchanges; it carries no
    // per-query state besides the transport protocol.
    clnt := new(dns.Client)
    clnt.Net = proto
    // We first query about the IPv4 addresses associated to the request topic.
    ma := new(dns.Msg)
    ma.SetEdns0(4096, false)
    ma.SetQuestion(w.req.Name(), dns.TypeA)
    // RD is cleared on purpose: we query authoritative servers, not recursive resolvers.
    ma.RecursionDesired = false
    ans, _, err := clnt.Exchange(ma, net.JoinHostPort(ip.String(), "53"))
    if err != nil {
        errStack := errors.NewErrorStack(err)
        errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
        return nil, errStack
    }
    if ans == nil {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
    }
    if ans.Rcode != dns.RcodeSuccess {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
    }
    if !ans.Authoritative {
        // We expect a non-empty answer from the server, with a positive answer (no NXDOMAIN (lame delegation),
        // no SERVFAIL (broken server)). We also expect the server to be authoritative; if it is not, it is not clear
        // why, because the name is delegated to this server according to the parent zone, so we assume that this
        // server is broken, but there might be other reasons for this.
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
    }
    // If the answer is truncated, we might want to retry over TCP... except of course if the truncated answer is
    // already provided over TCP (see Spotify blog post about when it happened to them :))
    if ans.Truncated {
        if proto == "tcp" {
            return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
        }
        return w.resolveFromWith(ip, "tcp")
    }
    for _, grr := range ans.Answer {
        // We only consider records from the answer section that have an owner name equal to the qname.
        if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()) {
            // We may receive either A or CNAME records with matching owner name. We dismiss all other cases
            // (which are probably constituted of NSEC and DNAME and similar stuff); NSEC is of no value here, and
            // DNAME are not supported by this tool.
            switch rr := grr.(type) {
            case *dns.A:
                // We stack IPv4 addresses because the RRSet might be composed of multiple A records
                ipList = append(ipList, rr.A)
            case *dns.CNAME:
                // A CNAME is supposed to be the only record at a given domain name. Thus, we return this alias marker
                // and forget about all other records that might reside here.
                return nameresolver.NewAliasEntry(w.req.Name(), rr.Target), nil
            }
        }
    }
    // We now query for the AAAA records to also get the IPv6 addresses. The client is reused;
    // only a fresh message is needed.
    maaaa := new(dns.Msg)
    maaaa.SetEdns0(4096, false)
    maaaa.SetQuestion(w.req.Name(), dns.TypeAAAA)
    maaaa.RecursionDesired = false
    ans, _, err = clnt.Exchange(maaaa, net.JoinHostPort(ip.String(), "53"))
    if err != nil {
        errStack := errors.NewErrorStack(err)
        errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
        return nil, errStack
    }
    if ans == nil {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
    }
    if ans.Rcode != dns.RcodeSuccess {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
    }
    if !ans.Authoritative {
        return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
    }
    if ans.Truncated {
        if proto == "tcp" {
            return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
        }
        return w.resolveFromWith(ip, "tcp")
    }
    for _, grr := range ans.Answer {
        if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()) {
            switch rr := grr.(type) {
            case *dns.AAAA:
                ipList = append(ipList, rr.AAAA)
            case *dns.CNAME:
                // We should NOT see a CNAME here: a CNAME for this owner name would already have been returned
                // while processing the A answer above. Receiving one only now means the server answered the two
                // queries inconsistently, so this is reported as an error.
                return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a CNAME that was not provided for the A query from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
            }
        }
    }
    return nameresolver.NewIPEntry(w.req.Name(), ipList), nil
}
// resolveFrom resolves the request associated to this worker. It returns the entry generated from a successful
// resolution or the error that occurred.
func (w *worker) resolveFrom(ip net.IP) (*nameresolver.Entry, *errors.ErrorStack) {
// (proto == "" means UDP)
return w.resolveFromWith(ip, "")
}
// resolveFromGlues tries to resolve the request associated to this worker using the list of servers provided as
// parameters, assuming their are all delegation with glues (i.e. IP addresses of nameservers are already known).
func (w *worker) resolveFromGlues(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
var errList []string
for _, ns := range nameSrvs {
for _, ip := range ns.Addrs() {
// Tries every IP address of every name server. If an error occurs, the next IP, then server is tried.
entry, err := w.resolveFrom(ip)
if err == nil {
return entry, nil
}
errList = append(errList, fmt.Sprintf("resolveFromGlues: error from %s(%s): %s", ns.Name(), ip.String(), err.Error()))
}
}
// No IP address of any server returned a positive result.
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGlues: no valid glued delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolveFromGluelessNameSrvs resolves the request associated to this worker using name servers whose IP address is not
// known thanks to glues and in-bailiwick address records. It returns the answer to that request or an error no server
// returned an acceptable response.
func (w *worker) resolveFromGluelessNameSrvs(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
var errList []string
Outerloop:
for _, ns := range nameSrvs {
var addrs []net.IP
// requestedName is the nameserver name, by default. It may evolve, as aliases/CNAME are met along the resolution
requestedName := ns.Name()
// We limit to MAX_CNAME_CHAIN the number of CNAME that we are willing to follow
Innerloop:
for i := 0; i < MAX_CNAME_CHAIN && len(addrs) == 0; i++ {
// Start up the resolution of the name of the nameserver into IP addresses so that we can query these IP
// addresses for the request topic of this worker.
req := nameresolver.NewRequestWithContext(requestedName, w.req.Exceptions(), w.req)
w.nrHandler(req)
ne, err := req.Result()
if err != nil || ne == nil {
// if an error occurred, we just try with the next nameserver until we get an answer or all servers have
// been tried.
continue Outerloop
}
if ne.CNAMETarget() == "" {
// We got some IP addresses ; we store them away and go to the next step
addrs = ne.Addrs()
break Innerloop
}
// If the answer is an alias, we retry with the new target name
requestedName = ne.CNAMETarget()
}
if len(addrs) == 0 {
// We hit a very long CNAME Chain or the name cannot be resolved for some reason
continue
}
// Try to query every IP that we found, until we get a valid answer
for _, addr := range addrs {
entry, err := w.resolveFrom(addr)
if err == nil {
return entry, nil
}
errList = append(errList, fmt.Sprintf("resolveFromGluelessNameSrvs: error from %s(%s): %s", ns.Name(), addr.String(), err.Error()))
}
}
// We tried every IP address of every name server to no avail. Return an error
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGluelessNameSrvs: no valid glueless delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolve is in charge of orchestrating the resolution of the request that is associated with this worker
func (w *worker) resolve() (*nameresolver.Entry, *errors.ErrorStack) {
// First, we search the list of name servers to which the requested domain name is delegated. This is obtained by
// submitting delegation info request, removing a label each time, until a non-null response is provided (meaning we
// reached the apex of the zone containing the requested name).
var entry *zonecut.Entry
reqName := w.req.Name()
for entry == nil {
var err *errors.ErrorStack
// Get the servers for this zonecut
req := zonecut.NewRequest(reqName, w.req.Exceptions())
w.zcHandler(req)
entry, err = req.Result()
if err != nil {
var returnErr bool
switch typedErr := err.OriginalError().(type) {
case *errors.TimeoutError:
returnErr = true
case *errors.NXDomainError:
returnErr = w.req.Exceptions().RFC8020
case *errors.ServfailError:
returnErr = !w.req.Exceptions().AcceptServFailAsNoData
case *errors.NoNameServerError:
returnErr = false
default:
_ = typedErr
returnErr = true
}
// If we receive an error while searching for the delegation info, we will not be able to perform the
// subsequent queries, so we bail out on this request.
if returnErr {
err.Push(fmt.Errorf("resolve: error while getting zone cut info of %s for %s", reqName, w.req.Name()))
return nil, err
}
err = nil
entry = nil
}
if entry == nil {
// If no entry was provided, reqName is not the zone apex, so we remove a label and retry.
pos, end := dns.NextLabel(reqName, 1)
if end {
reqName = "."
} else {
reqName = reqName[pos:]
}
}
}
// Setting apart glueless delegations and glued delegations
var nameSrvsWithGlues []*zonecut.NameSrvInfo
var gluelessNameSrvs []*zonecut.NameSrvInfo
for _, nameSrv := range entry.NameServers() {
if len(nameSrv.Addrs()) == 0 {
gluelessNameSrvs = append(gluelessNameSrvs, nameSrv)
} else {
nameSrvsWithGlues = append(nameSrvsWithGlues, nameSrv)
}
}
// Try to resolve first using glues to go faster
r, gluedErr := w.resolveFromGlues(nameSrvsWithGlues)
if gluedErr != nil {
if _, ok := gluedErr.OriginalError().(*errors.NXDomainError) ; ok {
gluedErr.Push(fmt.Errorf("resolve: got NXDomain while resolving %s from glued servers", w.req.Name()))
return nil, gluedErr
}
// No glued servers returned an answer, so we now try with the glueless delegations.
var gluelessErr *errors.ErrorStack
r, gluelessErr = w.resolveFromGluelessNameSrvs(gluelessNameSrvs)
if gluelessErr != nil {
gluelessErr.Push(fmt.Errorf("resolve: unable to resolve %s: glued errors: [%s]", w.req.Name(), gluedErr.Error()))
return nil, gluelessErr
}
}
return r, nil
}
// start prepares the worker for handling new requests.
// The current implementation is to launch a goroutine that will read from the reqs channel attribute new requests and
// will try to answer them. When stopped, it will immediately send the join signal.
func (w *worker) start() {
go func() {
result, err := w.resolve()
for req := range w.reqs {
req.SetResult(result, err)
}
w.joinChan <- true
}()
}
// startWithCachedResult performs the same kind of operations that start(), except that the response is not obtained
// from the network, but by loading it from a cache file.
func (w *worker) startWithCachedResult(cf *nameresolver.CacheFile) {
go func() {
var result *nameresolver.Entry
var resultErr *errors.ErrorStack
var err error
result, resultErr, err = cf.Result()
if err != nil {
result = nil
cacheErr := fmt.Errorf("startWithCachedResult: error while loading cache of %s: %s", w.req.Name(), err.Error())
if resultErr != nil {
resultErr.Push(cacheErr)
} else {
resultErr = errors.NewErrorStack(cacheErr)
}
}
for req := range w.reqs {
req.SetResult(result, resultErr)
}
w.joinChan <- true
}()
}
// stop is to be called during the cleanup of the worker. It shuts down the goroutine started by start() and waits for
// it to actually end. stop returns true if it is the first time it is called and the start() routine was stopped, or
// else it returns false.
func (w *worker) stop() bool {
if w.closedReqChan {
return false
}
close (w.reqs)
w.closedReqChan = true
_ = <-w.joinChan
close(w.joinChan)
return true
} | {
w := initNewWorker(req, nrHandler, zcHandler, conf)
w.start()
return w
} | identifier_body |
worker.go | package nameresolver
import (
"fmt"
"github.com/miekg/dns"
"net"
"github.com/ANSSI-FR/transdep/messages/zonecut"
"github.com/ANSSI-FR/transdep/tools"
"github.com/ANSSI-FR/transdep/messages/nameresolver"
"github.com/ANSSI-FR/transdep/errors"
"strings"
)
// WORKER_CHAN_CAPACITY indicates the maximum number of request unhandled by the start() goroutine can be spooled before
// the call to Handle() becomes blocking.
const WORKER_CHAN_CAPACITY = 10
// MAX_CNAME_CHAIN indicates the longest chain of CNAME that is acceptable to be followed a name is considered a
// dead-end (i.e. unfit for name resolution)
const MAX_CNAME_CHAIN = 10
// worker represents a request handler for a specific request target domain name for which name resolution is sought.
type worker struct {
// req is the request topic for which this worker was started in the first place.
req *nameresolver.Request
// reqs is the channel by which subsequent requests for the same topic as for "req" are received.
reqs chan *nameresolver.Request
// closedReqChan helps prevent double-close issue on reqs channel, when the worker is stopping.
closedReqChan bool
// joinChan is used by stop() to wait for the completion of the start() goroutine
joinChan chan bool
// zcHandler is used to submit new zone cut requests. This is most notably used to get the delegation information of
// the parent zone of the requested name, in order to query its name servers for the requested name delegation
// information.
zcHandler func(*zonecut.Request) *errors.ErrorStack
// nrHandler is used to submit new name resolution requests. This is used, for instance, to get the IP addresses
// associated to nameservers that are out-of-bailiwick and for which we don't have acceptable glues or IP addresses.
nrHandler func(*nameresolver.Request) *errors.ErrorStack
// config is the configuration of the current Transdep run
config *tools.TransdepConfig
}
// initNewWorker builds a new worker instance and returns it.
// It DOES NOT start the new worker, and should not be called directly by the finder.
func initNewWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker {
w := new(worker)
w.req = req
w.zcHandler = zcHandler
w.nrHandler = nrHandler
w.config = conf
w.reqs = make(chan *nameresolver.Request, WORKER_CHAN_CAPACITY)
w.closedReqChan = false
w.joinChan = make(chan bool, 1)
return w
}
// newWorker builds a new worker instance and returns it.
// The worker is started and will resolve the request from the network.
func newWorker(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, conf *tools.TransdepConfig) *worker {
w := initNewWorker(req, nrHandler, zcHandler, conf)
w.start()
return w
}
// newWorker builds a new worker instance and returns it.
// The worker is started and will resolve the request from a cache file.
func newWorkerWithCachedResult(req *nameresolver.Request, nrHandler func(*nameresolver.Request) *errors.ErrorStack, zcHandler func(*zonecut.Request) *errors.ErrorStack, cf *nameresolver.CacheFile, conf *tools.TransdepConfig) *worker {
w := initNewWorker(req, nrHandler, zcHandler, conf)
w.startWithCachedResult(cf)
return w
}
// handle allows the submission of new requests to this worker.
// This method returns an error if the worker is stopped or if the submitted request does not match the request usually
// handled by this worker.
func (w *worker) handle(req *nameresolver.Request) *errors.ErrorStack {
if w.closedReqChan {
return errors.NewErrorStack(fmt.Errorf("handle: worker channel for name resolution of %s is already closed", w.req.Name()))
} else if !w.req.Equal(req) {
return errors.NewErrorStack(fmt.Errorf("handle: invalid request; the submitted request (%s) does not match the requests handled by this worker (%s)", req.Name(), w.req.Name()))
}
w.reqs <- req
return nil
}
// resolveFromWith resolves the topic of the requests associated with this worker by querying the "ip" IP address and
// using the "proto" protocol (either "" for UDP or "tcp"). It returns an entry corresponding to the requested topic, or an
// definitive error that happened during the resolution.
func (w *worker) resolveFromWith(ip net.IP, proto string) (*nameresolver.Entry, *errors.ErrorStack) {
var ipList []net.IP
// We first query about the IPv4 addresses associated to the request topic.
clnt := new(dns.Client)
clnt.Net = proto
ma := new(dns.Msg)
ma.SetEdns0(4096, false)
ma.SetQuestion(w.req.Name(), dns.TypeA)
ma.RecursionDesired = false
ans, _, err := clnt.Exchange(ma, net.JoinHostPort(ip.String(), "53"))
if err != nil {
errStack := errors.NewErrorStack(err)
errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
return nil, errStack
}
if ans == nil {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
if ans.Rcode != dns.RcodeSuccess {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
if !ans.Authoritative {
// We expect an non-empty answer from the server, with a positive answer (no NXDOMAIN (lame delegation),
// no SERVFAIL (broken server)). We also expect the server to be authoritative; if it is not, it is not clear
// why, because the name is delegated to this server according to the parent zone, so we assume that this server
// is broken, but there might be other reasons for this that I can't think off from the top of my head.
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
// If the answer is truncated, we might want to retry over TCP... except of course if the truncated answer is
// already provided over TCP (see Spotify blog post about when it happened to them :))
if ans.Truncated {
if proto == "tcp" {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))
}
return w.resolveFromWith(ip, "tcp")
}
for _, grr := range ans.Answer {
// We only consider records from the answer section that have a owner name equal to the qname.
if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()){
// We may receive either A or CNAME records with matching owner name. We dismiss all other cases
// (which are probably constituted of NSEC and DNAME and similar stuff. NSEC is of no value here, and DNAME
// are not supported by this tool.
switch rr := grr.(type) {
case *dns.A:
// We stack IPv4 addresses because the RRSet might be composed of multiple A records
ipList = append(ipList, rr.A)
case *dns.CNAME:
// A CNAME is supposed to be the only record at a given domain name. Thus, we return this alias marker
// and forget about all other records that might resides here.
return nameresolver.NewAliasEntry(w.req.Name(), rr.Target), nil
}
}
}
// We now query for the AAAA records to also get the IPv6 addresses
clnt = new(dns.Client)
clnt.Net = proto
maaaa := new(dns.Msg)
maaaa.SetEdns0(4096, false)
maaaa.SetQuestion(w.req.Name(), dns.TypeAAAA)
maaaa.RecursionDesired = false
ans, _, err = clnt.Exchange(maaaa, net.JoinHostPort(ip.String(), "53"))
if err != nil {
errStack := errors.NewErrorStack(err)
errStack.Push(fmt.Errorf("resolveFromWith: error while exchanging with %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
return nil, errStack
}
if ans == nil {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got empty answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
if ans.Rcode != dns.RcodeSuccess {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got DNS error %s from %s over %s for %s %s?", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
if !ans.Authoritative {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got non-authoritative data from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
if ans.Truncated {
if proto == "tcp" {
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a truncated answer from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
return w.resolveFromWith(ip, "tcp")
}
for _, grr := range ans.Answer {
if dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()){
switch rr := grr.(type) {
case *dns.AAAA:
ipList = append(ipList, rr.AAAA)
case *dns.CNAME:
// We should have a CNAME here because the CNAME was not returned when asked for A records, and if we
// had received a CNAME, we would already have returned.
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromWith: got a CNAME that was not provided for the A query from %s over %s for %s %s?", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))
}
}
}
return nameresolver.NewIPEntry(w.req.Name(), ipList), nil
}
// resolveFrom resolves the request associated to this worker. It returns the entry generated from a successful
// resolution or the error that occurred.
func (w *worker) resolveFrom(ip net.IP) (*nameresolver.Entry, *errors.ErrorStack) {
// (proto == "" means UDP)
return w.resolveFromWith(ip, "")
}
// resolveFromGlues tries to resolve the request associated to this worker using the list of servers provided as
// parameters, assuming their are all delegation with glues (i.e. IP addresses of nameservers are already known).
func (w *worker) resolveFromGlues(nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
var errList []string
for _, ns := range nameSrvs {
for _, ip := range ns.Addrs() {
// Tries every IP address of every name server. If an error occurs, the next IP, then server is tried.
entry, err := w.resolveFrom(ip)
if err == nil {
return entry, nil
}
errList = append(errList, fmt.Sprintf("resolveFromGlues: error from %s(%s): %s", ns.Name(), ip.String(), err.Error()))
}
}
// No IP address of any server returned a positive result.
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGlues: no valid glued delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolveFromGluelessNameSrvs resolves the request associated to this worker using name servers whose IP address is not
// known thanks to glues and in-bailiwick address records. It returns the answer to that request or an error no server
// returned an acceptable response.
func (w *worker) | (nameSrvs []*zonecut.NameSrvInfo) (*nameresolver.Entry, *errors.ErrorStack) {
var errList []string
Outerloop:
for _, ns := range nameSrvs {
var addrs []net.IP
// requestedName is the nameserver name, by default. It may evolve, as aliases/CNAME are met along the resolution
requestedName := ns.Name()
// We limit to MAX_CNAME_CHAIN the number of CNAME that we are willing to follow
Innerloop:
for i := 0; i < MAX_CNAME_CHAIN && len(addrs) == 0; i++ {
// Start up the resolution of the name of the nameserver into IP addresses so that we can query these IP
// addresses for the request topic of this worker.
req := nameresolver.NewRequestWithContext(requestedName, w.req.Exceptions(), w.req)
w.nrHandler(req)
ne, err := req.Result()
if err != nil || ne == nil {
// if an error occurred, we just try with the next nameserver until we get an answer or all servers have
// been tried.
continue Outerloop
}
if ne.CNAMETarget() == "" {
// We got some IP addresses ; we store them away and go to the next step
addrs = ne.Addrs()
break Innerloop
}
// If the answer is an alias, we retry with the new target name
requestedName = ne.CNAMETarget()
}
if len(addrs) == 0 {
// We hit a very long CNAME Chain or the name cannot be resolved for some reason
continue
}
// Try to query every IP that we found, until we get a valid answer
for _, addr := range addrs {
entry, err := w.resolveFrom(addr)
if err == nil {
return entry, nil
}
errList = append(errList, fmt.Sprintf("resolveFromGluelessNameSrvs: error from %s(%s): %s", ns.Name(), addr.String(), err.Error()))
}
}
// We tried every IP address of every name server to no avail. Return an error
return nil, errors.NewErrorStack(fmt.Errorf("resolveFromGluelessNameSrvs: no valid glueless delegation for %s: [%s]", w.req.Name(), strings.Join(errList, ", ")))
}
// resolve is in charge of orchestrating the resolution of the request that is associated with this worker
func (w *worker) resolve() (*nameresolver.Entry, *errors.ErrorStack) {
// First, we search the list of name servers to which the requested domain name is delegated. This is obtained by
// submitting delegation info request, removing a label each time, until a non-null response is provided (meaning we
// reached the apex of the zone containing the requested name).
var entry *zonecut.Entry
reqName := w.req.Name()
for entry == nil {
var err *errors.ErrorStack
// Get the servers for this zonecut
req := zonecut.NewRequest(reqName, w.req.Exceptions())
w.zcHandler(req)
entry, err = req.Result()
if err != nil {
var returnErr bool
switch typedErr := err.OriginalError().(type) {
case *errors.TimeoutError:
returnErr = true
case *errors.NXDomainError:
returnErr = w.req.Exceptions().RFC8020
case *errors.ServfailError:
returnErr = !w.req.Exceptions().AcceptServFailAsNoData
case *errors.NoNameServerError:
returnErr = false
default:
_ = typedErr
returnErr = true
}
// If we receive an error while searching for the delegation info, we will not be able to perform the
// subsequent queries, so we bail out on this request.
if returnErr {
err.Push(fmt.Errorf("resolve: error while getting zone cut info of %s for %s", reqName, w.req.Name()))
return nil, err
}
err = nil
entry = nil
}
if entry == nil {
// If no entry was provided, reqName is not the zone apex, so we remove a label and retry.
pos, end := dns.NextLabel(reqName, 1)
if end {
reqName = "."
} else {
reqName = reqName[pos:]
}
}
}
// Setting apart glueless delegations and glued delegations
var nameSrvsWithGlues []*zonecut.NameSrvInfo
var gluelessNameSrvs []*zonecut.NameSrvInfo
for _, nameSrv := range entry.NameServers() {
if len(nameSrv.Addrs()) == 0 {
gluelessNameSrvs = append(gluelessNameSrvs, nameSrv)
} else {
nameSrvsWithGlues = append(nameSrvsWithGlues, nameSrv)
}
}
// Try to resolve first using glues to go faster
r, gluedErr := w.resolveFromGlues(nameSrvsWithGlues)
if gluedErr != nil {
if _, ok := gluedErr.OriginalError().(*errors.NXDomainError) ; ok {
gluedErr.Push(fmt.Errorf("resolve: got NXDomain while resolving %s from glued servers", w.req.Name()))
return nil, gluedErr
}
// No glued servers returned an answer, so we now try with the glueless delegations.
var gluelessErr *errors.ErrorStack
r, gluelessErr = w.resolveFromGluelessNameSrvs(gluelessNameSrvs)
if gluelessErr != nil {
gluelessErr.Push(fmt.Errorf("resolve: unable to resolve %s: glued errors: [%s]", w.req.Name(), gluedErr.Error()))
return nil, gluelessErr
}
}
return r, nil
}
// start prepares the worker for handling new requests.
// The current implementation is to launch a goroutine that will read from the reqs channel attribute new requests and
// will try to answer them. When stopped, it will immediately send the join signal.
func (w *worker) start() {
go func() {
result, err := w.resolve()
for req := range w.reqs {
req.SetResult(result, err)
}
w.joinChan <- true
}()
}
// startWithCachedResult performs the same kind of operations that start(), except that the response is not obtained
// from the network, but by loading it from a cache file.
func (w *worker) startWithCachedResult(cf *nameresolver.CacheFile) {
go func() {
var result *nameresolver.Entry
var resultErr *errors.ErrorStack
var err error
result, resultErr, err = cf.Result()
if err != nil {
result = nil
cacheErr := fmt.Errorf("startWithCachedResult: error while loading cache of %s: %s", w.req.Name(), err.Error())
if resultErr != nil {
resultErr.Push(cacheErr)
} else {
resultErr = errors.NewErrorStack(cacheErr)
}
}
for req := range w.reqs {
req.SetResult(result, resultErr)
}
w.joinChan <- true
}()
}
// stop is to be called during the cleanup of the worker. It shuts down the goroutine started by start() and waits for
// it to actually end. stop returns true if it is the first time it is called and the start() routine was stopped, or
// else it returns false.
func (w *worker) stop() bool {
if w.closedReqChan {
return false
}
close (w.reqs)
w.closedReqChan = true
_ = <-w.joinChan
close(w.joinChan)
return true
} | resolveFromGluelessNameSrvs | identifier_name |
project_tasks.py | # coding=utf-8
"""
Project tasks
Add the following to your *requirements.txt* file:
* docutils!=0.14rc1; python_version == "[python_versions]"
"""
import ast
import os
from pprint import pformat
import shutil
import textwrap
# noinspection PyUnresolvedReferences
from herringlib.prompt import prompt
# noinspection PyUnresolvedReferences
from herringlib.template import Template
# noinspection PyUnresolvedReferences
from herringlib.venv import VirtualenvInfo
try:
# python3
# noinspection PyUnresolvedReferences,PyCompatibility
from configparser import ConfigParser, NoSectionError
except ImportError:
# python2
# noinspection PyUnresolvedReferences,PyCompatibility
from ConfigParser import ConfigParser, NoSectionError
# noinspection PyUnresolvedReferences
from herring.herring_app import task, HerringFile
# noinspection PyUnresolvedReferences
from herringlib.simple_logger import info, debug
# noinspection PyUnresolvedReferences
from herringlib.local_shell import LocalShell
# noinspection PyUnresolvedReferences
from herringlib.requirements import Requirements, Requirement
# noinspection PyUnresolvedReferences
from herringlib.project_settings import Project, ATTRIBUTES
missing_modules = []
def value_from_setup_py(arg_name):
"""
Use AST to find the name value in the setup() call in setup.py.
Only works for key=string arguments to setup().
:param arg_name: the keyword argument name passed to the setup() call in setup.py.
:type arg_name: str
:returns: the name value or None
:rtype: str|None
"""
setup_py = 'setup.py'
if os.path.isfile(setup_py):
# scan setup.py for a call to 'setup'.
tree = ast.parse(''.join(open(setup_py)))
call_nodes = [node.value for node in tree.body if type(node) == ast.Expr and type(node.value) == ast.Call]
# noinspection PyShadowingNames
def is_setup(call_node):
try:
return call_node.func.id == 'setup'
except AttributeError as ex:
try:
return call_node.func.value.id == 'setup'
except AttributeError as ex:
print(str(ex))
print(ast.dump(call_node.func))
print(ast.dump(call_node))
keywords = [call_node.keywords for call_node in call_nodes if is_setup(call_node)]
# now setup() takes keyword arguments so scan them looking for key that matches the given arg_name,
# then return the keyword's value
for keyword in keywords:
for keyword_arg in keyword:
if keyword_arg.arg == arg_name:
if hasattr(keyword_arg.value, 's'):
return keyword_arg.value.s
# didn't find it
return None
def _project_defaults():
"""
Get the project defaults from (in order of preference):
* setup.py,
* kwargs,
* herring config file,
* environment variables,
* default values.
:return: dictionary of defaults
:rtype: dict[str,str]
"""
defaults = {
'package': os.path.basename(os.path.abspath(os.curdir)),
'name': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
'description': 'The greatest project there ever was or will be!',
'author': 'author',
'title': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
}
if 'USER' in os.environ:
defaults['author'] = os.environ['USER']
defaults['author_email'] = '{author}@example.com'.format(author=defaults['author'])
# override defaults from herringfile
# for key in ['name', 'author', 'author_email', 'description']:
# attributes = Project.attributes()
# for key in [key for key in attributes.keys() if attributes[key] is not None]:
# # noinspection PyBroadException
# try:
# value = getattr(Project, key, None)
# if value is not None:
# defaults[key] = value
# except:
# pass
# override defaults from any config files
settings = HerringFile.settings
if settings is not None:
config = ConfigParser()
config.read(settings.config_files)
for section in ['project']:
try:
defaults.update(dict(config.items(section)))
except NoSectionError:
pass
# override defaults from kwargs
for key in task.kwargs:
defaults[key] = task.kwargs[key]
# override defaults from setup.py
for key in ['name', 'author', 'author_email', 'description']:
value = value_from_setup_py(key)
if value is not None:
defaults[key] = value
# now add any attributes that are not already in defaults
for key in ATTRIBUTES:
if key not in defaults:
value = getattr(Project, key, None)
if value is not None:
defaults[key] = value
else:
print("{key}:None".format(key=key))
return defaults
@task(namespace='project', help='Available options: --name, --package, --author, --author_email, --description',
kwargs=['name', 'package', 'author', 'author_email', 'description'], configured='no')
def init():
"""
Initialize a new python project with default files. Default values from herring.conf and directory name.
"""
defaults = _project_defaults()
if Project.prompt:
defaults['name'] = prompt("Enter the project's name:", defaults['name'])
defaults['package'] = prompt("Enter the project's package:", defaults['package'])
defaults['author'] = prompt("Enter the project's author:", defaults['author'])
defaults['author_email'] = prompt("Enter the project's author's email:", defaults['author_email'])
defaults['description'] = prompt("Enter the project's description:", defaults['description'])
# print("defaults:\n{defaults}".format(defaults=pformat(defaults)))
if Project.use_templates:
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False)
@task(namespace='project')
def update():
"""
Regenerate files (except herringfile) from current templates.
Delete the file(s) you want to update, then run this task.
"""
if Project.use_templates:
defaults = _project_defaults()
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False)
@task(namespace='project', configured='optional')
def show():
"""Show all project settings"""
info(str(Project))
@task(namespace='project', configured='optional')
def | ():
"""Show all project settings with descriptions"""
keys = Project.__dict__.keys()
for key in sorted(keys):
value = Project.__dict__[key]
if key in ATTRIBUTES:
attrs = ATTRIBUTES[key]
required = False
if 'required' in attrs:
if attrs['required']:
required = True
if 'help' in attrs:
info("# {key}".format(key=key))
if required:
info("# REQUIRED")
for line in textwrap.wrap(attrs['help'], width=100):
info("# {line}".format(line=line))
info("# '{key}': '{value}'".format(key=key, value=value))
info('')
else:
info("'{key}': '{value}'".format(key=key, value=value))
def _pip_list():
names = []
# noinspection PyBroadException
try:
# idiotic python setup tools creates empty egg directory in project that then causes pip to blow up.
# Wonderful python tools in action!
# so lets remove the stupid egg directory so we can use pip to get a listing of installed packages.
egg_info_dir = "{name}.egg-info".format(name=Project.name)
if os.path.exists(egg_info_dir):
shutil.rmtree(egg_info_dir)
with LocalShell() as local:
# if 'VIRTUAL_ENV' in os.environ:
# pip = os.path.join(os.environ['VIRTUAL_ENV'], 'bin', 'pip')
# info("PATH={path}".format(path=os.environ['PATH']))
# info(pip)
pip = local.system('which pip || which pip3', verbose=False).strip()
# info(pip)
# info("pip version: {ver}".format(ver=local.system('{pip} --version'.format(pip=pip))))
pip_list_output = local.run('{pip} list'.format(pip=pip))
# info(pip_list_output)
lines = pip_list_output.split("\n")
names = [line.split(" ")[0].lower().encode('ascii', 'ignore') for line in lines if line.strip()]
except Exception:
pass
return names
# noinspection PyArgumentEqualDefault
__pip_list = [pkg.decode('utf-8') for pkg in _pip_list()]
def packages_required(package_names):
"""
Check that the given packages are installed.
:param package_names: the package names
:type package_names: list
:return: asserted if all the packages are installed
:rtype: bool
"""
# info("packages_required(%s)" % repr(package_names))
# noinspection PyBroadException
try:
result = True
# info(package_names)
# info(__pip_list)
for requirement in [Requirement(name) for name in package_names]:
if requirement.supported_python():
pkg_name = requirement.package
if pkg_name.lower() not in __pip_list:
try:
# info('__import__("{name}")'.format(name=pkg_name))
__import__(pkg_name)
except ImportError:
info(pkg_name + " not installed!")
missing_modules.append(pkg_name)
result = False
return result
except Exception:
return False
@task(configured='optional')
def show_missing():
"""Show modules that if installed would enable additional tasks."""
if missing_modules:
info("The following modules are currently not installed and would enable additional tasks:")
for pkg_name in missing_modules:
info(' ' + pkg_name)
# noinspection PyArgumentEqualDefault
@task(namespace='project', private=False)
def check_requirements():
""" Checks that herringfile and herringlib/* required packages are in requirements.txt file """
debug("check_requirements")
needed = Requirements(Project).find_missing_requirements()
if needed:
info("Please add the following to your %s file:\n" % 'requirements.txt')
info("\n".join(str(needed)))
else:
info("Your %s includes all known herringlib task requirements" % 'requirements.txt')
@task(namespace='project', configured='required')
def environment():
""" Display project environment """
venvs = VirtualenvInfo('python_versions')
site_packages_cmdline = "python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())'"
project_env = {}
if not venvs.in_virtualenv and venvs.defined:
for venv_info in venvs.infos():
site_packages = venv_info.run(site_packages_cmdline).strip().splitlines()[2]
project_env[venv_info.venv + ': site-packages'] = site_packages
else:
with LocalShell() as local:
site_packages = local.system(site_packages_cmdline).strip()
project_env['site-packages'] = site_packages
info(pformat(project_env))
return project_env
| describe | identifier_name |
project_tasks.py | # coding=utf-8
"""
Project tasks
Add the following to your *requirements.txt* file:
* docutils!=0.14rc1; python_version == "[python_versions]"
"""
import ast
import os
from pprint import pformat
import shutil
import textwrap
# noinspection PyUnresolvedReferences
from herringlib.prompt import prompt
# noinspection PyUnresolvedReferences
from herringlib.template import Template
# noinspection PyUnresolvedReferences
from herringlib.venv import VirtualenvInfo
try:
# python3
# noinspection PyUnresolvedReferences,PyCompatibility
from configparser import ConfigParser, NoSectionError
except ImportError:
# python2
# noinspection PyUnresolvedReferences,PyCompatibility
from ConfigParser import ConfigParser, NoSectionError
# noinspection PyUnresolvedReferences
from herring.herring_app import task, HerringFile
# noinspection PyUnresolvedReferences
from herringlib.simple_logger import info, debug
# noinspection PyUnresolvedReferences
from herringlib.local_shell import LocalShell
# noinspection PyUnresolvedReferences
from herringlib.requirements import Requirements, Requirement
# noinspection PyUnresolvedReferences
from herringlib.project_settings import Project, ATTRIBUTES
missing_modules = []
def value_from_setup_py(arg_name):
"""
Use AST to find the name value in the setup() call in setup.py.
Only works for key=string arguments to setup().
:param arg_name: the keyword argument name passed to the setup() call in setup.py.
:type arg_name: str
:returns: the name value or None
:rtype: str|None
"""
setup_py = 'setup.py'
if os.path.isfile(setup_py):
# scan setup.py for a call to 'setup'.
tree = ast.parse(''.join(open(setup_py)))
call_nodes = [node.value for node in tree.body if type(node) == ast.Expr and type(node.value) == ast.Call]
# noinspection PyShadowingNames
def is_setup(call_node):
try:
return call_node.func.id == 'setup'
except AttributeError as ex:
try:
return call_node.func.value.id == 'setup'
except AttributeError as ex:
print(str(ex))
print(ast.dump(call_node.func))
print(ast.dump(call_node))
keywords = [call_node.keywords for call_node in call_nodes if is_setup(call_node)]
# now setup() takes keyword arguments so scan them looking for key that matches the given arg_name,
# then return the keyword's value
for keyword in keywords:
for keyword_arg in keyword:
if keyword_arg.arg == arg_name:
if hasattr(keyword_arg.value, 's'):
return keyword_arg.value.s
# didn't find it
return None
def _project_defaults():
"""
Get the project defaults from (in order of preference):
* setup.py,
* kwargs,
* herring config file,
* environment variables,
* default values.
:return: dictionary of defaults
:rtype: dict[str,str]
"""
defaults = {
'package': os.path.basename(os.path.abspath(os.curdir)),
'name': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
'description': 'The greatest project there ever was or will be!',
'author': 'author',
'title': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
}
if 'USER' in os.environ:
defaults['author'] = os.environ['USER']
defaults['author_email'] = '{author}@example.com'.format(author=defaults['author'])
# override defaults from herringfile
# for key in ['name', 'author', 'author_email', 'description']:
# attributes = Project.attributes()
# for key in [key for key in attributes.keys() if attributes[key] is not None]:
# # noinspection PyBroadException
# try:
# value = getattr(Project, key, None)
# if value is not None:
# defaults[key] = value
# except:
# pass
# override defaults from any config files
settings = HerringFile.settings
if settings is not None:
config = ConfigParser()
config.read(settings.config_files)
for section in ['project']:
try:
defaults.update(dict(config.items(section)))
except NoSectionError:
pass
# override defaults from kwargs
for key in task.kwargs:
defaults[key] = task.kwargs[key]
# override defaults from setup.py
for key in ['name', 'author', 'author_email', 'description']:
value = value_from_setup_py(key)
if value is not None:
defaults[key] = value
# now add any attributes that are not already in defaults
for key in ATTRIBUTES:
if key not in defaults:
value = getattr(Project, key, None)
if value is not None:
defaults[key] = value
else:
print("{key}:None".format(key=key))
return defaults
@task(namespace='project', help='Available options: --name, --package, --author, --author_email, --description',
kwargs=['name', 'package', 'author', 'author_email', 'description'], configured='no')
def init():
"""
Initialize a new python project with default files. Default values from herring.conf and directory name.
"""
defaults = _project_defaults()
if Project.prompt:
defaults['name'] = prompt("Enter the project's name:", defaults['name'])
defaults['package'] = prompt("Enter the project's package:", defaults['package'])
defaults['author'] = prompt("Enter the project's author:", defaults['author'])
defaults['author_email'] = prompt("Enter the project's author's email:", defaults['author_email'])
defaults['description'] = prompt("Enter the project's description:", defaults['description'])
# print("defaults:\n{defaults}".format(defaults=pformat(defaults)))
if Project.use_templates:
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False)
@task(namespace='project')
def update():
"""
Regenerate files (except herringfile) from current templates.
Delete the file(s) you want to update, then run this task.
"""
if Project.use_templates:
defaults = _project_defaults()
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False)
@task(namespace='project', configured='optional')
def show():
|
@task(namespace='project', configured='optional')
def describe():
"""Show all project settings with descriptions"""
keys = Project.__dict__.keys()
for key in sorted(keys):
value = Project.__dict__[key]
if key in ATTRIBUTES:
attrs = ATTRIBUTES[key]
required = False
if 'required' in attrs:
if attrs['required']:
required = True
if 'help' in attrs:
info("# {key}".format(key=key))
if required:
info("# REQUIRED")
for line in textwrap.wrap(attrs['help'], width=100):
info("# {line}".format(line=line))
info("# '{key}': '{value}'".format(key=key, value=value))
info('')
else:
info("'{key}': '{value}'".format(key=key, value=value))
def _pip_list():
names = []
# noinspection PyBroadException
try:
# idiotic python setup tools creates empty egg directory in project that then causes pip to blow up.
# Wonderful python tools in action!
# so lets remove the stupid egg directory so we can use pip to get a listing of installed packages.
egg_info_dir = "{name}.egg-info".format(name=Project.name)
if os.path.exists(egg_info_dir):
shutil.rmtree(egg_info_dir)
with LocalShell() as local:
# if 'VIRTUAL_ENV' in os.environ:
# pip = os.path.join(os.environ['VIRTUAL_ENV'], 'bin', 'pip')
# info("PATH={path}".format(path=os.environ['PATH']))
# info(pip)
pip = local.system('which pip || which pip3', verbose=False).strip()
# info(pip)
# info("pip version: {ver}".format(ver=local.system('{pip} --version'.format(pip=pip))))
pip_list_output = local.run('{pip} list'.format(pip=pip))
# info(pip_list_output)
lines = pip_list_output.split("\n")
names = [line.split(" ")[0].lower().encode('ascii', 'ignore') for line in lines if line.strip()]
except Exception:
pass
return names
# noinspection PyArgumentEqualDefault
__pip_list = [pkg.decode('utf-8') for pkg in _pip_list()]
def packages_required(package_names):
"""
Check that the given packages are installed.
:param package_names: the package names
:type package_names: list
:return: asserted if all the packages are installed
:rtype: bool
"""
# info("packages_required(%s)" % repr(package_names))
# noinspection PyBroadException
try:
result = True
# info(package_names)
# info(__pip_list)
for requirement in [Requirement(name) for name in package_names]:
if requirement.supported_python():
pkg_name = requirement.package
if pkg_name.lower() not in __pip_list:
try:
# info('__import__("{name}")'.format(name=pkg_name))
__import__(pkg_name)
except ImportError:
info(pkg_name + " not installed!")
missing_modules.append(pkg_name)
result = False
return result
except Exception:
return False
@task(configured='optional')
def show_missing():
"""Show modules that if installed would enable additional tasks."""
if missing_modules:
info("The following modules are currently not installed and would enable additional tasks:")
for pkg_name in missing_modules:
info(' ' + pkg_name)
# noinspection PyArgumentEqualDefault
@task(namespace='project', private=False)
def check_requirements():
""" Checks that herringfile and herringlib/* required packages are in requirements.txt file """
debug("check_requirements")
needed = Requirements(Project).find_missing_requirements()
if needed:
info("Please add the following to your %s file:\n" % 'requirements.txt')
info("\n".join(str(needed)))
else:
info("Your %s includes all known herringlib task requirements" % 'requirements.txt')
@task(namespace='project', configured='required')
def environment():
""" Display project environment """
venvs = VirtualenvInfo('python_versions')
site_packages_cmdline = "python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())'"
project_env = {}
if not venvs.in_virtualenv and venvs.defined:
for venv_info in venvs.infos():
site_packages = venv_info.run(site_packages_cmdline).strip().splitlines()[2]
project_env[venv_info.venv + ': site-packages'] = site_packages
else:
with LocalShell() as local:
site_packages = local.system(site_packages_cmdline).strip()
project_env['site-packages'] = site_packages
info(pformat(project_env))
return project_env
| """Show all project settings"""
info(str(Project)) | identifier_body |
project_tasks.py | # coding=utf-8
"""
Project tasks
Add the following to your *requirements.txt* file:
* docutils!=0.14rc1; python_version == "[python_versions]"
"""
import ast
import os
from pprint import pformat
import shutil
import textwrap
# noinspection PyUnresolvedReferences
from herringlib.prompt import prompt
# noinspection PyUnresolvedReferences
from herringlib.template import Template
# noinspection PyUnresolvedReferences
from herringlib.venv import VirtualenvInfo
try:
# python3
# noinspection PyUnresolvedReferences,PyCompatibility
from configparser import ConfigParser, NoSectionError
except ImportError:
# python2
# noinspection PyUnresolvedReferences,PyCompatibility
from ConfigParser import ConfigParser, NoSectionError
# noinspection PyUnresolvedReferences
from herring.herring_app import task, HerringFile
# noinspection PyUnresolvedReferences
from herringlib.simple_logger import info, debug
# noinspection PyUnresolvedReferences
from herringlib.local_shell import LocalShell
# noinspection PyUnresolvedReferences
from herringlib.requirements import Requirements, Requirement
# noinspection PyUnresolvedReferences
from herringlib.project_settings import Project, ATTRIBUTES
missing_modules = []
def value_from_setup_py(arg_name):
"""
Use AST to find the name value in the setup() call in setup.py.
Only works for key=string arguments to setup().
:param arg_name: the keyword argument name passed to the setup() call in setup.py.
:type arg_name: str
:returns: the name value or None
:rtype: str|None
"""
setup_py = 'setup.py'
if os.path.isfile(setup_py):
# scan setup.py for a call to 'setup'.
tree = ast.parse(''.join(open(setup_py)))
call_nodes = [node.value for node in tree.body if type(node) == ast.Expr and type(node.value) == ast.Call]
# noinspection PyShadowingNames
def is_setup(call_node):
try:
return call_node.func.id == 'setup'
except AttributeError as ex:
try:
return call_node.func.value.id == 'setup'
except AttributeError as ex:
print(str(ex))
print(ast.dump(call_node.func))
print(ast.dump(call_node))
keywords = [call_node.keywords for call_node in call_nodes if is_setup(call_node)]
# now setup() takes keyword arguments so scan them looking for key that matches the given arg_name,
# then return the keyword's value
for keyword in keywords:
for keyword_arg in keyword:
if keyword_arg.arg == arg_name:
if hasattr(keyword_arg.value, 's'):
return keyword_arg.value.s
# didn't find it
return None
def _project_defaults():
"""
Get the project defaults from (in order of preference):
* setup.py,
* kwargs,
* herring config file,
* environment variables,
* default values.
:return: dictionary of defaults
:rtype: dict[str,str]
"""
defaults = {
'package': os.path.basename(os.path.abspath(os.curdir)),
'name': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
'description': 'The greatest project there ever was or will be!',
'author': 'author',
'title': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
}
if 'USER' in os.environ:
defaults['author'] = os.environ['USER']
defaults['author_email'] = '{author}@example.com'.format(author=defaults['author'])
# override defaults from herringfile
# for key in ['name', 'author', 'author_email', 'description']:
# attributes = Project.attributes()
# for key in [key for key in attributes.keys() if attributes[key] is not None]:
# # noinspection PyBroadException
# try:
# value = getattr(Project, key, None)
# if value is not None:
# defaults[key] = value
# except:
# pass
# override defaults from any config files
settings = HerringFile.settings
if settings is not None:
config = ConfigParser()
config.read(settings.config_files)
for section in ['project']:
try:
defaults.update(dict(config.items(section)))
except NoSectionError:
pass
# override defaults from kwargs
for key in task.kwargs:
defaults[key] = task.kwargs[key]
# override defaults from setup.py
for key in ['name', 'author', 'author_email', 'description']:
value = value_from_setup_py(key)
if value is not None:
defaults[key] = value
# now add any attributes that are not already in defaults
for key in ATTRIBUTES:
if key not in defaults:
value = getattr(Project, key, None)
if value is not None:
defaults[key] = value
else:
print("{key}:None".format(key=key))
return defaults
@task(namespace='project', help='Available options: --name, --package, --author, --author_email, --description',
kwargs=['name', 'package', 'author', 'author_email', 'description'], configured='no')
def init():
"""
Initialize a new python project with default files. Default values from herring.conf and directory name.
"""
defaults = _project_defaults()
if Project.prompt:
defaults['name'] = prompt("Enter the project's name:", defaults['name'])
defaults['package'] = prompt("Enter the project's package:", defaults['package'])
defaults['author'] = prompt("Enter the project's author:", defaults['author'])
defaults['author_email'] = prompt("Enter the project's author's email:", defaults['author_email'])
defaults['description'] = prompt("Enter the project's description:", defaults['description'])
# print("defaults:\n{defaults}".format(defaults=pformat(defaults)))
if Project.use_templates:
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False)
@task(namespace='project')
def update():
"""
Regenerate files (except herringfile) from current templates.
Delete the file(s) you want to update, then run this task.
"""
if Project.use_templates:
|
@task(namespace='project', configured='optional')
def show():
"""Show all project settings"""
info(str(Project))
@task(namespace='project', configured='optional')
def describe():
"""Show all project settings with descriptions"""
keys = Project.__dict__.keys()
for key in sorted(keys):
value = Project.__dict__[key]
if key in ATTRIBUTES:
attrs = ATTRIBUTES[key]
required = False
if 'required' in attrs:
if attrs['required']:
required = True
if 'help' in attrs:
info("# {key}".format(key=key))
if required:
info("# REQUIRED")
for line in textwrap.wrap(attrs['help'], width=100):
info("# {line}".format(line=line))
info("# '{key}': '{value}'".format(key=key, value=value))
info('')
else:
info("'{key}': '{value}'".format(key=key, value=value))
def _pip_list():
names = []
# noinspection PyBroadException
try:
# idiotic python setup tools creates empty egg directory in project that then causes pip to blow up.
# Wonderful python tools in action!
# so lets remove the stupid egg directory so we can use pip to get a listing of installed packages.
egg_info_dir = "{name}.egg-info".format(name=Project.name)
if os.path.exists(egg_info_dir):
shutil.rmtree(egg_info_dir)
with LocalShell() as local:
# if 'VIRTUAL_ENV' in os.environ:
# pip = os.path.join(os.environ['VIRTUAL_ENV'], 'bin', 'pip')
# info("PATH={path}".format(path=os.environ['PATH']))
# info(pip)
pip = local.system('which pip || which pip3', verbose=False).strip()
# info(pip)
# info("pip version: {ver}".format(ver=local.system('{pip} --version'.format(pip=pip))))
pip_list_output = local.run('{pip} list'.format(pip=pip))
# info(pip_list_output)
lines = pip_list_output.split("\n")
names = [line.split(" ")[0].lower().encode('ascii', 'ignore') for line in lines if line.strip()]
except Exception:
pass
return names
# noinspection PyArgumentEqualDefault
__pip_list = [pkg.decode('utf-8') for pkg in _pip_list()]
def packages_required(package_names):
"""
Check that the given packages are installed.
:param package_names: the package names
:type package_names: list
:return: asserted if all the packages are installed
:rtype: bool
"""
# info("packages_required(%s)" % repr(package_names))
# noinspection PyBroadException
try:
result = True
# info(package_names)
# info(__pip_list)
for requirement in [Requirement(name) for name in package_names]:
if requirement.supported_python():
pkg_name = requirement.package
if pkg_name.lower() not in __pip_list:
try:
# info('__import__("{name}")'.format(name=pkg_name))
__import__(pkg_name)
except ImportError:
info(pkg_name + " not installed!")
missing_modules.append(pkg_name)
result = False
return result
except Exception:
return False
@task(configured='optional')
def show_missing():
"""Show modules that if installed would enable additional tasks."""
if missing_modules:
info("The following modules are currently not installed and would enable additional tasks:")
for pkg_name in missing_modules:
info(' ' + pkg_name)
# noinspection PyArgumentEqualDefault
@task(namespace='project', private=False)
def check_requirements():
""" Checks that herringfile and herringlib/* required packages are in requirements.txt file """
debug("check_requirements")
needed = Requirements(Project).find_missing_requirements()
if needed:
info("Please add the following to your %s file:\n" % 'requirements.txt')
info("\n".join(str(needed)))
else:
info("Your %s includes all known herringlib task requirements" % 'requirements.txt')
@task(namespace='project', configured='required')
def environment():
""" Display project environment """
venvs = VirtualenvInfo('python_versions')
site_packages_cmdline = "python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())'"
project_env = {}
if not venvs.in_virtualenv and venvs.defined:
for venv_info in venvs.infos():
site_packages = venv_info.run(site_packages_cmdline).strip().splitlines()[2]
project_env[venv_info.venv + ': site-packages'] = site_packages
else:
with LocalShell() as local:
site_packages = local.system(site_packages_cmdline).strip()
project_env['site-packages'] = site_packages
info(pformat(project_env))
return project_env
| defaults = _project_defaults()
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False) | conditional_block |
project_tasks.py | # coding=utf-8
"""
Project tasks
Add the following to your *requirements.txt* file:
* docutils!=0.14rc1; python_version == "[python_versions]"
"""
import ast
import os
from pprint import pformat
import shutil
import textwrap
# noinspection PyUnresolvedReferences
from herringlib.prompt import prompt
# noinspection PyUnresolvedReferences
from herringlib.template import Template
# noinspection PyUnresolvedReferences
from herringlib.venv import VirtualenvInfo
| except ImportError:
# python2
# noinspection PyUnresolvedReferences,PyCompatibility
from ConfigParser import ConfigParser, NoSectionError
# noinspection PyUnresolvedReferences
from herring.herring_app import task, HerringFile
# noinspection PyUnresolvedReferences
from herringlib.simple_logger import info, debug
# noinspection PyUnresolvedReferences
from herringlib.local_shell import LocalShell
# noinspection PyUnresolvedReferences
from herringlib.requirements import Requirements, Requirement
# noinspection PyUnresolvedReferences
from herringlib.project_settings import Project, ATTRIBUTES
missing_modules = []
def value_from_setup_py(arg_name):
"""
Use AST to find the name value in the setup() call in setup.py.
Only works for key=string arguments to setup().
:param arg_name: the keyword argument name passed to the setup() call in setup.py.
:type arg_name: str
:returns: the name value or None
:rtype: str|None
"""
setup_py = 'setup.py'
if os.path.isfile(setup_py):
# scan setup.py for a call to 'setup'.
tree = ast.parse(''.join(open(setup_py)))
call_nodes = [node.value for node in tree.body if type(node) == ast.Expr and type(node.value) == ast.Call]
# noinspection PyShadowingNames
def is_setup(call_node):
try:
return call_node.func.id == 'setup'
except AttributeError as ex:
try:
return call_node.func.value.id == 'setup'
except AttributeError as ex:
print(str(ex))
print(ast.dump(call_node.func))
print(ast.dump(call_node))
keywords = [call_node.keywords for call_node in call_nodes if is_setup(call_node)]
# now setup() takes keyword arguments so scan them looking for key that matches the given arg_name,
# then return the keyword's value
for keyword in keywords:
for keyword_arg in keyword:
if keyword_arg.arg == arg_name:
if hasattr(keyword_arg.value, 's'):
return keyword_arg.value.s
# didn't find it
return None
def _project_defaults():
"""
Get the project defaults from (in order of preference):
* setup.py,
* kwargs,
* herring config file,
* environment variables,
* default values.
:return: dictionary of defaults
:rtype: dict[str,str]
"""
defaults = {
'package': os.path.basename(os.path.abspath(os.curdir)),
'name': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
'description': 'The greatest project there ever was or will be!',
'author': 'author',
'title': os.path.basename(os.path.abspath(os.curdir)).capitalize(),
}
if 'USER' in os.environ:
defaults['author'] = os.environ['USER']
defaults['author_email'] = '{author}@example.com'.format(author=defaults['author'])
# override defaults from herringfile
# for key in ['name', 'author', 'author_email', 'description']:
# attributes = Project.attributes()
# for key in [key for key in attributes.keys() if attributes[key] is not None]:
# # noinspection PyBroadException
# try:
# value = getattr(Project, key, None)
# if value is not None:
# defaults[key] = value
# except:
# pass
# override defaults from any config files
settings = HerringFile.settings
if settings is not None:
config = ConfigParser()
config.read(settings.config_files)
for section in ['project']:
try:
defaults.update(dict(config.items(section)))
except NoSectionError:
pass
# override defaults from kwargs
for key in task.kwargs:
defaults[key] = task.kwargs[key]
# override defaults from setup.py
for key in ['name', 'author', 'author_email', 'description']:
value = value_from_setup_py(key)
if value is not None:
defaults[key] = value
# now add any attributes that are not already in defaults
for key in ATTRIBUTES:
if key not in defaults:
value = getattr(Project, key, None)
if value is not None:
defaults[key] = value
else:
print("{key}:None".format(key=key))
return defaults
@task(namespace='project', help='Available options: --name, --package, --author, --author_email, --description',
kwargs=['name', 'package', 'author', 'author_email', 'description'], configured='no')
def init():
"""
Initialize a new python project with default files. Default values from herring.conf and directory name.
"""
defaults = _project_defaults()
if Project.prompt:
defaults['name'] = prompt("Enter the project's name:", defaults['name'])
defaults['package'] = prompt("Enter the project's package:", defaults['package'])
defaults['author'] = prompt("Enter the project's author:", defaults['author'])
defaults['author_email'] = prompt("Enter the project's author's email:", defaults['author_email'])
defaults['description'] = prompt("Enter the project's description:", defaults['description'])
# print("defaults:\n{defaults}".format(defaults=pformat(defaults)))
if Project.use_templates:
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False)
@task(namespace='project')
def update():
"""
Regenerate files (except herringfile) from current templates.
Delete the file(s) you want to update, then run this task.
"""
if Project.use_templates:
defaults = _project_defaults()
template = Template()
for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))
for herringlib in HerringFile.herringlib_paths]:
info("template directory: %s" % template_dir)
# noinspection PyArgumentEqualDefault
template.generate(template_dir, defaults, overwrite=False)
@task(namespace='project', configured='optional')
def show():
"""Show all project settings"""
info(str(Project))
@task(namespace='project', configured='optional')
def describe():
"""Show all project settings with descriptions"""
keys = Project.__dict__.keys()
for key in sorted(keys):
value = Project.__dict__[key]
if key in ATTRIBUTES:
attrs = ATTRIBUTES[key]
required = False
if 'required' in attrs:
if attrs['required']:
required = True
if 'help' in attrs:
info("# {key}".format(key=key))
if required:
info("# REQUIRED")
for line in textwrap.wrap(attrs['help'], width=100):
info("# {line}".format(line=line))
info("# '{key}': '{value}'".format(key=key, value=value))
info('')
else:
info("'{key}': '{value}'".format(key=key, value=value))
def _pip_list():
names = []
# noinspection PyBroadException
try:
# idiotic python setup tools creates empty egg directory in project that then causes pip to blow up.
# Wonderful python tools in action!
# so lets remove the stupid egg directory so we can use pip to get a listing of installed packages.
egg_info_dir = "{name}.egg-info".format(name=Project.name)
if os.path.exists(egg_info_dir):
shutil.rmtree(egg_info_dir)
with LocalShell() as local:
# if 'VIRTUAL_ENV' in os.environ:
# pip = os.path.join(os.environ['VIRTUAL_ENV'], 'bin', 'pip')
# info("PATH={path}".format(path=os.environ['PATH']))
# info(pip)
pip = local.system('which pip || which pip3', verbose=False).strip()
# info(pip)
# info("pip version: {ver}".format(ver=local.system('{pip} --version'.format(pip=pip))))
pip_list_output = local.run('{pip} list'.format(pip=pip))
# info(pip_list_output)
lines = pip_list_output.split("\n")
names = [line.split(" ")[0].lower().encode('ascii', 'ignore') for line in lines if line.strip()]
except Exception:
pass
return names
# noinspection PyArgumentEqualDefault
__pip_list = [pkg.decode('utf-8') for pkg in _pip_list()]
def packages_required(package_names):
"""
Check that the given packages are installed.
:param package_names: the package names
:type package_names: list
:return: asserted if all the packages are installed
:rtype: bool
"""
# info("packages_required(%s)" % repr(package_names))
# noinspection PyBroadException
try:
result = True
# info(package_names)
# info(__pip_list)
for requirement in [Requirement(name) for name in package_names]:
if requirement.supported_python():
pkg_name = requirement.package
if pkg_name.lower() not in __pip_list:
try:
# info('__import__("{name}")'.format(name=pkg_name))
__import__(pkg_name)
except ImportError:
info(pkg_name + " not installed!")
missing_modules.append(pkg_name)
result = False
return result
except Exception:
return False
@task(configured='optional')
def show_missing():
"""Show modules that if installed would enable additional tasks."""
if missing_modules:
info("The following modules are currently not installed and would enable additional tasks:")
for pkg_name in missing_modules:
info(' ' + pkg_name)
# noinspection PyArgumentEqualDefault
@task(namespace='project', private=False)
def check_requirements():
""" Checks that herringfile and herringlib/* required packages are in requirements.txt file """
debug("check_requirements")
needed = Requirements(Project).find_missing_requirements()
if needed:
info("Please add the following to your %s file:\n" % 'requirements.txt')
info("\n".join(str(needed)))
else:
info("Your %s includes all known herringlib task requirements" % 'requirements.txt')
@task(namespace='project', configured='required')
def environment():
""" Display project environment """
venvs = VirtualenvInfo('python_versions')
site_packages_cmdline = "python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())'"
project_env = {}
if not venvs.in_virtualenv and venvs.defined:
for venv_info in venvs.infos():
site_packages = venv_info.run(site_packages_cmdline).strip().splitlines()[2]
project_env[venv_info.venv + ': site-packages'] = site_packages
else:
with LocalShell() as local:
site_packages = local.system(site_packages_cmdline).strip()
project_env['site-packages'] = site_packages
info(pformat(project_env))
return project_env | try:
# python3
# noinspection PyUnresolvedReferences,PyCompatibility
from configparser import ConfigParser, NoSectionError | random_line_split |
container.go | // Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dockerutil
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
"github.com/moby/moby/client"
"github.com/moby/moby/pkg/stdcopy"
"gvisor.dev/gvisor/pkg/test/testutil"
)
// Container represents a Docker Container allowing
// user to configure and control as one would with the 'docker'
// client. Container is backed by the official golang docker API.
// See: https://pkg.go.dev/github.com/docker/docker.
type Container struct {
Name string
runtime string
logger testutil.Logger
client *client.Client
id string
mounts []mount.Mount
links []string
copyErr error
cleanups []func()
// profile is the profiling hook associated with this container.
profile *profile
}
// RunOpts are options for running a container.
type RunOpts struct {
// Image is the image relative to images/. This will be mangled
// appropriately, to ensure that only first-party images are used.
Image string
// Memory is the memory limit in bytes.
Memory int
// Cpus in which to allow execution. ("0", "1", "0-2").
CpusetCpus string
// Ports are the ports to be allocated.
Ports []int
// WorkDir sets the working directory.
WorkDir string
// ReadOnly sets the read-only flag.
ReadOnly bool
// Env are additional environment variables.
Env []string
// User is the user to use.
User string
// Privileged enables privileged mode.
Privileged bool
// Sets network mode for the container. See container.NetworkMode for types. Several options will
// not work w/ gVisor. For example, you can't set the "sandbox" network option for gVisor using
// this handle.
NetworkMode string
// CapAdd are the extra set of capabilities to add.
CapAdd []string
// CapDrop are the extra set of capabilities to drop.
CapDrop []string
// Mounts is the list of directories/files to be mounted inside the container.
Mounts []mount.Mount
// Links is the list of containers to be connected to the container.
Links []string
}
func makeContainer(ctx context.Context, logger testutil.Logger, runtime string) *Container {
// Slashes are not allowed in container names.
name := testutil.RandomID(logger.Name())
name = strings.ReplaceAll(name, "/", "-")
client, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return nil
}
client.NegotiateAPIVersion(ctx)
return &Container{
logger: logger,
Name: name,
runtime: runtime,
client: client,
}
}
// MakeContainer constructs a suitable Container object.
//
// The runtime used is determined by the runtime flag.
//
// Containers will check flags for profiling requests.
func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
return makeContainer(ctx, logger, *runtime)
}
// MakeContainerWithRuntime is like MakeContainer, but allows for a runtime
// to be specified by suffix.
func MakeContainerWithRuntime(ctx context.Context, logger testutil.Logger, suffix string) *Container {
return makeContainer(ctx, logger, *runtime+suffix)
}
// MakeNativeContainer constructs a suitable Container object.
//
// The runtime used will be the system default.
//
// Native containers aren't profiled.
func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
unsandboxedRuntime := "runc"
if override, found := os.LookupEnv("UNSANDBOXED_RUNTIME"); found {
unsandboxedRuntime = override
}
return makeContainer(ctx, logger, unsandboxedRuntime)
}
// Spawn is analogous to 'docker run -d'.
func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return err
}
return c.Start(ctx)
}
// SpawnProcess is analogous to 'docker run -it'. It returns a process
// which represents the root process.
func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) (Process, error) {
config, hostconf, netconf := c.ConfigsFrom(r, args...)
config.Tty = true
config.OpenStdin = true
if err := c.CreateFrom(ctx, r.Image, config, hostconf, netconf); err != nil {
return Process{}, err
}
// Open a connection to the container for parsing logs and for TTY.
stream, err := c.client.ContainerAttach(ctx, c.id,
types.ContainerAttachOptions{
Stream: true,
Stdin: true,
Stdout: true,
Stderr: true,
})
if err != nil {
return Process{}, fmt.Errorf("connect failed container id %s: %v", c.id, err)
}
c.cleanups = append(c.cleanups, func() { stream.Close() })
if err := c.Start(ctx); err != nil {
return Process{}, err
}
return Process{container: c, conn: stream}, nil
}
// Run is analogous to 'docker run'.
func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return "", err
}
if err := c.Start(ctx); err != nil {
return "", err
}
if err := c.Wait(ctx); err != nil {
return "", err
}
return c.Logs(ctx)
}
// ConfigsFrom returns container configs from RunOpts and args. The caller should call 'CreateFrom'
// and Start.
func (c *Container) ConfigsFrom(r RunOpts, args ...string) (*container.Config, *container.HostConfig, *network.NetworkingConfig) {
return c.config(r, args), c.hostConfig(r), &network.NetworkingConfig{}
}
// MakeLink formats a link to add to a RunOpts.
func (c *Container) MakeLink(target string) string {
return fmt.Sprintf("%s:%s", c.Name, target)
}
// CreateFrom creates a container from the given configs.
func (c *Container) CreateFrom(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
return c.create(ctx, profileImage, conf, hostconf, netconf)
}
// Create is analogous to 'docker create'.
func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error {
return c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil)
}
func (c *Container) create(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
if c.runtime != "" && c.runtime != "runc" {
// Use the image name as provided here; which normally represents the
// unmodified "basic/alpine" image name. This should be easy to grok.
c.profileInit(profileImage)
}
cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, nil, c.Name)
if err != nil {
return err
}
c.id = cont.ID
return nil
}
func (c *Container) | (r RunOpts, args []string) *container.Config {
ports := nat.PortSet{}
for _, p := range r.Ports {
port := nat.Port(fmt.Sprintf("%d", p))
ports[port] = struct{}{}
}
env := append(r.Env, fmt.Sprintf("RUNSC_TEST_NAME=%s", c.Name))
return &container.Config{
Image: testutil.ImageByName(r.Image),
Cmd: args,
ExposedPorts: ports,
Env: env,
WorkingDir: r.WorkDir,
User: r.User,
}
}
func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
c.mounts = append(c.mounts, r.Mounts...)
return &container.HostConfig{
Runtime: c.runtime,
Mounts: c.mounts,
PublishAllPorts: true,
Links: r.Links,
CapAdd: r.CapAdd,
CapDrop: r.CapDrop,
Privileged: r.Privileged,
ReadonlyRootfs: r.ReadOnly,
NetworkMode: container.NetworkMode(r.NetworkMode),
Resources: container.Resources{
Memory: int64(r.Memory), // In bytes.
CpusetCpus: r.CpusetCpus,
},
}
}
// Start is analogous to 'docker start'.
func (c *Container) Start(ctx context.Context) error {
if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("ContainerStart failed: %v", err)
}
if c.profile != nil {
if err := c.profile.Start(c); err != nil {
c.logger.Logf("profile.Start failed: %v", err)
}
}
return nil
}
// Stop is analogous to 'docker stop'.
func (c *Container) Stop(ctx context.Context) error {
return c.client.ContainerStop(ctx, c.id, container.StopOptions{})
}
// Pause is analogous to 'docker pause'.
func (c *Container) Pause(ctx context.Context) error {
return c.client.ContainerPause(ctx, c.id)
}
// Unpause is analogous to 'docker unpause'.
func (c *Container) Unpause(ctx context.Context) error {
return c.client.ContainerUnpause(ctx, c.id)
}
// Checkpoint is analogous to 'docker checkpoint'.
func (c *Container) Checkpoint(ctx context.Context, name string) error {
return c.client.CheckpointCreate(ctx, c.Name, types.CheckpointCreateOptions{CheckpointID: name, Exit: true})
}
// Restore is analogous to 'docker start --checkpoint [name]'.
func (c *Container) Restore(ctx context.Context, name string) error {
return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{CheckpointID: name})
}
// Logs is analogous to 'docker logs'.
func (c *Container) Logs(ctx context.Context) (string, error) {
var out bytes.Buffer
err := c.logs(ctx, &out, &out)
return out.String(), err
}
func (c *Container) logs(ctx context.Context, stdout, stderr *bytes.Buffer) error {
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}
writer, err := c.client.ContainerLogs(ctx, c.id, opts)
if err != nil {
return err
}
defer writer.Close()
_, err = stdcopy.StdCopy(stdout, stderr, writer)
return err
}
// ID returns the container id.
func (c *Container) ID() string {
return c.id
}
// RootDirectory returns an educated guess about the container's root directory.
func (c *Container) RootDirectory() (string, error) {
// The root directory of this container's runtime.
rootDir := fmt.Sprintf("/var/run/docker/runtime-%s/moby", c.runtime)
_, err := os.Stat(rootDir)
if err == nil {
return rootDir, nil
}
// In docker v20+, due to https://github.com/moby/moby/issues/42345 the
// rootDir seems to always be the following.
const defaultDir = "/var/run/docker/runtime-runc/moby"
_, derr := os.Stat(defaultDir)
if derr == nil {
return defaultDir, nil
}
return "", fmt.Errorf("cannot stat %q: %v or %q: %v", rootDir, err, defaultDir, derr)
}
// SandboxPid returns the container's pid.
func (c *Container) SandboxPid(ctx context.Context) (int, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return -1, err
}
return resp.ContainerJSONBase.State.Pid, nil
}
// ErrNoIP indicates that no IP address is available.
var ErrNoIP = errors.New("no IP available")
// FindIP returns the IP address of the container.
func (c *Container) FindIP(ctx context.Context, ipv6 bool) (net.IP, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return nil, err
}
var ip net.IP
if ipv6 {
ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.GlobalIPv6Address)
} else {
ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.IPAddress)
}
if ip == nil {
return net.IP{}, ErrNoIP
}
return ip, nil
}
// FindPort returns the host port that is mapped to 'sandboxPort'.
func (c *Container) FindPort(ctx context.Context, sandboxPort int) (int, error) {
desc, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return -1, fmt.Errorf("error retrieving port: %v", err)
}
format := fmt.Sprintf("%d/tcp", sandboxPort)
ports, ok := desc.NetworkSettings.Ports[nat.Port(format)]
if !ok {
return -1, fmt.Errorf("error retrieving port: %v", err)
}
port, err := strconv.Atoi(ports[0].HostPort)
if err != nil {
return -1, fmt.Errorf("error parsing port %q: %v", port, err)
}
return port, nil
}
// CopyFiles copies in and mounts the given files. They are always ReadOnly.
func (c *Container) CopyFiles(opts *RunOpts, target string, sources ...string) {
dir, err := ioutil.TempDir("", c.Name)
if err != nil {
c.copyErr = fmt.Errorf("ioutil.TempDir failed: %v", err)
return
}
c.cleanups = append(c.cleanups, func() { os.RemoveAll(dir) })
if err := os.Chmod(dir, 0755); err != nil {
c.copyErr = fmt.Errorf("os.Chmod(%q, 0755) failed: %v", dir, err)
return
}
for _, name := range sources {
src := name
if !filepath.IsAbs(src) {
src, err = testutil.FindFile(name)
if err != nil {
c.copyErr = fmt.Errorf("testutil.FindFile(%q) failed: %w", name, err)
return
}
}
dst := path.Join(dir, path.Base(name))
if err := testutil.Copy(src, dst); err != nil {
c.copyErr = fmt.Errorf("testutil.Copy(%q, %q) failed: %v", src, dst, err)
return
}
c.logger.Logf("copy: %s -> %s", src, dst)
}
opts.Mounts = append(opts.Mounts, mount.Mount{
Type: mount.TypeBind,
Source: dir,
Target: target,
ReadOnly: false,
})
}
// Stats returns a snapshot of container stats similar to `docker stats`.
func (c *Container) Stats(ctx context.Context) (*types.StatsJSON, error) {
responseBody, err := c.client.ContainerStats(ctx, c.id, false /*stream*/)
if err != nil {
return nil, fmt.Errorf("ContainerStats failed: %v", err)
}
defer responseBody.Body.Close()
var v types.StatsJSON
if err := json.NewDecoder(responseBody.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("failed to decode container stats: %v", err)
}
return &v, nil
}
// Status inspects the container and returns its status.
func (c *Container) Status(ctx context.Context) (types.ContainerState, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return types.ContainerState{}, err
}
return *resp.State, err
}
// Wait waits for the container to exit.
func (c *Container) Wait(ctx context.Context) error {
defer c.stopProfiling()
statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
select {
case err := <-errChan:
return err
case res := <-statusChan:
if res.StatusCode != 0 {
var msg string
if res.Error != nil {
msg = res.Error.Message
}
return fmt.Errorf("container returned non-zero status: %d, msg: %q", res.StatusCode, msg)
}
return nil
}
}
// WaitTimeout waits for the container to exit with a timeout.
func (c *Container) WaitTimeout(ctx context.Context, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
return fmt.Errorf("container %s timed out after %v seconds", c.Name, timeout.Seconds())
}
return nil
case err := <-errChan:
return err
case <-statusChan:
return nil
}
}
// WaitForOutput searches container logs for pattern and returns, or times out.
func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout time.Duration) (string, error) {
matches, err := c.WaitForOutputSubmatch(ctx, pattern, timeout)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("didn't find pattern %s logs", pattern)
}
return matches[0], nil
}
// WaitForOutputSubmatch searches container logs for the given
// pattern or times out. It returns any regexp submatches as well.
func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
re := regexp.MustCompile(pattern)
for {
logs, err := c.Logs(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
}
if matches := re.FindStringSubmatch(logs); matches != nil {
return matches, nil
}
time.Sleep(50 * time.Millisecond)
}
}
// stopProfiling stops profiling.
func (c *Container) stopProfiling() {
if c.profile != nil {
if err := c.profile.Stop(c); err != nil {
// This most likely means that the runtime for the container
// was too short to connect and actually get a profile.
c.logger.Logf("warning: profile.Stop failed: %v", err)
}
}
}
// Kill kills the container.
func (c *Container) Kill(ctx context.Context) error {
defer c.stopProfiling()
return c.client.ContainerKill(ctx, c.id, "")
}
// Remove is analogous to 'docker rm'.
func (c *Container) Remove(ctx context.Context) error {
// Remove the image.
remove := types.ContainerRemoveOptions{
RemoveVolumes: c.mounts != nil,
RemoveLinks: c.links != nil,
Force: true,
}
return c.client.ContainerRemove(ctx, c.Name, remove)
}
// CleanUp kills and deletes the container (best effort).
func (c *Container) CleanUp(ctx context.Context) {
// Execute all cleanups. We execute cleanups here to close any
// open connections to the container before closing. Open connections
// can cause Kill and Remove to hang.
for _, c := range c.cleanups {
c()
}
c.cleanups = nil
// Kill the container.
if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
// Just log; can't do anything here.
c.logger.Logf("error killing container %q: %v", c.Name, err)
}
// Remove the image.
if err := c.Remove(ctx); err != nil {
c.logger.Logf("error removing container %q: %v", c.Name, err)
}
// Forget all mounts.
c.mounts = nil
}
| config | identifier_name |
container.go | // Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dockerutil
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
"github.com/moby/moby/client"
"github.com/moby/moby/pkg/stdcopy"
"gvisor.dev/gvisor/pkg/test/testutil"
)
// Container represents a Docker Container allowing
// user to configure and control as one would with the 'docker'
// client. Container is backed by the official golang docker API.
// See: https://pkg.go.dev/github.com/docker/docker.
type Container struct {
Name string
runtime string
logger testutil.Logger
client *client.Client
id string
mounts []mount.Mount
links []string
copyErr error
cleanups []func()
// profile is the profiling hook associated with this container.
profile *profile
}
// RunOpts are options for running a container.
type RunOpts struct {
// Image is the image relative to images/. This will be mangled
// appropriately, to ensure that only first-party images are used.
Image string
// Memory is the memory limit in bytes.
Memory int
// Cpus in which to allow execution. ("0", "1", "0-2").
CpusetCpus string
// Ports are the ports to be allocated.
Ports []int
// WorkDir sets the working directory.
WorkDir string
// ReadOnly sets the read-only flag.
ReadOnly bool
// Env are additional environment variables.
Env []string
// User is the user to use.
User string
// Privileged enables privileged mode.
Privileged bool
// Sets network mode for the container. See container.NetworkMode for types. Several options will
// not work w/ gVisor. For example, you can't set the "sandbox" network option for gVisor using
// this handle.
NetworkMode string
// CapAdd are the extra set of capabilities to add.
CapAdd []string
// CapDrop are the extra set of capabilities to drop.
CapDrop []string
// Mounts is the list of directories/files to be mounted inside the container.
Mounts []mount.Mount
// Links is the list of containers to be connected to the container.
Links []string
}
func makeContainer(ctx context.Context, logger testutil.Logger, runtime string) *Container {
// Slashes are not allowed in container names.
name := testutil.RandomID(logger.Name())
name = strings.ReplaceAll(name, "/", "-")
client, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return nil
}
client.NegotiateAPIVersion(ctx)
return &Container{
logger: logger,
Name: name,
runtime: runtime,
client: client,
}
}
// MakeContainer constructs a suitable Container object.
//
// The runtime used is determined by the runtime flag.
//
// Containers will check flags for profiling requests.
func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
return makeContainer(ctx, logger, *runtime)
}
// MakeContainerWithRuntime is like MakeContainer, but allows for a runtime
// to be specified by suffix.
func MakeContainerWithRuntime(ctx context.Context, logger testutil.Logger, suffix string) *Container {
return makeContainer(ctx, logger, *runtime+suffix)
}
// MakeNativeContainer constructs a suitable Container object.
//
// The runtime used will be the system default.
//
// Native containers aren't profiled. | return makeContainer(ctx, logger, unsandboxedRuntime)
}
// Spawn is analogous to 'docker run -d'.
func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return err
}
return c.Start(ctx)
}
// SpawnProcess is analogous to 'docker run -it'. It returns a process
// which represents the root process.
func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) (Process, error) {
config, hostconf, netconf := c.ConfigsFrom(r, args...)
config.Tty = true
config.OpenStdin = true
if err := c.CreateFrom(ctx, r.Image, config, hostconf, netconf); err != nil {
return Process{}, err
}
// Open a connection to the container for parsing logs and for TTY.
stream, err := c.client.ContainerAttach(ctx, c.id,
types.ContainerAttachOptions{
Stream: true,
Stdin: true,
Stdout: true,
Stderr: true,
})
if err != nil {
return Process{}, fmt.Errorf("connect failed container id %s: %v", c.id, err)
}
c.cleanups = append(c.cleanups, func() { stream.Close() })
if err := c.Start(ctx); err != nil {
return Process{}, err
}
return Process{container: c, conn: stream}, nil
}
// Run is analogous to 'docker run'.
func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return "", err
}
if err := c.Start(ctx); err != nil {
return "", err
}
if err := c.Wait(ctx); err != nil {
return "", err
}
return c.Logs(ctx)
}
// ConfigsFrom returns container configs from RunOpts and args. The caller should call 'CreateFrom'
// and Start.
func (c *Container) ConfigsFrom(r RunOpts, args ...string) (*container.Config, *container.HostConfig, *network.NetworkingConfig) {
return c.config(r, args), c.hostConfig(r), &network.NetworkingConfig{}
}
// MakeLink formats a link to add to a RunOpts.
func (c *Container) MakeLink(target string) string {
return fmt.Sprintf("%s:%s", c.Name, target)
}
// CreateFrom creates a container from the given configs.
func (c *Container) CreateFrom(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
return c.create(ctx, profileImage, conf, hostconf, netconf)
}
// Create is analogous to 'docker create'.
func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error {
return c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil)
}
func (c *Container) create(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
if c.runtime != "" && c.runtime != "runc" {
// Use the image name as provided here; which normally represents the
// unmodified "basic/alpine" image name. This should be easy to grok.
c.profileInit(profileImage)
}
cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, nil, c.Name)
if err != nil {
return err
}
c.id = cont.ID
return nil
}
func (c *Container) config(r RunOpts, args []string) *container.Config {
ports := nat.PortSet{}
for _, p := range r.Ports {
port := nat.Port(fmt.Sprintf("%d", p))
ports[port] = struct{}{}
}
env := append(r.Env, fmt.Sprintf("RUNSC_TEST_NAME=%s", c.Name))
return &container.Config{
Image: testutil.ImageByName(r.Image),
Cmd: args,
ExposedPorts: ports,
Env: env,
WorkingDir: r.WorkDir,
User: r.User,
}
}
func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
c.mounts = append(c.mounts, r.Mounts...)
return &container.HostConfig{
Runtime: c.runtime,
Mounts: c.mounts,
PublishAllPorts: true,
Links: r.Links,
CapAdd: r.CapAdd,
CapDrop: r.CapDrop,
Privileged: r.Privileged,
ReadonlyRootfs: r.ReadOnly,
NetworkMode: container.NetworkMode(r.NetworkMode),
Resources: container.Resources{
Memory: int64(r.Memory), // In bytes.
CpusetCpus: r.CpusetCpus,
},
}
}
// Start is analogous to 'docker start'.
func (c *Container) Start(ctx context.Context) error {
if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("ContainerStart failed: %v", err)
}
if c.profile != nil {
if err := c.profile.Start(c); err != nil {
c.logger.Logf("profile.Start failed: %v", err)
}
}
return nil
}
// Stop is analogous to 'docker stop'.
func (c *Container) Stop(ctx context.Context) error {
return c.client.ContainerStop(ctx, c.id, container.StopOptions{})
}
// Pause is analogous to 'docker pause'.
func (c *Container) Pause(ctx context.Context) error {
return c.client.ContainerPause(ctx, c.id)
}
// Unpause is analogous to 'docker unpause'.
func (c *Container) Unpause(ctx context.Context) error {
return c.client.ContainerUnpause(ctx, c.id)
}
// Checkpoint is analogous to 'docker checkpoint'.
func (c *Container) Checkpoint(ctx context.Context, name string) error {
return c.client.CheckpointCreate(ctx, c.Name, types.CheckpointCreateOptions{CheckpointID: name, Exit: true})
}
// Restore is analogous to 'docker start --checkpoint [name]'.
func (c *Container) Restore(ctx context.Context, name string) error {
return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{CheckpointID: name})
}
// Logs is analogous to 'docker logs'.
func (c *Container) Logs(ctx context.Context) (string, error) {
var out bytes.Buffer
err := c.logs(ctx, &out, &out)
return out.String(), err
}
func (c *Container) logs(ctx context.Context, stdout, stderr *bytes.Buffer) error {
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}
writer, err := c.client.ContainerLogs(ctx, c.id, opts)
if err != nil {
return err
}
defer writer.Close()
_, err = stdcopy.StdCopy(stdout, stderr, writer)
return err
}
// ID returns the container id.
func (c *Container) ID() string {
return c.id
}
// RootDirectory returns an educated guess about the container's root directory.
func (c *Container) RootDirectory() (string, error) {
// The root directory of this container's runtime.
rootDir := fmt.Sprintf("/var/run/docker/runtime-%s/moby", c.runtime)
_, err := os.Stat(rootDir)
if err == nil {
return rootDir, nil
}
// In docker v20+, due to https://github.com/moby/moby/issues/42345 the
// rootDir seems to always be the following.
const defaultDir = "/var/run/docker/runtime-runc/moby"
_, derr := os.Stat(defaultDir)
if derr == nil {
return defaultDir, nil
}
return "", fmt.Errorf("cannot stat %q: %v or %q: %v", rootDir, err, defaultDir, derr)
}
// SandboxPid returns the container's pid.
func (c *Container) SandboxPid(ctx context.Context) (int, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return -1, err
}
return resp.ContainerJSONBase.State.Pid, nil
}
// ErrNoIP indicates that no IP address is available.
var ErrNoIP = errors.New("no IP available")
// FindIP returns the IP address of the container.
func (c *Container) FindIP(ctx context.Context, ipv6 bool) (net.IP, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return nil, err
}
var ip net.IP
if ipv6 {
ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.GlobalIPv6Address)
} else {
ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.IPAddress)
}
if ip == nil {
return net.IP{}, ErrNoIP
}
return ip, nil
}
// FindPort returns the host port that is mapped to 'sandboxPort'.
func (c *Container) FindPort(ctx context.Context, sandboxPort int) (int, error) {
desc, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return -1, fmt.Errorf("error retrieving port: %v", err)
}
format := fmt.Sprintf("%d/tcp", sandboxPort)
ports, ok := desc.NetworkSettings.Ports[nat.Port(format)]
if !ok {
return -1, fmt.Errorf("error retrieving port: %v", err)
}
port, err := strconv.Atoi(ports[0].HostPort)
if err != nil {
return -1, fmt.Errorf("error parsing port %q: %v", port, err)
}
return port, nil
}
// CopyFiles copies in and mounts the given files. They are always ReadOnly.
func (c *Container) CopyFiles(opts *RunOpts, target string, sources ...string) {
dir, err := ioutil.TempDir("", c.Name)
if err != nil {
c.copyErr = fmt.Errorf("ioutil.TempDir failed: %v", err)
return
}
c.cleanups = append(c.cleanups, func() { os.RemoveAll(dir) })
if err := os.Chmod(dir, 0755); err != nil {
c.copyErr = fmt.Errorf("os.Chmod(%q, 0755) failed: %v", dir, err)
return
}
for _, name := range sources {
src := name
if !filepath.IsAbs(src) {
src, err = testutil.FindFile(name)
if err != nil {
c.copyErr = fmt.Errorf("testutil.FindFile(%q) failed: %w", name, err)
return
}
}
dst := path.Join(dir, path.Base(name))
if err := testutil.Copy(src, dst); err != nil {
c.copyErr = fmt.Errorf("testutil.Copy(%q, %q) failed: %v", src, dst, err)
return
}
c.logger.Logf("copy: %s -> %s", src, dst)
}
opts.Mounts = append(opts.Mounts, mount.Mount{
Type: mount.TypeBind,
Source: dir,
Target: target,
ReadOnly: false,
})
}
// Stats returns a snapshot of container stats similar to `docker stats`.
func (c *Container) Stats(ctx context.Context) (*types.StatsJSON, error) {
responseBody, err := c.client.ContainerStats(ctx, c.id, false /*stream*/)
if err != nil {
return nil, fmt.Errorf("ContainerStats failed: %v", err)
}
defer responseBody.Body.Close()
var v types.StatsJSON
if err := json.NewDecoder(responseBody.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("failed to decode container stats: %v", err)
}
return &v, nil
}
// Status inspects the container returns its status.
func (c *Container) Status(ctx context.Context) (types.ContainerState, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return types.ContainerState{}, err
}
return *resp.State, err
}
// Wait waits for the container to exit.
func (c *Container) Wait(ctx context.Context) error {
defer c.stopProfiling()
statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
select {
case err := <-errChan:
return err
case res := <-statusChan:
if res.StatusCode != 0 {
var msg string
if res.Error != nil {
msg = res.Error.Message
}
return fmt.Errorf("container returned non-zero status: %d, msg: %q", res.StatusCode, msg)
}
return nil
}
}
// WaitTimeout waits for the container to exit with a timeout.
func (c *Container) WaitTimeout(ctx context.Context, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
return fmt.Errorf("container %s timed out after %v seconds", c.Name, timeout.Seconds())
}
return nil
case err := <-errChan:
return err
case <-statusChan:
return nil
}
}
// WaitForOutput searches container logs for pattern and returns or timesout.
func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout time.Duration) (string, error) {
matches, err := c.WaitForOutputSubmatch(ctx, pattern, timeout)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("didn't find pattern %s logs", pattern)
}
return matches[0], nil
}
// WaitForOutputSubmatch searches container logs for the given
// pattern or times out. It returns any regexp submatches as well.
func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
re := regexp.MustCompile(pattern)
for {
logs, err := c.Logs(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
}
if matches := re.FindStringSubmatch(logs); matches != nil {
return matches, nil
}
time.Sleep(50 * time.Millisecond)
}
}
// stopProfiling stops profiling.
func (c *Container) stopProfiling() {
if c.profile != nil {
if err := c.profile.Stop(c); err != nil {
// This most likely means that the runtime for the container
// was too short to connect and actually get a profile.
c.logger.Logf("warning: profile.Stop failed: %v", err)
}
}
}
// Kill kills the container.
func (c *Container) Kill(ctx context.Context) error {
defer c.stopProfiling()
return c.client.ContainerKill(ctx, c.id, "")
}
// Remove is analogous to 'docker rm'.
func (c *Container) Remove(ctx context.Context) error {
// Remove the image.
remove := types.ContainerRemoveOptions{
RemoveVolumes: c.mounts != nil,
RemoveLinks: c.links != nil,
Force: true,
}
return c.client.ContainerRemove(ctx, c.Name, remove)
}
// CleanUp kills and deletes the container (best effort).
func (c *Container) CleanUp(ctx context.Context) {
// Execute all cleanups. We execute cleanups here to close any
// open connections to the container before closing. Open connections
// can cause Kill and Remove to hang.
for _, c := range c.cleanups {
c()
}
c.cleanups = nil
// Kill the container.
if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
// Just log; can't do anything here.
c.logger.Logf("error killing container %q: %v", c.Name, err)
}
// Remove the image.
if err := c.Remove(ctx); err != nil {
c.logger.Logf("error removing container %q: %v", c.Name, err)
}
// Forget all mounts.
c.mounts = nil
} | func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
unsandboxedRuntime := "runc"
if override, found := os.LookupEnv("UNSANDBOXED_RUNTIME"); found {
unsandboxedRuntime = override
} | random_line_split |
container.go | // Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dockerutil
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
"github.com/moby/moby/client"
"github.com/moby/moby/pkg/stdcopy"
"gvisor.dev/gvisor/pkg/test/testutil"
)
// Container represents a Docker Container allowing
// user to configure and control as one would with the 'docker'
// client. Container is backed by the offical golang docker API.
// See: https://pkg.go.dev/github.com/docker/docker.
type Container struct {
Name string
runtime string
logger testutil.Logger
client *client.Client
id string
mounts []mount.Mount
links []string
copyErr error
cleanups []func()
// profile is the profiling hook associated with this container.
profile *profile
}
// RunOpts are options for running a container.
type RunOpts struct {
// Image is the image relative to images/. This will be mangled
// appropriately, to ensure that only first-party images are used.
Image string
// Memory is the memory limit in bytes.
Memory int
// Cpus in which to allow execution. ("0", "1", "0-2").
CpusetCpus string
// Ports are the ports to be allocated.
Ports []int
// WorkDir sets the working directory.
WorkDir string
// ReadOnly sets the read-only flag.
ReadOnly bool
// Env are additional environment variables.
Env []string
// User is the user to use.
User string
// Privileged enables privileged mode.
Privileged bool
// Sets network mode for the container. See container.NetworkMode for types. Several options will
// not work w/ gVisor. For example, you can't set the "sandbox" network option for gVisor using
// this handle.
NetworkMode string
// CapAdd are the extra set of capabilities to add.
CapAdd []string
// CapDrop are the extra set of capabilities to drop.
CapDrop []string
// Mounts is the list of directories/files to be mounted inside the container.
Mounts []mount.Mount
// Links is the list of containers to be connected to the container.
Links []string
}
func makeContainer(ctx context.Context, logger testutil.Logger, runtime string) *Container {
// Slashes are not allowed in container names.
name := testutil.RandomID(logger.Name())
name = strings.ReplaceAll(name, "/", "-")
client, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return nil
}
client.NegotiateAPIVersion(ctx)
return &Container{
logger: logger,
Name: name,
runtime: runtime,
client: client,
}
}
// MakeContainer constructs a suitable Container object.
//
// The runtime used is determined by the runtime flag.
//
// Containers will check flags for profiling requests.
func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
return makeContainer(ctx, logger, *runtime)
}
// MakeContainerWithRuntime is like MakeContainer, but allows for a runtime
// to be specified by suffix.
func MakeContainerWithRuntime(ctx context.Context, logger testutil.Logger, suffix string) *Container {
return makeContainer(ctx, logger, *runtime+suffix)
}
// MakeNativeContainer constructs a suitable Container object.
//
// The runtime used will be the system default.
//
// Native containers aren't profiled.
func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
unsandboxedRuntime := "runc"
if override, found := os.LookupEnv("UNSANDBOXED_RUNTIME"); found {
unsandboxedRuntime = override
}
return makeContainer(ctx, logger, unsandboxedRuntime)
}
// Spawn is analogous to 'docker run -d'.
func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return err
}
return c.Start(ctx)
}
// SpawnProcess is analogous to 'docker run -it'. It returns a process
// which represents the root process.
func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) (Process, error) {
config, hostconf, netconf := c.ConfigsFrom(r, args...)
config.Tty = true
config.OpenStdin = true
if err := c.CreateFrom(ctx, r.Image, config, hostconf, netconf); err != nil {
return Process{}, err
}
// Open a connection to the container for parsing logs and for TTY.
stream, err := c.client.ContainerAttach(ctx, c.id,
types.ContainerAttachOptions{
Stream: true,
Stdin: true,
Stdout: true,
Stderr: true,
})
if err != nil {
return Process{}, fmt.Errorf("connect failed container id %s: %v", c.id, err)
}
c.cleanups = append(c.cleanups, func() { stream.Close() })
if err := c.Start(ctx); err != nil {
return Process{}, err
}
return Process{container: c, conn: stream}, nil
}
// Run is analogous to 'docker run'.
func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return "", err
}
if err := c.Start(ctx); err != nil {
return "", err
}
if err := c.Wait(ctx); err != nil {
return "", err
}
return c.Logs(ctx)
}
// ConfigsFrom returns container configs from RunOpts and args. The caller should call 'CreateFrom'
// and Start.
func (c *Container) ConfigsFrom(r RunOpts, args ...string) (*container.Config, *container.HostConfig, *network.NetworkingConfig) {
return c.config(r, args), c.hostConfig(r), &network.NetworkingConfig{}
}
// MakeLink formats a link to add to a RunOpts.
func (c *Container) MakeLink(target string) string {
return fmt.Sprintf("%s:%s", c.Name, target)
}
// CreateFrom creates a container from the given configs.
func (c *Container) CreateFrom(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
return c.create(ctx, profileImage, conf, hostconf, netconf)
}
// Create is analogous to 'docker create'.
func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error {
return c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil)
}
func (c *Container) create(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
if c.runtime != "" && c.runtime != "runc" {
// Use the image name as provided here; which normally represents the
// unmodified "basic/alpine" image name. This should be easy to grok.
c.profileInit(profileImage)
}
cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, nil, c.Name)
if err != nil {
return err
}
c.id = cont.ID
return nil
}
func (c *Container) config(r RunOpts, args []string) *container.Config {
ports := nat.PortSet{}
for _, p := range r.Ports {
port := nat.Port(fmt.Sprintf("%d", p))
ports[port] = struct{}{}
}
env := append(r.Env, fmt.Sprintf("RUNSC_TEST_NAME=%s", c.Name))
return &container.Config{
Image: testutil.ImageByName(r.Image),
Cmd: args,
ExposedPorts: ports,
Env: env,
WorkingDir: r.WorkDir,
User: r.User,
}
}
func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
c.mounts = append(c.mounts, r.Mounts...)
return &container.HostConfig{
Runtime: c.runtime,
Mounts: c.mounts,
PublishAllPorts: true,
Links: r.Links,
CapAdd: r.CapAdd,
CapDrop: r.CapDrop,
Privileged: r.Privileged,
ReadonlyRootfs: r.ReadOnly,
NetworkMode: container.NetworkMode(r.NetworkMode),
Resources: container.Resources{
Memory: int64(r.Memory), // In bytes.
CpusetCpus: r.CpusetCpus,
},
}
}
// Start is analogous to 'docker start'.
func (c *Container) Start(ctx context.Context) error {
if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("ContainerStart failed: %v", err)
}
if c.profile != nil |
return nil
}
// Stop is analogous to 'docker stop'.
func (c *Container) Stop(ctx context.Context) error {
return c.client.ContainerStop(ctx, c.id, container.StopOptions{})
}
// Pause is analogous to'docker pause'.
func (c *Container) Pause(ctx context.Context) error {
return c.client.ContainerPause(ctx, c.id)
}
// Unpause is analogous to 'docker unpause'.
func (c *Container) Unpause(ctx context.Context) error {
return c.client.ContainerUnpause(ctx, c.id)
}
// Checkpoint is analogous to 'docker checkpoint'.
func (c *Container) Checkpoint(ctx context.Context, name string) error {
return c.client.CheckpointCreate(ctx, c.Name, types.CheckpointCreateOptions{CheckpointID: name, Exit: true})
}
// Restore is analogous to 'docker start --checkname [name]'.
func (c *Container) Restore(ctx context.Context, name string) error {
return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{CheckpointID: name})
}
// Logs is analogous 'docker logs'.
func (c *Container) Logs(ctx context.Context) (string, error) {
var out bytes.Buffer
err := c.logs(ctx, &out, &out)
return out.String(), err
}
func (c *Container) logs(ctx context.Context, stdout, stderr *bytes.Buffer) error {
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}
writer, err := c.client.ContainerLogs(ctx, c.id, opts)
if err != nil {
return err
}
defer writer.Close()
_, err = stdcopy.StdCopy(stdout, stderr, writer)
return err
}
// ID returns the container id.
func (c *Container) ID() string {
return c.id
}
// RootDirectory returns an educated guess about the container's root directory.
func (c *Container) RootDirectory() (string, error) {
// The root directory of this container's runtime.
rootDir := fmt.Sprintf("/var/run/docker/runtime-%s/moby", c.runtime)
_, err := os.Stat(rootDir)
if err == nil {
return rootDir, nil
}
// In docker v20+, due to https://github.com/moby/moby/issues/42345 the
// rootDir seems to always be the following.
const defaultDir = "/var/run/docker/runtime-runc/moby"
_, derr := os.Stat(defaultDir)
if derr == nil {
return defaultDir, nil
}
return "", fmt.Errorf("cannot stat %q: %v or %q: %v", rootDir, err, defaultDir, derr)
}
// SandboxPid returns the container's pid.
func (c *Container) SandboxPid(ctx context.Context) (int, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return -1, err
}
return resp.ContainerJSONBase.State.Pid, nil
}
// ErrNoIP indicates that no IP address is available.
var ErrNoIP = errors.New("no IP available")
// FindIP returns the IP address of the container.
func (c *Container) FindIP(ctx context.Context, ipv6 bool) (net.IP, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return nil, err
}
var ip net.IP
if ipv6 {
ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.GlobalIPv6Address)
} else {
ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.IPAddress)
}
if ip == nil {
return net.IP{}, ErrNoIP
}
return ip, nil
}
// FindPort returns the host port that is mapped to 'sandboxPort'.
func (c *Container) FindPort(ctx context.Context, sandboxPort int) (int, error) {
desc, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return -1, fmt.Errorf("error retrieving port: %v", err)
}
format := fmt.Sprintf("%d/tcp", sandboxPort)
ports, ok := desc.NetworkSettings.Ports[nat.Port(format)]
if !ok {
return -1, fmt.Errorf("error retrieving port: %v", err)
}
port, err := strconv.Atoi(ports[0].HostPort)
if err != nil {
return -1, fmt.Errorf("error parsing port %q: %v", port, err)
}
return port, nil
}
// CopyFiles copies in and mounts the given files. They are always ReadOnly.
func (c *Container) CopyFiles(opts *RunOpts, target string, sources ...string) {
dir, err := ioutil.TempDir("", c.Name)
if err != nil {
c.copyErr = fmt.Errorf("ioutil.TempDir failed: %v", err)
return
}
c.cleanups = append(c.cleanups, func() { os.RemoveAll(dir) })
if err := os.Chmod(dir, 0755); err != nil {
c.copyErr = fmt.Errorf("os.Chmod(%q, 0755) failed: %v", dir, err)
return
}
for _, name := range sources {
src := name
if !filepath.IsAbs(src) {
src, err = testutil.FindFile(name)
if err != nil {
c.copyErr = fmt.Errorf("testutil.FindFile(%q) failed: %w", name, err)
return
}
}
dst := path.Join(dir, path.Base(name))
if err := testutil.Copy(src, dst); err != nil {
c.copyErr = fmt.Errorf("testutil.Copy(%q, %q) failed: %v", src, dst, err)
return
}
c.logger.Logf("copy: %s -> %s", src, dst)
}
opts.Mounts = append(opts.Mounts, mount.Mount{
Type: mount.TypeBind,
Source: dir,
Target: target,
ReadOnly: false,
})
}
// Stats returns a snapshot of container stats similar to `docker stats`.
func (c *Container) Stats(ctx context.Context) (*types.StatsJSON, error) {
responseBody, err := c.client.ContainerStats(ctx, c.id, false /*stream*/)
if err != nil {
return nil, fmt.Errorf("ContainerStats failed: %v", err)
}
defer responseBody.Body.Close()
var v types.StatsJSON
if err := json.NewDecoder(responseBody.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("failed to decode container stats: %v", err)
}
return &v, nil
}
// Status inspects the container returns its status.
func (c *Container) Status(ctx context.Context) (types.ContainerState, error) {
resp, err := c.client.ContainerInspect(ctx, c.id)
if err != nil {
return types.ContainerState{}, err
}
return *resp.State, err
}
// Wait waits for the container to exit.
func (c *Container) Wait(ctx context.Context) error {
defer c.stopProfiling()
statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
select {
case err := <-errChan:
return err
case res := <-statusChan:
if res.StatusCode != 0 {
var msg string
if res.Error != nil {
msg = res.Error.Message
}
return fmt.Errorf("container returned non-zero status: %d, msg: %q", res.StatusCode, msg)
}
return nil
}
}
// WaitTimeout waits for the container to exit with a timeout.
func (c *Container) WaitTimeout(ctx context.Context, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
return fmt.Errorf("container %s timed out after %v seconds", c.Name, timeout.Seconds())
}
return nil
case err := <-errChan:
return err
case <-statusChan:
return nil
}
}
// WaitForOutput searches container logs for pattern and returns or timesout.
func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout time.Duration) (string, error) {
matches, err := c.WaitForOutputSubmatch(ctx, pattern, timeout)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("didn't find pattern %s logs", pattern)
}
return matches[0], nil
}
// WaitForOutputSubmatch searches container logs for the given
// pattern or times out. It returns any regexp submatches as well.
func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
re := regexp.MustCompile(pattern)
for {
logs, err := c.Logs(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
}
if matches := re.FindStringSubmatch(logs); matches != nil {
return matches, nil
}
time.Sleep(50 * time.Millisecond)
}
}
// stopProfiling stops profiling.
func (c *Container) stopProfiling() {
if c.profile != nil {
if err := c.profile.Stop(c); err != nil {
// This most likely means that the runtime for the container
// was too short to connect and actually get a profile.
c.logger.Logf("warning: profile.Stop failed: %v", err)
}
}
}
// Kill kills the container.
func (c *Container) Kill(ctx context.Context) error {
defer c.stopProfiling()
return c.client.ContainerKill(ctx, c.id, "")
}
// Remove is analogous to 'docker rm'.
func (c *Container) Remove(ctx context.Context) error {
// Remove the image.
remove := types.ContainerRemoveOptions{
RemoveVolumes: c.mounts != nil,
RemoveLinks: c.links != nil,
Force: true,
}
return c.client.ContainerRemove(ctx, c.Name, remove)
}
// CleanUp kills and deletes the container (best effort).
func (c *Container) CleanUp(ctx context.Context) {
// Execute all cleanups. We execute cleanups here to close any
// open connections to the container before closing. Open connections
// can cause Kill and Remove to hang.
for _, c := range c.cleanups {
c()
}
c.cleanups = nil
// Kill the container.
if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
// Just log; can't do anything here.
c.logger.Logf("error killing container %q: %v", c.Name, err)
}
// Remove the image.
if err := c.Remove(ctx); err != nil {
c.logger.Logf("error removing container %q: %v", c.Name, err)
}
// Forget all mounts.
c.mounts = nil
}
| {
if err := c.profile.Start(c); err != nil {
c.logger.Logf("profile.Start failed: %v", err)
}
} | conditional_block |
container.go | // Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dockerutil
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
"github.com/moby/moby/client"
"github.com/moby/moby/pkg/stdcopy"
"gvisor.dev/gvisor/pkg/test/testutil"
)
// Container represents a Docker Container allowing
// user to configure and control as one would with the 'docker'
// client. Container is backed by the offical golang docker API.
// See: https://pkg.go.dev/github.com/docker/docker.
type Container struct {
Name string
runtime string
logger testutil.Logger
client *client.Client
id string
mounts []mount.Mount
links []string
copyErr error
cleanups []func()
// profile is the profiling hook associated with this container.
profile *profile
}
// RunOpts are options for running a container.
type RunOpts struct {
// Image is the image relative to images/. This will be mangled
// appropriately, to ensure that only first-party images are used.
Image string
// Memory is the memory limit in bytes.
Memory int
// Cpus in which to allow execution. ("0", "1", "0-2").
CpusetCpus string
// Ports are the ports to be allocated.
Ports []int
// WorkDir sets the working directory.
WorkDir string
// ReadOnly sets the read-only flag.
ReadOnly bool
// Env are additional environment variables.
Env []string
// User is the user to use.
User string
// Privileged enables privileged mode.
Privileged bool
// Sets network mode for the container. See container.NetworkMode for types. Several options will
// not work w/ gVisor. For example, you can't set the "sandbox" network option for gVisor using
// this handle.
NetworkMode string
// CapAdd are the extra set of capabilities to add.
CapAdd []string
// CapDrop are the extra set of capabilities to drop.
CapDrop []string
// Mounts is the list of directories/files to be mounted inside the container.
Mounts []mount.Mount
// Links is the list of containers to be connected to the container.
Links []string
}
func makeContainer(ctx context.Context, logger testutil.Logger, runtime string) *Container {
// Slashes are not allowed in container names.
name := testutil.RandomID(logger.Name())
name = strings.ReplaceAll(name, "/", "-")
client, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return nil
}
client.NegotiateAPIVersion(ctx)
return &Container{
logger: logger,
Name: name,
runtime: runtime,
client: client,
}
}
// MakeContainer constructs a suitable Container object.
//
// The runtime used is determined by the runtime flag.
//
// Containers will check flags for profiling requests.
func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
return makeContainer(ctx, logger, *runtime)
}
// MakeContainerWithRuntime is like MakeContainer, but allows for a runtime
// to be specified by suffix.
func MakeContainerWithRuntime(ctx context.Context, logger testutil.Logger, suffix string) *Container {
return makeContainer(ctx, logger, *runtime+suffix)
}
// MakeNativeContainer constructs a suitable Container object.
//
// The runtime used will be the system default.
//
// Native containers aren't profiled.
func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
unsandboxedRuntime := "runc"
if override, found := os.LookupEnv("UNSANDBOXED_RUNTIME"); found {
unsandboxedRuntime = override
}
return makeContainer(ctx, logger, unsandboxedRuntime)
}
// Spawn is analogous to 'docker run -d'.
func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return err
}
return c.Start(ctx)
}
// SpawnProcess is analogous to 'docker run -it'. It returns a process
// which represents the root process.
func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) (Process, error) {
config, hostconf, netconf := c.ConfigsFrom(r, args...)
config.Tty = true
config.OpenStdin = true
if err := c.CreateFrom(ctx, r.Image, config, hostconf, netconf); err != nil {
return Process{}, err
}
// Open a connection to the container for parsing logs and for TTY.
stream, err := c.client.ContainerAttach(ctx, c.id,
types.ContainerAttachOptions{
Stream: true,
Stdin: true,
Stdout: true,
Stderr: true,
})
if err != nil {
return Process{}, fmt.Errorf("connect failed container id %s: %v", c.id, err)
}
c.cleanups = append(c.cleanups, func() { stream.Close() })
if err := c.Start(ctx); err != nil {
return Process{}, err
}
return Process{container: c, conn: stream}, nil
}
// Run is analogous to 'docker run'.
func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) {
if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
return "", err
}
if err := c.Start(ctx); err != nil {
return "", err
}
if err := c.Wait(ctx); err != nil {
return "", err
}
return c.Logs(ctx)
}
// ConfigsFrom returns container configs from RunOpts and args. The caller should call 'CreateFrom'
// and Start.
func (c *Container) ConfigsFrom(r RunOpts, args ...string) (*container.Config, *container.HostConfig, *network.NetworkingConfig) {
return c.config(r, args), c.hostConfig(r), &network.NetworkingConfig{}
}
// MakeLink formats a link to add to a RunOpts.
func (c *Container) MakeLink(target string) string {
return fmt.Sprintf("%s:%s", c.Name, target)
}
// CreateFrom creates a container from the given configs.
func (c *Container) CreateFrom(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
return c.create(ctx, profileImage, conf, hostconf, netconf)
}
// Create is analogous to 'docker create'.
func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error {
return c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil)
}
func (c *Container) create(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
if c.runtime != "" && c.runtime != "runc" {
// Use the image name as provided here; which normally represents the
// unmodified "basic/alpine" image name. This should be easy to grok.
c.profileInit(profileImage)
}
cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, nil, c.Name)
if err != nil {
return err
}
c.id = cont.ID
return nil
}
func (c *Container) config(r RunOpts, args []string) *container.Config {
ports := nat.PortSet{}
for _, p := range r.Ports {
port := nat.Port(fmt.Sprintf("%d", p))
ports[port] = struct{}{}
}
env := append(r.Env, fmt.Sprintf("RUNSC_TEST_NAME=%s", c.Name))
return &container.Config{
Image: testutil.ImageByName(r.Image),
Cmd: args,
ExposedPorts: ports,
Env: env,
WorkingDir: r.WorkDir,
User: r.User,
}
}
func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
c.mounts = append(c.mounts, r.Mounts...)
return &container.HostConfig{
Runtime: c.runtime,
Mounts: c.mounts,
PublishAllPorts: true,
Links: r.Links,
CapAdd: r.CapAdd,
CapDrop: r.CapDrop,
Privileged: r.Privileged,
ReadonlyRootfs: r.ReadOnly,
NetworkMode: container.NetworkMode(r.NetworkMode),
Resources: container.Resources{
Memory: int64(r.Memory), // In bytes.
CpusetCpus: r.CpusetCpus,
},
}
}
// Start is analogous to 'docker start'.
func (c *Container) Start(ctx context.Context) error {
if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("ContainerStart failed: %v", err)
}
if c.profile != nil {
if err := c.profile.Start(c); err != nil {
c.logger.Logf("profile.Start failed: %v", err)
}
}
return nil
}
// Stop is analogous to 'docker stop'.
func (c *Container) Stop(ctx context.Context) error {
return c.client.ContainerStop(ctx, c.id, container.StopOptions{})
}
// Pause is analogous to'docker pause'.
func (c *Container) Pause(ctx context.Context) error {
return c.client.ContainerPause(ctx, c.id)
}
// Unpause is analogous to 'docker unpause'.
func (c *Container) Unpause(ctx context.Context) error {
return c.client.ContainerUnpause(ctx, c.id)
}
// Checkpoint is analogous to 'docker checkpoint'.
func (c *Container) Checkpoint(ctx context.Context, name string) error {
return c.client.CheckpointCreate(ctx, c.Name, types.CheckpointCreateOptions{CheckpointID: name, Exit: true})
}
// Restore is analogous to 'docker start --checkname [name]'.
func (c *Container) Restore(ctx context.Context, name string) error {
return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{CheckpointID: name})
}
// Logs is analogous 'docker logs'.
func (c *Container) Logs(ctx context.Context) (string, error) {
var out bytes.Buffer
err := c.logs(ctx, &out, &out)
return out.String(), err
}
func (c *Container) logs(ctx context.Context, stdout, stderr *bytes.Buffer) error {
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}
writer, err := c.client.ContainerLogs(ctx, c.id, opts)
if err != nil {
return err
}
defer writer.Close()
_, err = stdcopy.StdCopy(stdout, stderr, writer)
return err
}
// ID returns the container id.
func (c *Container) ID() string |
// RootDirectory returns an educated guess about the container's root directory.
func (c *Container) RootDirectory() (string, error) {
	// Prefer the runtime-specific location.
	runtimeDir := fmt.Sprintf("/var/run/docker/runtime-%s/moby", c.runtime)
	_, rerr := os.Stat(runtimeDir)
	if rerr == nil {
		return runtimeDir, nil
	}
	// In docker v20+, due to https://github.com/moby/moby/issues/42345 the
	// rootDir seems to always be the following.
	const fallbackDir = "/var/run/docker/runtime-runc/moby"
	_, ferr := os.Stat(fallbackDir)
	if ferr != nil {
		return "", fmt.Errorf("cannot stat %q: %v or %q: %v", runtimeDir, rerr, fallbackDir, ferr)
	}
	return fallbackDir, nil
}
// SandboxPid returns the container's pid.
// It inspects the container and returns the PID of its init process as
// reported by the daemon; -1 is returned when inspection fails.
func (c *Container) SandboxPid(ctx context.Context) (int, error) {
	resp, err := c.client.ContainerInspect(ctx, c.id)
	if err != nil {
		return -1, err
	}
	return resp.ContainerJSONBase.State.Pid, nil
}
// ErrNoIP indicates that no IP address is available.
var ErrNoIP = errors.New("no IP available")

// FindIP returns the IP address of the container.
// When ipv6 is true, the global IPv6 address is used instead of the
// default (IPv4) address.
func (c *Container) FindIP(ctx context.Context, ipv6 bool) (net.IP, error) {
	resp, err := c.client.ContainerInspect(ctx, c.id)
	if err != nil {
		return nil, err
	}
	settings := resp.NetworkSettings.DefaultNetworkSettings
	addr := settings.IPAddress
	if ipv6 {
		addr = settings.GlobalIPv6Address
	}
	ip := net.ParseIP(addr)
	if ip == nil {
		return net.IP{}, ErrNoIP
	}
	return ip, nil
}
// FindPort returns the host port that is mapped to 'sandboxPort'.
// Only TCP mappings are considered.
func (c *Container) FindPort(ctx context.Context, sandboxPort int) (int, error) {
	desc, err := c.client.ContainerInspect(ctx, c.id)
	if err != nil {
		return -1, fmt.Errorf("error retrieving port: %v", err)
	}
	format := fmt.Sprintf("%d/tcp", sandboxPort)
	ports, ok := desc.NetworkSettings.Ports[nat.Port(format)]
	if !ok || len(ports) == 0 {
		// Bug fix: this path previously formatted a nil err; report the
		// missing mapping instead. The len check also avoids an index
		// panic on an empty binding list.
		return -1, fmt.Errorf("error retrieving port: no host mapping found for %q", format)
	}
	port, err := strconv.Atoi(ports[0].HostPort)
	if err != nil {
		// Bug fix: report the unparsable HostPort string, not the
		// zero-valued int 'port'.
		return -1, fmt.Errorf("error parsing port %q: %v", ports[0].HostPort, err)
	}
	return port, nil
}
// CopyFiles copies in and mounts the given files. They are always ReadOnly.
// NOTE(review): the mount below sets ReadOnly: false, contradicting the
// sentence above — confirm which is intended.
//
// Each source is resolved (via testutil.FindFile when relative), copied into
// a fresh temp directory, and that directory is bind-mounted at 'target'.
// Errors are stashed in c.copyErr rather than returned, so callers must
// check it (or rely on a later step doing so).
func (c *Container) CopyFiles(opts *RunOpts, target string, sources ...string) {
	dir, err := ioutil.TempDir("", c.Name)
	if err != nil {
		c.copyErr = fmt.Errorf("ioutil.TempDir failed: %v", err)
		return
	}
	// Schedule removal of the temp directory during container cleanup.
	c.cleanups = append(c.cleanups, func() { os.RemoveAll(dir) })
	if err := os.Chmod(dir, 0755); err != nil {
		c.copyErr = fmt.Errorf("os.Chmod(%q, 0755) failed: %v", dir, err)
		return
	}
	for _, name := range sources {
		src := name
		if !filepath.IsAbs(src) {
			// Relative names are resolved through the test helper.
			src, err = testutil.FindFile(name)
			if err != nil {
				c.copyErr = fmt.Errorf("testutil.FindFile(%q) failed: %w", name, err)
				return
			}
		}
		dst := path.Join(dir, path.Base(name))
		if err := testutil.Copy(src, dst); err != nil {
			c.copyErr = fmt.Errorf("testutil.Copy(%q, %q) failed: %v", src, dst, err)
			return
		}
		c.logger.Logf("copy: %s -> %s", src, dst)
	}
	opts.Mounts = append(opts.Mounts, mount.Mount{
		Type:     mount.TypeBind,
		Source:   dir,
		Target:   target,
		ReadOnly: false,
	})
}
// Stats returns a snapshot of container stats similar to `docker stats`.
func (c *Container) Stats(ctx context.Context) (*types.StatsJSON, error) {
	resp, err := c.client.ContainerStats(ctx, c.id, false /*stream*/)
	if err != nil {
		return nil, fmt.Errorf("ContainerStats failed: %v", err)
	}
	defer resp.Body.Close()
	stats := new(types.StatsJSON)
	if err := json.NewDecoder(resp.Body).Decode(stats); err != nil {
		return nil, fmt.Errorf("failed to decode container stats: %v", err)
	}
	return stats, nil
}
// Status inspects the container returns its status.
func (c *Container) Status(ctx context.Context) (types.ContainerState, error) {
	resp, err := c.client.ContainerInspect(ctx, c.id)
	if err != nil {
		return types.ContainerState{}, err
	}
	// Inspection succeeded, so err is necessarily nil here.
	return *resp.State, nil
}
// Wait waits for the container to exit.
// A non-zero exit status is turned into an error that includes the
// daemon's message, if any.
func (c *Container) Wait(ctx context.Context) error {
	defer c.stopProfiling()
	statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
	select {
	case res := <-statusChan:
		if res.StatusCode == 0 {
			return nil
		}
		msg := ""
		if res.Error != nil {
			msg = res.Error.Message
		}
		return fmt.Errorf("container returned non-zero status: %d, msg: %q", res.StatusCode, msg)
	case err := <-errChan:
		return err
	}
}
// WaitTimeout waits for the container to exit with a timeout.
// An error is returned when the deadline elapses before the container
// stops. Note that a plain cancellation of the parent context (not a
// deadline) also returns nil here.
func (c *Container) WaitTimeout(ctx context.Context, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
	select {
	case <-ctx.Done():
		if ctx.Err() == context.DeadlineExceeded {
			return fmt.Errorf("container %s timed out after %v seconds", c.Name, timeout.Seconds())
		}
		return nil
	case err := <-errChan:
		return err
	case <-statusChan:
		return nil
	}
}
// WaitForOutput searches container logs for pattern and returns or timesout.
// On success the full match (submatch index 0) is returned.
func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout time.Duration) (string, error) {
	matches, err := c.WaitForOutputSubmatch(ctx, pattern, timeout)
	if err != nil {
		return "", err
	}
	if len(matches) == 0 {
		// Bug fix: message previously read "didn't find pattern %s logs".
		return "", fmt.Errorf("didn't find pattern %s in logs", pattern)
	}
	return matches[0], nil
}
// WaitForOutputSubmatch searches container logs for the given
// pattern or times out. It returns any regexp submatches as well.
// The logs are polled every 50ms; once the context deadline passes the
// next Logs call fails and that error is propagated (there is no
// separate timeout error here).
func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	re := regexp.MustCompile(pattern)
	for {
		logs, err := c.Logs(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
		}
		if matches := re.FindStringSubmatch(logs); matches != nil {
			return matches, nil
		}
		time.Sleep(50 * time.Millisecond)
	}
}
// stopProfiling stops profiling.
func (c *Container) stopProfiling() {
	if c.profile == nil {
		return
	}
	if err := c.profile.Stop(c); err != nil {
		// This most likely means that the runtime for the container
		// was too short to connect and actually get a profile.
		c.logger.Logf("warning: profile.Stop failed: %v", err)
	}
}
// Kill kills the container.
// Profiling is stopped first (via defer). The empty signal string lets
// the daemon apply its default kill signal.
func (c *Container) Kill(ctx context.Context) error {
	defer c.stopProfiling()
	return c.client.ContainerKill(ctx, c.id, "")
}
// Remove is analogous to 'docker rm'.
func (c *Container) Remove(ctx context.Context) error {
	// Build the removal options; volumes/links are only removed when the
	// container was created with them. (The original comment said "Remove
	// the image", but this removes the container, not its image.)
	remove := types.ContainerRemoveOptions{
		RemoveVolumes: c.mounts != nil,
		RemoveLinks:   c.links != nil,
		Force:         true,
	}
	// NOTE(review): addresses the container by c.Name while most methods
	// use c.id — confirm this difference is intentional.
	return c.client.ContainerRemove(ctx, c.Name, remove)
}
// CleanUp kills and deletes the container (best effort).
func (c *Container) CleanUp(ctx context.Context) {
	// Run registered cleanups first to close any open connections to the
	// container before shutting it down. Open connections can cause Kill
	// and Remove to hang. (The loop variable is named 'cleanup' to avoid
	// shadowing the receiver.)
	for _, cleanup := range c.cleanups {
		cleanup()
	}
	c.cleanups = nil
	// Kill the container; "is not running" is expected and not logged.
	if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
		// Just log; can't do anything here.
		c.logger.Logf("error killing container %q: %v", c.Name, err)
	}
	// Remove the container.
	if err := c.Remove(ctx); err != nil {
		c.logger.Logf("error removing container %q: %v", c.Name, err)
	}
	// Forget all mounts.
	c.mounts = nil
}
| {
return c.id
} | identifier_body |
combine.py | #!/usr/bin/env python
# coding=utf-8
__version__ = "2.5.0"
# When modifying remember to issue a new tag command in git before committing, then push the new tag
# git tag -a v2.5.0 -m "v2.5.0"
# git push origin --tags
"""
Python script that generates the necessary mp4box -cat commands to concatinate multiple video files
together and generates a chapter file that marks the beginning of each concatenated file in the final result.
This script requires the GPAC package to be installed with the mp4box command line utility.
https://gpac.wp.mines-telecom.fr/downloads/
The script is written in Python 3.5
Details about Xbox video formats:
https://support.xbox.com/en-IE/xbox-360/console/audio-video-playback-faq
Make sure you install the requirements for this script:
pip install -r requirements.txt
See: https://github.com/sverrirs/mp4combine
Author: Sverrir Sigmundarson mp4combine@sverrirs.com https://www.sverrirs.com
"""
from colorama import init, deinit # For colorized output to console windows (platform and shell independent)
from constant import DISKSIZES, ABSSIZES, Colors # Constants for the script
import humanize # Display human readible values for sizes etc
import sys, os, time
from pathlib import Path # to check for file existence in the file system
import argparse # Command-line argument parser
import ntpath # Used to extract file name from path for all platforms http://stackoverflow.com/a/8384788
import glob # Used to do partial file path matching when listing file directories in search of files to concatinate http://stackoverflow.com/a/2225582/779521
import subprocess # To execute shell commands
import re # To perform substring matching on the output of mp4box and other subprocesses
from datetime import timedelta # To store the parsed duration of files and calculate the accumulated duration
from random import shuffle # To be able to shuffle the list of files if the user requests it
import csv # To use for the cutpoint files they are CSV files
#
# Provides natural string sorting (numbers inside strings are sorted in the correct order)
# http://stackoverflow.com/a/3033342/779521
def natural_key(string_):
    """Sort key providing natural ordering: digit runs compare numerically.

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    parts = re.split(r'(\d+)', string_)
    return [int(part) if part.isdigit() else part for part in parts]
#
# The main entry point for the script
def runMain():
try:
init() # Initialize the colorama library
# Compile the regular expressions
regex_mp4box_duration = re.compile(r"Computed Duration (?P<hrs>[0-9]{2}):(?P<min>[0-9]{2}):(?P<sec>[0-9]{2}).(?P<msec>[0-9]{3})", re.MULTILINE)
# Construct the argument parser for the commandline
args = parseArguments()
# The burnsubs and cuts cannot be used together, they will produce incorrect subtitles to be burned into the video
if( args.burnsubs == True and not args.cuts is None):
print(Colors.error("Options --burnsubs and --cuts cannot be used together as they would cause embedded subtitles to be incorrectly synced in the output video."))
sys.exit(100)
# Get the current working directory (place that the script is executing from)
working_dir = sys.path[0]
# Get the mp4box exec
mp4exec = findMp4Box(args.gpac, working_dir)
# Get ffmpeg exec
ffmpegexec = findffmpeg(args.ffmpeg, working_dir)
# Detect the maximum file size that should be generated in kilobytes, if <=0 then unlimited
max_out_size_kb = determineMaximumOutputfileSizeInKb(args.size, args.disk)
# Create the output file names both for the video file and the intermediate chapters file
path_out_file = Path(args.output)
path_chapters_file = path_out_file.with_suffix('.txt') # Just change the file-extension of the output file to TXT
# If the output files exist then either error or overwrite
if( path_out_file.exists() ):
if( args.overwrite ):
os.remove(str(path_out_file))
else:
print( "Output file '{0}' already exists. Use --overwrite switch to overwrite.".format(Colors.filename(path_out_file.name)))
sys.exit(0)
# Get all the input files
in_files = getFileNamesFromGrepMatch(args.match, path_out_file)
if( in_files is None ):
print( "No mp4 video files found matching '{0}'".format(args.match))
sys.exit(0)
file_infos = []
# Only process files that have file-ending .mp4 and not files that have the same name as the joined one
for in_file in in_files:
print("File: {0}".format(Colors.filename(in_file)))
m4b_fileinfo = parseMp4boxMediaInfo(in_file, mp4exec, regex_mp4box_duration)
if not m4b_fileinfo is None:
|
# If nothing was found then don't continue, this can happen if no mp4 files are found or if only the joined file is found
if( len(file_infos) <= 0 ):
print( "No mp4 video files found matching '{0}'".format(args.match))
sys.exit(0)
print("Found {0} files".format(len(file_infos)))
# If the user supplied a cut point information file then we parse it now
cuts = None
if( args.cuts ):
cuts = parseCutPointInformation(Path(args.cuts))
print(cuts)
if not cuts is None:
print("Read {0} cut point data from cut file".format(len(cuts)))
# If the user wants the list of files shuffled then do that now in place
if( args.shuffle ):
shuffle(file_infos)
print("File list shuffled")
# Now create the list of files to create
video_files = []
chapters = []
cumulative_dur = timedelta(seconds=0)
cumulative_size = 0
# Collect the file info data and chapter points for all files
for file_info in file_infos:
file_name = Path(file_info['file']).name
video_files.append(file_info['file'])
file_info_dur = file_info['dur']
# Do we have a proposed cut duration, if so then we must use this info
# to correct the chapter locations
if not cuts is None and file_name in cuts and 't' in cuts[file_name]:
file_info_dur = timedelta(seconds=cuts[file_name]['t'])
chapters.append({"name": Path(file_info['file']).stem, "timecode":formatTimedelta(cumulative_dur)})
cumulative_dur += file_info_dur # Count the cumulative duration
cumulative_size += file_info['size']
createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args.overwrite, cuts, args.videosize, args.burnsubs, max_out_size_kb, args.noaudio )
print(Colors.success("Script completed successfully, bye!"))
finally:
deinit() #Deinitialize the colorama library
# Reads and parses cut information for the input files
def parseCutPointInformation(path_to_cuts_file):
    """Read per-file cut point data from a CSV file.

    Each row is "filename,starttime[,endtime]". The result maps filename to
    a dict with 'ss' (start timecode string) and, when an end time is given,
    't' (duration in seconds from start to end). Returns None when the file
    yields no usable rows.
    """
    def _to_seconds(timecode):
        # Convert "hh:mm:ss" / "mm:ss" / "ss" to total seconds.
        total = 0
        for unit, part in zip([1, 60, 3600], reversed(timecode.split(":"))):
            total += unit * int(part)
        return total

    cuts = {}
    with open(str(path_to_cuts_file), encoding='utf-8') as csvfile:
        for row in csv.reader(csvfile):
            if len(row) < 2:
                continue  # Rows need at least a filename and a start time.
            name = row[0].strip()
            start = row[1].strip()
            entry = {'ss': start}
            if len(row) > 2:
                end = row[2].strip()
                entry['t'] = _to_seconds(end) - _to_seconds(start)
            cuts[name] = entry
    return cuts if cuts else None
#
# Creates a combined video file for a segment
def createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args_overwrite, cuts, args_videomaxsize, args_burnsubs, max_out_size_kb=0, args_noaudio=False ):
    """Create the combined, chaptered output video for one segment.

    Writes the chapter file, re-encodes/concatenates the inputs with ffmpeg,
    embeds the chapters with mp4box, then optionally splits the result when
    it exceeds max_out_size_kb (<=0 means unbounded).

    NOTE(review): the 'chapters' list is mutated (an "End" marker is
    appended); 'args_overwrite' is accepted but unused in this body.
    """
    print( "Output: {0}".format(Colors.fileout(str(path_out_file))))
    # Add the final chapter as the end for this segment
    chapters.append({"name": "End", "timecode":formatTimedelta(cumulative_dur)})
    # Chapters should be +1 more than files as we have an extra chapter ending at the very end of the file
    print("{0} chapters, {1} running time, {2} total size".format( len(chapters), formatTimedelta(cumulative_dur), humanize.naturalsize(cumulative_size, gnu=True)))
    # Write the chapters file to out
    saveChaptersFile(chapters, path_chapters_file)
    # Re-encode and combine the video files first
    print(Colors.toolpath("Combining and re-encoding video files (ffmpeg), this will take a while..."))
    reencodeAndCombineVideoFiles(ffmpegexec, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio)
    # Now create the combined file and include the chapter marks
    print(Colors.toolpath("Adding chapters to combined video file (mp4box)"))
    addChaptersToVideoFile(mp4exec, path_out_file, path_chapters_file)
    # Delete the chapters file
    os.remove(str(path_chapters_file))
    # Read the created file to learn its final filesize
    # NOTE(review): size is computed with /1024 but the split threshold is
    # compared against a metric-kB value (see splitVideoFile's *1000) —
    # confirm the intended unit convention.
    size_out_file_kb = os.path.getsize(str(path_out_file)) / 1024
    print( Colors.toolpath("Final size of video file is: {0}".format(humanize.naturalsize(size_out_file_kb * 1024))))
    # Now split the file if requested
    if max_out_size_kb > 0 and size_out_file_kb > max_out_size_kb :
        print( Colors.toolpath("Size limit exceeded, splitting video into files of max size: {0}".format(humanize.naturalsize(max_out_size_kb * 1000))))
        splitVideoFile(mp4exec, path_out_file, max_out_size_kb)
#
# Attempts to detect the requested size of the output file based on the input parameters
# the absolute_size is overridden by disk_capacity if both are specified
# Returns KB (kilobytes)
def determineMaximumOutputfileSizeInKb(absolute_size, disk_capacity):
    """Determine the requested maximum output file size in kilobytes.

    :param absolute_size: Size string such as "4.7GB", "700 MB" or "15.5"
                          (MB is assumed when no unit is given).
    :param disk_capacity: Named capacity key looked up in DISKSIZES;
                          overrides absolute_size when both are supplied.
    :return: Maximum size in metric kilobytes (1 kB = 1000 B), or -1 for
             unbounded (nothing specified or unparsable input).
    """
    if disk_capacity and disk_capacity in DISKSIZES:
        return DISKSIZES[disk_capacity]
    if absolute_size:
        # Normalize: strip spaces and thousands separators, uppercase the unit.
        abs_size = absolute_size.replace(' ', '').replace(',', '').upper()
        regex_size_parse = re.compile(r"^(?P<size>[0-9]*(?:\.[0-9]*)?)\s*(?P<unit>GB|MB|KB|B|TB)?$", re.MULTILINE)
        # Bug fix: match against the normalized string; the original matched
        # the raw input, making the normalization above dead code.
        match = regex_size_parse.search(abs_size)
        if match is None or not match.group("size"):
            # Unparsable size specification: treat as unbounded instead of
            # crashing (the original raised on non-matching input).
            return -1
        size = float(match.group("size"))
        unit = match.group("unit")
        if not unit or unit not in ABSSIZES:
            unit = "MB"  # Default is megabytes if nothing is specified
        total_size = size * ABSSIZES[unit]
        # Return kilobytes in the metric sense (not the 1024-byte sense).
        return total_size / 1000
    # If nothing is specified then the default is unbounded.
    return -1
#
# Executes the mp4box app with the -info switch and
# extracts the track length and file size from the output
def parseMp4boxMediaInfo(file_name, mp4box_path, regex_mp4box_duration):
    """Run `mp4box -info` on a file and extract its duration and size.

    :param file_name: Path to the media file.
    :param mp4box_path: Full path to the mp4box executable.
    :param regex_mp4box_duration: Compiled regex with hrs/min/sec/msec groups.
    :return: dict with 'file', 'size' (bytes) and 'dur' (timedelta), or
             None when the file should be skipped.
    """
    # Size in bytes of a plain file.
    file_size = os.stat(file_name).st_size
    # Run mp4box and collect stdout+stderr together.
    proc_cmd = [mp4box_path, "-info", "-std", file_name]
    ret = subprocess.run(proc_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    # Skip (rather than abort) files mp4box cannot read.
    if ret.returncode != 0:
        print("Command {0} returned non-zero exit status {1}.".format(proc_cmd, ret.returncode))
        print("File {0} will be skipped".format(file_name))
        return None
    # Example line: "Computed Duration 00:23:06.040 - Indicated Duration 00:23:06.040"
    match = regex_mp4box_duration.search(ret.stdout)
    if match is None:
        # Robustness fix: the original crashed with AttributeError when no
        # duration line was present in the output.
        print("Could not find a duration in mp4box output for {0}; file will be skipped".format(file_name))
        return None
    duration = timedelta(
        days=0,
        hours=int(match.group("hrs")),
        minutes=int(match.group("min")),
        seconds=int(match.group("sec")),
        milliseconds=int(match.group("msec")))
    return {'file': file_name, 'size': file_size, 'dur': duration}
#
# Locates the mp4box executable and returns a full path to it
def findMp4Box(path_to_gpac_install=None, working_dir=None):
    """Locate the mp4box executable and return a full path to it.

    :param path_to_gpac_install: Optional explicit GPAC install directory.
    :param working_dir: Script directory used to probe the bundled bin folder.
    :raises ValueError: When mp4box cannot be found anywhere.
    """
    if path_to_gpac_install is not None and os.path.isfile(os.path.join(path_to_gpac_install, "mp4box.exe")):
        return os.path.join(path_to_gpac_install, "mp4box.exe")
    # Attempts to search for it under the bin folder
    bin_dist = os.path.join(working_dir, "..\\bin\\GPAC\\mp4box.exe")
    if os.path.isfile(bin_dist):
        return str(Path(bin_dist).resolve())
    # Default 64-bit install location.
    if os.path.isfile("C:\\Program Files\\GPAC\\mp4box.exe"):
        return "C:\\Program Files\\GPAC\\mp4box.exe"
    # Bug fix: the 32-bit branch previously stat-checked the 64-bit path but
    # returned the (x86) path; check the path that is actually returned.
    if os.path.isfile("C:\\Program Files (x86)\\GPAC\\mp4box.exe"):
        return "C:\\Program Files (x86)\\GPAC\\mp4box.exe"
    # Throw an error
    raise ValueError('Could not locate GPAC install, please use the --gpac switch to specify the path to the mp4box.exe file on your system.')
#
# Locates the ffmpeg executable and returns a full path to it
def findffmpeg(path_to_ffmpeg_install=None, working_dir=None):
    """Locate the ffmpeg executable and return a full path to it.

    :raises ValueError: When ffmpeg cannot be found.
    """
    if path_to_ffmpeg_install is not None:
        candidate = os.path.join(path_to_ffmpeg_install, "ffmpeg.exe")
        if os.path.isfile(candidate):
            return candidate
    # Fall back to the bundled bin folder next to the script.
    bin_dist = os.path.join(working_dir, "..\\bin\\ff\\ffmpeg.exe")
    if os.path.isfile(bin_dist):
        return str(Path(bin_dist).resolve())
    raise ValueError('Could not locate FFMPEG install, please use the --ffmpeg switch to specify the path to the ffmpeg.exe file on your system.')
#
# Returns an array of files matching the grep string passed in
def getFileNamesFromGrepMatch(grep_match, path_out_file):
    """Return the .mp4 files matching the glob pattern, naturally sorted,
    excluding the output file itself."""
    candidates = glob.glob(grep_match.replace("\\", "/"))
    result = []
    for f in sorted(candidates, key=natural_key):
        if '.mp4' in Path(f).suffix and Path(f) != path_out_file:
            result.append(f)
    return result
#
# Cleans any invalid file name and file path characters from the given filename
def sanitizeFileName(local_filename, sep=" "):
    """Replace characters that are invalid on an NTFS filesystem with `sep`."""
    invalid_chars = r"[\"/:<>|?*\n\r\t\x00]"
    return re.sub(invalid_chars, sep, local_filename)
#
# Creates a nice format of a datetime.timedelta structure, including milliseconds
def formatTimedelta(time_delta):
    """Format a timedelta as HH:MM:SS.mmm.

    Only the .seconds and .microseconds components are used (whole days are
    ignored), matching the original behavior.
    """
    total_seconds = time_delta.seconds
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    millis = time_delta.microseconds // 1000
    return '{:02}:{:02}:{:02}.{:03}'.format(hours, minutes, seconds, millis)
#
# Saves a list of chapter information to a chapter file in the common chapter syntax
def saveChaptersFile(chapters, path_chapters_file):
    """Write chapter marks to a file using the common CHAPTER syntax.

    Common syntax: CHAPTERX=h:m:s[:ms or .ms] on one line and
    CHAPTERXNAME=name on the next; entries must be declared sequentially.
    Any pre-existing file at the destination is replaced.
    """
    parent = path_chapters_file.parent
    if not parent.exists():
        parent.mkdir(parents=True, exist_ok=True)
    # Remove a stale chapters file so we start from a clean slate.
    if os.path.exists(str(path_chapters_file)):
        os.remove(str(path_chapters_file))
    with path_chapters_file.open(mode='w+', encoding='utf-8') as out:
        for index, chapter in enumerate(chapters, start=1):
            out.write("CHAPTER{0}={1}\n".format(index, chapter['timecode']))
            out.write("CHAPTER{0}NAME=\"{1}\"\n".format(index, chapter['name']))
#
# Executes FFMPEG for all video files to be joined and reencodes
def reencodeAndCombineVideoFiles(ffmpeg_path, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio ):
    """Build and execute a single ffmpeg command that scales, optionally
    subtitle-burns, and concatenates all input videos into path_out_file.

    See https://stackoverflow.com/a/26366762/779521 for the filter_complex
    approach used here. Per-file cut points (cuts) become -ss/-t options in
    front of the matching -i input.
    """
    # Construct the args to ffmpeg
    prog_args = [ffmpeg_path]
    # How many video files
    video_count = len(video_files)
    # Is sound enabled
    audio_is_enabled = True if not args_noaudio is True else False
    # The filter complex configuration
    filter_complex_concat = []
    filter_complex_scale = []
    curr_video = 0
    # -filter_complex
    # "[0:v]scale=1024:576:force_original_aspect_ratio=1[v0];
    # [1:v]scale=1024:576:force_original_aspect_ratio=1[v1];
    # [v0][0:a][v1][1:a]concat=n=2:v=1:a=1[v][a]"
    # For every input construct their filter complex to be added later
    # Force scaling of videos first
    for video_file in video_files:
        video_file_path = Path(video_file)
        # Attempt to find a cut point for this video if there are cut points defined
        #-ss 00:33 -t 30
        if not cuts is None and video_file_path.name in cuts:
            video_cut_info = cuts[video_file_path.name]
            if 'ss' in video_cut_info:
                prog_args.append("-ss")
                prog_args.append(str(video_cut_info['ss']))
            if 't' in video_cut_info:
                prog_args.append("-t")
                prog_args.append(str(video_cut_info['t']))
        prog_args.append("-i")
        prog_args.append(str(video_file_path)) # Don't surrount with quotes ""
        # Add the scaling instructions for the input video and give it a new output
        # Force downscaling of aspect ratio and size to the minimal available
        # the value of =1 is the same as 'decrease' => The output video dimensions will automatically be decreased if needed.
        if args_burnsubs:
            # More info on the subtitles filter http://ffmpeg.org/ffmpeg-filters.html#subtitles
            filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[vv{0}];[vv{0}]subtitles='{2}':force_style='FontName=Arial,Fontsize=24'[v{0}];".format(curr_video, args_videomaxsize, str(video_file_path).replace('\\', '/').replace(':', '\:'))) # Note the path must have forward slashes AND we must escape the colon!!
        else:
            filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[v{0}];".format(curr_video, args_videomaxsize))
        # Add concat filter with the video output from the scaling and audio index from the original video
        filter_complex_concat.append("[v{0}]".format(curr_video))
        if audio_is_enabled:
            filter_complex_concat.append("[{0}:a]".format(curr_video))
        curr_video += 1
    # Add the final part of the concat filter
    audio_track = ':a=1' if audio_is_enabled else ''
    filter_complex_concat.append("concat=n={0}:v=1{1}".format(video_count, audio_track))
    filter_complex_concat.append("[v]")
    if audio_is_enabled:
        filter_complex_concat.append("[a]")
    # Join and add the filter complex to the args
    # First the scaling then the concats
    prog_args.append("-filter_complex")
    prog_args.append("".join(filter_complex_scale) + "".join(filter_complex_concat)) # Don't surrount with quotes ""
    # The mapping for the video and audio
    prog_args.append("-map")
    prog_args.append("[v]")
    if audio_is_enabled:
        prog_args.append("-map")
        prog_args.append("[a]")
    # Don't show copyright header
    prog_args.append("-hide_banner")
    # Don't show excess logging (only things that cause the exe to terminate)
    prog_args.append("-loglevel")
    prog_args.append("verbose")
    # Force showing progress indicator text
    prog_args.append("-stats")
    # Overwrite any prompts with YES
    prog_args.append("-y")
    # Finally the output file
    prog_args.append(str(path_out_file)) # Don't surrount with quotes ""
    # Disable colour output from FFMPEG before we start
    os.environ['AV_LOG_FORCE_NOCOLOR'] = "1"
    # Run ffmpeg and wait for the output file to be created before returning
    return _runSubProcess(prog_args, path_to_wait_on=path_out_file)
#
# Calls mp4box to create the concatinated video file and includes the chapter file as well
def addChaptersToVideoFile(mp4box_path, path_video_file, path_chapters_file):
    """Embed the given chapter file into the video in-place using mp4box.

    :raises ValueError: When the video file does not exist.
    """
    # Check to see if the video file exists before doing anything
    if not path_video_file.exists():
        raise ValueError("Video file {0} could not be found. No chapters were added.".format(path_video_file))
    # Construct the args to mp4box
    prog_args = [mp4box_path]
    # Overwrite the default temporary folder to somewhere we
    # know that the current user has write privileges
    # NOTE(review): the 'TMP' environment variable is Windows-specific; on
    # other platforms this raises KeyError — confirm intended platforms.
    prog_args.append("-tmp")
    prog_args.append("{0}".format(os.environ['TMP']))
    # Add the chapter file
    prog_args.append("-add")
    prog_args.append(str(path_chapters_file)+":chap")
    # Add the output file at the very end, we will add the
    # chapter marks in-place
    prog_args.append(str(path_video_file))
    # Run the command
    return _runSubProcess(prog_args)
#
# Splits an existing video file into requested chunks
def splitVideoFile(mp4box_path, path_video_file, max_out_size_kb):
    """Split an existing video into chunks no larger than max_out_size_kb
    using `mp4box -splits`.

    :raises ValueError: When the video file does not exist.
    """
    # Can't split something that doesn't exist
    if not path_video_file.exists():
        raise ValueError("Video file {0} could not be found. Nothing was split.".format(path_video_file))
    # Construct the args to mp4box
    prog_args = [mp4box_path]
    # Specify the maximum split size (mp4box expects kilobytes)
    prog_args.append("-splits")
    prog_args.append(str(max_out_size_kb))
    # Overwrite the default temporary folder to somewhere we
    # know that the current user has write privileges
    # NOTE(review): 'TMP' is Windows-specific; raises KeyError elsewhere.
    prog_args.append("-tmp")
    prog_args.append("{0}".format(os.environ['TMP']))
    # Add the input file we want to split
    prog_args.append(str(path_video_file))
    # Specify the same file again as an out parameter to use the same directory
    prog_args.append("-out")
    prog_args.append(str(path_video_file))
    # Run the command
    return _runSubProcess(prog_args)
# Runs a subprocess using the arguments passed and monitors its progress while printing out the latest
# log line to the console on a single line
def _runSubProcess(prog_args, path_to_wait_on=None):
print( " ".join(prog_args))
# Force a UTF8 environment for the subprocess so that files with non-ascii characters are read correctly
# for this to work we must not use the universal line endings parameter
my_env = os.environ
my_env['PYTHONIOENCODING'] = 'utf-8'
retcode = None
# Run the app and collect the output
ret = subprocess.Popen(prog_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=my_env)
try:
longest_line = 0
trace_lines = []
while True:
try:
#line = ret.stdout.readline().decode('utf-8')
line = ret.stdout.readline()
if not line:
break
trace_lines.append(line)
line = line.strip()[:80] # Limit the max length of the line, otherwise it will screw up our console window
longest_line = max( longest_line, len(line))
sys.stdout.write('\r '+line.ljust(longest_line))
sys.stdout.flush()
except UnicodeDecodeError:
continue # Ignore all unicode errors, don't care!
# Ensure that the return code was ok before continuing
retcode = ret.poll()
while retcode is None:
retcode = ret.poll()
except KeyboardInterrupt:
ret.terminate()
raise
# Move the input to the beginning of the line again
# subsequent output text will look nicer :)
sys.stdout.write('\r '+"Done!".ljust(longest_line))
print()
if( retcode != 0 ):
print( "Error while executing {0}".format(prog_args[0]))
print(" Full arguments:")
print( " ".join(prog_args))
print( "Full error")
print("\n".join(trace_lines))
raise ValueError("Error {1} while executing {0}".format(prog_args[0], retcode))
# If we should wait on the creation of a particular file then do that now
total_wait_sec = 0
if not path_to_wait_on is None and not path_to_wait_on.is_dir():
while not path_to_wait_on.exists() or total_wait_sec < 5:
time.sleep(1)
total_wait_sec += 1
if not path_to_wait_on.exists() or not path_to_wait_on.is_file() :
raise ValueError("Expecting file {0} to be created but it wasn't, something went wrong!".format(str(path_to_wait_on)))
return retcode
def parseArguments():
    """Build the argparse parser for the script and parse sys.argv.

    Returns the parsed argparse.Namespace. Option semantics worth noting:
    --disk overrides -s/--size; --burnsubs and --cuts are mutually
    exclusive (enforced by the caller, not here).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", help="The path and filename of the concatenated output file. If multiple files then the script will append a number to the filename.",
                        type=str)
    parser.add_argument("-m","--match", help="A grep style match that should be used to detect files to concatinate.",
                        type=str)
    parser.add_argument('--disk', help="When defined this defines the maximum file size to generate so that they will fit the required optical disk capacity. dvd4=4.7GB, dvd8=8.5GB, br25=25GB, xbox=4GB. If specified this overrides the -s/--size argument.",
                        choices=['dvd4', 'dvd8', 'br25', 'xbox'])
    parser.add_argument("-s", "--size", help="Defines the maximum size of a single combined output file. Supports format ending such as 'MB' for megabytes, 'GB' for gigabytes. If nothing is specified then 'MB' is assumed. Overridden by the --disk argument if both are specified. Supports only numbers using dot (.) as decimal separator, e.g. '15.5GB'", type=str)
    parser.add_argument("--gpac", help="Path to the GPAC install directory (not including the exe)",
                        type=str)
    parser.add_argument("--ffmpeg", help="Path to the ffmpeg install directory (not including the exe)",
                        type=str)
    parser.add_argument("--videosize", help="The desired maximum w/h size for the output video, default is 1024:576 (in case of multiple sizes for videos then all videos above this size are downsized to match) Aspect ratios will be downscaled as needed.",
                        default="1024:576",
                        type=str)
    parser.add_argument("--overwrite", help="Existing files with the same name as the output will be silently overwritten.",
                        action="store_true")
    parser.add_argument("--shuffle", help="Shuffles the list of episodes in a random fashion before combining. Useful to generate a random list of episodes to fill a DVD.",
                        action="store_true")
    parser.add_argument("-c","--cuts", help="A CSV text file containing cut point information for the input files",
                        type=str)
    parser.add_argument("--burnsubs", help="Burns any subtitles found in the video files into the video itself (necessary to preserve separate subtitle tracks)",
                        action="store_true")
    parser.add_argument("-d", "--debug", help="Prints out extra debugging information while script is running",
                        action="store_true")
    parser.add_argument("--noaudio", help="Explicitly disables audio tracks in the output video (useful for source videos that have no audio track)",
                        action="store_true")
    return parser.parse_args()
# If the script file is called by itself then execute the main function
if __name__ == '__main__':
runMain() | file_infos.append(m4b_fileinfo) | conditional_block |
combine.py | #!/usr/bin/env python
# coding=utf-8
__version__ = "2.5.0"
# When modifying remember to issue a new tag command in git before committing, then push the new tag
# git tag -a v2.5.0 -m "v2.5.0"
# git push origin --tags
"""
Python script that generates the necessary mp4box -cat commands to concatinate multiple video files
together and generates a chapter file that marks the beginning of each concatenated file in the final result.
This script requires the GPAC package to be installed with the mp4box command line utility.
https://gpac.wp.mines-telecom.fr/downloads/
The script is written in Python 3.5
Details about Xbox video formats:
https://support.xbox.com/en-IE/xbox-360/console/audio-video-playback-faq
Make sure you install the requirements for this script:
pip install -r requirements.txt
See: https://github.com/sverrirs/mp4combine
Author: Sverrir Sigmundarson mp4combine@sverrirs.com https://www.sverrirs.com
"""
from colorama import init, deinit # For colorized output to console windows (platform and shell independent)
from constant import DISKSIZES, ABSSIZES, Colors # Constants for the script
import humanize # Display human readible values for sizes etc
import sys, os, time
from pathlib import Path # to check for file existence in the file system
import argparse # Command-line argument parser
import ntpath # Used to extract file name from path for all platforms http://stackoverflow.com/a/8384788
import glob # Used to do partial file path matching when listing file directories in search of files to concatinate http://stackoverflow.com/a/2225582/779521
import subprocess # To execute shell commands
import re # To perform substring matching on the output of mp4box and other subprocesses
from datetime import timedelta # To store the parsed duration of files and calculate the accumulated duration
from random import shuffle # To be able to shuffle the list of files if the user requests it
import csv # To use for the cutpoint files they are CSV files
#
# Provides natural string sorting (numbers inside strings are sorted in the correct order)
# http://stackoverflow.com/a/3033342/779521
def natural_key(string_):
    """Sort key that orders numbers embedded in strings numerically.

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    key = []
    for chunk in re.split(r'(\d+)', string_):
        key.append(int(chunk) if chunk.isdigit() else chunk)
    return key
#
# The main entry point for the script
def runMain():
# Orchestrates the whole run: parse CLI args, locate the mp4box/ffmpeg
# executables, collect the matching input mp4 files, then re-encode,
# concatenate, chapter and (optionally) split them into the output video.
try:
init() # Initialize the colorama library
# Compile the regular expressions
regex_mp4box_duration = re.compile(r"Computed Duration (?P<hrs>[0-9]{2}):(?P<min>[0-9]{2}):(?P<sec>[0-9]{2}).(?P<msec>[0-9]{3})", re.MULTILINE)
# Construct the argument parser for the commandline
args = parseArguments()
# The burnsubs and cuts cannot be used together, they will produce incorrect subtitles to be burned into the video
if( args.burnsubs == True and not args.cuts is None):
print(Colors.error("Options --burnsubs and --cuts cannot be used together as they would cause embedded subtitles to be incorrectly synced in the output video."))
sys.exit(100)
# Get the current working directory (place that the script is executing from)
working_dir = sys.path[0]
# Get the mp4box exec
mp4exec = findMp4Box(args.gpac, working_dir)
# Get ffmpeg exec
ffmpegexec = findffmpeg(args.ffmpeg, working_dir)
# Detect the maximum file size that should be generated in kilobytes, if <=0 then unlimited
max_out_size_kb = determineMaximumOutputfileSizeInKb(args.size, args.disk)
# Create the output file names both for the video file and the intermediate chapters file
path_out_file = Path(args.output)
path_chapters_file = path_out_file.with_suffix('.txt') # Just change the file-extension of the output file to TXT
# If the output files exist then either error or overwrite
if( path_out_file.exists() ):
if( args.overwrite ):
os.remove(str(path_out_file))
else:
print( "Output file '{0}' already exists. Use --overwrite switch to overwrite.".format(Colors.filename(path_out_file.name)))
sys.exit(0)
# Get all the input files
in_files = getFileNamesFromGrepMatch(args.match, path_out_file)
if( in_files is None ):
print( "No mp4 video files found matching '{0}'".format(args.match))
sys.exit(0)
file_infos = []
# Only process files that have file-ending .mp4 and not files that have the same name as the joined one
# (parseMp4boxMediaInfo returns None for files mp4box cannot read; those are skipped)
for in_file in in_files:
print("File: {0}".format(Colors.filename(in_file)))
m4b_fileinfo = parseMp4boxMediaInfo(in_file, mp4exec, regex_mp4box_duration)
if not m4b_fileinfo is None:
file_infos.append(m4b_fileinfo)
# If nothing was found then don't continue, this can happen if no mp4 files are found or if only the joined file is found
if( len(file_infos) <= 0 ):
print( "No mp4 video files found matching '{0}'".format(args.match))
sys.exit(0)
print("Found {0} files".format(len(file_infos)))
# If the user supplied a cut point information file then we parse it now
cuts = None
if( args.cuts ):
cuts = parseCutPointInformation(Path(args.cuts))
print(cuts)
if not cuts is None:
print("Read {0} cut point data from cut file".format(len(cuts)))
# If the user wants the list of files shuffled then do that now in place
if( args.shuffle ):
shuffle(file_infos)
print("File list shuffled")
# Now create the list of files to create
video_files = []
chapters = []
cumulative_dur = timedelta(seconds=0)
cumulative_size = 0
# Collect the file info data and chapter points for all files
for file_info in file_infos:
file_name = Path(file_info['file']).name
video_files.append(file_info['file'])
file_info_dur = file_info['dur']
# Do we have a proposed cut duration, if so then we must use this info
# to correct the chapter locations
if not cuts is None and file_name in cuts and 't' in cuts[file_name]:
file_info_dur = timedelta(seconds=cuts[file_name]['t'])
chapters.append({"name": Path(file_info['file']).stem, "timecode":formatTimedelta(cumulative_dur)})
cumulative_dur += file_info_dur # Count the cumulative duration
cumulative_size += file_info['size']
createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args.overwrite, cuts, args.videosize, args.burnsubs, max_out_size_kb, args.noaudio )
print(Colors.success("Script completed successfully, bye!"))
finally:
deinit() #Deinitialize the colorama library
# Reads and parses cut information for the input files
def parseCutPointInformation(path_to_cuts_file):
    """Read a CSV file of per-input cut points.

    Each row is ``filename, start-time[, end-time]``. Returns a dict keyed
    on filename whose values carry 'ss' (the start timecode string) and,
    when an end-time was given, 't' (the clip duration in whole seconds).
    Returns None when no usable rows were found.
    """
    cuts = {}
    with open(str(path_to_cuts_file), encoding='utf-8') as csvfile:
        for row in csv.reader(csvfile):
            # Rows without at least a filename and a start time are skipped
            if len(row) < 2:
                continue
            filename = row[0].strip()
            starttime = row[1].strip()
            cuts[filename] = {'ss': starttime}
            # Convert an "h:m:s" style timecode into total seconds
            startpoint = sum(mult * int(part) for mult, part in zip([1, 60, 3600], reversed(starttime.split(":"))))
            if len(row) > 2:
                timecode = row[2].strip()
                fullduration = sum(mult * int(part) for mult, part in zip([1, 60, 3600], reversed(timecode.split(":"))))
                cuts[filename]['t'] = fullduration - startpoint
    return cuts if cuts else None
#
# Creates a combined video file for a segment
def createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args_overwrite, cuts, args_videomaxsize, args_burnsubs, max_out_size_kb=0, args_noaudio=False ):
# Writes the chapter file, re-encodes+concatenates the inputs with ffmpeg,
# embeds the chapter marks with mp4box, then splits the result if it
# exceeds max_out_size_kb (<=0 means unlimited).
print( "Output: {0}".format(Colors.fileout(str(path_out_file))))
# Add the final chapter as the end for this segment
chapters.append({"name": "End", "timecode":formatTimedelta(cumulative_dur)})
# Chapters should be +1 more than files as we have an extra chapter ending at the very end of the file
print("{0} chapters, {1} running time, {2} total size".format( len(chapters), formatTimedelta(cumulative_dur), humanize.naturalsize(cumulative_size, gnu=True)))
# Write the chapters file to out
saveChaptersFile(chapters, path_chapters_file)
# Re-encode and combine the video files first
print(Colors.toolpath("Combining and re-encoding video files (ffmpeg), this will take a while..."))
reencodeAndCombineVideoFiles(ffmpegexec, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio)
# Now create the combined file and include the chapter marks
print(Colors.toolpath("Adding chapters to combined video file (mp4box)"))
addChaptersToVideoFile(mp4exec, path_out_file, path_chapters_file)
# Delete the chapters file
os.remove(str(path_chapters_file))
# Read the created file to learn its final filesize
# NOTE(review): size here uses 1024-byte KB but the split threshold from
# determineMaximumOutputfileSizeInKb is metric (1000-byte) KB -- confirm
# the mixed units are intended.
size_out_file_kb = os.path.getsize(str(path_out_file)) / 1024
print( Colors.toolpath("Final size of video file is: {0}".format(humanize.naturalsize(size_out_file_kb * 1024))))
# Now split the file if requested
if max_out_size_kb > 0 and size_out_file_kb > max_out_size_kb :
print( Colors.toolpath("Size limit exceeded, splitting video into files of max size: {0}".format(humanize.naturalsize(max_out_size_kb * 1000))))
splitVideoFile(mp4exec, path_out_file, max_out_size_kb)
#
# Attempts to detect the requested size of the output file based on the input parameters
# the absolute_size is overridden by disk_capacity if both are specified
# Returns KB (kilobytes)
def determineMaximumOutputfileSizeInKb(absolute_size, disk_capacity):
    """Resolve the maximum output file size in metric kilobytes (1000 bytes).

    disk_capacity (a DISKSIZES key such as 'dvd4') overrides absolute_size
    when both are given. absolute_size is a human string such as "700MB" or
    "1.5 GB"; the unit defaults to MB when omitted. Returns -1 when no
    limit was requested (unbounded output).

    Raises:
        ValueError: when absolute_size cannot be parsed as a size.
    """
    if disk_capacity and disk_capacity in DISKSIZES:
        return DISKSIZES[disk_capacity]
    elif absolute_size:
        # Normalize: drop spaces and thousands-separator commas, uppercase the unit
        abs_size = "".join("".join(absolute_size.split(' ')).split(',')).upper()
        regex_size_parse = re.compile(r"^(?P<size>[0-9]*(?:\.[0-9]*)?)\s*(?P<unit>GB|MB|KB|B|TB)?$", re.MULTILINE)
        # Bug fix: match against the normalized string, not the raw input --
        # previously lowercase units or embedded spaces made search() return
        # None and the following group() calls crashed with AttributeError.
        match = regex_size_parse.search(abs_size)
        if match is None or not match.group("size"):
            raise ValueError("Could not parse size value '{0}'".format(absolute_size))
        size = float(match.group("size"))
        unit = match.group("unit")
        if not unit or unit not in ABSSIZES:
            unit = "MB"  # Default is megabytes if nothing is specified
        total_size = size * ABSSIZES[unit]
        # Return kilobytes, but in the metric sense -- not the "1024 byte sense"
        return total_size / 1000
    else:
        # Nothing specified: the default is unbounded output
        return -1
#
# Executes the mp4box app with the -info switch and
# extracts the track length and file size from the output
def parseMp4boxMediaInfo(file_name, mp4box_path, regex_mp4box_duration):
    """Run ``mp4box -info`` on file_name and extract its duration and size.

    Returns a dict {'file', 'size' (bytes), 'dur' (timedelta)} on success,
    or None when the tool exits non-zero or its output contains no
    parseable duration line -- the caller skips such files.
    """
    # Size in bytes of the plain file
    file_size = os.stat(file_name).st_size
    # Run the app and collect the output
    proc_cmd = [mp4box_path, "-info", "-std", file_name]
    ret = subprocess.run(proc_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    # Ensure that the return code was ok before continuing
    if ret.returncode != 0:
        print("Command {0} returned non-zero exit status {1}.".format(proc_cmd, ret.returncode))
        print("File {0} will be skipped".format(file_name))
        return None
    # Expected output fragment:
    #   Computed Duration 00:23:06.040 - Indicated Duration 00:23:06.040
    match = regex_mp4box_duration.search(ret.stdout)
    if match is None:
        # Bug fix: previously an unparseable output crashed with
        # AttributeError on match.group(); skip the file instead.
        print("Could not determine duration of {0}, file will be skipped".format(file_name))
        return None
    duration = timedelta(hours=int(match.group("hrs")),
                         minutes=int(match.group("min")),
                         seconds=int(match.group("sec")),
                         milliseconds=int(match.group("msec")))
    return {'file': file_name, 'size': file_size, 'dur': duration}
#
# Locates the mp4box executable and returns a full path to it
def findMp4Box(path_to_gpac_install=None, working_dir=None):
    """Locate mp4box.exe and return its full path.

    Checked in order: the user-supplied GPAC directory, the bundled
    ../bin/GPAC folder next to the script, the default 64-bit install
    location, then the 32-bit (x86) install location.

    Raises:
        ValueError: when mp4box.exe cannot be found anywhere.
    """
    if path_to_gpac_install is not None and os.path.isfile(os.path.join(path_to_gpac_install, "mp4box.exe")):
        return os.path.join(path_to_gpac_install, "mp4box.exe")
    # Attempts to search for it under the bin folder
    bin_dist = os.path.join(working_dir, "..\\bin\\GPAC\\mp4box.exe")
    if os.path.isfile(bin_dist):
        return str(Path(bin_dist).resolve())
    # Attempts to search for it under C:\Program Files\GPAC
    if os.path.isfile("C:\\Program Files\\GPAC\\mp4box.exe"):
        return "C:\\Program Files\\GPAC\\mp4box.exe"
    # For 32 bit installs. Bug fix: the original re-tested the 64-bit path
    # here, so the x86 fallback was returned without ever being probed.
    if os.path.isfile("C:\\Program Files (x86)\\GPAC\\mp4box.exe"):
        return "C:\\Program Files (x86)\\GPAC\\mp4box.exe"
    # Throw an error
    raise ValueError('Could not locate GPAC install, please use the --gpac switch to specify the path to the mp4box.exe file on your system.')
#
# Locates the ffmpeg executable and returns a full path to it
def findffmpeg(path_to_ffmpeg_install=None, working_dir=None):
    """Locate ffmpeg.exe and return its full path.

    Checks the user-supplied install directory first, then the bundled
    ../bin/ff folder next to the script.

    Raises:
        ValueError: when ffmpeg.exe cannot be found.
    """
    if path_to_ffmpeg_install is not None:
        candidate = os.path.join(path_to_ffmpeg_install, "ffmpeg.exe")
        if os.path.isfile(candidate):
            return candidate
    # Fall back to the copy shipped alongside the script
    bin_dist = os.path.join(working_dir, "..\\bin\\ff\\ffmpeg.exe")
    if os.path.isfile(bin_dist):
        return str(Path(bin_dist).resolve())
    raise ValueError('Could not locate FFMPEG install, please use the --ffmpeg switch to specify the path to the ffmpeg.exe file on your system.')
#
# Returns an array of files matching the grep string passed in
def getFileNamesFromGrepMatch(grep_match, path_out_file):
    """Expand a glob pattern to a naturally-sorted list of .mp4 files,
    excluding the output file itself so a rerun never re-ingests it."""
    candidates = sorted(glob.glob(grep_match.replace("\\", "/")), key=natural_key)
    return [f for f in candidates if '.mp4' in Path(f).suffix and Path(f) != path_out_file]
#
# Cleans any invalid file name and file path characters from the given filename
def sanitizeFileName(local_filename, sep=" "):
    """Replace characters that are not legal in NTFS file names with sep."""
    return re.sub(r"[\"/:<>|?*\n\r\t\x00]", sep, local_filename)
#
# Creates a nice format of a datetime.timedelta structure, including milliseconds
def formatTimedelta(time_delta):
    """Format a datetime.timedelta as 'HH:MM:SS.mmm'.

    Bug fix: include the .days component -- the original read only
    .seconds, so cumulative running times of 24 hours or more silently
    wrapped back to zero in the chapter timecodes.
    """
    total_seconds = time_delta.days * 86400 + time_delta.seconds
    timecode_ms = int(time_delta.microseconds / 1000)
    return '{:02}:{:02}:{:02}.{:03}'.format(total_seconds // 3600, total_seconds % 3600 // 60, total_seconds % 60, timecode_ms)
#
# Saves a list of chapter information to a chapter file in the common chapter syntax
def saveChaptersFile(chapters, path_chapters_file):
    """Write the chapter list to path_chapters_file in the common CHAPTER
    syntax: CHAPTERX=h:m:s[.ms] on one line and CHAPTERXNAME="name" on the
    next, numbered sequentially.
    """
    # Make sure the target directory exists
    if not path_chapters_file.parent.exists():
        path_chapters_file.parent.mkdir(parents=True, exist_ok=True)
    # Start from a clean file if one is already there
    if os.path.exists(str(path_chapters_file)):
        os.remove(str(path_chapters_file))
    with path_chapters_file.open(mode='w+', encoding='utf-8') as the_file:
        for number, chapter in enumerate(chapters, start=1):
            the_file.write("CHAPTER{0}={1}\n".format(number, chapter['timecode']))
            the_file.write("CHAPTER{0}NAME=\"{1}\"\n".format(number, chapter['name']))
#
# Executes FFMPEG for all video files to be joined and reencodes
def reencodeAndCombineVideoFiles(ffmpeg_path, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio ):
# Builds a single ffmpeg invocation that applies per-file cut points
# (-ss/-t), scales every input to args_videomaxsize, optionally burns in
# subtitles, concatenates everything with -filter_complex and writes the
# result to path_out_file. Blocks until ffmpeg exits.
# Construct the args to ffmpeg
# See https://stackoverflow.com/a/26366762/779521
prog_args = [ffmpeg_path]
# How many video files
video_count = len(video_files)
# Is sound enabled
audio_is_enabled = True if not args_noaudio is True else False
# The filter complex configuration
filter_complex_concat = []
filter_complex_scale = []
curr_video = 0
# -filter_complex
# "[0:v]scale=1024:576:force_original_aspect_ratio=1[v0];
# [1:v]scale=1024:576:force_original_aspect_ratio=1[v1];
# [v0][0:a][v1][1:a]concat=n=2:v=1:a=1[v][a]"
# For every input construct their filter complex to be added later
# Force scaling of videos first
for video_file in video_files:
video_file_path = Path(video_file)
# Attempt to find a cut point for this video if there are cut points defined
#-ss 00:33 -t 30
if not cuts is None and video_file_path.name in cuts:
video_cut_info = cuts[video_file_path.name]
if 'ss' in video_cut_info:
prog_args.append("-ss")
prog_args.append(str(video_cut_info['ss']))
if 't' in video_cut_info:
prog_args.append("-t")
prog_args.append(str(video_cut_info['t']))
prog_args.append("-i")
prog_args.append(str(video_file_path)) # Don't surrount with quotes ""
# Add the scaling instructions for the input video and give it a new output
# Force downscaling of aspect ratio and size to the minimal available
# the value of =1 is the same as ‘decrease’ => The output video dimensions will automatically be decreased if needed.
if args_burnsubs:
# More info on the subtitles filter http://ffmpeg.org/ffmpeg-filters.html#subtitles
filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[vv{0}];[vv{0}]subtitles='{2}':force_style='FontName=Arial,Fontsize=24'[v{0}];".format(curr_video, args_videomaxsize, str(video_file_path).replace('\\', '/').replace(':', '\:'))) # Note the path must have forward slashes AND we must escape the colon!!
else:
filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[v{0}];".format(curr_video, args_videomaxsize))
# Add concat filter with the video output from the scaling and audio index from the original video
filter_complex_concat.append("[v{0}]".format(curr_video))
if audio_is_enabled:
filter_complex_concat.append("[{0}:a]".format(curr_video))
curr_video += 1
# Add the final part of the concat filter
audio_track = ':a=1' if audio_is_enabled else ''
filter_complex_concat.append("concat=n={0}:v=1{1}".format(video_count, audio_track))
filter_complex_concat.append("[v]")
if audio_is_enabled:
filter_complex_concat.append("[a]")
# Join and add the filter complex to the args
# First the scaling then the concats
prog_args.append("-filter_complex")
prog_args.append("".join(filter_complex_scale) + "".join(filter_complex_concat)) # Don't surrount with quotes ""
# The mapping for the video and audio
prog_args.append("-map")
prog_args.append("[v]")
if audio_is_enabled:
prog_args.append("-map")
prog_args.append("[a]")
# Don't show copyright header
prog_args.append("-hide_banner")
# NOTE(review): the original comment said "don't show excess logging" but
# the level passed is "verbose", which is chattier than the default --
# confirm which is intended.
prog_args.append("-loglevel")
prog_args.append("verbose")
# Force showing progress indicator text
prog_args.append("-stats")
# Overwrite any prompts with YES
prog_args.append("-y")
# Finally the output file
prog_args.append(str(path_out_file)) # Don't surrount with quotes ""
# Disable colour output from FFMPEG before we start
os.environ['AV_LOG_FORCE_NOCOLOR'] = "1"
# Run ffmpeg and wait for the output file to be created before returning
return _runSubProcess(prog_args, path_to_wait_on=path_out_file)
#
# Calls mp4box to create the concatinated video file and includes the chapter file as well
def addChaptersToVideoFile(mp4box_path, path_video_file, path_chapters_file):
    """Embed the chapter marks from path_chapters_file into path_video_file
    in place, using mp4box.

    Raises:
        ValueError: when the video file does not exist.
    """
    # Check to see if the video file exists before doing anything
    if not path_video_file.exists():
        raise ValueError("Video file {0} could not be found. No chapters were added.".format(path_video_file))
    prog_args = [
        mp4box_path,
        # Redirect mp4box's temp folder somewhere the current user can write to
        "-tmp", "{0}".format(os.environ['TMP']),
        # Attach the chapter track
        "-add", str(path_chapters_file) + ":chap",
        # The video file itself goes last; chapters are added in place
        str(path_video_file),
    ]
    return _runSubProcess(prog_args)
#
# Splits an existing video file into requested chunks
def splitVideoFile(mp4box_path, path_video_file, max_out_size_kb):
    """Split path_video_file into chunks no larger than max_out_size_kb
    using mp4box, writing the pieces next to the original file.

    Raises:
        ValueError: when the video file does not exist.
    """
    # Can't split something that doesn't exist
    if not path_video_file.exists():
        raise ValueError("Video file {0} could not be found. Nothing was split.".format(path_video_file))
    prog_args = [
        mp4box_path,
        # Maximum size per resulting chunk (kilobytes)
        "-splits", str(max_out_size_kb),
        # Redirect mp4box's temp folder somewhere the current user can write to
        "-tmp", "{0}".format(os.environ['TMP']),
        # The input file to split
        str(path_video_file),
        # Reuse the input path so the chunks land in the same directory
        "-out", str(path_video_file),
    ]
    return _runSubProcess(prog_args)
# Runs a subprocess using the arguments passed and monitors its progress while printing out the latest
# log line to the console on a single line
def _runSubProcess(prog_args, path_to_wait_on=None):
print( " ".join(prog_args))
# Force a UTF8 environment for the subprocess so that files with non-ascii characters are read correctly
# for this to work we must not use the universal line endings parameter
my_env = os.environ
my_env['PYTHONIOENCODING'] = 'utf-8'
retcode = None
# Run the app and collect the output
ret = subprocess.Popen(prog_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=my_env)
try:
longest_line = 0
trace_lines = []
while True:
try:
#line = ret.stdout.readline().decode('utf-8')
line = ret.stdout.readline()
if not line:
break
trace_lines.append(line)
line = line.strip()[:80] # Limit the max length of the line, otherwise it will screw up our console window
longest_line = max( longest_line, len(line))
sys.stdout.write('\r '+line.ljust(longest_line))
sys.stdout.flush()
except UnicodeDecodeError:
continue # Ignore all unicode errors, don't care!
# Ensure that the return code was ok before continuing
retcode = ret.poll()
while retcode is None:
retcode = ret.poll()
except KeyboardInterrupt:
ret.terminate()
raise
# Move the input to the beginning of the line again
# subsequent output text will look nicer :)
sys.stdout.write('\r '+"Done!".ljust(longest_line))
print()
if( retcode != 0 ):
print( "Error while executing {0}".format(prog_args[0]))
print(" Full arguments:")
print( " ".join(prog_args))
print( "Full error")
print("\n".join(trace_lines))
raise ValueError("Error {1} while executing {0}".format(prog_args[0], retcode))
# If we should wait on the creation of a particular file then do that now
total_wait_sec = 0
if not path_to_wait_on is None and not path_to_wait_on.is_dir():
while not path_to_wait_on.exists() or total_wait_sec < 5:
time.sleep(1)
total_wait_sec += 1
if not path_to_wait_on.exists() or not path_to_wait_on.is_file() :
raise ValueError("Expecting file {0} to be created but it wasn't, something went wrong!".format(str(path_to_wait_on)))
return retcode
def parseArguments():
    """Build the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    # Output selection and input matching
    parser.add_argument("-o", "--output", type=str,
                        help="The path and filename of the concatenated output file. If multiple files then the script will append a number to the filename.")
    parser.add_argument("-m", "--match", type=str,
                        help="A grep style match that should be used to detect files to concatinate.")
    # Size limits: --disk wins over -s/--size when both are given
    parser.add_argument('--disk', choices=['dvd4', 'dvd8', 'br25', 'xbox'],
                        help="When defined this defines the maximum file size to generate so that they will fit the required optical disk capacity. dvd4=4.7GB, dvd8=8.5GB, br25=25GB, xbox=4GB. If specified this overrides the -s/--size argument.")
    parser.add_argument("-s", "--size", type=str,
                        help="Defines the maximum size of a single combined output file. Supports format ending such as 'MB' for megabytes, 'GB' for gigabytes. If nothing is specified then 'MB' is assumed. Overridden by the --disk argument if both are specified. Supports only numbers using dot (.) as decimal separator, e.g. '15.5GB'")
    # Tool locations
    parser.add_argument("--gpac", type=str,
                        help="Path to the GPAC install directory (not including the exe)")
    parser.add_argument("--ffmpeg", type=str,
                        help="Path to the ffmpeg install directory (not including the exe)")
    # Encoding options
    parser.add_argument("--videosize", type=str, default="1024:576",
                        help="The desired maximum w/h size for the output video, default is 1024:576 (in case of multiple sizes for videos then all videos above this size are downsized to match) Aspect ratios will be downscaled as needed.")
    parser.add_argument("--overwrite", action="store_true",
                        help="Existing files with the same name as the output will be silently overwritten.")
    parser.add_argument("--shuffle", action="store_true",
                        help="Shuffles the list of episodes in a random fashion before combining. Useful to generate a random list of episodes to fill a DVD.")
    parser.add_argument("-c", "--cuts", type=str,
                        help="A CSV text file containing cut point information for the input files")
    parser.add_argument("--burnsubs", action="store_true",
                        help="Burns any subtitles found in the video files into the video itself (necessary to preserve separate subtitle tracks)")
    parser.add_argument("-d", "--debug", action="store_true",
                        help="Prints out extra debugging information while script is running")
    parser.add_argument("--noaudio", action="store_true",
                        help="Explicitly disables audio tracks in the output video (useful for source videos that have no audio track)")
    return parser.parse_args()
# If the script file is called by itself then execute the main function
if __name__ == '__main__':
runMain() | random_line_split | |
combine.py | #!/usr/bin/env python
# coding=utf-8
__version__ = "2.5.0"
# When modifying remember to issue a new tag command in git before committing, then push the new tag
# git tag -a v2.5.0 -m "v2.5.0"
# git push origin --tags
"""
Python script that generates the necessary mp4box -cat commands to concatinate multiple video files
together and generates a chapter file that marks the beginning of each concatenated file in the final result.
This script requires the GPAC package to be installed with the mp4box command line utility.
https://gpac.wp.mines-telecom.fr/downloads/
The script is written in Python 3.5
Details about Xbox video formats:
https://support.xbox.com/en-IE/xbox-360/console/audio-video-playback-faq
Make sure you install the requirements for this script:
pip install -r requirements.txt
See: https://github.com/sverrirs/mp4combine
Author: Sverrir Sigmundarson mp4combine@sverrirs.com https://www.sverrirs.com
"""
from colorama import init, deinit # For colorized output to console windows (platform and shell independent)
from constant import DISKSIZES, ABSSIZES, Colors # Constants for the script
import humanize # Display human readible values for sizes etc
import sys, os, time
from pathlib import Path # to check for file existence in the file system
import argparse # Command-line argument parser
import ntpath # Used to extract file name from path for all platforms http://stackoverflow.com/a/8384788
import glob # Used to do partial file path matching when listing file directories in search of files to concatinate http://stackoverflow.com/a/2225582/779521
import subprocess # To execute shell commands
import re # To perform substring matching on the output of mp4box and other subprocesses
from datetime import timedelta # To store the parsed duration of files and calculate the accumulated duration
from random import shuffle # To be able to shuffle the list of files if the user requests it
import csv # To use for the cutpoint files they are CSV files
#
# Provides natural string sorting (numbers inside strings are sorted in the correct order)
# http://stackoverflow.com/a/3033342/779521
def natural_key(string_):
    """Sort key that orders numbers embedded in strings numerically.

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    key = []
    for chunk in re.split(r'(\d+)', string_):
        key.append(int(chunk) if chunk.isdigit() else chunk)
    return key
#
# The main entry point for the script
def runMain():
# Orchestrates the whole run: parse CLI args, locate the mp4box/ffmpeg
# executables, collect the matching input mp4 files, then re-encode,
# concatenate, chapter and (optionally) split them into the output video.
try:
init() # Initialize the colorama library
# Compile the regular expressions
regex_mp4box_duration = re.compile(r"Computed Duration (?P<hrs>[0-9]{2}):(?P<min>[0-9]{2}):(?P<sec>[0-9]{2}).(?P<msec>[0-9]{3})", re.MULTILINE)
# Construct the argument parser for the commandline
args = parseArguments()
# The burnsubs and cuts cannot be used together, they will produce incorrect subtitles to be burned into the video
if( args.burnsubs == True and not args.cuts is None):
print(Colors.error("Options --burnsubs and --cuts cannot be used together as they would cause embedded subtitles to be incorrectly synced in the output video."))
sys.exit(100)
# Get the current working directory (place that the script is executing from)
working_dir = sys.path[0]
# Get the mp4box exec
mp4exec = findMp4Box(args.gpac, working_dir)
# Get ffmpeg exec
ffmpegexec = findffmpeg(args.ffmpeg, working_dir)
# Detect the maximum file size that should be generated in kilobytes, if <=0 then unlimited
max_out_size_kb = determineMaximumOutputfileSizeInKb(args.size, args.disk)
# Create the output file names both for the video file and the intermediate chapters file
path_out_file = Path(args.output)
path_chapters_file = path_out_file.with_suffix('.txt') # Just change the file-extension of the output file to TXT
# If the output files exist then either error or overwrite
if( path_out_file.exists() ):
if( args.overwrite ):
os.remove(str(path_out_file))
else:
print( "Output file '{0}' already exists. Use --overwrite switch to overwrite.".format(Colors.filename(path_out_file.name)))
sys.exit(0)
# Get all the input files
in_files = getFileNamesFromGrepMatch(args.match, path_out_file)
if( in_files is None ):
print( "No mp4 video files found matching '{0}'".format(args.match))
sys.exit(0)
file_infos = []
# Only process files that have file-ending .mp4 and not files that have the same name as the joined one
# (parseMp4boxMediaInfo returns None for files mp4box cannot read; those are skipped)
for in_file in in_files:
print("File: {0}".format(Colors.filename(in_file)))
m4b_fileinfo = parseMp4boxMediaInfo(in_file, mp4exec, regex_mp4box_duration)
if not m4b_fileinfo is None:
file_infos.append(m4b_fileinfo)
# If nothing was found then don't continue, this can happen if no mp4 files are found or if only the joined file is found
if( len(file_infos) <= 0 ):
print( "No mp4 video files found matching '{0}'".format(args.match))
sys.exit(0)
print("Found {0} files".format(len(file_infos)))
# If the user supplied a cut point information file then we parse it now
cuts = None
if( args.cuts ):
cuts = parseCutPointInformation(Path(args.cuts))
print(cuts)
if not cuts is None:
print("Read {0} cut point data from cut file".format(len(cuts)))
# If the user wants the list of files shuffled then do that now in place
if( args.shuffle ):
shuffle(file_infos)
print("File list shuffled")
# Now create the list of files to create
video_files = []
chapters = []
cumulative_dur = timedelta(seconds=0)
cumulative_size = 0
# Collect the file info data and chapter points for all files
for file_info in file_infos:
file_name = Path(file_info['file']).name
video_files.append(file_info['file'])
file_info_dur = file_info['dur']
# Do we have a proposed cut duration, if so then we must use this info
# to correct the chapter locations
if not cuts is None and file_name in cuts and 't' in cuts[file_name]:
file_info_dur = timedelta(seconds=cuts[file_name]['t'])
chapters.append({"name": Path(file_info['file']).stem, "timecode":formatTimedelta(cumulative_dur)})
cumulative_dur += file_info_dur # Count the cumulative duration
cumulative_size += file_info['size']
createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args.overwrite, cuts, args.videosize, args.burnsubs, max_out_size_kb, args.noaudio )
print(Colors.success("Script completed successfully, bye!"))
finally:
deinit() #Deinitialize the colorama library
# Reads and parses cut information for the input files
def parseCutPointInformation(path_to_cuts_file):
    """Read a CSV file of per-input cut points.

    Each row is ``filename, start-time[, end-time]``. Returns a dict keyed
    on filename whose values carry 'ss' (the start timecode string) and,
    when an end-time was given, 't' (the clip duration in whole seconds).
    Returns None when no usable rows were found.
    """
    cuts = {}
    with open(str(path_to_cuts_file), encoding='utf-8') as csvfile:
        for row in csv.reader(csvfile):
            # Rows without at least a filename and a start time are skipped
            if len(row) < 2:
                continue
            filename = row[0].strip()
            starttime = row[1].strip()
            cuts[filename] = {'ss': starttime}
            # Convert an "h:m:s" style timecode into total seconds
            startpoint = sum(mult * int(part) for mult, part in zip([1, 60, 3600], reversed(starttime.split(":"))))
            if len(row) > 2:
                timecode = row[2].strip()
                fullduration = sum(mult * int(part) for mult, part in zip([1, 60, 3600], reversed(timecode.split(":"))))
                cuts[filename]['t'] = fullduration - startpoint
    return cuts if cuts else None
#
# Creates a combined video file for a segment
def createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args_overwrite, cuts, args_videomaxsize, args_burnsubs, max_out_size_kb=0, args_noaudio=False ):
    """Re-encodes and joins video_files into one mp4 at path_out_file with chapter marks.

    video_files: list of input file paths (order defines the output order).
    chapters: list of {'name','timecode'} dicts; an "End" chapter is appended here.
    cumulative_dur / cumulative_size: totals of the inputs, used for the end chapter and logging.
    mp4exec / ffmpegexec: full paths to the mp4box and ffmpeg executables.
    path_chapters_file: temp chapter file; written here, consumed by mp4box, then deleted.
    max_out_size_kb: when > 0 the final file is split into chunks of at most this size (KB).
    NOTE(review): args_overwrite is accepted but never used in this function.
    """
    print( "Output: {0}".format(Colors.fileout(str(path_out_file))))
    # Add the final chapter as the end for this segment
    chapters.append({"name": "End", "timecode":formatTimedelta(cumulative_dur)})
    # Chapters should be +1 more than files as we have an extra chapter ending at the very end of the file
    print("{0} chapters, {1} running time, {2} total size".format( len(chapters), formatTimedelta(cumulative_dur), humanize.naturalsize(cumulative_size, gnu=True)))
    # Write the chapters file to out
    saveChaptersFile(chapters, path_chapters_file)
    # Re-encode and combine the video files first
    print(Colors.toolpath("Combining and re-encoding video files (ffmpeg), this will take a while..."))
    reencodeAndCombineVideoFiles(ffmpegexec, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio)
    # Now create the combined file and include the chapter marks
    print(Colors.toolpath("Adding chapters to combined video file (mp4box)"))
    addChaptersToVideoFile(mp4exec, path_out_file, path_chapters_file)
    # Delete the chapters file
    os.remove(str(path_chapters_file))
    # Read the created file to learn its final filesize
    size_out_file_kb = os.path.getsize(str(path_out_file)) / 1024
    print( Colors.toolpath("Final size of video file is: {0}".format(humanize.naturalsize(size_out_file_kb * 1024))))
    # Now split the file if requested
    if max_out_size_kb > 0 and size_out_file_kb > max_out_size_kb :
        # NOTE(review): the size is measured with a 1024 divisor above but reported with a 1000 multiplier here — confirm intent
        print( Colors.toolpath("Size limit exceeded, splitting video into files of max size: {0}".format(humanize.naturalsize(max_out_size_kb * 1000))))
        splitVideoFile(mp4exec, path_out_file, max_out_size_kb)
#
# Attempts to detect the requested size of the output file based on the input parameters
# the absolute_size is overridden by disk_capacity if both are specified
# Returns KB (kilobytes)
def determineMaximumOutputfileSizeInKb(absolute_size, disk_capacity):
    """Determines the requested maximum output file size in kilobytes (metric, 1000-byte KB).

    disk_capacity: a DISKSIZES key (e.g. 'dvd4'); overrides absolute_size when given.
    absolute_size: a size string such as '15.5GB' or '700 MB' (unit defaults to MB).
    Returns -1 when neither is specified (unbounded output).
    Raises ValueError when absolute_size cannot be parsed.
    """
    if( disk_capacity and disk_capacity in DISKSIZES ):
        return DISKSIZES[disk_capacity]
    elif( absolute_size):
        # Normalize: strip all spaces and thousands-separator commas, uppercase the unit
        abs_size = "".join("".join(absolute_size.split(' ')).split(',')).upper()
        regex_size_parse = re.compile(r"^(?P<size>[0-9]*(?:\.[0-9]*)?)\s*(?P<unit>GB|MB|KB|B|TB)?$", re.MULTILINE)
        # BUG FIX: search the normalized abs_size, not the raw input — previously
        # lowercase units or commas made the match None and crashed with AttributeError
        match = regex_size_parse.search( abs_size )
        if match is None:
            raise ValueError("Could not parse size value '{0}'".format(absolute_size))
        size = float(match.group("size"))
        unit = match.group("unit")
        if( not unit or not unit in ABSSIZES ):
            unit = "MB" # Default is megabytes if nothing is specified
        unit_multiplier = ABSSIZES[unit]
        total_size = size * unit_multiplier
        return total_size / 1000 # Return kilobytes but in the metric system sense not the "1024 byte sense"
    else:
        # If nothing is specified then the default return is to use unbounded
        return -1
#
# Executes the mp4box app with the -info switch and
# extracts the track length and file size from the output
def parseMp4boxMediaInfo(file_name, mp4box_path, regex_mp4box_duration):
    """Runs `mp4box -info` on file_name and extracts its duration and size on disk.

    regex_mp4box_duration: compiled regex with hrs/min/sec/msec groups matching
    mp4box's "Computed Duration 00:23:06.040" output line.
    Returns {'file', 'size', 'dur'} or None when the file should be skipped
    (non-zero exit code or no duration line found in the output).
    """
    # Get the size of the file in bytes
    statinfo = os.stat(file_name)
    file_size = statinfo.st_size  # Size in bytes of a plain file
    # Run the app and collect the output
    proc_cmd = [mp4box_path, "-info", "-std", file_name]
    ret = subprocess.run(proc_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    # Ensure that the return code was ok before continuing
    if ret.returncode != 0:
        print("Command {0} returned non-zero exit status {1}.".format(proc_cmd, ret.returncode))
        print("File {0} will be skipped".format(file_name))
        return None
    # Computed Duration 00:23:06.040 - Indicated Duration 00:23:06.040
    match = regex_mp4box_duration.search( ret.stdout )
    # BUG FIX: previously an output without a duration line crashed with
    # AttributeError on match.group(); skip such files like other failures
    if match is None:
        print("Could not find a duration in mp4box output for {0}".format(file_name))
        print("File {0} will be skipped".format(file_name))
        return None
    # Avoid shadowing the builtin min() (the original used `min` as a variable)
    hours = int(match.group("hrs"))
    minutes = int(match.group("min"))
    seconds = int(match.group("sec"))
    millis = int(match.group("msec"))
    duration = timedelta(days=0, hours=hours, minutes=minutes, seconds=seconds, milliseconds=millis )
    return {'file':file_name, 'size':file_size, 'dur':duration }
#
# Locates the mp4box executable and returns a full path to it
def findMp4Box(path_to_gpac_install=None, working_dir=None):
    """Locates the mp4box executable and returns a full path to it.

    Search order: the explicit --gpac path, the bundled ..\\bin\\GPAC folder
    relative to working_dir, then the default 64-bit and 32-bit Windows
    install locations. Raises ValueError when mp4box.exe cannot be found.
    """
    if( not path_to_gpac_install is None and os.path.isfile(os.path.join(path_to_gpac_install, "mp4box.exe")) ):
        return os.path.join(path_to_gpac_install, "mp4box.exe")
    # Attempts to search for it under the bin folder
    bin_dist = os.path.join(working_dir, "..\\bin\\GPAC\\mp4box.exe")
    if( os.path.isfile(bin_dist)):
        return str(Path(bin_dist).resolve())
    # Attempts to search for it under C:\Program Files\GPAC
    if( os.path.isfile("C:\\Program Files\\GPAC\\mp4box.exe")):
        return "C:\\Program Files\\GPAC\\mp4box.exe"
    # For 32 bit installs
    # BUG FIX: previously this branch re-tested the 64-bit path and could
    # return the x86 path without verifying that the file exists there
    if( os.path.isfile("C:\\Program Files (x86)\\GPAC\\mp4box.exe")):
        return "C:\\Program Files (x86)\\GPAC\\mp4box.exe"
    # Throw an error
    raise ValueError('Could not locate GPAC install, please use the --gpac switch to specify the path to the mp4box.exe file on your system.')
#
# Locates the ffmpeg executable and returns a full path to it
def findffmpeg(path_to_ffmpeg_install=None, working_dir=None):
    """Locates the ffmpeg executable and returns a full path to it.

    Checks the explicit --ffmpeg path first, then the bundled ..\\bin\\ff
    folder relative to working_dir. Raises ValueError when not found.
    """
    if path_to_ffmpeg_install is not None:
        candidate = os.path.join(path_to_ffmpeg_install, "ffmpeg.exe")
        if os.path.isfile(candidate):
            return candidate
    # Fall back to the copy shipped alongside the script
    bundled = os.path.join(working_dir, "..\\bin\\ff\\ffmpeg.exe")
    if os.path.isfile(bundled):
        return str(Path(bundled).resolve())
    raise ValueError('Could not locate FFMPEG install, please use the --ffmpeg switch to specify the path to the ffmpeg.exe file on your system.')
#
# Returns an array of files matching the grep string passed in
def getFileNamesFromGrepMatch(grep_match, path_out_file):
    """Returns the naturally-sorted list of .mp4 files matching the glob
    pattern, excluding the output file itself."""
    matches = glob.glob(grep_match.replace("\\", "/"))
    return [
        f for f in sorted(matches, key=natural_key)
        if '.mp4' in Path(f).suffix and Path(f) != path_out_file
    ]
#
# Cleans any invalid file name and file path characters from the given filename
def sanitizeFileName(local_filename, sep=" "):
    """Replaces characters that are invalid in NTFS file names with `sep`."""
    # These are symbols that are not "kosher" on a NTFS filesystem.
    # BUG FIX: the backslash was missing from the invalid character set even
    # though it is not a legal NTFS file-name character.
    local_filename = re.sub(r"[\\\"/:<>|?*\n\r\t\x00]", sep, local_filename)
    return local_filename
#
# Creates a nice format of a datetime.timedelta structure, including milliseconds
def formatTimedelta(time_delta):
    """Formats a datetime.timedelta as HH:MM:SS.mmm (hours may exceed 24).

    BUG FIX: uses total_seconds() instead of .seconds so durations of a day
    or more (the cumulative running time of many episodes) are not truncated
    back to the 0-24h range.
    """
    total_s = int(time_delta.total_seconds())
    timecode_ms = int(time_delta.microseconds / 1000)
    return '{:02}:{:02}:{:02}.{:03}'.format(total_s // 3600, total_s % 3600 // 60, total_s % 60, timecode_ms)
#
# Saves a list of chapter information to a chapter file in the common chapter syntax
def saveChaptersFile(chapters, path_chapters_file):
    """Writes `chapters` (list of {'name','timecode'} dicts) to the given Path
    using the common CHAPTER syntax: CHAPTERX=h:m:s.ms / CHAPTERXNAME=name,
    one pair of lines per chapter, numbered sequentially from 1."""
    # Make sure the target directory exists before writing
    if not path_chapters_file.parent.exists():
        path_chapters_file.parent.mkdir(parents=True, exist_ok=True)
    # Remove any stale chapters file from a previous run
    if os.path.exists(str(path_chapters_file)):
        os.remove(str(path_chapters_file))
    with path_chapters_file.open(mode='w+', encoding='utf-8') as out:
        for idx, chapter in enumerate(chapters, start=1):
            out.write("CHAPTER{0}={1}\n".format(idx, chapter['timecode']))
            out.write("CHAPTER{0}NAME=\"{1}\"\n".format(idx, chapter['name']))
#
# Executes FFMPEG for all video files to be joined and reencodes
def reencodeAndCombineVideoFiles(ffmpeg_path, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio ):
    """Builds and runs a single ffmpeg command that scales, optionally cuts and
    subtitles, then concatenates all video_files into path_out_file.

    cuts: optional dict keyed by input file name with 'ss' (start timecode)
          and 't' (duration in seconds) entries applied as per-input -ss/-t.
    args_videomaxsize: "w:h" string used for the scale filter.
    args_burnsubs: when true, burns each input's embedded subtitles into the video.
    args_noaudio: when true, no audio streams are mapped into the output.
    Blocks until ffmpeg finishes; returns _runSubProcess's exit code.
    """
    # Construct the args to ffmpeg
    # See https://stackoverflow.com/a/26366762/779521
    prog_args = [ffmpeg_path]
    # How many video files
    video_count = len(video_files)
    # Is sound enabled
    audio_is_enabled = True if not args_noaudio is True else False
    # The filter complex configuration
    filter_complex_concat = []
    filter_complex_scale = []
    curr_video = 0
    # -filter_complex
    #   "[0:v]scale=1024:576:force_original_aspect_ratio=1[v0];
    #    [1:v]scale=1024:576:force_original_aspect_ratio=1[v1];
    #    [v0][0:a][v1][1:a]concat=n=2:v=1:a=1[v][a]"
    # For every input construct their filter complex to be added later
    # Force scaling of videos first
    for video_file in video_files:
        video_file_path = Path(video_file)
        # Attempt to find a cut point for this video if there are cut points defined
        # -ss 00:33 -t 30  (these options apply to the NEXT -i input)
        if not cuts is None and video_file_path.name in cuts:
            video_cut_info = cuts[video_file_path.name]
            if 'ss' in video_cut_info:
                prog_args.append("-ss")
                prog_args.append(str(video_cut_info['ss']))
            if 't' in video_cut_info:
                prog_args.append("-t")
                prog_args.append(str(video_cut_info['t']))
        prog_args.append("-i")
        prog_args.append(str(video_file_path)) # Don't surround with quotes ""
        # Add the scaling instructions for the input video and give it a new output
        # Force downscaling of aspect ratio and size to the minimal available
        # the value of =1 is the same as 'decrease' => The output video dimensions will automatically be decreased if needed.
        if args_burnsubs:
            # More info on the subtitles filter http://ffmpeg.org/ffmpeg-filters.html#subtitles
            # Note the path must have forward slashes AND we must escape the colon!!
            filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[vv{0}];[vv{0}]subtitles='{2}':force_style='FontName=Arial,Fontsize=24'[v{0}];".format(curr_video, args_videomaxsize, str(video_file_path).replace('\\', '/').replace(':', '\:')))
        else:
            filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[v{0}];".format(curr_video, args_videomaxsize))
        # Add concat filter with the video output from the scaling and audio index from the original video
        filter_complex_concat.append("[v{0}]".format(curr_video))
        if audio_is_enabled:
            filter_complex_concat.append("[{0}:a]".format(curr_video))
        curr_video += 1
    # Add the final part of the concat filter
    audio_track = ':a=1' if audio_is_enabled else ''
    filter_complex_concat.append("concat=n={0}:v=1{1}".format(video_count, audio_track))
    filter_complex_concat.append("[v]")
    if audio_is_enabled:
        filter_complex_concat.append("[a]")
    # Join and add the filter complex to the args
    # First the scaling then the concats
    prog_args.append("-filter_complex")
    prog_args.append("".join(filter_complex_scale) + "".join(filter_complex_concat)) # Don't surround with quotes ""
    # The mapping for the video and audio
    prog_args.append("-map")
    prog_args.append("[v]")
    if audio_is_enabled:
        prog_args.append("-map")
        prog_args.append("[a]")
    # Don't show copyright header
    prog_args.append("-hide_banner")
    # Don't show excess logging (only things that cause the exe to terminate)
    prog_args.append("-loglevel")
    prog_args.append("verbose")
    # Force showing progress indicator text
    prog_args.append("-stats")
    # Overwrite any prompts with YES
    prog_args.append("-y")
    # Finally the output file
    prog_args.append(str(path_out_file)) # Don't surround with quotes ""
    # Disable colour output from FFMPEG before we start
    os.environ['AV_LOG_FORCE_NOCOLOR'] = "1"
    # Run ffmpeg and wait for the output file to be created before returning
    return _runSubProcess(prog_args, path_to_wait_on=path_out_file)
#
# Calls mp4box to create the concatinated video file and includes the chapter file as well
def addChaptersToVideoFile(mp4box_path, path_video_file, path_chapters_file):
    """Runs mp4box to embed the chapter marks from path_chapters_file into
    path_video_file in place. Raises ValueError when the video is missing."""
    # Nothing to do if the combined video never got created
    if not path_video_file.exists():
        raise ValueError("Video file {0} could not be found. No chapters were added.".format(path_video_file))
    prog_args = [
        mp4box_path,
        # Point the temporary folder somewhere the current user can write to
        "-tmp", "{0}".format(os.environ['TMP']),
        # Attach the chapter file as a chapter track
        "-add", str(path_chapters_file) + ":chap",
        # The video file itself comes last; chapters are added in-place
        str(path_video_file),
    ]
    return _runSubProcess(prog_args)
#
# Splits an existing video file into requested chunks
def splitVideoFile(mp4box_path, path_video_file, max_out_size_kb):
# Can't split something that doesn't exist
if not | uns a subprocess using the arguments passed and monitors its progress while printing out the latest
# log line to the console on a single line
def _runSubProcess(prog_args, path_to_wait_on=None):
print( " ".join(prog_args))
# Force a UTF8 environment for the subprocess so that files with non-ascii characters are read correctly
# for this to work we must not use the universal line endings parameter
my_env = os.environ
my_env['PYTHONIOENCODING'] = 'utf-8'
retcode = None
# Run the app and collect the output
ret = subprocess.Popen(prog_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=my_env)
try:
longest_line = 0
trace_lines = []
while True:
try:
#line = ret.stdout.readline().decode('utf-8')
line = ret.stdout.readline()
if not line:
break
trace_lines.append(line)
line = line.strip()[:80] # Limit the max length of the line, otherwise it will screw up our console window
longest_line = max( longest_line, len(line))
sys.stdout.write('\r '+line.ljust(longest_line))
sys.stdout.flush()
except UnicodeDecodeError:
continue # Ignore all unicode errors, don't care!
# Ensure that the return code was ok before continuing
retcode = ret.poll()
while retcode is None:
retcode = ret.poll()
except KeyboardInterrupt:
ret.terminate()
raise
# Move the input to the beginning of the line again
# subsequent output text will look nicer :)
sys.stdout.write('\r '+"Done!".ljust(longest_line))
print()
if( retcode != 0 ):
print( "Error while executing {0}".format(prog_args[0]))
print(" Full arguments:")
print( " ".join(prog_args))
print( "Full error")
print("\n".join(trace_lines))
raise ValueError("Error {1} while executing {0}".format(prog_args[0], retcode))
# If we should wait on the creation of a particular file then do that now
total_wait_sec = 0
if not path_to_wait_on is None and not path_to_wait_on.is_dir():
while not path_to_wait_on.exists() or total_wait_sec < 5:
time.sleep(1)
total_wait_sec += 1
if not path_to_wait_on.exists() or not path_to_wait_on.is_file() :
raise ValueError("Expecting file {0} to be created but it wasn't, something went wrong!".format(str(path_to_wait_on)))
return retcode
def parseArguments():
    """Declares the command line interface and returns the parsed argument namespace."""
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output", type=str,
                    help="The path and filename of the concatenated output file. If multiple files then the script will append a number to the filename.")
    ap.add_argument("-m", "--match", type=str,
                    help="A grep style match that should be used to detect files to concatinate.")
    ap.add_argument('--disk', choices=['dvd4', 'dvd8', 'br25', 'xbox'],
                    help="When defined this defines the maximum file size to generate so that they will fit the required optical disk capacity. dvd4=4.7GB, dvd8=8.5GB, br25=25GB, xbox=4GB. If specified this overrides the -s/--size argument.")
    ap.add_argument("-s", "--size", type=str,
                    help="Defines the maximum size of a single combined output file. Supports format ending such as 'MB' for megabytes, 'GB' for gigabytes. If nothing is specified then 'MB' is assumed. Overridden by the --disk argument if both are specified. Supports only numbers using dot (.) as decimal separator, e.g. '15.5GB'")
    ap.add_argument("--gpac", type=str,
                    help="Path to the GPAC install directory (not including the exe)")
    ap.add_argument("--ffmpeg", type=str,
                    help="Path to the ffmpeg install directory (not including the exe)")
    ap.add_argument("--videosize", type=str, default="1024:576",
                    help="The desired maximum w/h size for the output video, default is 1024:576 (in case of multiple sizes for videos then all videos above this size are downsized to match) Aspect ratios will be downscaled as needed.")
    ap.add_argument("--overwrite", action="store_true",
                    help="Existing files with the same name as the output will be silently overwritten.")
    ap.add_argument("--shuffle", action="store_true",
                    help="Shuffles the list of episodes in a random fashion before combining. Useful to generate a random list of episodes to fill a DVD.")
    ap.add_argument("-c", "--cuts", type=str,
                    help="A CSV text file containing cut point information for the input files")
    ap.add_argument("--burnsubs", action="store_true",
                    help="Burns any subtitles found in the video files into the video itself (necessary to preserve separate subtitle tracks)")
    ap.add_argument("-d", "--debug", action="store_true",
                    help="Prints out extra debugging information while script is running")
    ap.add_argument("--noaudio", action="store_true",
                    help="Explicitly disables audio tracks in the output video (useful for source videos that have no audio track)")
    return ap.parse_args()
# If the script file is called by itself then execute the main function
if __name__ == '__main__':
runMain() | path_video_file.exists():
raise ValueError("Video file {0} could not be found. Nothing was split.".format(path_video_file))
# Construct the args to mp4box
prog_args = [mp4box_path]
# Specify the maximum split size
prog_args.append("-splits")
prog_args.append(str(max_out_size_kb))
# Overwrite the default temporary folder to somewhere we
# know that the current user has write privileges
prog_args.append("-tmp")
prog_args.append("{0}".format(os.environ['TMP']))
# Add the input file we want to split
prog_args.append(str(path_video_file))
# Specify the same file again as an out parameter to use the same directory
prog_args.append("-out")
prog_args.append(str(path_video_file))
# Run the command
return _runSubProcess(prog_args)
# R | identifier_body |
combine.py | #!/usr/bin/env python
# coding=utf-8
__version__ = "2.5.0"
# When modifying remember to issue a new tag command in git before committing, then push the new tag
# git tag -a v2.5.0 -m "v2.5.0"
# git push origin --tags
"""
Python script that generates the necessary mp4box -cat commands to concatinate multiple video files
together and generates a chapter file that marks the beginning of each concatenated file in the final result.
This script requires the GPAC package to be installed with the mp4box command line utility.
https://gpac.wp.mines-telecom.fr/downloads/
The script is written in Python 3.5
Details about Xbox video formats:
https://support.xbox.com/en-IE/xbox-360/console/audio-video-playback-faq
Make sure you install the requirements for this script:
pip install -r requirements.txt
See: https://github.com/sverrirs/mp4combine
Author: Sverrir Sigmundarson mp4combine@sverrirs.com https://www.sverrirs.com
"""
from colorama import init, deinit # For colorized output to console windows (platform and shell independent)
from constant import DISKSIZES, ABSSIZES, Colors # Constants for the script
import humanize # Display human readible values for sizes etc
import sys, os, time
from pathlib import Path # to check for file existence in the file system
import argparse # Command-line argument parser
import ntpath # Used to extract file name from path for all platforms http://stackoverflow.com/a/8384788
import glob # Used to do partial file path matching when listing file directories in search of files to concatinate http://stackoverflow.com/a/2225582/779521
import subprocess # To execute shell commands
import re # To perform substring matching on the output of mp4box and other subprocesses
from datetime import timedelta # To store the parsed duration of files and calculate the accumulated duration
from random import shuffle # To be able to shuffle the list of files if the user requests it
import csv # To use for the cutpoint files they are CSV files
#
# Provides natural string sorting (numbers inside strings are sorted in the correct order)
# http://stackoverflow.com/a/3033342/779521
def natural_key(string_):
    """See http://www.codinghorror.com/blog/archives/001018.html"""
    # Split into alternating text/digit runs so numbers compare numerically
    parts = re.split(r'(\d+)', string_)
    return [int(part) if part.isdigit() else part for part in parts]
#
# The main entry point for the script
def runMain():
    """Entry point: parses the command line, locates mp4box/ffmpeg, gathers the
    matching input files (with optional cut points and shuffling), accumulates
    chapter marks, and produces the combined chaptered video file.

    Exits with code 100 on the --burnsubs/--cuts conflict and 0 on early
    benign exits (output exists without --overwrite, no matching inputs).
    """
    try:
        init() # Initialize the colorama library
        # Compile the regular expressions
        regex_mp4box_duration = re.compile(r"Computed Duration (?P<hrs>[0-9]{2}):(?P<min>[0-9]{2}):(?P<sec>[0-9]{2}).(?P<msec>[0-9]{3})", re.MULTILINE)
        # Construct the argument parser for the commandline
        args = parseArguments()
        # The burnsubs and cuts cannot be used together, they will produce incorrect subtitles to be burned into the video
        if( args.burnsubs == True and not args.cuts is None):
            print(Colors.error("Options --burnsubs and --cuts cannot be used together as they would cause embedded subtitles to be incorrectly synced in the output video."))
            sys.exit(100)
        # Get the current working directory (place that the script is executing from)
        working_dir = sys.path[0]
        # Get the mp4box exec
        mp4exec = findMp4Box(args.gpac, working_dir)
        # Get ffmpeg exec
        ffmpegexec = findffmpeg(args.ffmpeg, working_dir)
        # Detect the maximum file size that should be generated in kilobytes, if <=0 then unlimited
        max_out_size_kb = determineMaximumOutputfileSizeInKb(args.size, args.disk)
        # Create the output file names both for the video file and the intermediate chapters file
        path_out_file = Path(args.output)
        path_chapters_file = path_out_file.with_suffix('.txt') # Just change the file-extension of the output file to TXT
        # If the output files exist then either error or overwrite
        if( path_out_file.exists() ):
            if( args.overwrite ):
                os.remove(str(path_out_file))
            else:
                print( "Output file '{0}' already exists. Use --overwrite switch to overwrite.".format(Colors.filename(path_out_file.name)))
                sys.exit(0)
        # Get all the input files
        in_files = getFileNamesFromGrepMatch(args.match, path_out_file)
        if( in_files is None ):
            print( "No mp4 video files found matching '{0}'".format(args.match))
            sys.exit(0)
        file_infos = []
        # Only process files that have file-ending .mp4 and not files that have the same name as the joined one
        for in_file in in_files:
            print("File: {0}".format(Colors.filename(in_file)))
            m4b_fileinfo = parseMp4boxMediaInfo(in_file, mp4exec, regex_mp4box_duration)
            # parseMp4boxMediaInfo returns None for files that should be skipped
            if not m4b_fileinfo is None:
                file_infos.append(m4b_fileinfo)
        # If nothing was found then don't continue, this can happen if no mp4 files are found or if only the joined file is found
        if( len(file_infos) <= 0 ):
            print( "No mp4 video files found matching '{0}'".format(args.match))
            sys.exit(0)
        print("Found {0} files".format(len(file_infos)))
        # If the user supplied a cut point information file then we parse it now
        cuts = None
        if( args.cuts ):
            cuts = parseCutPointInformation(Path(args.cuts))
            print(cuts)
            if not cuts is None:
                print("Read {0} cut point data from cut file".format(len(cuts)))
        # If the user wants the list of files shuffled then do that now in place
        if( args.shuffle ):
            shuffle(file_infos)
            print("File list shuffled")
        # Now create the list of files to create
        video_files = []
        chapters = []
        cumulative_dur = timedelta(seconds=0)
        cumulative_size = 0
        # Collect the file info data and chapter points for all files
        for file_info in file_infos:
            file_name = Path(file_info['file']).name
            video_files.append(file_info['file'])
            file_info_dur = file_info['dur']
            # Do we have a proposed cut duration, if so then we must use this info
            # to correct the chapter locations
            if not cuts is None and file_name in cuts and 't' in cuts[file_name]:
                file_info_dur = timedelta(seconds=cuts[file_name]['t'])
            # Each chapter starts where the previous file's running time ended
            chapters.append({"name": Path(file_info['file']).stem, "timecode":formatTimedelta(cumulative_dur)})
            cumulative_dur += file_info_dur # Count the cumulative duration
            cumulative_size += file_info['size']
        createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args.overwrite, cuts, args.videosize, args.burnsubs, max_out_size_kb, args.noaudio )
        print(Colors.success("Script completed successfully, bye!"))
    finally:
        deinit() #Deinitialize the colorama library
# Reads and parses cut information for the input files
def parseCutPointInformation(path_to_cuts_file):
    """Parses a CSV file of cut points into a dict keyed by file name.

    Each CSV row is: filename, start timecode [, end timecode].
    The resulting entry holds 'ss' (the start timecode string) and, when an
    end timecode column is present, 't' (the clip length in whole seconds).
    Returns None when the file contains no usable rows.
    """
    cuts = {}
    with open(str(path_to_cuts_file), encoding='utf-8') as csvfile:
        row_reader = csv.reader(csvfile)
        for row in row_reader:
            # A usable row needs at least a file name and a start time
            if( len(row) < 2 ):
                continue
            filename = row[0].strip()
            # Create a new entry for the file
            cuts[filename] = {}
            starttime = row[1].strip()
            cuts[filename]['ss'] = starttime
            # Convert the h:m:s (or m:s / s) timecode into absolute seconds
            startpoint = sum(x * int(t) for x, t in zip([1, 60, 3600], reversed(starttime.split(":"))))
            if len(row) > 2:
                timecode = row[2].strip()
                fullduration = sum(x * int(t) for x, t in zip([1, 60, 3600], reversed(timecode.split(":"))))
                # The clip length is the end timecode minus the start timecode
                cuts[filename]['t'] = fullduration - startpoint
    if len(cuts) <= 0:
        return None
    return cuts
#
# Creates a combined video file for a segment
def createCombinedVideoFile(video_files, chapters, cumulative_dur, cumulative_size, mp4exec, ffmpegexec, path_out_file, path_chapters_file, args_overwrite, cuts, args_videomaxsize, args_burnsubs, max_out_size_kb=0, args_noaudio=False ):
    """Re-encodes and joins video_files into one mp4 at path_out_file with chapter marks.

    video_files: list of input file paths (order defines the output order).
    chapters: list of {'name','timecode'} dicts; an "End" chapter is appended here.
    cumulative_dur / cumulative_size: totals of the inputs, used for the end chapter and logging.
    mp4exec / ffmpegexec: full paths to the mp4box and ffmpeg executables.
    path_chapters_file: temp chapter file; written here, consumed by mp4box, then deleted.
    max_out_size_kb: when > 0 the final file is split into chunks of at most this size (KB).
    NOTE(review): args_overwrite is accepted but never used in this function.
    """
    print( "Output: {0}".format(Colors.fileout(str(path_out_file))))
    # Add the final chapter as the end for this segment
    chapters.append({"name": "End", "timecode":formatTimedelta(cumulative_dur)})
    # Chapters should be +1 more than files as we have an extra chapter ending at the very end of the file
    print("{0} chapters, {1} running time, {2} total size".format( len(chapters), formatTimedelta(cumulative_dur), humanize.naturalsize(cumulative_size, gnu=True)))
    # Write the chapters file to out
    saveChaptersFile(chapters, path_chapters_file)
    # Re-encode and combine the video files first
    print(Colors.toolpath("Combining and re-encoding video files (ffmpeg), this will take a while..."))
    reencodeAndCombineVideoFiles(ffmpegexec, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio)
    # Now create the combined file and include the chapter marks
    print(Colors.toolpath("Adding chapters to combined video file (mp4box)"))
    addChaptersToVideoFile(mp4exec, path_out_file, path_chapters_file)
    # Delete the chapters file
    os.remove(str(path_chapters_file))
    # Read the created file to learn its final filesize
    size_out_file_kb = os.path.getsize(str(path_out_file)) / 1024
    print( Colors.toolpath("Final size of video file is: {0}".format(humanize.naturalsize(size_out_file_kb * 1024))))
    # Now split the file if requested
    if max_out_size_kb > 0 and size_out_file_kb > max_out_size_kb :
        # NOTE(review): the size is measured with a 1024 divisor above but reported with a 1000 multiplier here — confirm intent
        print( Colors.toolpath("Size limit exceeded, splitting video into files of max size: {0}".format(humanize.naturalsize(max_out_size_kb * 1000))))
        splitVideoFile(mp4exec, path_out_file, max_out_size_kb)
#
# Attempts to detect the requested size of the output file based on the input parameters
# the absolute_size is overridden by disk_capacity if both are specified
# Returns KB (kilobytes)
def determineMaximumOutputfileSizeInKb(absolute_size, disk_capacity):
    """Determines the requested maximum output file size in kilobytes (metric, 1000-byte KB).

    disk_capacity: a DISKSIZES key (e.g. 'dvd4'); overrides absolute_size when given.
    absolute_size: a size string such as '15.5GB' or '700 MB' (unit defaults to MB).
    Returns -1 when neither is specified (unbounded output).
    Raises ValueError when absolute_size cannot be parsed.
    """
    if( disk_capacity and disk_capacity in DISKSIZES ):
        return DISKSIZES[disk_capacity]
    elif( absolute_size):
        # Normalize: strip all spaces and thousands-separator commas, uppercase the unit
        abs_size = "".join("".join(absolute_size.split(' ')).split(',')).upper()
        regex_size_parse = re.compile(r"^(?P<size>[0-9]*(?:\.[0-9]*)?)\s*(?P<unit>GB|MB|KB|B|TB)?$", re.MULTILINE)
        # BUG FIX: search the normalized abs_size, not the raw input — previously
        # lowercase units or commas made the match None and crashed with AttributeError
        match = regex_size_parse.search( abs_size )
        if match is None:
            raise ValueError("Could not parse size value '{0}'".format(absolute_size))
        size = float(match.group("size"))
        unit = match.group("unit")
        if( not unit or not unit in ABSSIZES ):
            unit = "MB" # Default is megabytes if nothing is specified
        unit_multiplier = ABSSIZES[unit]
        total_size = size * unit_multiplier
        return total_size / 1000 # Return kilobytes but in the metric system sense not the "1024 byte sense"
    else:
        # If nothing is specified then the default return is to use unbounded
        return -1
#
# Executes the mp4box app with the -info switch and
# extracts the track length and file size from the output
def parseMp4boxMediaInfo(file_name, mp4box_path, regex_mp4box_duration):
    """Runs `mp4box -info` on file_name and extracts its duration and size on disk.

    regex_mp4box_duration: compiled regex with hrs/min/sec/msec groups matching
    mp4box's "Computed Duration 00:23:06.040" output line.
    Returns {'file', 'size', 'dur'} or None when the file should be skipped
    (non-zero exit code or no duration line found in the output).
    """
    # Get the size of the file in bytes
    statinfo = os.stat(file_name)
    file_size = statinfo.st_size  # Size in bytes of a plain file
    # Run the app and collect the output
    proc_cmd = [mp4box_path, "-info", "-std", file_name]
    ret = subprocess.run(proc_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    # Ensure that the return code was ok before continuing
    if ret.returncode != 0:
        print("Command {0} returned non-zero exit status {1}.".format(proc_cmd, ret.returncode))
        print("File {0} will be skipped".format(file_name))
        return None
    # Computed Duration 00:23:06.040 - Indicated Duration 00:23:06.040
    match = regex_mp4box_duration.search( ret.stdout )
    # BUG FIX: previously an output without a duration line crashed with
    # AttributeError on match.group(); skip such files like other failures
    if match is None:
        print("Could not find a duration in mp4box output for {0}".format(file_name))
        print("File {0} will be skipped".format(file_name))
        return None
    # Avoid shadowing the builtin min() (the original used `min` as a variable)
    hours = int(match.group("hrs"))
    minutes = int(match.group("min"))
    seconds = int(match.group("sec"))
    millis = int(match.group("msec"))
    duration = timedelta(days=0, hours=hours, minutes=minutes, seconds=seconds, milliseconds=millis )
    return {'file':file_name, 'size':file_size, 'dur':duration }
#
# Locates the mp4box executable and returns a full path to it
def findMp4Box(path_to_gpac_install=None, working_dir=None):
    """Locates the mp4box executable and returns a full path to it.

    Search order: the explicit --gpac path, the bundled ..\\bin\\GPAC folder
    relative to working_dir, then the default 64-bit and 32-bit Windows
    install locations. Raises ValueError when mp4box.exe cannot be found.
    """
    if( not path_to_gpac_install is None and os.path.isfile(os.path.join(path_to_gpac_install, "mp4box.exe")) ):
        return os.path.join(path_to_gpac_install, "mp4box.exe")
    # Attempts to search for it under the bin folder
    bin_dist = os.path.join(working_dir, "..\\bin\\GPAC\\mp4box.exe")
    if( os.path.isfile(bin_dist)):
        return str(Path(bin_dist).resolve())
    # Attempts to search for it under C:\Program Files\GPAC
    if( os.path.isfile("C:\\Program Files\\GPAC\\mp4box.exe")):
        return "C:\\Program Files\\GPAC\\mp4box.exe"
    # For 32 bit installs
    # BUG FIX: previously this branch re-tested the 64-bit path and could
    # return the x86 path without verifying that the file exists there
    if( os.path.isfile("C:\\Program Files (x86)\\GPAC\\mp4box.exe")):
        return "C:\\Program Files (x86)\\GPAC\\mp4box.exe"
    # Throw an error
    raise ValueError('Could not locate GPAC install, please use the --gpac switch to specify the path to the mp4box.exe file on your system.')
#
# Locates the ffmpeg executable and returns a full path to it
def findffmpeg(path_to_ffmpeg_install=None, working_dir=None):
    """Locates the ffmpeg executable and returns a full path to it.

    Checks the explicit --ffmpeg path first, then the bundled ..\\bin\\ff
    folder relative to working_dir. Raises ValueError when not found.
    """
    if( not path_to_ffmpeg_install is None and os.path.isfile(os.path.join(path_to_ffmpeg_install, "ffmpeg.exe")) ):
        return os.path.join(path_to_ffmpeg_install, "ffmpeg.exe")
    # Attempts to search for it under the bin folder
    bin_dist = os.path.join(working_dir, "..\\bin\\ff\\ffmpeg.exe")
    if( os.path.isfile(bin_dist)):
        return str(Path(bin_dist).resolve())
    # Throw an error
    raise ValueError('Could not locate FFMPEG install, please use the --ffmpeg switch to specify the path to the ffmpeg.exe file on your system.')
#
# Returns an array of files matching the grep string passed in
def | (grep_match, path_out_file):
in_files = glob.glob(grep_match.replace("\\", "/"))
return [f for f in sorted(in_files, key=natural_key) if '.mp4' in Path(f).suffix and not Path(f) == path_out_file]
#
# Cleans any invalid file name and file path characters from the given filename
def sanitizeFileName(local_filename, sep=" "):
    """Replaces characters that are invalid in NTFS file names with `sep`."""
    # These are symbols that are not "kosher" on a NTFS filesystem.
    # BUG FIX: the backslash was missing from the invalid character set even
    # though it is not a legal NTFS file-name character.
    local_filename = re.sub(r"[\\\"/:<>|?*\n\r\t\x00]", sep, local_filename)
    return local_filename
#
# Creates a nice format of a datetime.timedelta structure, including milliseconds
def formatTimedelta(time_delta):
    """Formats a datetime.timedelta as HH:MM:SS.mmm (hours may exceed 24).

    BUG FIX: uses total_seconds() instead of .seconds so durations of a day
    or more (the cumulative running time of many episodes) are not truncated
    back to the 0-24h range.
    """
    total_s = int(time_delta.total_seconds())
    timecode_ms = int(time_delta.microseconds / 1000)
    return '{:02}:{:02}:{:02}.{:03}'.format(total_s // 3600, total_s % 3600 // 60, total_s % 60, timecode_ms)
#
# Saves a list of chapter information to a chapter file in the common chapter syntax
def saveChaptersFile( chapters, path_chapters_file):
# Make sure that the directory exists and then write the full list of pids to it
if not path_chapters_file.parent.exists():
path_chapters_file.parent.mkdir(parents=True, exist_ok=True)
# If the chapters file is already there, delete it
if os.path.exists(str(path_chapters_file)):
os.remove(str(path_chapters_file))
# Writing the common CHAPTER syntax
# Common syntax : CHAPTERX=h:m:s[:ms or .ms] on one line and CHAPTERXNAME=name on the other – the order is not important but chapter lines MUST be declared sequencially (same X value expected for 2 consecutive lines).
chapter_idx = 1
with path_chapters_file.open(mode='w+', encoding='utf-8') as theFile:
for chapter in chapters:
theFile.write("CHAPTER{0}={1}\n".format(chapter_idx, chapter['timecode']))
theFile.write("CHAPTER{0}NAME=\"{1}\"\n".format(chapter_idx, chapter['name']))
chapter_idx += 1
#
# Executes FFMPEG for all video files to be joined and reencodes
def reencodeAndCombineVideoFiles(ffmpeg_path, video_files, path_out_file, args_videomaxsize, cuts, args_burnsubs, args_noaudio ):
# Construct the args to ffmpeg
# See https://stackoverflow.com/a/26366762/779521
prog_args = [ffmpeg_path]
# How many video files
video_count = len(video_files)
# Is sound enabled
audio_is_enabled = True if not args_noaudio is True else False
# The filter complex configuration
filter_complex_concat = []
filter_complex_scale = []
curr_video = 0
# -filter_complex
# "[0:v]scale=1024:576:force_original_aspect_ratio=1[v0];
# [1:v]scale=1024:576:force_original_aspect_ratio=1[v1];
# [v0][0:a][v1][1:a]concat=n=2:v=1:a=1[v][a]"
# For every input construct their filter complex to be added later
# Force scaling of videos first
for video_file in video_files:
video_file_path = Path(video_file)
# Attempt to find a cut point for this video if there are cut points defined
#-ss 00:33 -t 30
if not cuts is None and video_file_path.name in cuts:
video_cut_info = cuts[video_file_path.name]
if 'ss' in video_cut_info:
prog_args.append("-ss")
prog_args.append(str(video_cut_info['ss']))
if 't' in video_cut_info:
prog_args.append("-t")
prog_args.append(str(video_cut_info['t']))
prog_args.append("-i")
prog_args.append(str(video_file_path)) # Don't surrount with quotes ""
# Add the scaling instructions for the input video and give it a new output
# Force downscaling of aspect ratio and size to the minimal available
# the value of =1 is the same as ‘decrease’ => The output video dimensions will automatically be decreased if needed.
if args_burnsubs:
# More info on the subtitles filter http://ffmpeg.org/ffmpeg-filters.html#subtitles
filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[vv{0}];[vv{0}]subtitles='{2}':force_style='FontName=Arial,Fontsize=24'[v{0}];".format(curr_video, args_videomaxsize, str(video_file_path).replace('\\', '/').replace(':', '\:'))) # Note the path must have forward slashes AND we must escape the colon!!
else:
filter_complex_scale.append("[{0}:v]scale={1}:force_original_aspect_ratio=1[v{0}];".format(curr_video, args_videomaxsize))
# Add concat filter with the video output from the scaling and audio index from the original video
filter_complex_concat.append("[v{0}]".format(curr_video))
if audio_is_enabled:
filter_complex_concat.append("[{0}:a]".format(curr_video))
curr_video += 1
# Add the final part of the concat filter
audio_track = ':a=1' if audio_is_enabled else ''
filter_complex_concat.append("concat=n={0}:v=1{1}".format(video_count, audio_track))
filter_complex_concat.append("[v]")
if audio_is_enabled:
filter_complex_concat.append("[a]")
# Join and add the filter complex to the args
# First the scaling then the concats
prog_args.append("-filter_complex")
prog_args.append("".join(filter_complex_scale) + "".join(filter_complex_concat)) # Don't surrount with quotes ""
# The mapping for the video and audio
prog_args.append("-map")
prog_args.append("[v]")
if audio_is_enabled:
prog_args.append("-map")
prog_args.append("[a]")
# Don't show copyright header
prog_args.append("-hide_banner")
# Don't show excess logging (only things that cause the exe to terminate)
prog_args.append("-loglevel")
prog_args.append("verbose")
# Force showing progress indicator text
prog_args.append("-stats")
# Overwrite any prompts with YES
prog_args.append("-y")
# Finally the output file
prog_args.append(str(path_out_file)) # Don't surrount with quotes ""
# Disable colour output from FFMPEG before we start
os.environ['AV_LOG_FORCE_NOCOLOR'] = "1"
# Run ffmpeg and wait for the output file to be created before returning
return _runSubProcess(prog_args, path_to_wait_on=path_out_file)
#
# Calls mp4box to create the concatinated video file and includes the chapter file as well
def addChaptersToVideoFile(mp4box_path, path_video_file, path_chapters_file):
# Check to see if the video file exists before doing anything
if not path_video_file.exists():
raise ValueError("Video file {0} could not be found. No chapters were added.".format(path_video_file))
# Construct the args to mp4box
prog_args = [mp4box_path]
# Overwrite the default temporary folder to somewhere we
# know that the current user has write privileges
prog_args.append("-tmp")
prog_args.append("{0}".format(os.environ['TMP']))
# Add the chapter file
prog_args.append("-add")
prog_args.append(str(path_chapters_file)+":chap")
# Add the output file at the very end, we will add the
# chapter marks in-place
prog_args.append(str(path_video_file))
# Run the command
return _runSubProcess(prog_args)
#
# Splits an existing video file into requested chunks
def splitVideoFile(mp4box_path, path_video_file, max_out_size_kb):
# Can't split something that doesn't exist
if not path_video_file.exists():
raise ValueError("Video file {0} could not be found. Nothing was split.".format(path_video_file))
# Construct the args to mp4box
prog_args = [mp4box_path]
# Specify the maximum split size
prog_args.append("-splits")
prog_args.append(str(max_out_size_kb))
# Overwrite the default temporary folder to somewhere we
# know that the current user has write privileges
prog_args.append("-tmp")
prog_args.append("{0}".format(os.environ['TMP']))
# Add the input file we want to split
prog_args.append(str(path_video_file))
# Specify the same file again as an out parameter to use the same directory
prog_args.append("-out")
prog_args.append(str(path_video_file))
# Run the command
return _runSubProcess(prog_args)
# Runs a subprocess using the arguments passed and monitors its progress while printing out the latest
# log line to the console on a single line
def _runSubProcess(prog_args, path_to_wait_on=None):
print( " ".join(prog_args))
# Force a UTF8 environment for the subprocess so that files with non-ascii characters are read correctly
# for this to work we must not use the universal line endings parameter
my_env = os.environ
my_env['PYTHONIOENCODING'] = 'utf-8'
retcode = None
# Run the app and collect the output
ret = subprocess.Popen(prog_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=my_env)
try:
longest_line = 0
trace_lines = []
while True:
try:
#line = ret.stdout.readline().decode('utf-8')
line = ret.stdout.readline()
if not line:
break
trace_lines.append(line)
line = line.strip()[:80] # Limit the max length of the line, otherwise it will screw up our console window
longest_line = max( longest_line, len(line))
sys.stdout.write('\r '+line.ljust(longest_line))
sys.stdout.flush()
except UnicodeDecodeError:
continue # Ignore all unicode errors, don't care!
# Ensure that the return code was ok before continuing
retcode = ret.poll()
while retcode is None:
retcode = ret.poll()
except KeyboardInterrupt:
ret.terminate()
raise
# Move the input to the beginning of the line again
# subsequent output text will look nicer :)
sys.stdout.write('\r '+"Done!".ljust(longest_line))
print()
if( retcode != 0 ):
print( "Error while executing {0}".format(prog_args[0]))
print(" Full arguments:")
print( " ".join(prog_args))
print( "Full error")
print("\n".join(trace_lines))
raise ValueError("Error {1} while executing {0}".format(prog_args[0], retcode))
# If we should wait on the creation of a particular file then do that now
total_wait_sec = 0
if not path_to_wait_on is None and not path_to_wait_on.is_dir():
while not path_to_wait_on.exists() or total_wait_sec < 5:
time.sleep(1)
total_wait_sec += 1
if not path_to_wait_on.exists() or not path_to_wait_on.is_file() :
raise ValueError("Expecting file {0} to be created but it wasn't, something went wrong!".format(str(path_to_wait_on)))
return retcode
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", help="The path and filename of the concatenated output file. If multiple files then the script will append a number to the filename.",
type=str)
parser.add_argument("-m","--match", help="A grep style match that should be used to detect files to concatinate.",
type=str)
parser.add_argument('--disk', help="When defined this defines the maximum file size to generate so that they will fit the required optical disk capacity. dvd4=4.7GB, dvd8=8.5GB, br25=25GB, xbox=4GB. If specified this overrides the -s/--size argument.",
choices=['dvd4', 'dvd8', 'br25', 'xbox'])
parser.add_argument("-s", "--size", help="Defines the maximum size of a single combined output file. Supports format ending such as 'MB' for megabytes, 'GB' for gigabytes. If nothing is specified then 'MB' is assumed. Overridden by the --disk argument if both are specified. Supports only numbers using dot (.) as decimal separator, e.g. '15.5GB'", type=str)
parser.add_argument("--gpac", help="Path to the GPAC install directory (not including the exe)",
type=str)
parser.add_argument("--ffmpeg", help="Path to the ffmpeg install directory (not including the exe)",
type=str)
parser.add_argument("--videosize", help="The desired maximum w/h size for the output video, default is 1024:576 (in case of multiple sizes for videos then all videos above this size are downsized to match) Aspect ratios will be downscaled as needed.",
default="1024:576",
type=str)
parser.add_argument("--overwrite", help="Existing files with the same name as the output will be silently overwritten.",
action="store_true")
parser.add_argument("--shuffle", help="Shuffles the list of episodes in a random fashion before combining. Useful to generate a random list of episodes to fill a DVD.",
action="store_true")
parser.add_argument("-c","--cuts", help="A CSV text file containing cut point information for the input files",
type=str)
parser.add_argument("--burnsubs", help="Burns any subtitles found in the video files into the video itself (necessary to preserve separate subtitle tracks)",
action="store_true")
parser.add_argument("-d", "--debug", help="Prints out extra debugging information while script is running",
action="store_true")
parser.add_argument("--noaudio", help="Explicitly disables audio tracks in the output video (useful for source videos that have no audio track)",
action="store_true")
return parser.parse_args()
# If the script file is called by itself then execute the main function
if __name__ == '__main__':
runMain() | getFileNamesFromGrepMatch | identifier_name |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn send_command(&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break;
}
// Add the grid cell to the cells to render.
text.push_str(character);
}
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 {
self.redraw_line(row + 1);
}
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) |
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
}
| {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
} | identifier_body |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn | (&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break;
}
// Add the grid cell to the cells to render.
text.push_str(character);
}
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 {
self.redraw_line(row + 1);
}
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
}
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
}
| send_command | identifier_name |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn send_command(&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break;
}
// Add the grid cell to the cells to render.
text.push_str(character);
}
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 |
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
}
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
}
| {
self.redraw_line(row + 1);
} | conditional_block |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn send_command(&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break; | }
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 {
self.redraw_line(row + 1);
}
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
}
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
} | }
// Add the grid cell to the cells to render.
text.push_str(character); | random_line_split |
Projeto.py |
# ******************************************************
# *********** Projeto 2 *************
# *********** Fundamentos da Programacao *************
# *********** *************
# *********** Carolina Carreira *************
# *********** 87641 *************
# ******************************************************
# *******************************************************
# Tipo POSICAO
# *******************************************************
# CONSTRUTOR
# faz_pos: natural x natural --> posicao
# faz_pos (l,c) Cria tipo posicao
def faz_pos(l,c):
if not isinstance (l, int) or l != abs (l):
raise ('ERRO: ValueError faz_pos')
if not isinstance (c, int) or c != abs (c):
raise ValueError ('faz_pos: argumentos errados')
return (l,c,)
# SELETOR
# linha_pos|colunas_pos: posicao --> natural
# Seleciona a linha ou coluna da posicao
def linha_pos (p):
return p[0]
def coluna_pos (p):
return p[1]
# RECONHECEDORES
# e_pos: universal --> Boolean
# e_pos (arg) Reconhece o ipo posicao, este tem de ser tuplo constituido por dois elementos positivos, e inteiros
def e_pos (arg):
return isinstance(arg, tuple) and len (arg) == 2 or isinstance (linha_pos (arg), int) and not linha_pos (arg) != abs(linha_pos (arg)) and isinstance (coluna_pos (arg), int) and not coluna_pos (arg) != abs(coluna_pos (arg))
# TESTES
# pos_iguais: posicao x posicao --> Boolean
# pos_iguais (p1, p2) recebe dois argumentos do tipo posicao, p1 e p2, e devolve
# True caso os argumentos correspondam a mesma posicao da chave
def pos_iguais (p1, p2):
return linha_pos (p1) == linha_pos (p2) and coluna_pos (p1) == coluna_pos (p2)
# *******************************************************
# Tipo CHAVE
# *******************************************************
# CONSTRUTORES
# gera_chave_linhas: L x str --> chave
# gera_chave_linhas(l, mgc) recebe dois argumentos, l e mgc, correspondentes
# a um tuplo de 25 letras e a cadeira de caracteres mgc e devolve a chave gerada
# usando a disposicao por linhas
def gera_chave_linhas (letras, mgc):
# funcao auxiliar que verifica a validade dos argumentos L e mgc
if not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_linhas: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada letras dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# Concatena r com o que resta em letras (todos os caracteres nao presentes em mgc)
r = r + letras
# Corta em listas de listas
return [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
# AUXILIAR (funcao gera_chave_linhas e
# funcao gera_chave_espiral
# verificacao_letras_mgc: L + mgc --> boolean
# verificacao_letras_mgc (L, mgc) verifica a validade dos argumentos letras, L, e mgc
# Para ser True o argumento L tem de ser um tuplo com 25 letras maiusculas como
# elementos unicos e o argumento mgc tem de ser uma string de letras maiusculas
def verificacao_letras_mgc (L, mgc):
# Testa de se L e tuplo, tem tamanho 25, e se mgc e string de caracteres maiusculos
if not isinstance (L, tuple) or len(L) != 25 or not isinstance (mgc, str) or mgc != mgc.upper ():
return False
for i in range (len (L)):
for j in range (i+1, len(L)):
# Testa se elementos sao todos diferentes e maiusculos
if L[i] == L[j] or L[i].upper == L[i] :
return False
return True
# gera_chave_espiral: L x str x {'r','c'} x pos --> chave
# gera_chave_espiral (l; mgc; s; pos) recebe 4 argumentos, l corresponde a
# um tuplo de 25 letras e mgc a mensagem de geracao de chave, e devolve a
# chave gerada usando a disposicao em espiral no sentido indicado pelo parametro
# s ('r' sentido dos ponteiros do rel0gio e 'c' sentido contrario),
# comecando na posicao indicada pelo parametro pos.
def gera_chave_espiral (letras, mgc, s, p):
# Para os argumentos serem validos p tem de ser do tipo posicao, s tem de ser str
# de tamanho 1, e verificacao_letras_mgc tem de ser verdadeiro
if not e_pos (p) or not isinstance (s, str) or len(s) != 1 or not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_espiral: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada caracter dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# soma o que restou nas letras (os caracteres nao presentes em mgc)
r += letras
# Ordena em espiral segundo a posicao
r = ordenar (r, p)
if s == 'c':
# se for contra os ponteiros a matriz tem de ser transposta
r = transpor (r, p)
# corta
r = [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
return r
# AUXILIAR (gera_chave_linhas)
# ordenar: lista + posicao --> lista
# ordenar (l,p) recebe uma lista, e uma posicao inicial a partir da qual ordena a lista em espiral
# no sentido dos ponteiros do relogio
def ordenar (l, p):
# ordem e o dicionario com as posicoes em espiral
dic_posicoes = {(0,0) : dic00, (0,4) : dic04, (4,0): dic40, (4,4): dic44}
ordem = dic_posicoes [p]
resp = []
for i in range (0, 25):
# Coloca em resp a respetiva letra de l agora de forma ondenada
resp += [ l [ordem[i]] ]
return resp
# Dicionarios de posicoes (Def ordenar)
dic00 = {0:0, 1:1, 2:2, 3:3, 4:4, 5:15, 6:16, 7:17, 8:18, 9:5, 10:14, 11:23, 12:24, 13:19, 14:6, 15:13, 16:22, 17:21, 18:20, 19:7, 20:12, 21:11, 22:10, 23:9, 24:8}
dic04 = {0:12, 1:13, 2:14, 3:15, 4:0, 5:11, 6:22, 7:23, 8:16, 9:1, 10:10, 11:21, 12:24, 13:17, 14:2, 15:9 , 16:20, 17:19, 18:18, 19:3, 20:8, 21:7, 22:6, 23:5, 24:4}
dic44 = {0:8, 1:9 , 2:10, 3:11, 4:12, 5:7, 6:20, 7:21, 8:22, 9:13, 10:6, 11:19, 12:24, 13:23, 14:14, 15:5, 16:18, 17:17, 18:16, 19:15, 20:4, 21:3, 22:2, 23:1, 24:0}
dic40 = {0:4, 1:5, 2:6, 3:7, 4:8, 5:3 , 6:18, 7:19, 8:20, 9:9 , 10:2 , 11:17, 12:24, 13:21, 14:10, 15:1 , 16:16, 17:23, 18:22, 19:11, 20:0 , 21:15, 22:14, 23:13,24:12}
# Funcao auxiliar (gera_chave_linhas)
# transpor: lista + posicao --> lista
# transpor (l,p) recebe uma lista a transpor, e uma posicao inicial para saber como transpor.
# Se for 0,0 or 4,4 a transposicao e uma reflexao em relacao a diagonal principal
# Se for 0,4 ou 4,0 esta e uma reflexao em relacao a diagonal secundaria
def transpor (l, p):
d = []
# Matriz de replexao em relacao a diagonal principal
dic = transposicao_impar
# Se for 4,4 ou 0,0 reflexao em relacao a diagonal pricipal
if linha_pos (p) == coluna_pos (p):
dic = transposicao_par
# Caso contrario e em relacao a diagonal secundaria
else:
dic = transposicao_impar
for i in range (25):
# Ordena os elementos de l segundo o dicionario escolhido
d += [ l [dic[i]] ]
return d
# Dicionario de transposicao (def transpor)
transposicao_par = {0:0, 1:5, 2:10, 3:15, 4:20, 5:1, 6:6, 7:11, 8:16, 9:21, 10:2, 11:7, 12:12, 13:17, 14:22, 15:3, 16:8, 17:13, 18:18, 19:23, 20:4, 21:9, 22:14, 23:19, 24:24}
transposicao_impar = {0:24, 1:19, 2:14, 3:9, 4:4, 5:23, 6:18, 7:13, 8:8, 9:3, 10:22, 11:17, 12:12, 13:7, 14:2, 15:21, 16:16, 17:11, 18:6, 19:1, 20:20, 21:15, 22:10, 23:5, 24:0}
# SELETOR
# ref_chave: chave x posicao --> letra
# ref_chave(c; p) recebe como argumentos a chave c e a posicao p e devolve a
# letra que esta em c na posicao p
def ref_chave (c, pos):
return c[linha_pos (pos)] [coluna_pos(pos)]
# MODIFICADOR
# muda_chave: chave x posicao x letra --> chave
# muda_chave(c; p; l) recebe como argumentos a chave c, a posicao p e a letra l
# e devolve a chave c com a letra l na posicao p
def muda_chave (c,p,l):
c[ linha_pos (p)][coluna_pos (p)] = l
return c
# RECONHECEDORES
# e_chave: arg --> Boolean
# e_chave(arg): devolve True se o argumento arg for do tipo chave e Falso caso contrario
def | (arg):
''' para ser do tipo chave tem de ser uma lista constituida por 5 listas cada uma com 5 elementos
que sejam letras maiusculas unicas'''
if len (arg) != 5 or not isinstance (arg, list):
return False
for a in arg:
# Testa tipo, tamanho, e se nao ha letras repetidas em cada lista dentro da lista
if not isinstance (arg, list) or len(a) != 5 or len (set (a)) != len (a):
return False
for b in a:
# Testa de algum elemento e uma letra minuscula
if not ( 64 < ord(b) < 91):
return False
else:
return True
# TRANSFORMADORES
# string_chave: chave --> str
# string_chave(c) devolve uma cadeia de caracteres que uma vez impressa apresenta
# as letras de c dispostas numa tabela 5 x 5
def string_chave (chave):
c = ''
for linha in chave:
# Coloca espacos entra as letras
for celula in linha:
c = c + celula + ' '
# Coloca no fim de cada linha paragrafo
c += '\n'
return c
# **********************************************************
# FUNCOES A DESEMVOLVER
# **********************************************************
# diagramas: str --> str
# digramas (mens) recebe como argumento uma cadeia de caracteres
# correspondente a uma mensagem, mens, e devolve a cadeia de caracteres
# correspondente aos digramas transformados de mens sem espacos
def digramas (mens):
mensg = ''
for i in range (len(mens)-1, -1, -1):
# Retira espacos
if mens [i] != ' ':
mensg = mens[i] + mensg
for i in range (0, len(mensg)-1, 2):
# Coloca X se existirem digitos iguais
if mensg[i] == mensg [i+1]:
mensg = mensg[:i+1] + 'X' + mensg[i+1:]
# Testa para ver se ha numeros par de caracteres
if len (mensg) % 2 != 0:
# Se nao houver acrescenta X no fim
mensg += 'X'
return mensg
# figura: digrm + chave --> fig + pos1 + pos2
# figura(digrm, chave) recebe dois argumentos, digrm, uma cadeia de
# caracteres de comprimento 2, e chave, e devolve um tuplo de 3 elementos
# da forma (fig, pos1, pos2) em que:
# - fig e a figura determinada pelas letras de digrm, 'l', 'c' ou 'r' (linha, coluna ou
# rectangulo).
# - pos1 e pos2 sao as posicoes ocupadas na chave pela primeira e segunda letras de
# digrm, respectivamente.
def figura (digrm, chave):
t = ()
# Ciclo para determinar posicao de uma letra na chave
for a in digrm:
# cada lista dentro da lista
for i in range (0,5):
for j in range (0,5):
# Verifica se o digito e igual ao digito da posicao i,j da chave
if a == ref_chave (chave, faz_pos (i,j)):
# Encontra posicao do digito e acumula
t += (faz_pos(i,j),)
# Testa se estao na mesma linha
if linha_pos(t[0]) == linha_pos(t[1]):
return ('l',) + t
# Testa de estao na mesma coluna
if coluna_pos (t[0]) == coluna_pos (t[1]):
return ('c',) + t
# Se estao em retangulo
else:
return ('r',) + t
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma linha de uma
# chave, e o inteiro inc, que podera ser 1 (encriptar) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_l (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a coluna e 1, 2, 3
if coluna_pos (posicao) != 4 and coluna_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a coluna alterada ( coluna + 1
# se for encriptacao, coluna - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) , coluna_pos (posicao) + inc ), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na coluna entao volta a ficar com 0
# se esta tiver 0 atualiza a coluna com 1 (0+1)
c += (faz_pos ( linha_pos (posicao) , 0),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 1),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na coluna fica com 3 (4-1)
# Se tiver 0 fica com 4
c += (faz_pos ( linha_pos (posicao) , 3),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 4),)
return c
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma coluna de uma
# chave, e o inteiro inc, que podera ser 1 (encriptao) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_c (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a linha e 1, 2, 3
if linha_pos (posicao) != 4 and linha_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a linha alterada ( linha + 1
# se for encriptacao, linha - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) + inc, coluna_pos (posicao)), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na linha entao volta a ficar com 0
# se esta tiver 0, atualiza a linha com 1 (0+1)
c += (faz_pos ( 0 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 1 , coluna_pos (posicao)),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na linha fica com 4-1 = 3
# Se tiver 0 fica volta ao 4
c += (faz_pos ( 3 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 4, coluna_pos (posicao)),)
return c
# codifica_r: posicao + posicao --> posicao + posicao
# codifica_r (pos1, pos2) recebe dois argumentos, pos1, pos2, consistindo nas
# posicoes das letras de um digrama numa chave. Estas posicoes encontra-se
# em linhas e colunas diferentes.
# A funcao devolve (pos1_cod, pos2_cod) que correspondem as posicoes das letras
# do digrama encriptado/desencriptado
def codifica_r (pos1, pos2):
# Retorna duas posicoes com as colunas trocadas entre si
return faz_pos(linha_pos(pos1), coluna_pos(pos2),) , faz_pos(linha_pos(pos2), coluna_pos(pos1))
# codifica_digrama: cad + chave + {1,-1} --> cad
# codifica_digrama ( diagrm, c, inc) recebe tres argumentos, digrm, um digrama,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou
# -1 (desencriptacao).
# A funcao devolve o digrama correspondente a encriptacao/desencriptacao de digrm
# usando a chave.
def codifica_digrama ( diagrm, c, inc):
# Funcao figura que devolve as posicoes dos caracteres no diagrama
# e a sua posicao relativa (na mesma linha, coluna, ou em rectangulo)
coordenadas = figura (diagrm, c)
# Verifica o ultimo elemento retornado por figura se e 'l'
if coordenadas [0] == 'l':
# Chama a funcao codifica_l que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma linha
diretriz = codifica_l ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave ( c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# Se ultimo elemento retornado por figura e 'c'
if coordenadas [0] == 'c':
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma coluna
diretriz = codifica_c ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
else:
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que nao estejam na mesma linha ou coluna (em rectangulo)
diretriz = codifica_r ( coordenadas [1], coordenadas [2])
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c,faz_pos(linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# codifica: mens + chave + {1,-1} --> mens
# codifica (mens, chave, inc) recebe tres argumentos, mens, uma mensagem,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou -1
# (desencriptacao).
# A funcao devolve a mensagem correspondente a encriptacao/desencriptacao de mens
# usando a chave.
def codifica (mens, chave, inc):
# Chama a funcao diagramas que devolve uma str igual a mens, sem espacos
# e sem caracteres repetidos
mens = digramas (mens)
r = ''
# Codifica os caracteres dois a dois
for i in range (0, len(mens)-1, 2):
# Chama codifica_digrama que devolve dois caracteres codificados
r += codifica_digrama (mens[i:i+2], chave, inc)
return r
l = ('A','B','C','D','E','F','G','H','I','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
| e_chave | identifier_name |
Projeto.py | # ******************************************************
# *********** Projeto 2 *************
# *********** Fundamentos da Programacao *************
# *********** *************
# *********** Carolina Carreira *************
# *********** 87641 *************
# ******************************************************
# *******************************************************
# Tipo POSICAO
# *******************************************************
# CONSTRUTOR
# faz_pos: natural x natural --> posicao
# faz_pos (l,c) Cria tipo posicao
def faz_pos(l,c):
if not isinstance (l, int) or l != abs (l):
raise ('ERRO: ValueError faz_pos')
if not isinstance (c, int) or c != abs (c):
raise ValueError ('faz_pos: argumentos errados')
return (l,c,)
# SELETOR
# linha_pos|colunas_pos: posicao --> natural
# Seleciona a linha ou coluna da posicao
def linha_pos (p):
return p[0]
def coluna_pos (p):
return p[1]
# RECONHECEDORES
# e_pos: universal --> Boolean
# e_pos (arg) Reconhece o ipo posicao, este tem de ser tuplo constituido por dois elementos positivos, e inteiros
def e_pos (arg):
return isinstance(arg, tuple) and len (arg) == 2 or isinstance (linha_pos (arg), int) and not linha_pos (arg) != abs(linha_pos (arg)) and isinstance (coluna_pos (arg), int) and not coluna_pos (arg) != abs(coluna_pos (arg))
# TESTES
# pos_iguais: posicao x posicao --> Boolean
# pos_iguais (p1, p2) recebe dois argumentos do tipo posicao, p1 e p2, e devolve
# True caso os argumentos correspondam a mesma posicao da chave
def pos_iguais (p1, p2):
return linha_pos (p1) == linha_pos (p2) and coluna_pos (p1) == coluna_pos (p2)
# *******************************************************
# Tipo CHAVE
# *******************************************************
# CONSTRUTORES
# gera_chave_linhas: L x str --> chave
# gera_chave_linhas(l, mgc) recebe dois argumentos, l e mgc, correspondentes
# a um tuplo de 25 letras e a cadeira de caracteres mgc e devolve a chave gerada
# usando a disposicao por linhas
def gera_chave_linhas (letras, mgc):
# funcao auxiliar que verifica a validade dos argumentos L e mgc
if not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_linhas: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada letras dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# Concatena r com o que resta em letras (todos os caracteres nao presentes em mgc)
r = r + letras
# Corta em listas de listas
return [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
# AUXILIAR (funcao gera_chave_linhas e
# funcao gera_chave_espiral
# verificacao_letras_mgc: L + mgc --> boolean
# verificacao_letras_mgc (L, mgc) verifica a validade dos argumentos letras, L, e mgc
# Para ser True o argumento L tem de ser um tuplo com 25 letras maiusculas como
# elementos unicos e o argumento mgc tem de ser uma string de letras maiusculas
def verificacao_letras_mgc (L, mgc):
# Testa de se L e tuplo, tem tamanho 25, e se mgc e string de caracteres maiusculos
if not isinstance (L, tuple) or len(L) != 25 or not isinstance (mgc, str) or mgc != mgc.upper ():
return False
for i in range (len (L)):
for j in range (i+1, len(L)):
# Testa se elementos sao todos diferentes e maiusculos
if L[i] == L[j] or L[i].upper == L[i] :
return False
return True
# gera_chave_espiral: L x str x {'r','c'} x pos --> chave
# gera_chave_espiral (l; mgc; s; pos) recebe 4 argumentos, l corresponde a
# um tuplo de 25 letras e mgc a mensagem de geracao de chave, e devolve a
# chave gerada usando a disposicao em espiral no sentido indicado pelo parametro
# s ('r' sentido dos ponteiros do rel0gio e 'c' sentido contrario),
# comecando na posicao indicada pelo parametro pos.
def gera_chave_espiral (letras, mgc, s, p):
# Para os argumentos serem validos p tem de ser do tipo posicao, s tem de ser str
# de tamanho 1, e verificacao_letras_mgc tem de ser verdadeiro
if not e_pos (p) or not isinstance (s, str) or len(s) != 1 or not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_espiral: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada caracter dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# soma o que restou nas letras (os caracteres nao presentes em mgc)
r += letras
# Ordena em espiral segundo a posicao
r = ordenar (r, p)
if s == 'c':
# se for contra os ponteiros a matriz tem de ser transposta
r = transpor (r, p)
# corta
r = [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
return r
# AUXILIAR (gera_chave_linhas)
# ordenar: lista + posicao --> lista
# ordenar (l,p) recebe uma lista, e uma posicao inicial a partir da qual ordena a lista em espiral
# no sentido dos ponteiros do relogio
def ordenar (l, p):
# ordem e o dicionario com as posicoes em espiral
dic_posicoes = {(0,0) : dic00, (0,4) : dic04, (4,0): dic40, (4,4): dic44}
ordem = dic_posicoes [p]
resp = []
for i in range (0, 25):
# Coloca em resp a respetiva letra de l agora de forma ondenada
resp += [ l [ordem[i]] ]
return resp
# Dicionarios de posicoes (Def ordenar)
dic00 = {0:0, 1:1, 2:2, 3:3, 4:4, 5:15, 6:16, 7:17, 8:18, 9:5, 10:14, 11:23, 12:24, 13:19, 14:6, 15:13, 16:22, 17:21, 18:20, 19:7, 20:12, 21:11, 22:10, 23:9, 24:8}
dic04 = {0:12, 1:13, 2:14, 3:15, 4:0, 5:11, 6:22, 7:23, 8:16, 9:1, 10:10, 11:21, 12:24, 13:17, 14:2, 15:9 , 16:20, 17:19, 18:18, 19:3, 20:8, 21:7, 22:6, 23:5, 24:4}
dic44 = {0:8, 1:9 , 2:10, 3:11, 4:12, 5:7, 6:20, 7:21, 8:22, 9:13, 10:6, 11:19, 12:24, 13:23, 14:14, 15:5, 16:18, 17:17, 18:16, 19:15, 20:4, 21:3, 22:2, 23:1, 24:0}
dic40 = {0:4, 1:5, 2:6, 3:7, 4:8, 5:3 , 6:18, 7:19, 8:20, 9:9 , 10:2 , 11:17, 12:24, 13:21, 14:10, 15:1 , 16:16, 17:23, 18:22, 19:11, 20:0 , 21:15, 22:14, 23:13,24:12}
# Funcao auxiliar (gera_chave_linhas)
# transpor: lista + posicao --> lista
# transpor (l,p) recebe uma lista a transpor, e uma posicao inicial para saber como transpor.
# Se for 0,0 or 4,4 a transposicao e uma reflexao em relacao a diagonal principal
# Se for 0,4 ou 4,0 esta e uma reflexao em relacao a diagonal secundaria
def transpor (l, p):
d = []
# Matriz de replexao em relacao a diagonal principal
dic = transposicao_impar
# Se for 4,4 ou 0,0 reflexao em relacao a diagonal pricipal
if linha_pos (p) == coluna_pos (p):
dic = transposicao_par
# Caso contrario e em relacao a diagonal secundaria
else:
dic = transposicao_impar
for i in range (25):
# Ordena os elementos de l segundo o dicionario escolhido
d += [ l [dic[i]] ]
return d
# Dicionario de transposicao (def transpor)
transposicao_par = {0:0, 1:5, 2:10, 3:15, 4:20, 5:1, 6:6, 7:11, 8:16, 9:21, 10:2, 11:7, 12:12, 13:17, 14:22, 15:3, 16:8, 17:13, 18:18, 19:23, 20:4, 21:9, 22:14, 23:19, 24:24}
transposicao_impar = {0:24, 1:19, 2:14, 3:9, 4:4, 5:23, 6:18, 7:13, 8:8, 9:3, 10:22, 11:17, 12:12, 13:7, 14:2, 15:21, 16:16, 17:11, 18:6, 19:1, 20:20, 21:15, 22:10, 23:5, 24:0}
# SELETOR
# ref_chave: chave x posicao --> letra
# ref_chave(c; p) recebe como argumentos a chave c e a posicao p e devolve a
# letra que esta em c na posicao p
def ref_chave (c, pos):
return c[linha_pos (pos)] [coluna_pos(pos)]
# MODIFICADOR
# muda_chave: chave x posicao x letra --> chave
# muda_chave(c; p; l) recebe como argumentos a chave c, a posicao p e a letra l
# e devolve a chave c com a letra l na posicao p
def muda_chave (c,p,l):
c[ linha_pos (p)][coluna_pos (p)] = l
return c
# RECONHECEDORES
# e_chave: arg --> Boolean
# e_chave(arg): devolve True se o argumento arg for do tipo chave e Falso caso contrario
def e_chave (arg):
''' para ser do tipo chave tem de ser uma lista constituida por 5 listas cada uma com 5 elementos
que sejam letras maiusculas unicas'''
if len (arg) != 5 or not isinstance (arg, list):
return False
for a in arg:
# Testa tipo, tamanho, e se nao ha letras repetidas em cada lista dentro da lista
if not isinstance (arg, list) or len(a) != 5 or len (set (a)) != len (a):
return False
for b in a:
# Testa de algum elemento e uma letra minuscula
if not ( 64 < ord(b) < 91):
return False
else:
return True
# TRANSFORMADORES
# string_chave: chave --> str
# string_chave(c) devolve uma cadeia de caracteres que uma vez impressa apresenta
# as letras de c dispostas numa tabela 5 x 5
def string_chave (chave):
c = ''
for linha in chave:
# Coloca espacos entra as letras
for celula in linha:
c = c + celula + ' '
# Coloca no fim de cada linha paragrafo
c += '\n'
return c
# **********************************************************
# FUNCOES A DESEMVOLVER
# **********************************************************
# diagramas: str --> str
# digramas (mens) recebe como argumento uma cadeia de caracteres
# correspondente a uma mensagem, mens, e devolve a cadeia de caracteres
# correspondente aos digramas transformados de mens sem espacos
def digramas (mens):
mensg = ''
for i in range (len(mens)-1, -1, -1):
# Retira espacos
if mens [i] != ' ':
mensg = mens[i] + mensg
for i in range (0, len(mensg)-1, 2):
# Coloca X se existirem digitos iguais
if mensg[i] == mensg [i+1]:
mensg = mensg[:i+1] + 'X' + mensg[i+1:]
# Testa para ver se ha numeros par de caracteres
if len (mensg) % 2 != 0:
# Se nao houver acrescenta X no fim
mensg += 'X'
return mensg
# figura: digrm + chave --> fig + pos1 + pos2
# figura(digrm, chave) recebe dois argumentos, digrm, uma cadeia de
# caracteres de comprimento 2, e chave, e devolve um tuplo de 3 elementos
# da forma (fig, pos1, pos2) em que:
# - fig e a figura determinada pelas letras de digrm, 'l', 'c' ou 'r' (linha, coluna ou
# rectangulo).
# - pos1 e pos2 sao as posicoes ocupadas na chave pela primeira e segunda letras de
# digrm, respectivamente.
def figura (digrm, chave):
t = ()
# Ciclo para determinar posicao de uma letra na chave
for a in digrm:
# cada lista dentro da lista
for i in range (0,5):
for j in range (0,5):
# Verifica se o digito e igual ao digito da posicao i,j da chave
if a == ref_chave (chave, faz_pos (i,j)):
# Encontra posicao do digito e acumula
t += (faz_pos(i,j),)
# Testa se estao na mesma linha
if linha_pos(t[0]) == linha_pos(t[1]):
return ('l',) + t
# Testa de estao na mesma coluna
if coluna_pos (t[0]) == coluna_pos (t[1]):
return ('c',) + t
# Se estao em retangulo
else:
return ('r',) + t
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma linha de uma
# chave, e o inteiro inc, que podera ser 1 (encriptar) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_l (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a coluna e 1, 2, 3
if coluna_pos (posicao) != 4 and coluna_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a coluna alterada ( coluna + 1
# se for encriptacao, coluna - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) , coluna_pos (posicao) + inc ), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na coluna entao volta a ficar com 0
# se esta tiver 0 atualiza a coluna com 1 (0+1)
c += (faz_pos ( linha_pos (posicao) , 0),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 1),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na coluna fica com 3 (4-1)
# Se tiver 0 fica com 4
c += (faz_pos ( linha_pos (posicao) , 3),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 4),)
return c
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma coluna de uma
# chave, e o inteiro inc, que podera ser 1 (encriptao) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_c (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a linha e 1, 2, 3
if linha_pos (posicao) != 4 and linha_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a linha alterada ( linha + 1
# se for encriptacao, linha - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) + inc, coluna_pos (posicao)), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na linha entao volta a ficar com 0
# se esta tiver 0, atualiza a linha com 1 (0+1)
c += (faz_pos ( 0 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 1 , coluna_pos (posicao)),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na linha fica com 4-1 = 3
# Se tiver 0 fica volta ao 4
c += (faz_pos ( 3 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 4, coluna_pos (posicao)),)
return c
| # em linhas e colunas diferentes.
# A funcao devolve (pos1_cod, pos2_cod) que correspondem as posicoes das letras
# do digrama encriptado/desencriptado
def codifica_r (pos1, pos2):
# Retorna duas posicoes com as colunas trocadas entre si
return faz_pos(linha_pos(pos1), coluna_pos(pos2),) , faz_pos(linha_pos(pos2), coluna_pos(pos1))
# codifica_digrama: cad + chave + {1,-1} --> cad
# codifica_digrama ( diagrm, c, inc) recebe tres argumentos, digrm, um digrama,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou
# -1 (desencriptacao).
# A funcao devolve o digrama correspondente a encriptacao/desencriptacao de digrm
# usando a chave.
def codifica_digrama ( diagrm, c, inc):
# Funcao figura que devolve as posicoes dos caracteres no diagrama
# e a sua posicao relativa (na mesma linha, coluna, ou em rectangulo)
coordenadas = figura (diagrm, c)
# Verifica o ultimo elemento retornado por figura se e 'l'
if coordenadas [0] == 'l':
# Chama a funcao codifica_l que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma linha
diretriz = codifica_l ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave ( c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# Se ultimo elemento retornado por figura e 'c'
if coordenadas [0] == 'c':
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma coluna
diretriz = codifica_c ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
else:
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que nao estejam na mesma linha ou coluna (em rectangulo)
diretriz = codifica_r ( coordenadas [1], coordenadas [2])
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c,faz_pos(linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# codifica: mens + chave + {1,-1} --> mens
# codifica (mens, chave, inc) recebe tres argumentos, mens, uma mensagem,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou -1
# (desencriptacao).
# A funcao devolve a mensagem correspondente a encriptacao/desencriptacao de mens
# usando a chave.
def codifica (mens, chave, inc):
# Chama a funcao diagramas que devolve uma str igual a mens, sem espacos
# e sem caracteres repetidos
mens = digramas (mens)
r = ''
# Codifica os caracteres dois a dois
for i in range (0, len(mens)-1, 2):
# Chama codifica_digrama que devolve dois caracteres codificados
r += codifica_digrama (mens[i:i+2], chave, inc)
return r
l = ('A','B','C','D','E','F','G','H','I','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z') | # codifica_r: posicao + posicao --> posicao + posicao
# codifica_r (pos1, pos2) recebe dois argumentos, pos1, pos2, consistindo nas
# posicoes das letras de um digrama numa chave. Estas posicoes encontra-se | random_line_split |
Projeto.py |
# ******************************************************
# *********** Projeto 2 *************
# *********** Fundamentos da Programacao *************
# *********** *************
# *********** Carolina Carreira *************
# *********** 87641 *************
# ******************************************************
# *******************************************************
# Tipo POSICAO
# *******************************************************
# CONSTRUTOR
# faz_pos: natural x natural --> posicao
# faz_pos (l,c) Cria tipo posicao
def faz_pos(l,c):
if not isinstance (l, int) or l != abs (l):
raise ('ERRO: ValueError faz_pos')
if not isinstance (c, int) or c != abs (c):
raise ValueError ('faz_pos: argumentos errados')
return (l,c,)
# SELETOR
# linha_pos|colunas_pos: posicao --> natural
# Seleciona a linha ou coluna da posicao
def linha_pos (p):
return p[0]
def coluna_pos (p):
return p[1]
# RECONHECEDORES
# e_pos: universal --> Boolean
# e_pos (arg) Reconhece o ipo posicao, este tem de ser tuplo constituido por dois elementos positivos, e inteiros
def e_pos (arg):
return isinstance(arg, tuple) and len (arg) == 2 or isinstance (linha_pos (arg), int) and not linha_pos (arg) != abs(linha_pos (arg)) and isinstance (coluna_pos (arg), int) and not coluna_pos (arg) != abs(coluna_pos (arg))
# TESTES
# pos_iguais: posicao x posicao --> Boolean
# pos_iguais (p1, p2) recebe dois argumentos do tipo posicao, p1 e p2, e devolve
# True caso os argumentos correspondam a mesma posicao da chave
def pos_iguais (p1, p2):
return linha_pos (p1) == linha_pos (p2) and coluna_pos (p1) == coluna_pos (p2)
# *******************************************************
# Tipo CHAVE
# *******************************************************
# CONSTRUTORES
# gera_chave_linhas: L x str --> chave
# gera_chave_linhas(l, mgc) recebe dois argumentos, l e mgc, correspondentes
# a um tuplo de 25 letras e a cadeira de caracteres mgc e devolve a chave gerada
# usando a disposicao por linhas
def gera_chave_linhas (letras, mgc):
# funcao auxiliar que verifica a validade dos argumentos L e mgc
if not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_linhas: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada letras dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# Concatena r com o que resta em letras (todos os caracteres nao presentes em mgc)
r = r + letras
# Corta em listas de listas
return [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
# AUXILIAR (funcao gera_chave_linhas e
# funcao gera_chave_espiral
# verificacao_letras_mgc: L + mgc --> boolean
# verificacao_letras_mgc (L, mgc) verifica a validade dos argumentos letras, L, e mgc
# Para ser True o argumento L tem de ser um tuplo com 25 letras maiusculas como
# elementos unicos e o argumento mgc tem de ser uma string de letras maiusculas
def verificacao_letras_mgc (L, mgc):
# Testa de se L e tuplo, tem tamanho 25, e se mgc e string de caracteres maiusculos
if not isinstance (L, tuple) or len(L) != 25 or not isinstance (mgc, str) or mgc != mgc.upper ():
return False
for i in range (len (L)):
for j in range (i+1, len(L)):
# Testa se elementos sao todos diferentes e maiusculos
if L[i] == L[j] or L[i].upper == L[i] :
return False
return True
# gera_chave_espiral: L x str x {'r','c'} x pos --> chave
# gera_chave_espiral (l; mgc; s; pos) recebe 4 argumentos, l corresponde a
# um tuplo de 25 letras e mgc a mensagem de geracao de chave, e devolve a
# chave gerada usando a disposicao em espiral no sentido indicado pelo parametro
# s ('r' sentido dos ponteiros do rel0gio e 'c' sentido contrario),
# comecando na posicao indicada pelo parametro pos.
def gera_chave_espiral (letras, mgc, s, p):
# Para os argumentos serem validos p tem de ser do tipo posicao, s tem de ser str
# de tamanho 1, e verificacao_letras_mgc tem de ser verdadeiro
if not e_pos (p) or not isinstance (s, str) or len(s) != 1 or not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_espiral: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada caracter dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# soma o que restou nas letras (os caracteres nao presentes em mgc)
r += letras
# Ordena em espiral segundo a posicao
r = ordenar (r, p)
if s == 'c':
# se for contra os ponteiros a matriz tem de ser transposta
r = transpor (r, p)
# corta
r = [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
return r
# AUXILIAR (gera_chave_linhas)
# ordenar: lista + posicao --> lista
# ordenar (l,p) recebe uma lista, e uma posicao inicial a partir da qual ordena a lista em espiral
# no sentido dos ponteiros do relogio
def ordenar (l, p):
# ordem e o dicionario com as posicoes em espiral
dic_posicoes = {(0,0) : dic00, (0,4) : dic04, (4,0): dic40, (4,4): dic44}
ordem = dic_posicoes [p]
resp = []
for i in range (0, 25):
# Coloca em resp a respetiva letra de l agora de forma ondenada
resp += [ l [ordem[i]] ]
return resp
# Dicionarios de posicoes (Def ordenar)
dic00 = {0:0, 1:1, 2:2, 3:3, 4:4, 5:15, 6:16, 7:17, 8:18, 9:5, 10:14, 11:23, 12:24, 13:19, 14:6, 15:13, 16:22, 17:21, 18:20, 19:7, 20:12, 21:11, 22:10, 23:9, 24:8}
dic04 = {0:12, 1:13, 2:14, 3:15, 4:0, 5:11, 6:22, 7:23, 8:16, 9:1, 10:10, 11:21, 12:24, 13:17, 14:2, 15:9 , 16:20, 17:19, 18:18, 19:3, 20:8, 21:7, 22:6, 23:5, 24:4}
dic44 = {0:8, 1:9 , 2:10, 3:11, 4:12, 5:7, 6:20, 7:21, 8:22, 9:13, 10:6, 11:19, 12:24, 13:23, 14:14, 15:5, 16:18, 17:17, 18:16, 19:15, 20:4, 21:3, 22:2, 23:1, 24:0}
dic40 = {0:4, 1:5, 2:6, 3:7, 4:8, 5:3 , 6:18, 7:19, 8:20, 9:9 , 10:2 , 11:17, 12:24, 13:21, 14:10, 15:1 , 16:16, 17:23, 18:22, 19:11, 20:0 , 21:15, 22:14, 23:13,24:12}
# Funcao auxiliar (gera_chave_linhas)
# transpor: lista + posicao --> lista
# transpor (l,p) recebe uma lista a transpor, e uma posicao inicial para saber como transpor.
# Se for 0,0 or 4,4 a transposicao e uma reflexao em relacao a diagonal principal
# Se for 0,4 ou 4,0 esta e uma reflexao em relacao a diagonal secundaria
def transpor (l, p):
d = []
# Matriz de replexao em relacao a diagonal principal
dic = transposicao_impar
# Se for 4,4 ou 0,0 reflexao em relacao a diagonal pricipal
if linha_pos (p) == coluna_pos (p):
dic = transposicao_par
# Caso contrario e em relacao a diagonal secundaria
else:
dic = transposicao_impar
for i in range (25):
# Ordena os elementos de l segundo o dicionario escolhido
d += [ l [dic[i]] ]
return d
# Dicionario de transposicao (def transpor)
transposicao_par = {0:0, 1:5, 2:10, 3:15, 4:20, 5:1, 6:6, 7:11, 8:16, 9:21, 10:2, 11:7, 12:12, 13:17, 14:22, 15:3, 16:8, 17:13, 18:18, 19:23, 20:4, 21:9, 22:14, 23:19, 24:24}
transposicao_impar = {0:24, 1:19, 2:14, 3:9, 4:4, 5:23, 6:18, 7:13, 8:8, 9:3, 10:22, 11:17, 12:12, 13:7, 14:2, 15:21, 16:16, 17:11, 18:6, 19:1, 20:20, 21:15, 22:10, 23:5, 24:0}
# SELETOR
# ref_chave: chave x posicao --> letra
# ref_chave(c; p) recebe como argumentos a chave c e a posicao p e devolve a
# letra que esta em c na posicao p
def ref_chave (c, pos):
return c[linha_pos (pos)] [coluna_pos(pos)]
# MODIFICADOR
# muda_chave: chave x posicao x letra --> chave
# muda_chave(c; p; l) recebe como argumentos a chave c, a posicao p e a letra l
# e devolve a chave c com a letra l na posicao p
def muda_chave (c,p,l):
c[ linha_pos (p)][coluna_pos (p)] = l
return c
# RECONHECEDORES
# e_chave: arg --> Boolean
# e_chave(arg): devolve True se o argumento arg for do tipo chave e Falso caso contrario
def e_chave (arg):
''' para ser do tipo chave tem de ser uma lista constituida por 5 listas cada uma com 5 elementos
que sejam letras maiusculas unicas'''
if len (arg) != 5 or not isinstance (arg, list):
return False
for a in arg:
# Testa tipo, tamanho, e se nao ha letras repetidas em cada lista dentro da lista
if not isinstance (arg, list) or len(a) != 5 or len (set (a)) != len (a):
return False
for b in a:
# Testa de algum elemento e uma letra minuscula
|
else:
return True
# TRANSFORMADORES
# string_chave: chave --> str
# string_chave(c) devolve uma cadeia de caracteres que uma vez impressa apresenta
# as letras de c dispostas numa tabela 5 x 5
def string_chave (chave):
c = ''
for linha in chave:
# Coloca espacos entra as letras
for celula in linha:
c = c + celula + ' '
# Coloca no fim de cada linha paragrafo
c += '\n'
return c
# **********************************************************
# FUNCOES A DESEMVOLVER
# **********************************************************
# diagramas: str --> str
# digramas (mens) recebe como argumento uma cadeia de caracteres
# correspondente a uma mensagem, mens, e devolve a cadeia de caracteres
# correspondente aos digramas transformados de mens sem espacos
def digramas (mens):
mensg = ''
for i in range (len(mens)-1, -1, -1):
# Retira espacos
if mens [i] != ' ':
mensg = mens[i] + mensg
for i in range (0, len(mensg)-1, 2):
# Coloca X se existirem digitos iguais
if mensg[i] == mensg [i+1]:
mensg = mensg[:i+1] + 'X' + mensg[i+1:]
# Testa para ver se ha numeros par de caracteres
if len (mensg) % 2 != 0:
# Se nao houver acrescenta X no fim
mensg += 'X'
return mensg
# figura: digrm + chave --> fig + pos1 + pos2
# figura(digrm, chave) recebe dois argumentos, digrm, uma cadeia de
# caracteres de comprimento 2, e chave, e devolve um tuplo de 3 elementos
# da forma (fig, pos1, pos2) em que:
# - fig e a figura determinada pelas letras de digrm, 'l', 'c' ou 'r' (linha, coluna ou
# rectangulo).
# - pos1 e pos2 sao as posicoes ocupadas na chave pela primeira e segunda letras de
# digrm, respectivamente.
def figura (digrm, chave):
t = ()
# Ciclo para determinar posicao de uma letra na chave
for a in digrm:
# cada lista dentro da lista
for i in range (0,5):
for j in range (0,5):
# Verifica se o digito e igual ao digito da posicao i,j da chave
if a == ref_chave (chave, faz_pos (i,j)):
# Encontra posicao do digito e acumula
t += (faz_pos(i,j),)
# Testa se estao na mesma linha
if linha_pos(t[0]) == linha_pos(t[1]):
return ('l',) + t
# Testa de estao na mesma coluna
if coluna_pos (t[0]) == coluna_pos (t[1]):
return ('c',) + t
# Se estao em retangulo
else:
return ('r',) + t
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma linha de uma
# chave, e o inteiro inc, que podera ser 1 (encriptar) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_l (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a coluna e 1, 2, 3
if coluna_pos (posicao) != 4 and coluna_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a coluna alterada ( coluna + 1
# se for encriptacao, coluna - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) , coluna_pos (posicao) + inc ), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na coluna entao volta a ficar com 0
# se esta tiver 0 atualiza a coluna com 1 (0+1)
c += (faz_pos ( linha_pos (posicao) , 0),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 1),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na coluna fica com 3 (4-1)
# Se tiver 0 fica com 4
c += (faz_pos ( linha_pos (posicao) , 3),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 4),)
return c
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma coluna de uma
# chave, e o inteiro inc, que podera ser 1 (encriptao) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_c (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a linha e 1, 2, 3
if linha_pos (posicao) != 4 and linha_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a linha alterada ( linha + 1
# se for encriptacao, linha - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) + inc, coluna_pos (posicao)), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na linha entao volta a ficar com 0
# se esta tiver 0, atualiza a linha com 1 (0+1)
c += (faz_pos ( 0 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 1 , coluna_pos (posicao)),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na linha fica com 4-1 = 3
# Se tiver 0 fica volta ao 4
c += (faz_pos ( 3 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 4, coluna_pos (posicao)),)
return c
# codifica_r: posicao + posicao --> posicao + posicao
# codifica_r (pos1, pos2) recebe dois argumentos, pos1, pos2, consistindo nas
# posicoes das letras de um digrama numa chave. Estas posicoes encontra-se
# em linhas e colunas diferentes.
# A funcao devolve (pos1_cod, pos2_cod) que correspondem as posicoes das letras
# do digrama encriptado/desencriptado
def codifica_r (pos1, pos2):
# Retorna duas posicoes com as colunas trocadas entre si
return faz_pos(linha_pos(pos1), coluna_pos(pos2),) , faz_pos(linha_pos(pos2), coluna_pos(pos1))
# codifica_digrama: cad + chave + {1,-1} --> cad
# codifica_digrama ( diagrm, c, inc) recebe tres argumentos, digrm, um digrama,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou
# -1 (desencriptacao).
# A funcao devolve o digrama correspondente a encriptacao/desencriptacao de digrm
# usando a chave.
def codifica_digrama ( diagrm, c, inc):
# Funcao figura que devolve as posicoes dos caracteres no diagrama
# e a sua posicao relativa (na mesma linha, coluna, ou em rectangulo)
coordenadas = figura (diagrm, c)
# Verifica o ultimo elemento retornado por figura se e 'l'
if coordenadas [0] == 'l':
# Chama a funcao codifica_l que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma linha
diretriz = codifica_l ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave ( c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# Se ultimo elemento retornado por figura e 'c'
if coordenadas [0] == 'c':
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma coluna
diretriz = codifica_c ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
else:
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que nao estejam na mesma linha ou coluna (em rectangulo)
diretriz = codifica_r ( coordenadas [1], coordenadas [2])
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c,faz_pos(linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# codifica: mens + chave + {1,-1} --> mens
# codifica (mens, chave, inc) recebe tres argumentos, mens, uma mensagem,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou -1
# (desencriptacao).
# A funcao devolve a mensagem correspondente a encriptacao/desencriptacao de mens
# usando a chave.
def codifica (mens, chave, inc):
# Chama a funcao diagramas que devolve uma str igual a mens, sem espacos
# e sem caracteres repetidos
mens = digramas (mens)
r = ''
# Codifica os caracteres dois a dois
for i in range (0, len(mens)-1, 2):
# Chama codifica_digrama que devolve dois caracteres codificados
r += codifica_digrama (mens[i:i+2], chave, inc)
return r
l = ('A','B','C','D','E','F','G','H','I','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
| if not ( 64 < ord(b) < 91):
return False | conditional_block |
Projeto.py |
# ******************************************************
# *********** Projeto 2 *************
# *********** Fundamentos da Programacao *************
# *********** *************
# *********** Carolina Carreira *************
# *********** 87641 *************
# ******************************************************
# *******************************************************
# Tipo POSICAO
# *******************************************************
# CONSTRUTOR
# faz_pos: natural x natural --> posicao
# faz_pos (l,c) Cria tipo posicao
def faz_pos(l,c):
if not isinstance (l, int) or l != abs (l):
raise ('ERRO: ValueError faz_pos')
if not isinstance (c, int) or c != abs (c):
raise ValueError ('faz_pos: argumentos errados')
return (l,c,)
# SELETOR
# linha_pos|colunas_pos: posicao --> natural
# Seleciona a linha ou coluna da posicao
def linha_pos (p):
return p[0]
def coluna_pos (p):
return p[1]
# RECONHECEDORES
# e_pos: universal --> Boolean
# e_pos (arg) Reconhece o ipo posicao, este tem de ser tuplo constituido por dois elementos positivos, e inteiros
def e_pos (arg):
return isinstance(arg, tuple) and len (arg) == 2 or isinstance (linha_pos (arg), int) and not linha_pos (arg) != abs(linha_pos (arg)) and isinstance (coluna_pos (arg), int) and not coluna_pos (arg) != abs(coluna_pos (arg))
# TESTES
# pos_iguais: posicao x posicao --> Boolean
# pos_iguais (p1, p2) recebe dois argumentos do tipo posicao, p1 e p2, e devolve
# True caso os argumentos correspondam a mesma posicao da chave
def pos_iguais (p1, p2):
return linha_pos (p1) == linha_pos (p2) and coluna_pos (p1) == coluna_pos (p2)
# *******************************************************
# Tipo CHAVE
# *******************************************************
# CONSTRUTORES
# gera_chave_linhas: L x str --> chave
# gera_chave_linhas(l, mgc) recebe dois argumentos, l e mgc, correspondentes
# a um tuplo de 25 letras e a cadeira de caracteres mgc e devolve a chave gerada
# usando a disposicao por linhas
def gera_chave_linhas (letras, mgc):
# funcao auxiliar que verifica a validade dos argumentos L e mgc
if not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_linhas: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada letras dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# Concatena r com o que resta em letras (todos os caracteres nao presentes em mgc)
r = r + letras
# Corta em listas de listas
return [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
# AUXILIAR (funcao gera_chave_linhas e
# funcao gera_chave_espiral
# verificacao_letras_mgc: L + mgc --> boolean
# verificacao_letras_mgc (L, mgc) verifica a validade dos argumentos letras, L, e mgc
# Para ser True o argumento L tem de ser um tuplo com 25 letras maiusculas como
# elementos unicos e o argumento mgc tem de ser uma string de letras maiusculas
def verificacao_letras_mgc (L, mgc):
# Testa de se L e tuplo, tem tamanho 25, e se mgc e string de caracteres maiusculos
if not isinstance (L, tuple) or len(L) != 25 or not isinstance (mgc, str) or mgc != mgc.upper ():
return False
for i in range (len (L)):
for j in range (i+1, len(L)):
# Testa se elementos sao todos diferentes e maiusculos
if L[i] == L[j] or L[i].upper == L[i] :
return False
return True
# gera_chave_espiral: L x str x {'r','c'} x pos --> chave
# gera_chave_espiral (l; mgc; s; pos) recebe 4 argumentos, l corresponde a
# um tuplo de 25 letras e mgc a mensagem de geracao de chave, e devolve a
# chave gerada usando a disposicao em espiral no sentido indicado pelo parametro
# s ('r' sentido dos ponteiros do rel0gio e 'c' sentido contrario),
# comecando na posicao indicada pelo parametro pos.
def gera_chave_espiral (letras, mgc, s, p):
# Para os argumentos serem validos p tem de ser do tipo posicao, s tem de ser str
# de tamanho 1, e verificacao_letras_mgc tem de ser verdadeiro
if not e_pos (p) or not isinstance (s, str) or len(s) != 1 or not verificacao_letras_mgc (letras, mgc):
raise ValueError ('gera_chave_espiral: argumentos errados')
# Transforma em lista para poder ser manipulado
letras = list(letras)
r = []
# Verifica cada caracter dentro de mgc
for i in range (len( mgc)):
# Testa se o caracter nao esta ja em r e se esta em letras
if not mgc[i] in r and mgc[i] in letras:
# Se sim, junta-o a r
r += [mgc[i]]
# Remove de letras esse caracter de mgc
letras.remove (mgc[i])
# soma o que restou nas letras (os caracteres nao presentes em mgc)
r += letras
# Ordena em espiral segundo a posicao
r = ordenar (r, p)
if s == 'c':
# se for contra os ponteiros a matriz tem de ser transposta
r = transpor (r, p)
# corta
r = [r[0:5],r[5:10],r[10:15],r[15:20],r[20:25]]
return r
# AUXILIAR (gera_chave_linhas)
# ordenar: lista + posicao --> lista
# ordenar (l,p) recebe uma lista, e uma posicao inicial a partir da qual ordena a lista em espiral
# no sentido dos ponteiros do relogio
def ordenar (l, p):
# ordem e o dicionario com as posicoes em espiral
dic_posicoes = {(0,0) : dic00, (0,4) : dic04, (4,0): dic40, (4,4): dic44}
ordem = dic_posicoes [p]
resp = []
for i in range (0, 25):
# Coloca em resp a respetiva letra de l agora de forma ondenada
resp += [ l [ordem[i]] ]
return resp
# Dicionarios de posicoes (Def ordenar)
dic00 = {0:0, 1:1, 2:2, 3:3, 4:4, 5:15, 6:16, 7:17, 8:18, 9:5, 10:14, 11:23, 12:24, 13:19, 14:6, 15:13, 16:22, 17:21, 18:20, 19:7, 20:12, 21:11, 22:10, 23:9, 24:8}
dic04 = {0:12, 1:13, 2:14, 3:15, 4:0, 5:11, 6:22, 7:23, 8:16, 9:1, 10:10, 11:21, 12:24, 13:17, 14:2, 15:9 , 16:20, 17:19, 18:18, 19:3, 20:8, 21:7, 22:6, 23:5, 24:4}
dic44 = {0:8, 1:9 , 2:10, 3:11, 4:12, 5:7, 6:20, 7:21, 8:22, 9:13, 10:6, 11:19, 12:24, 13:23, 14:14, 15:5, 16:18, 17:17, 18:16, 19:15, 20:4, 21:3, 22:2, 23:1, 24:0}
dic40 = {0:4, 1:5, 2:6, 3:7, 4:8, 5:3 , 6:18, 7:19, 8:20, 9:9 , 10:2 , 11:17, 12:24, 13:21, 14:10, 15:1 , 16:16, 17:23, 18:22, 19:11, 20:0 , 21:15, 22:14, 23:13,24:12}
# Funcao auxiliar (gera_chave_linhas)
# transpor: lista + posicao --> lista
# transpor (l,p) recebe uma lista a transpor, e uma posicao inicial para saber como transpor.
# Se for 0,0 or 4,4 a transposicao e uma reflexao em relacao a diagonal principal
# Se for 0,4 ou 4,0 esta e uma reflexao em relacao a diagonal secundaria
def transpor (l, p):
d = []
# Matriz de replexao em relacao a diagonal principal
dic = transposicao_impar
# Se for 4,4 ou 0,0 reflexao em relacao a diagonal pricipal
if linha_pos (p) == coluna_pos (p):
dic = transposicao_par
# Caso contrario e em relacao a diagonal secundaria
else:
dic = transposicao_impar
for i in range (25):
# Ordena os elementos de l segundo o dicionario escolhido
d += [ l [dic[i]] ]
return d
# Dicionario de transposicao (def transpor)
transposicao_par = {0:0, 1:5, 2:10, 3:15, 4:20, 5:1, 6:6, 7:11, 8:16, 9:21, 10:2, 11:7, 12:12, 13:17, 14:22, 15:3, 16:8, 17:13, 18:18, 19:23, 20:4, 21:9, 22:14, 23:19, 24:24}
transposicao_impar = {0:24, 1:19, 2:14, 3:9, 4:4, 5:23, 6:18, 7:13, 8:8, 9:3, 10:22, 11:17, 12:12, 13:7, 14:2, 15:21, 16:16, 17:11, 18:6, 19:1, 20:20, 21:15, 22:10, 23:5, 24:0}
# SELETOR
# ref_chave: chave x posicao --> letra
# ref_chave(c; p) recebe como argumentos a chave c e a posicao p e devolve a
# letra que esta em c na posicao p
def ref_chave (c, pos):
return c[linha_pos (pos)] [coluna_pos(pos)]
# MODIFICADOR
# muda_chave: chave x posicao x letra --> chave
# muda_chave(c; p; l) recebe como argumentos a chave c, a posicao p e a letra l
# e devolve a chave c com a letra l na posicao p
def muda_chave (c,p,l):
c[ linha_pos (p)][coluna_pos (p)] = l
return c
# RECONHECEDORES
# e_chave: arg --> Boolean
# e_chave(arg): devolve True se o argumento arg for do tipo chave e Falso caso contrario
def e_chave (arg):
''' para ser do tipo chave tem de ser uma lista constituida por 5 listas cada uma com 5 elementos
que sejam letras maiusculas unicas'''
if len (arg) != 5 or not isinstance (arg, list):
return False
for a in arg:
# Testa tipo, tamanho, e se nao ha letras repetidas em cada lista dentro da lista
if not isinstance (arg, list) or len(a) != 5 or len (set (a)) != len (a):
return False
for b in a:
# Testa de algum elemento e uma letra minuscula
if not ( 64 < ord(b) < 91):
return False
else:
return True
# TRANSFORMADORES
# string_chave: chave --> str
# string_chave(c) devolve uma cadeia de caracteres que uma vez impressa apresenta
# as letras de c dispostas numa tabela 5 x 5
def string_chave (chave):
c = ''
for linha in chave:
# Coloca espacos entra as letras
for celula in linha:
c = c + celula + ' '
# Coloca no fim de cada linha paragrafo
c += '\n'
return c
# **********************************************************
# FUNCOES A DESEMVOLVER
# **********************************************************
# diagramas: str --> str
# digramas (mens) recebe como argumento uma cadeia de caracteres
# correspondente a uma mensagem, mens, e devolve a cadeia de caracteres
# correspondente aos digramas transformados de mens sem espacos
def digramas (mens):
mensg = ''
for i in range (len(mens)-1, -1, -1):
# Retira espacos
if mens [i] != ' ':
mensg = mens[i] + mensg
for i in range (0, len(mensg)-1, 2):
# Coloca X se existirem digitos iguais
if mensg[i] == mensg [i+1]:
mensg = mensg[:i+1] + 'X' + mensg[i+1:]
# Testa para ver se ha numeros par de caracteres
if len (mensg) % 2 != 0:
# Se nao houver acrescenta X no fim
mensg += 'X'
return mensg
# figura: digrm + chave --> fig + pos1 + pos2
# figura(digrm, chave) recebe dois argumentos, digrm, uma cadeia de
# caracteres de comprimento 2, e chave, e devolve um tuplo de 3 elementos
# da forma (fig, pos1, pos2) em que:
# - fig e a figura determinada pelas letras de digrm, 'l', 'c' ou 'r' (linha, coluna ou
# rectangulo).
# - pos1 e pos2 sao as posicoes ocupadas na chave pela primeira e segunda letras de
# digrm, respectivamente.
def figura (digrm, chave):
t = ()
# Ciclo para determinar posicao de uma letra na chave
for a in digrm:
# cada lista dentro da lista
for i in range (0,5):
for j in range (0,5):
# Verifica se o digito e igual ao digito da posicao i,j da chave
if a == ref_chave (chave, faz_pos (i,j)):
# Encontra posicao do digito e acumula
t += (faz_pos(i,j),)
# Testa se estao na mesma linha
if linha_pos(t[0]) == linha_pos(t[1]):
return ('l',) + t
# Testa de estao na mesma coluna
if coluna_pos (t[0]) == coluna_pos (t[1]):
return ('c',) + t
# Se estao em retangulo
else:
return ('r',) + t
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma linha de uma
# chave, e o inteiro inc, que podera ser 1 (encriptar) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_l (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a coluna e 1, 2, 3
if coluna_pos (posicao) != 4 and coluna_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a coluna alterada ( coluna + 1
# se for encriptacao, coluna - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) , coluna_pos (posicao) + inc ), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na coluna entao volta a ficar com 0
# se esta tiver 0 atualiza a coluna com 1 (0+1)
c += (faz_pos ( linha_pos (posicao) , 0),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 1),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na coluna fica com 3 (4-1)
# Se tiver 0 fica com 4
c += (faz_pos ( linha_pos (posicao) , 3),) if coluna_pos (posicao) == 4 else (faz_pos ( linha_pos (posicao) , 4),)
return c
# codifica_l: posicao + posicao + {-1,1} --> posicao + posicao
# codifica_l (pos1, pos2, inc) recebe tres argumentos, pos1, pos2,
# consistindo nas posicoes das letras de um digrama na mesma coluna de uma
# chave, e o inteiro inc, que podera ser 1 (encriptao) ou -1 (desencriptar).
# Devolve um tuplo de 2 posicoes (pos1_cod, pos2_cod) que correspondem as
# posicoes das letras do digrama encriptado/desencriptado.
def codifica_c (pos1, pos2, inc):
c = ()
# Acede a cada posicao dada
for posicao in (pos1,pos2):
# Testa se a linha e 1, 2, 3
if linha_pos (posicao) != 4 and linha_pos (posicao) != 0 :
# se sim, retorna a mesma posicao com a linha alterada ( linha + 1
# se for encriptacao, linha - 1 se for desencriptacao)
c += ( faz_pos ( linha_pos (posicao) + inc, coluna_pos (posicao)), )
# Testa se e encriptacao
elif inc == 1:
# se sim e se a posicao tiver 4 na linha entao volta a ficar com 0
# se esta tiver 0, atualiza a linha com 1 (0+1)
c += (faz_pos ( 0 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 1 , coluna_pos (posicao)),)
# Desemcriptacao
else:
# Se a posicao tiver 4 na linha fica com 4-1 = 3
# Se tiver 0 fica volta ao 4
c += (faz_pos ( 3 , coluna_pos (posicao)),) if linha_pos (posicao) == 4 else (faz_pos ( 4, coluna_pos (posicao)),)
return c
# codifica_r: posicao + posicao --> posicao + posicao
# codifica_r (pos1, pos2) recebe dois argumentos, pos1, pos2, consistindo nas
# posicoes das letras de um digrama numa chave. Estas posicoes encontra-se
# em linhas e colunas diferentes.
# A funcao devolve (pos1_cod, pos2_cod) que correspondem as posicoes das letras
# do digrama encriptado/desencriptado
def codifica_r (pos1, pos2):
# Retorna duas posicoes com as colunas trocadas entre si
return faz_pos(linha_pos(pos1), coluna_pos(pos2),) , faz_pos(linha_pos(pos2), coluna_pos(pos1))
# codifica_digrama: cad + chave + {1,-1} --> cad
# codifica_digrama ( diagrm, c, inc) recebe tres argumentos, digrm, um digrama,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou
# -1 (desencriptacao).
# A funcao devolve o digrama correspondente a encriptacao/desencriptacao de digrm
# usando a chave.
def codifica_digrama ( diagrm, c, inc):
# Funcao figura que devolve as posicoes dos caracteres no diagrama
# e a sua posicao relativa (na mesma linha, coluna, ou em rectangulo)
coordenadas = figura (diagrm, c)
# Verifica o ultimo elemento retornado por figura se e 'l'
if coordenadas [0] == 'l':
# Chama a funcao codifica_l que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma linha
diretriz = codifica_l ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave ( c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# Se ultimo elemento retornado por figura e 'c'
if coordenadas [0] == 'c':
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que estejam na mesma coluna
diretriz = codifica_c ( coordenadas [1], coordenadas [2], inc)
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c, faz_pos (linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
else:
# Chama a funcao codifica_c que devolve duas posicoes (encriptadas ou
# desencriptadas) que nao estejam na mesma linha ou coluna (em rectangulo)
diretriz = codifica_r ( coordenadas [1], coordenadas [2])
# Devolve as dois caracteres correspondentes as duas posicoes
return ref_chave ( c,faz_pos(linha_pos(diretriz [0]), coluna_pos(diretriz [0]))) + ref_chave (c, faz_pos (linha_pos(diretriz [1]), coluna_pos(diretriz [1])))
# codifica: mens + chave + {1,-1} --> mens
# codifica (mens, chave, inc) recebe tres argumentos, mens, uma mensagem,
# chave, uma chave, e o inteiro inc, que podera ser 1(encriptacao) ou -1
# (desencriptacao).
# A funcao devolve a mensagem correspondente a encriptacao/desencriptacao de mens
# usando a chave.
def codifica (mens, chave, inc):
# Chama a funcao diagramas que devolve uma str igual a mens, sem espacos
# e sem caracteres repetidos
|
l = ('A','B','C','D','E','F','G','H','I','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
| mens = digramas (mens)
r = ''
# Codifica os caracteres dois a dois
for i in range (0, len(mens)-1, 2):
# Chama codifica_digrama que devolve dois caracteres codificados
r += codifica_digrama (mens[i:i+2], chave, inc)
return r | identifier_body |
NEF_tester.py |
import NEF
from matplotlib import pyplot as plt
from random import choice
from math import log,exp,sqrt,sin
from random import normalvariate,random
from pickle import dump,load
#from math import abs
import numpy as np
def Error(p,target,deltaT):
Error.value = Error.value*exp(-deltaT/Error.tau)
# Error.value += Error.grace - abs(target - p)
Error.value += Error.grace - deltaT*(target - p)/Error.tau
# if(p>target):
# print "p: ",p
# print "t: ",target
# exit()
return -(p-target)**2#exp(-(p-target)**2)#-(p/target-1)**2#-abs(p-target)#(exp(-abs(p-target)))#-(p-target)**2#+target*target#+3600
def SQError(p,target,deltaT):
return -(target-p)**2
def sigmoid(er):
return er
# return (2.0/(1.0+exp(-2*er))-1.0)
targetname = "target"
def weight_histogram(layer,binnum=None):
weights = [reduce(lambda x,synapse:x+synapse.inhibitory*synapse.Pval(),neuron.synapses,0) for neuron in layer.layer]
if(binnum == None):
plt.hist(weights,normed = True)
else:
plt.hist(weights,bins=binnum,normed = True)
def valtopair(x):
if(x>0):
c = x+random()*(2000-2*x)
else:
c = -x + random()*(2000+2*x)
# c = 400+400*random()
fp = (c+x)/2.0
fm = (c-x)/2.0
try:
assert(fp>=0 and fm>=0)
except:
print "c: ",c," x: ",x
exit()
return (fp,fm)
def pairtoval(pair):
return pair[0]-pair[1]
def target(x):
global targetname
targetname = "sin"
z = x[0]-x[1]
# return -z*z
return 400*sin(3.141592654/400*z)#400.0*(z/400.0)#sin(3.14159264/400.0*z)
def plotrange(f,xmin,xmax,resolution,alabel = None):
xvals = [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]
plt.plot(xvals,map(f,xvals),label = alabel)
def plotavs(layer,xmin,xmax,resolution,savename = None,display = True,title = ""):
plt.clf()
# plotrange(lambda x:layer.getaverage(valtopair(x)),xmin,xmax,"decoded values")
xvals = []
cms = []
decvals = []
dec_pairp=[]
dec_pairm=[]
dec_cms = []
for x in range(resolution*xmin,resolution*xmax):
pair = valtopair(x/float(resolution))
xvals.append(x/float(resolution))
dec = layer.getaverage(pair)
decvals.append(dec)
dec_pair = layer.getpair(pair)
dec_pairp.append(-dec_pair[0])
dec_pairm.append(-dec_pair[1])
cms.append((pair[0]+pair[1])/2.0)
plt.plot(xvals,decvals,label = "decoded values")
plt.plot(xvals,cms,label="input common mode")
plt.plot(xvals,dec_pairp,label="output f+")
plt.plot(xvals,dec_pairm,label="output f-")
# plotrange(layer.getCM,xmin,xmax,"common modes")
plotrange(lambda x: target(valtopair(x)),xmin,xmax,resolution,"target values")
# plotrange(lambda x:Error(layer.getaverage(valtopair(x)),target(valtopair(x)),1),xmin,xmax,resolution,"error")
ervals = [SQError(layer.getaverage(valtopair(x)),target(valtopair(x)),1) for x in [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]]
avsq = reduce(lambda x,y:x+y**2,ervals)/len(ervals)
avsq = 0
for er in ervals:
avsq += er
avsq = avsq/len(ervals)
rms = sqrt(-avsq)
if(title != ""):
title += " RMS Error: "+str(rms)
else:
title = "RMS Error: "+str(rms)
plt.title(title)
plt.legend(loc=2)
if(savename != None):
plt.savefig(savename)
if(display):
plt.show()
def plottuning(neuron,xvals):
|
def initplot(layer):
x = -2
t = target(x)
for a in range(int(0.5/deltaT)):
tvals.append(a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = -1
for a in range(int(0.5/deltaT)):
tvals.append(0.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 1
for a in range(int(0.5/deltaT)):
tvals.append(1.0+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 2
for a in range(int(0.5/deltaT)):
tvals.append(1.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
plt.plot(tvals,xhatvals)
plt.show()
def randunit(d):
v = 2
while(np.linalg.norm(v)>1):
v = np.array([random()*2-1.0 for x in range(d)])
return v/np.linalg.norm(v)
def randweighting(d):
var = 0.3
# var = 0.0
return np.array([var*(2*random()-1.0)+1.0,-(var*(2*random()-1.0)+1.0)])
#synapses = [NEF.Synapse(inhibitory = (x%2)*2-1,initialQ = 0.0) for x in range(1000)]
Error.grace = 0.0#70000.00
Error.value = 0.0
Error.tau = 0.000001*NEF.ms
layersize = 100
weight_val = 1#(10*NEF.ms)
inhibsynapses = [NEF.Synapse(inhibitory = -1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
excitsynapses = [NEF.Synapse(inhibitory = 1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
#neurons = [NEF.NEFneuron(synapse = x) for x in synapses]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = normalvariate(10*NEF.nA,5*NEF.nA),tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = (random()*2-1.0)*20*NEF.nA+7*NEF.nA,tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
layer = NEF.NEF_layer(layer = neurons,tau_PSC = 10 * NEF.ms,weight = weight_val)
#fp = open("neflayer_allpoints")
#layer = load(fp)
#fp.close()
deltaT = 0.001#*NEF.ms
feedbackrate = 100
updaterate = 60.0#20.0#0.25
eta = 0.00003#0#0001
regularization = 0.0000001
samplefrac = 25#60
targetx = 10.0
x = 0.4
time = 2.0#
displaytime = 120
total = 0
print 3/deltaT
tvals = []
xhatvals = []
presolve = True
lstsq = True
#xvals = [x*0.01 for x in range(-200,200)]
res = 100.0
numxvals = 1000
#xvals = [(x*1.0/res,y*1.0/res) for x in range(0,int(res)) for y in range(0,int(res))]
xvals = [valtopair(400.0*(2*random()-1.0)) for x in range(numxvals)]
for i in range(100):
plottuning(choice(neurons),xvals)
plt.title("Noisy Tuning Curves")
if(lstsq):
plt.savefig("noisytuning-"+str(numxvals)+"samples-"+str(layersize)+"neurons")
plt.show()
if(presolve):
NEF.LeastSquaresSolve(xvals,target,layer,regularization=1000)
if(lstsq):
weight_histogram(layer,binnum=50)
plt.savefig("weight-histogram-"+str(numxvals)+"samples-"+str(layersize)+"neurons")
plt.show()
plotavs(layer,-400,400,1,savename = "cm-agnostic-decode-"+str(numxvals)+"samples-"+str(layersize)+"neurons",title="Common Mode Agnostic Decode "+str(numxvals)+" pts")
plt.show()
exit()
else:
plotavs(layer,-400,400,1,display=False)
#plotavs(layer,-400,400,1)
#plotavs(layer,-400,400,1)
#exit()
#initplot(layer)
#plt.savefig("dataplot0_slowrate")
#plt.show()
c = 0
pltcount = 0
erav = 0
eravcount = 0
etrack = 0
etrackcounter = 0
etracks = []
avx = 0
lastetrack = 0
esquaretrack = 0
while(1):
c+=1
for a in range(1):
x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
x = random()*4.0-2.0
x = choice([-2,-1,0,1,2])
x = targetx
x = 1.0
if(c%2):
x = 0.4
x = 400.0*(random()*2.0-1.0)
# x = 50
pair = valtopair(x)
t = target(pair)
# t = 100
display = (c%int(displaytime/time) == 0)
if(c%500000 == 0 ):
print "epoch: ",c
print "iteration: ",a
print "trying x= "+str(x)+" target is: "+str(t)+" current average is: "+str(layer.getaverage(pair))
print "display: ",display
etot = 0
avxtot = 0
count = 0
tvals = []
xhatvals = []
ervals = []
avvals = []
aver = 0.0
averc = 0
etot_up = 0
count_up = 0
# display = True
layer.xhat = 0
lastx = 0
for q in range(samplefrac):
lastx = 0
xtot = 0
count = 0
for z in range(int(time/(samplefrac*deltaT))):
val = layer.Process(pair,deltaT)
xtot += val/deltaT
lastx += val/deltaT
avxtot += layer.average
er = sigmoid(Error(val,t,deltaT))
layer.RecordErr(er)
aver += er
averc += 1
if(display):
tvals.append(a*1.0+z*deltaT)
xhatvals.append(val)
avvals.append(layer.average)
ervals.append(er*eta)
etot += er
count += 1
etot_up += er
count_up += 1
# if(random() <deltaT*feedbackrate):#c%int(updaterate/time)==0 and z ==0):#random() < deltaT*feedbackrate):
# print "updating!"
# layer.Update(-(etot/count)**2,eta)
# layer.RecUpdate(0,0)#abs(etot_up/count_up),eta)
# etot_up = 0
# count_up = 0
# layer.Update(abs(aver/averc),eta)
# aver = 0
# averc = 0
# print "xtot: ",xtot/count
erav += Error(xtot/count,t,1)
# print "recording error: ",Error(xtot/count,t,1)
# print "value: ",xtot/count
avx += xtot/count
eravcount += 1
reperr= erav/eravcount
etrack += layer.layer[0].synapses[0].etrackval()
lastetrack = layer.layer[0].synapses[0].etrackval()
esquaretrack += lastetrack**2
#
# print "diff: ",abs(erav/eravcount-Error(layer.getaverage(pair),t)*(-2*(1-x/float(t)))
errorD = Error(layer.getaverage(pair),t,1)*(2*(1-layer.getaverage(pair)/float(t)))
etracks.append(layer.layer[0].synapses[0].etrackval())
etrackcounter += 1
etrackval = layer.layer[0].synapses[0].etrackval()
# print "current ratio: ",etrackval/(xtot/count - layer.getaverage(pair))
# print (etrackval/(xtot/count - layer.getaverage(pair)) - 0.01)
# print (etrackval/0.01 - ( xtot/count - layer.getaverage(pair)))
# print (xtot/count - (layer.getaverage(pair)+etrackval/0.01))
# print (Error(xtot/count,t,1) - ( Error(layer.getaverage(pair),t,1)+etrackval/0.01))
# assert(etrackval*erav/eravcount == etrackv
# layer.RecUpdate(Error(xtot/count,t,1),eta)
layer.CorrectedRecUpdate(Error(xtot/count,t,1))
if(c% int(updaterate/time)==0):
# plt.hist(etracks)
# plt.show()
etracks = []
print "updating!\n\n"
print "time elapsed: ",c*time
print "xval: ",x
print "target: ",t
print "count: ",count
print "etrackcount: ",etrackcounter
print "last etrack: ",lastetrack
etrackav = etrack/etrackcounter
etracksqav = esquaretrack/etrackcounter
errorval = Error(layer.getaverage(pair),t,1)
errorD = 1#-errorval*abs(layer.getaverage(pair)-t)/(layer.getaverage(pair)-t)
print "error: ",errorval
delta = avx/etrackcounter - layer.getaverage(pair)
print "error plus delta: ",errorval+errorD*etrackav/0.01
print "error (x plus delta): ",Error(layer.getaverage(pair)+etrackav/0.01,t,1)
avgrad = etrack*errorval+esquaretrack/0.01*errorD
print "etrack: average ",(etrack/etrackcounter)
print "etracksquare average: ",esquaretrack/etrackcounter
print "ratio: ",(etrackav)/(avx/etrackcounter-layer.getaverage(pair))#etrackcounter#layer.layer[0].synapses[0].etrack#etrack/etrackcounter
print "average error: ",reperr#etot/count
print "average x: ",avx/etrackcounter
print "current x: ",layer.xhat/(count*deltaT)
print "aval: ",layer.layer[0].a(pair)
print "predicted average: ",layer.getaverage(pair)#avxtot/count
print "pval: ",layer.layer[0].synapses[0].Pval()
print "est grad: ",avgrad
print "average q: ",reduce(lambda x,y:x+y,[reduce(lambda
x,y:x+y.q,neuron.synapses,0) for neuron in layer.layer],0)/len(layer.layer)
etrack = 0
avx = 0
etrackcounter = 0
esquaretrack = 0
erav = 0
eravcount = 0
layer.CorrectedUpdate(eta,regularization)
# layer.finalUpdate(eta)
if(display):
pltcount += 1
savename = ("figs/savedgraph_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr_"+str(pltcount)).replace(".","p")
plt.clf()
plt.title("xvalue = "+str(x)+" target = "+str(t))
v = "1p0"
if (x==0.4):
v = "0p4"
plt.plot(tvals,xhatvals,label="decoded")
# plt.plot(tvals,ervals,label ="error")
plt.plot(tvals,avvals,label="a vals")
plt.legend()
# plt.show()
# plt.savefig("savedfig_allpoints_normalized_300neurons_etap05_woverallplots_"+str(c))
# plt.savefig("savedfig_both_"+v+"_wsigmoid_m3_"+str(c))
print "saving to: "+savename+".png"
#plt.show()
plotavs(layer,-400,400,1,savename,display = False)
# plt.show()
savename = ("dumps/dump_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr").replace(".","p")
fp = open(savename,"w")#"neflayer_5points_id_doublerange_morevariation","w")
dump(layer,fp)
fp.close()
# x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
# x = random()*4.0-2.0
# x = choice([-2,-1,0,1,2])
# x = targetx
# t = target(x)
# tvals = []
# xhatvals = []
# ervals = []
# for a in range(int(0.5/deltaT)):
# tvals.append(a*deltaT)
# val = layer.Process(x,deltaT)
# xhatvals.append(val)
# ervals.append(eta*Error(val,t,deltaT))
# plt.clf()
# plt.title("xvalue = "+str(x)+" target = "+str(t))
# plt.plot(tvals,xhatvals)
# plt.plot(tvals,ervals)
# plt.show()
# plt.savefig("dataplot_"+"5points_id_doublerange_morevatiation"+str(c))
| yvals = [neuron.a(x) for x in xvals]
vallist = zip(map(pairtoval,xvals),yvals)
vallist.sort(key = lambda x:x[0])
xvals = [x[0] for x in vallist]
yvals = [x[1] for x in vallist]
plt.plot(xvals,yvals) | identifier_body |
NEF_tester.py |
import NEF
from matplotlib import pyplot as plt
from random import choice
from math import log,exp,sqrt,sin
from random import normalvariate,random
from pickle import dump,load
#from math import abs
import numpy as np
def Error(p,target,deltaT):
Error.value = Error.value*exp(-deltaT/Error.tau)
# Error.value += Error.grace - abs(target - p)
Error.value += Error.grace - deltaT*(target - p)/Error.tau
# if(p>target):
# print "p: ",p
# print "t: ",target
# exit()
return -(p-target)**2#exp(-(p-target)**2)#-(p/target-1)**2#-abs(p-target)#(exp(-abs(p-target)))#-(p-target)**2#+target*target#+3600
def | (p,target,deltaT):
return -(target-p)**2
def sigmoid(er):
return er
# return (2.0/(1.0+exp(-2*er))-1.0)
targetname = "target"
def weight_histogram(layer,binnum=None):
weights = [reduce(lambda x,synapse:x+synapse.inhibitory*synapse.Pval(),neuron.synapses,0) for neuron in layer.layer]
if(binnum == None):
plt.hist(weights,normed = True)
else:
plt.hist(weights,bins=binnum,normed = True)
def valtopair(x):
if(x>0):
c = x+random()*(2000-2*x)
else:
c = -x + random()*(2000+2*x)
# c = 400+400*random()
fp = (c+x)/2.0
fm = (c-x)/2.0
try:
assert(fp>=0 and fm>=0)
except:
print "c: ",c," x: ",x
exit()
return (fp,fm)
def pairtoval(pair):
return pair[0]-pair[1]
def target(x):
global targetname
targetname = "sin"
z = x[0]-x[1]
# return -z*z
return 400*sin(3.141592654/400*z)#400.0*(z/400.0)#sin(3.14159264/400.0*z)
def plotrange(f,xmin,xmax,resolution,alabel = None):
xvals = [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]
plt.plot(xvals,map(f,xvals),label = alabel)
def plotavs(layer,xmin,xmax,resolution,savename = None,display = True,title = ""):
plt.clf()
# plotrange(lambda x:layer.getaverage(valtopair(x)),xmin,xmax,"decoded values")
xvals = []
cms = []
decvals = []
dec_pairp=[]
dec_pairm=[]
dec_cms = []
for x in range(resolution*xmin,resolution*xmax):
pair = valtopair(x/float(resolution))
xvals.append(x/float(resolution))
dec = layer.getaverage(pair)
decvals.append(dec)
dec_pair = layer.getpair(pair)
dec_pairp.append(-dec_pair[0])
dec_pairm.append(-dec_pair[1])
cms.append((pair[0]+pair[1])/2.0)
plt.plot(xvals,decvals,label = "decoded values")
plt.plot(xvals,cms,label="input common mode")
plt.plot(xvals,dec_pairp,label="output f+")
plt.plot(xvals,dec_pairm,label="output f-")
# plotrange(layer.getCM,xmin,xmax,"common modes")
plotrange(lambda x: target(valtopair(x)),xmin,xmax,resolution,"target values")
# plotrange(lambda x:Error(layer.getaverage(valtopair(x)),target(valtopair(x)),1),xmin,xmax,resolution,"error")
ervals = [SQError(layer.getaverage(valtopair(x)),target(valtopair(x)),1) for x in [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]]
avsq = reduce(lambda x,y:x+y**2,ervals)/len(ervals)
avsq = 0
for er in ervals:
avsq += er
avsq = avsq/len(ervals)
rms = sqrt(-avsq)
if(title != ""):
title += " RMS Error: "+str(rms)
else:
title = "RMS Error: "+str(rms)
plt.title(title)
plt.legend(loc=2)
if(savename != None):
plt.savefig(savename)
if(display):
plt.show()
def plottuning(neuron,xvals):
yvals = [neuron.a(x) for x in xvals]
vallist = zip(map(pairtoval,xvals),yvals)
vallist.sort(key = lambda x:x[0])
xvals = [x[0] for x in vallist]
yvals = [x[1] for x in vallist]
plt.plot(xvals,yvals)
def initplot(layer):
x = -2
t = target(x)
for a in range(int(0.5/deltaT)):
tvals.append(a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = -1
for a in range(int(0.5/deltaT)):
tvals.append(0.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 1
for a in range(int(0.5/deltaT)):
tvals.append(1.0+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 2
for a in range(int(0.5/deltaT)):
tvals.append(1.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
plt.plot(tvals,xhatvals)
plt.show()
def randunit(d):
v = 2
while(np.linalg.norm(v)>1):
v = np.array([random()*2-1.0 for x in range(d)])
return v/np.linalg.norm(v)
def randweighting(d):
var = 0.3
# var = 0.0
return np.array([var*(2*random()-1.0)+1.0,-(var*(2*random()-1.0)+1.0)])
#synapses = [NEF.Synapse(inhibitory = (x%2)*2-1,initialQ = 0.0) for x in range(1000)]
Error.grace = 0.0#70000.00
Error.value = 0.0
Error.tau = 0.000001*NEF.ms
layersize = 100
weight_val = 1#(10*NEF.ms)
inhibsynapses = [NEF.Synapse(inhibitory = -1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
excitsynapses = [NEF.Synapse(inhibitory = 1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
#neurons = [NEF.NEFneuron(synapse = x) for x in synapses]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = normalvariate(10*NEF.nA,5*NEF.nA),tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = (random()*2-1.0)*20*NEF.nA+7*NEF.nA,tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
layer = NEF.NEF_layer(layer = neurons,tau_PSC = 10 * NEF.ms,weight = weight_val)
#fp = open("neflayer_allpoints")
#layer = load(fp)
#fp.close()
deltaT = 0.001#*NEF.ms
feedbackrate = 100
updaterate = 60.0#20.0#0.25
eta = 0.00003#0#0001
regularization = 0.0000001
samplefrac = 25#60
targetx = 10.0
x = 0.4
time = 2.0#
displaytime = 120
total = 0
print 3/deltaT
tvals = []
xhatvals = []
presolve = True
lstsq = True
#xvals = [x*0.01 for x in range(-200,200)]
res = 100.0
numxvals = 1000
#xvals = [(x*1.0/res,y*1.0/res) for x in range(0,int(res)) for y in range(0,int(res))]
xvals = [valtopair(400.0*(2*random()-1.0)) for x in range(numxvals)]
for i in range(100):
plottuning(choice(neurons),xvals)
plt.title("Noisy Tuning Curves")
if(lstsq):
plt.savefig("noisytuning-"+str(numxvals)+"samples-"+str(layersize)+"neurons")
plt.show()
if(presolve):
NEF.LeastSquaresSolve(xvals,target,layer,regularization=1000)
if(lstsq):
weight_histogram(layer,binnum=50)
plt.savefig("weight-histogram-"+str(numxvals)+"samples-"+str(layersize)+"neurons")
plt.show()
plotavs(layer,-400,400,1,savename = "cm-agnostic-decode-"+str(numxvals)+"samples-"+str(layersize)+"neurons",title="Common Mode Agnostic Decode "+str(numxvals)+" pts")
plt.show()
exit()
else:
plotavs(layer,-400,400,1,display=False)
#plotavs(layer,-400,400,1)
#plotavs(layer,-400,400,1)
#exit()
#initplot(layer)
#plt.savefig("dataplot0_slowrate")
#plt.show()
c = 0
pltcount = 0
erav = 0
eravcount = 0
etrack = 0
etrackcounter = 0
etracks = []
avx = 0
lastetrack = 0
esquaretrack = 0
while(1):
c+=1
for a in range(1):
x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
x = random()*4.0-2.0
x = choice([-2,-1,0,1,2])
x = targetx
x = 1.0
if(c%2):
x = 0.4
x = 400.0*(random()*2.0-1.0)
# x = 50
pair = valtopair(x)
t = target(pair)
# t = 100
display = (c%int(displaytime/time) == 0)
if(c%500000 == 0 ):
print "epoch: ",c
print "iteration: ",a
print "trying x= "+str(x)+" target is: "+str(t)+" current average is: "+str(layer.getaverage(pair))
print "display: ",display
etot = 0
avxtot = 0
count = 0
tvals = []
xhatvals = []
ervals = []
avvals = []
aver = 0.0
averc = 0
etot_up = 0
count_up = 0
# display = True
layer.xhat = 0
lastx = 0
for q in range(samplefrac):
lastx = 0
xtot = 0
count = 0
for z in range(int(time/(samplefrac*deltaT))):
val = layer.Process(pair,deltaT)
xtot += val/deltaT
lastx += val/deltaT
avxtot += layer.average
er = sigmoid(Error(val,t,deltaT))
layer.RecordErr(er)
aver += er
averc += 1
if(display):
tvals.append(a*1.0+z*deltaT)
xhatvals.append(val)
avvals.append(layer.average)
ervals.append(er*eta)
etot += er
count += 1
etot_up += er
count_up += 1
# if(random() <deltaT*feedbackrate):#c%int(updaterate/time)==0 and z ==0):#random() < deltaT*feedbackrate):
# print "updating!"
# layer.Update(-(etot/count)**2,eta)
# layer.RecUpdate(0,0)#abs(etot_up/count_up),eta)
# etot_up = 0
# count_up = 0
# layer.Update(abs(aver/averc),eta)
# aver = 0
# averc = 0
# print "xtot: ",xtot/count
erav += Error(xtot/count,t,1)
# print "recording error: ",Error(xtot/count,t,1)
# print "value: ",xtot/count
avx += xtot/count
eravcount += 1
reperr= erav/eravcount
etrack += layer.layer[0].synapses[0].etrackval()
lastetrack = layer.layer[0].synapses[0].etrackval()
esquaretrack += lastetrack**2
#
# print "diff: ",abs(erav/eravcount-Error(layer.getaverage(pair),t)*(-2*(1-x/float(t)))
errorD = Error(layer.getaverage(pair),t,1)*(2*(1-layer.getaverage(pair)/float(t)))
etracks.append(layer.layer[0].synapses[0].etrackval())
etrackcounter += 1
etrackval = layer.layer[0].synapses[0].etrackval()
# print "current ratio: ",etrackval/(xtot/count - layer.getaverage(pair))
# print (etrackval/(xtot/count - layer.getaverage(pair)) - 0.01)
# print (etrackval/0.01 - ( xtot/count - layer.getaverage(pair)))
# print (xtot/count - (layer.getaverage(pair)+etrackval/0.01))
# print (Error(xtot/count,t,1) - ( Error(layer.getaverage(pair),t,1)+etrackval/0.01))
# assert(etrackval*erav/eravcount == etrackv
# layer.RecUpdate(Error(xtot/count,t,1),eta)
layer.CorrectedRecUpdate(Error(xtot/count,t,1))
if(c% int(updaterate/time)==0):
# plt.hist(etracks)
# plt.show()
etracks = []
print "updating!\n\n"
print "time elapsed: ",c*time
print "xval: ",x
print "target: ",t
print "count: ",count
print "etrackcount: ",etrackcounter
print "last etrack: ",lastetrack
etrackav = etrack/etrackcounter
etracksqav = esquaretrack/etrackcounter
errorval = Error(layer.getaverage(pair),t,1)
errorD = 1#-errorval*abs(layer.getaverage(pair)-t)/(layer.getaverage(pair)-t)
print "error: ",errorval
delta = avx/etrackcounter - layer.getaverage(pair)
print "error plus delta: ",errorval+errorD*etrackav/0.01
print "error (x plus delta): ",Error(layer.getaverage(pair)+etrackav/0.01,t,1)
avgrad = etrack*errorval+esquaretrack/0.01*errorD
print "etrack: average ",(etrack/etrackcounter)
print "etracksquare average: ",esquaretrack/etrackcounter
print "ratio: ",(etrackav)/(avx/etrackcounter-layer.getaverage(pair))#etrackcounter#layer.layer[0].synapses[0].etrack#etrack/etrackcounter
print "average error: ",reperr#etot/count
print "average x: ",avx/etrackcounter
print "current x: ",layer.xhat/(count*deltaT)
print "aval: ",layer.layer[0].a(pair)
print "predicted average: ",layer.getaverage(pair)#avxtot/count
print "pval: ",layer.layer[0].synapses[0].Pval()
print "est grad: ",avgrad
print "average q: ",reduce(lambda x,y:x+y,[reduce(lambda
x,y:x+y.q,neuron.synapses,0) for neuron in layer.layer],0)/len(layer.layer)
etrack = 0
avx = 0
etrackcounter = 0
esquaretrack = 0
erav = 0
eravcount = 0
layer.CorrectedUpdate(eta,regularization)
# layer.finalUpdate(eta)
if(display):
pltcount += 1
savename = ("figs/savedgraph_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr_"+str(pltcount)).replace(".","p")
plt.clf()
plt.title("xvalue = "+str(x)+" target = "+str(t))
v = "1p0"
if (x==0.4):
v = "0p4"
plt.plot(tvals,xhatvals,label="decoded")
# plt.plot(tvals,ervals,label ="error")
plt.plot(tvals,avvals,label="a vals")
plt.legend()
# plt.show()
# plt.savefig("savedfig_allpoints_normalized_300neurons_etap05_woverallplots_"+str(c))
# plt.savefig("savedfig_both_"+v+"_wsigmoid_m3_"+str(c))
print "saving to: "+savename+".png"
#plt.show()
plotavs(layer,-400,400,1,savename,display = False)
# plt.show()
savename = ("dumps/dump_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr").replace(".","p")
fp = open(savename,"w")#"neflayer_5points_id_doublerange_morevariation","w")
dump(layer,fp)
fp.close()
# x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
# x = random()*4.0-2.0
# x = choice([-2,-1,0,1,2])
# x = targetx
# t = target(x)
# tvals = []
# xhatvals = []
# ervals = []
# for a in range(int(0.5/deltaT)):
# tvals.append(a*deltaT)
# val = layer.Process(x,deltaT)
# xhatvals.append(val)
# ervals.append(eta*Error(val,t,deltaT))
# plt.clf()
# plt.title("xvalue = "+str(x)+" target = "+str(t))
# plt.plot(tvals,xhatvals)
# plt.plot(tvals,ervals)
# plt.show()
# plt.savefig("dataplot_"+"5points_id_doublerange_morevatiation"+str(c))
| SQError | identifier_name |
NEF_tester.py | import NEF
from matplotlib import pyplot as plt
from random import choice
from math import log,exp,sqrt,sin
from random import normalvariate,random
from pickle import dump,load
#from math import abs
import numpy as np
def Error(p,target,deltaT):
Error.value = Error.value*exp(-deltaT/Error.tau)
# Error.value += Error.grace - abs(target - p)
Error.value += Error.grace - deltaT*(target - p)/Error.tau
# if(p>target):
# print "p: ",p
# print "t: ",target
# exit()
return -(p-target)**2#exp(-(p-target)**2)#-(p/target-1)**2#-abs(p-target)#(exp(-abs(p-target)))#-(p-target)**2#+target*target#+3600
def SQError(p,target,deltaT):
return -(target-p)**2
def sigmoid(er):
return er
# return (2.0/(1.0+exp(-2*er))-1.0)
targetname = "target"
def weight_histogram(layer,binnum=None):
weights = [reduce(lambda x,synapse:x+synapse.inhibitory*synapse.Pval(),neuron.synapses,0) for neuron in layer.layer]
if(binnum == None):
plt.hist(weights,normed = True)
else:
plt.hist(weights,bins=binnum,normed = True)
def valtopair(x):
if(x>0):
c = x+random()*(2000-2*x)
else:
c = -x + random()*(2000+2*x)
# c = 400+400*random()
fp = (c+x)/2.0
fm = (c-x)/2.0
try:
assert(fp>=0 and fm>=0)
except:
print "c: ",c," x: ",x
exit()
return (fp,fm)
def pairtoval(pair):
return pair[0]-pair[1]
def target(x):
global targetname
targetname = "sin"
z = x[0]-x[1]
# return -z*z
return 400*sin(3.141592654/400*z)#400.0*(z/400.0)#sin(3.14159264/400.0*z)
def plotrange(f,xmin,xmax,resolution,alabel = None):
xvals = [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]
plt.plot(xvals,map(f,xvals),label = alabel)
def plotavs(layer,xmin,xmax,resolution,savename = None,display = True,title = ""):
plt.clf()
# plotrange(lambda x:layer.getaverage(valtopair(x)),xmin,xmax,"decoded values")
xvals = []
cms = []
decvals = []
dec_pairp=[]
dec_pairm=[]
dec_cms = []
for x in range(resolution*xmin,resolution*xmax):
pair = valtopair(x/float(resolution))
xvals.append(x/float(resolution))
dec = layer.getaverage(pair)
decvals.append(dec)
dec_pair = layer.getpair(pair)
dec_pairp.append(-dec_pair[0])
dec_pairm.append(-dec_pair[1])
cms.append((pair[0]+pair[1])/2.0)
plt.plot(xvals,decvals,label = "decoded values")
plt.plot(xvals,cms,label="input common mode")
plt.plot(xvals,dec_pairp,label="output f+")
plt.plot(xvals,dec_pairm,label="output f-")
# plotrange(layer.getCM,xmin,xmax,"common modes")
plotrange(lambda x: target(valtopair(x)),xmin,xmax,resolution,"target values")
# plotrange(lambda x:Error(layer.getaverage(valtopair(x)),target(valtopair(x)),1),xmin,xmax,resolution,"error")
ervals = [SQError(layer.getaverage(valtopair(x)),target(valtopair(x)),1) for x in [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]]
avsq = reduce(lambda x,y:x+y**2,ervals)/len(ervals)
avsq = 0
for er in ervals:
avsq += er
avsq = avsq/len(ervals)
rms = sqrt(-avsq)
if(title != ""):
title += " RMS Error: "+str(rms)
else:
title = "RMS Error: "+str(rms)
plt.title(title)
plt.legend(loc=2)
if(savename != None):
plt.savefig(savename)
if(display):
plt.show()
def plottuning(neuron,xvals):
yvals = [neuron.a(x) for x in xvals]
vallist = zip(map(pairtoval,xvals),yvals)
vallist.sort(key = lambda x:x[0])
xvals = [x[0] for x in vallist]
yvals = [x[1] for x in vallist]
plt.plot(xvals,yvals)
def initplot(layer):
x = -2
t = target(x)
for a in range(int(0.5/deltaT)):
tvals.append(a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = -1
for a in range(int(0.5/deltaT)):
tvals.append(0.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 1
for a in range(int(0.5/deltaT)):
tvals.append(1.0+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 2
for a in range(int(0.5/deltaT)):
tvals.append(1.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
plt.plot(tvals,xhatvals)
plt.show()
def randunit(d):
v = 2
while(np.linalg.norm(v)>1):
v = np.array([random()*2-1.0 for x in range(d)])
return v/np.linalg.norm(v)
def randweighting(d):
var = 0.3
# var = 0.0
return np.array([var*(2*random()-1.0)+1.0,-(var*(2*random()-1.0)+1.0)])
#synapses = [NEF.Synapse(inhibitory = (x%2)*2-1,initialQ = 0.0) for x in range(1000)]
Error.grace = 0.0#70000.00
Error.value = 0.0
Error.tau = 0.000001*NEF.ms
layersize = 100
weight_val = 1#(10*NEF.ms)
inhibsynapses = [NEF.Synapse(inhibitory = -1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
excitsynapses = [NEF.Synapse(inhibitory = 1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
#neurons = [NEF.NEFneuron(synapse = x) for x in synapses]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = normalvariate(10*NEF.nA,5*NEF.nA),tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = (random()*2-1.0)*20*NEF.nA+7*NEF.nA,tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
layer = NEF.NEF_layer(layer = neurons,tau_PSC = 10 * NEF.ms,weight = weight_val)
#fp = open("neflayer_allpoints")
#layer = load(fp)
#fp.close()
deltaT = 0.001#*NEF.ms
feedbackrate = 100
updaterate = 60.0#20.0#0.25
eta = 0.00003#0#0001
regularization = 0.0000001
samplefrac = 25#60
targetx = 10.0
x = 0.4
time = 2.0#
displaytime = 120
total = 0
print 3/deltaT
tvals = []
xhatvals = []
presolve = True
lstsq = True
#xvals = [x*0.01 for x in range(-200,200)]
res = 100.0
numxvals = 1000
#xvals = [(x*1.0/res,y*1.0/res) for x in range(0,int(res)) for y in range(0,int(res))]
xvals = [valtopair(400.0*(2*random()-1.0)) for x in range(numxvals)]
for i in range(100):
plottuning(choice(neurons),xvals)
plt.title("Noisy Tuning Curves") | plt.show()
if(presolve):
NEF.LeastSquaresSolve(xvals,target,layer,regularization=1000)
if(lstsq):
weight_histogram(layer,binnum=50)
plt.savefig("weight-histogram-"+str(numxvals)+"samples-"+str(layersize)+"neurons")
plt.show()
plotavs(layer,-400,400,1,savename = "cm-agnostic-decode-"+str(numxvals)+"samples-"+str(layersize)+"neurons",title="Common Mode Agnostic Decode "+str(numxvals)+" pts")
plt.show()
exit()
else:
plotavs(layer,-400,400,1,display=False)
#plotavs(layer,-400,400,1)
#plotavs(layer,-400,400,1)
#exit()
#initplot(layer)
#plt.savefig("dataplot0_slowrate")
#plt.show()
c = 0
pltcount = 0
erav = 0
eravcount = 0
etrack = 0
etrackcounter = 0
etracks = []
avx = 0
lastetrack = 0
esquaretrack = 0
while(1):
c+=1
for a in range(1):
x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
x = random()*4.0-2.0
x = choice([-2,-1,0,1,2])
x = targetx
x = 1.0
if(c%2):
x = 0.4
x = 400.0*(random()*2.0-1.0)
# x = 50
pair = valtopair(x)
t = target(pair)
# t = 100
display = (c%int(displaytime/time) == 0)
if(c%500000 == 0 ):
print "epoch: ",c
print "iteration: ",a
print "trying x= "+str(x)+" target is: "+str(t)+" current average is: "+str(layer.getaverage(pair))
print "display: ",display
etot = 0
avxtot = 0
count = 0
tvals = []
xhatvals = []
ervals = []
avvals = []
aver = 0.0
averc = 0
etot_up = 0
count_up = 0
# display = True
layer.xhat = 0
lastx = 0
for q in range(samplefrac):
lastx = 0
xtot = 0
count = 0
for z in range(int(time/(samplefrac*deltaT))):
val = layer.Process(pair,deltaT)
xtot += val/deltaT
lastx += val/deltaT
avxtot += layer.average
er = sigmoid(Error(val,t,deltaT))
layer.RecordErr(er)
aver += er
averc += 1
if(display):
tvals.append(a*1.0+z*deltaT)
xhatvals.append(val)
avvals.append(layer.average)
ervals.append(er*eta)
etot += er
count += 1
etot_up += er
count_up += 1
# if(random() <deltaT*feedbackrate):#c%int(updaterate/time)==0 and z ==0):#random() < deltaT*feedbackrate):
# print "updating!"
# layer.Update(-(etot/count)**2,eta)
# layer.RecUpdate(0,0)#abs(etot_up/count_up),eta)
# etot_up = 0
# count_up = 0
# layer.Update(abs(aver/averc),eta)
# aver = 0
# averc = 0
# print "xtot: ",xtot/count
erav += Error(xtot/count,t,1)
# print "recording error: ",Error(xtot/count,t,1)
# print "value: ",xtot/count
avx += xtot/count
eravcount += 1
reperr= erav/eravcount
etrack += layer.layer[0].synapses[0].etrackval()
lastetrack = layer.layer[0].synapses[0].etrackval()
esquaretrack += lastetrack**2
#
# print "diff: ",abs(erav/eravcount-Error(layer.getaverage(pair),t)*(-2*(1-x/float(t)))
errorD = Error(layer.getaverage(pair),t,1)*(2*(1-layer.getaverage(pair)/float(t)))
etracks.append(layer.layer[0].synapses[0].etrackval())
etrackcounter += 1
etrackval = layer.layer[0].synapses[0].etrackval()
# print "current ratio: ",etrackval/(xtot/count - layer.getaverage(pair))
# print (etrackval/(xtot/count - layer.getaverage(pair)) - 0.01)
# print (etrackval/0.01 - ( xtot/count - layer.getaverage(pair)))
# print (xtot/count - (layer.getaverage(pair)+etrackval/0.01))
# print (Error(xtot/count,t,1) - ( Error(layer.getaverage(pair),t,1)+etrackval/0.01))
# assert(etrackval*erav/eravcount == etrackv
# layer.RecUpdate(Error(xtot/count,t,1),eta)
layer.CorrectedRecUpdate(Error(xtot/count,t,1))
if(c% int(updaterate/time)==0):
# plt.hist(etracks)
# plt.show()
etracks = []
print "updating!\n\n"
print "time elapsed: ",c*time
print "xval: ",x
print "target: ",t
print "count: ",count
print "etrackcount: ",etrackcounter
print "last etrack: ",lastetrack
etrackav = etrack/etrackcounter
etracksqav = esquaretrack/etrackcounter
errorval = Error(layer.getaverage(pair),t,1)
errorD = 1#-errorval*abs(layer.getaverage(pair)-t)/(layer.getaverage(pair)-t)
print "error: ",errorval
delta = avx/etrackcounter - layer.getaverage(pair)
print "error plus delta: ",errorval+errorD*etrackav/0.01
print "error (x plus delta): ",Error(layer.getaverage(pair)+etrackav/0.01,t,1)
avgrad = etrack*errorval+esquaretrack/0.01*errorD
print "etrack: average ",(etrack/etrackcounter)
print "etracksquare average: ",esquaretrack/etrackcounter
print "ratio: ",(etrackav)/(avx/etrackcounter-layer.getaverage(pair))#etrackcounter#layer.layer[0].synapses[0].etrack#etrack/etrackcounter
print "average error: ",reperr#etot/count
print "average x: ",avx/etrackcounter
print "current x: ",layer.xhat/(count*deltaT)
print "aval: ",layer.layer[0].a(pair)
print "predicted average: ",layer.getaverage(pair)#avxtot/count
print "pval: ",layer.layer[0].synapses[0].Pval()
print "est grad: ",avgrad
print "average q: ",reduce(lambda x,y:x+y,[reduce(lambda
x,y:x+y.q,neuron.synapses,0) for neuron in layer.layer],0)/len(layer.layer)
etrack = 0
avx = 0
etrackcounter = 0
esquaretrack = 0
erav = 0
eravcount = 0
layer.CorrectedUpdate(eta,regularization)
# layer.finalUpdate(eta)
if(display):
pltcount += 1
savename = ("figs/savedgraph_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr_"+str(pltcount)).replace(".","p")
plt.clf()
plt.title("xvalue = "+str(x)+" target = "+str(t))
v = "1p0"
if (x==0.4):
v = "0p4"
plt.plot(tvals,xhatvals,label="decoded")
# plt.plot(tvals,ervals,label ="error")
plt.plot(tvals,avvals,label="a vals")
plt.legend()
# plt.show()
# plt.savefig("savedfig_allpoints_normalized_300neurons_etap05_woverallplots_"+str(c))
# plt.savefig("savedfig_both_"+v+"_wsigmoid_m3_"+str(c))
print "saving to: "+savename+".png"
#plt.show()
plotavs(layer,-400,400,1,savename,display = False)
# plt.show()
savename = ("dumps/dump_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr").replace(".","p")
fp = open(savename,"w")#"neflayer_5points_id_doublerange_morevariation","w")
dump(layer,fp)
fp.close()
# x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
# x = random()*4.0-2.0
# x = choice([-2,-1,0,1,2])
# x = targetx
# t = target(x)
# tvals = []
# xhatvals = []
# ervals = []
# for a in range(int(0.5/deltaT)):
# tvals.append(a*deltaT)
# val = layer.Process(x,deltaT)
# xhatvals.append(val)
# ervals.append(eta*Error(val,t,deltaT))
# plt.clf()
# plt.title("xvalue = "+str(x)+" target = "+str(t))
# plt.plot(tvals,xhatvals)
# plt.plot(tvals,ervals)
# plt.show()
# plt.savefig("dataplot_"+"5points_id_doublerange_morevatiation"+str(c)) | if(lstsq):
plt.savefig("noisytuning-"+str(numxvals)+"samples-"+str(layersize)+"neurons") | random_line_split |
NEF_tester.py |
import NEF
from matplotlib import pyplot as plt
from random import choice
from math import log,exp,sqrt,sin
from random import normalvariate,random
from pickle import dump,load
#from math import abs
import numpy as np
def Error(p,target,deltaT):
Error.value = Error.value*exp(-deltaT/Error.tau)
# Error.value += Error.grace - abs(target - p)
Error.value += Error.grace - deltaT*(target - p)/Error.tau
# if(p>target):
# print "p: ",p
# print "t: ",target
# exit()
return -(p-target)**2#exp(-(p-target)**2)#-(p/target-1)**2#-abs(p-target)#(exp(-abs(p-target)))#-(p-target)**2#+target*target#+3600
def SQError(p,target,deltaT):
return -(target-p)**2
def sigmoid(er):
return er
# return (2.0/(1.0+exp(-2*er))-1.0)
targetname = "target"
def weight_histogram(layer,binnum=None):
weights = [reduce(lambda x,synapse:x+synapse.inhibitory*synapse.Pval(),neuron.synapses,0) for neuron in layer.layer]
if(binnum == None):
plt.hist(weights,normed = True)
else:
plt.hist(weights,bins=binnum,normed = True)
def valtopair(x):
if(x>0):
c = x+random()*(2000-2*x)
else:
c = -x + random()*(2000+2*x)
# c = 400+400*random()
fp = (c+x)/2.0
fm = (c-x)/2.0
try:
assert(fp>=0 and fm>=0)
except:
print "c: ",c," x: ",x
exit()
return (fp,fm)
def pairtoval(pair):
return pair[0]-pair[1]
def target(x):
global targetname
targetname = "sin"
z = x[0]-x[1]
# return -z*z
return 400*sin(3.141592654/400*z)#400.0*(z/400.0)#sin(3.14159264/400.0*z)
def plotrange(f,xmin,xmax,resolution,alabel = None):
xvals = [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]
plt.plot(xvals,map(f,xvals),label = alabel)
def plotavs(layer,xmin,xmax,resolution,savename = None,display = True,title = ""):
plt.clf()
# plotrange(lambda x:layer.getaverage(valtopair(x)),xmin,xmax,"decoded values")
xvals = []
cms = []
decvals = []
dec_pairp=[]
dec_pairm=[]
dec_cms = []
for x in range(resolution*xmin,resolution*xmax):
pair = valtopair(x/float(resolution))
xvals.append(x/float(resolution))
dec = layer.getaverage(pair)
decvals.append(dec)
dec_pair = layer.getpair(pair)
dec_pairp.append(-dec_pair[0])
dec_pairm.append(-dec_pair[1])
cms.append((pair[0]+pair[1])/2.0)
plt.plot(xvals,decvals,label = "decoded values")
plt.plot(xvals,cms,label="input common mode")
plt.plot(xvals,dec_pairp,label="output f+")
plt.plot(xvals,dec_pairm,label="output f-")
# plotrange(layer.getCM,xmin,xmax,"common modes")
plotrange(lambda x: target(valtopair(x)),xmin,xmax,resolution,"target values")
# plotrange(lambda x:Error(layer.getaverage(valtopair(x)),target(valtopair(x)),1),xmin,xmax,resolution,"error")
ervals = [SQError(layer.getaverage(valtopair(x)),target(valtopair(x)),1) for x in [x/float(resolution) for x in range(resolution*xmin,resolution*xmax)]]
avsq = reduce(lambda x,y:x+y**2,ervals)/len(ervals)
avsq = 0
for er in ervals:
avsq += er
avsq = avsq/len(ervals)
rms = sqrt(-avsq)
if(title != ""):
title += " RMS Error: "+str(rms)
else:
title = "RMS Error: "+str(rms)
plt.title(title)
plt.legend(loc=2)
if(savename != None):
plt.savefig(savename)
if(display):
plt.show()
def plottuning(neuron,xvals):
yvals = [neuron.a(x) for x in xvals]
vallist = zip(map(pairtoval,xvals),yvals)
vallist.sort(key = lambda x:x[0])
xvals = [x[0] for x in vallist]
yvals = [x[1] for x in vallist]
plt.plot(xvals,yvals)
def initplot(layer):
x = -2
t = target(x)
for a in range(int(0.5/deltaT)):
tvals.append(a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = -1
for a in range(int(0.5/deltaT)):
tvals.append(0.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 1
for a in range(int(0.5/deltaT)):
tvals.append(1.0+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
x = 2
for a in range(int(0.5/deltaT)):
tvals.append(1.5+a*deltaT)
xhatvals.append(layer.Process(x,deltaT))
plt.plot(tvals,xhatvals)
plt.show()
def randunit(d):
v = 2
while(np.linalg.norm(v)>1):
v = np.array([random()*2-1.0 for x in range(d)])
return v/np.linalg.norm(v)
def randweighting(d):
var = 0.3
# var = 0.0
return np.array([var*(2*random()-1.0)+1.0,-(var*(2*random()-1.0)+1.0)])
#synapses = [NEF.Synapse(inhibitory = (x%2)*2-1,initialQ = 0.0) for x in range(1000)]
Error.grace = 0.0#70000.00
Error.value = 0.0
Error.tau = 0.000001*NEF.ms
layersize = 100
weight_val = 1#(10*NEF.ms)
inhibsynapses = [NEF.Synapse(inhibitory = -1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
excitsynapses = [NEF.Synapse(inhibitory = 1,initialQ = 0*(random()-0.5)-4.0) for x in range(layersize)]
#neurons = [NEF.NEFneuron(synapse = x) for x in synapses]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = normalvariate(10*NEF.nA,5*NEF.nA),tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
neurons = [NEF.NEFneuron(synapses = [excitsynapses[i],inhibsynapses[i]],e = choice([-1,1])*randweighting(2),alpha = (1.0/400.0)*normalvariate(17*NEF.nA,5*NEF.nA),J_bias = (random()*2-1.0)*20*NEF.nA+7*NEF.nA,tau_ref = normalvariate(1.5*NEF.ms,0.3*NEF.ms),tau_RC = normalvariate(20*NEF.ms,4*NEF.ms),J_th = normalvariate(1*NEF.nA,.2*NEF.nA)) for i in range(layersize)]
layer = NEF.NEF_layer(layer = neurons,tau_PSC = 10 * NEF.ms,weight = weight_val)
#fp = open("neflayer_allpoints")
#layer = load(fp)
#fp.close()
deltaT = 0.001#*NEF.ms
feedbackrate = 100
updaterate = 60.0#20.0#0.25
eta = 0.00003#0#0001
regularization = 0.0000001
samplefrac = 25#60
targetx = 10.0
x = 0.4
time = 2.0#
displaytime = 120
total = 0
print 3/deltaT
tvals = []
xhatvals = []
presolve = True
lstsq = True
#xvals = [x*0.01 for x in range(-200,200)]
res = 100.0
numxvals = 1000
#xvals = [(x*1.0/res,y*1.0/res) for x in range(0,int(res)) for y in range(0,int(res))]
xvals = [valtopair(400.0*(2*random()-1.0)) for x in range(numxvals)]
for i in range(100):
plottuning(choice(neurons),xvals)
plt.title("Noisy Tuning Curves")
if(lstsq):
plt.savefig("noisytuning-"+str(numxvals)+"samples-"+str(layersize)+"neurons")
plt.show()
if(presolve):
NEF.LeastSquaresSolve(xvals,target,layer,regularization=1000)
if(lstsq):
weight_histogram(layer,binnum=50)
plt.savefig("weight-histogram-"+str(numxvals)+"samples-"+str(layersize)+"neurons")
plt.show()
plotavs(layer,-400,400,1,savename = "cm-agnostic-decode-"+str(numxvals)+"samples-"+str(layersize)+"neurons",title="Common Mode Agnostic Decode "+str(numxvals)+" pts")
plt.show()
exit()
else:
plotavs(layer,-400,400,1,display=False)
#plotavs(layer,-400,400,1)
#plotavs(layer,-400,400,1)
#exit()
#initplot(layer)
#plt.savefig("dataplot0_slowrate")
#plt.show()
c = 0
pltcount = 0
erav = 0
eravcount = 0
etrack = 0
etrackcounter = 0
etracks = []
avx = 0
lastetrack = 0
esquaretrack = 0
while(1):
c+=1
for a in range(1):
x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
x = random()*4.0-2.0
x = choice([-2,-1,0,1,2])
x = targetx
x = 1.0
if(c%2):
x = 0.4
x = 400.0*(random()*2.0-1.0)
# x = 50
pair = valtopair(x)
t = target(pair)
# t = 100
display = (c%int(displaytime/time) == 0)
if(c%500000 == 0 ):
print "epoch: ",c
print "iteration: ",a
print "trying x= "+str(x)+" target is: "+str(t)+" current average is: "+str(layer.getaverage(pair))
print "display: ",display
etot = 0
avxtot = 0
count = 0
tvals = []
xhatvals = []
ervals = []
avvals = []
aver = 0.0
averc = 0
etot_up = 0
count_up = 0
# display = True
layer.xhat = 0
lastx = 0
for q in range(samplefrac):
lastx = 0
xtot = 0
count = 0
for z in range(int(time/(samplefrac*deltaT))):
val = layer.Process(pair,deltaT)
xtot += val/deltaT
lastx += val/deltaT
avxtot += layer.average
er = sigmoid(Error(val,t,deltaT))
layer.RecordErr(er)
aver += er
averc += 1
if(display):
tvals.append(a*1.0+z*deltaT)
xhatvals.append(val)
avvals.append(layer.average)
ervals.append(er*eta)
etot += er
count += 1
etot_up += er
count_up += 1
# if(random() <deltaT*feedbackrate):#c%int(updaterate/time)==0 and z ==0):#random() < deltaT*feedbackrate):
# print "updating!"
# layer.Update(-(etot/count)**2,eta)
# layer.RecUpdate(0,0)#abs(etot_up/count_up),eta)
# etot_up = 0
# count_up = 0
# layer.Update(abs(aver/averc),eta)
# aver = 0
# averc = 0
# print "xtot: ",xtot/count
erav += Error(xtot/count,t,1)
# print "recording error: ",Error(xtot/count,t,1)
# print "value: ",xtot/count
avx += xtot/count
eravcount += 1
reperr= erav/eravcount
etrack += layer.layer[0].synapses[0].etrackval()
lastetrack = layer.layer[0].synapses[0].etrackval()
esquaretrack += lastetrack**2
#
# print "diff: ",abs(erav/eravcount-Error(layer.getaverage(pair),t)*(-2*(1-x/float(t)))
errorD = Error(layer.getaverage(pair),t,1)*(2*(1-layer.getaverage(pair)/float(t)))
etracks.append(layer.layer[0].synapses[0].etrackval())
etrackcounter += 1
etrackval = layer.layer[0].synapses[0].etrackval()
# print "current ratio: ",etrackval/(xtot/count - layer.getaverage(pair))
# print (etrackval/(xtot/count - layer.getaverage(pair)) - 0.01)
# print (etrackval/0.01 - ( xtot/count - layer.getaverage(pair)))
# print (xtot/count - (layer.getaverage(pair)+etrackval/0.01))
# print (Error(xtot/count,t,1) - ( Error(layer.getaverage(pair),t,1)+etrackval/0.01))
# assert(etrackval*erav/eravcount == etrackv
# layer.RecUpdate(Error(xtot/count,t,1),eta)
layer.CorrectedRecUpdate(Error(xtot/count,t,1))
if(c% int(updaterate/time)==0):
# plt.hist(etracks)
# plt.show()
etracks = []
print "updating!\n\n"
print "time elapsed: ",c*time
print "xval: ",x
print "target: ",t
print "count: ",count
print "etrackcount: ",etrackcounter
print "last etrack: ",lastetrack
etrackav = etrack/etrackcounter
etracksqav = esquaretrack/etrackcounter
errorval = Error(layer.getaverage(pair),t,1)
errorD = 1#-errorval*abs(layer.getaverage(pair)-t)/(layer.getaverage(pair)-t)
print "error: ",errorval
delta = avx/etrackcounter - layer.getaverage(pair)
print "error plus delta: ",errorval+errorD*etrackav/0.01
print "error (x plus delta): ",Error(layer.getaverage(pair)+etrackav/0.01,t,1)
avgrad = etrack*errorval+esquaretrack/0.01*errorD
print "etrack: average ",(etrack/etrackcounter)
print "etracksquare average: ",esquaretrack/etrackcounter
print "ratio: ",(etrackav)/(avx/etrackcounter-layer.getaverage(pair))#etrackcounter#layer.layer[0].synapses[0].etrack#etrack/etrackcounter
print "average error: ",reperr#etot/count
print "average x: ",avx/etrackcounter
print "current x: ",layer.xhat/(count*deltaT)
print "aval: ",layer.layer[0].a(pair)
print "predicted average: ",layer.getaverage(pair)#avxtot/count
print "pval: ",layer.layer[0].synapses[0].Pval()
print "est grad: ",avgrad
print "average q: ",reduce(lambda x,y:x+y,[reduce(lambda
x,y:x+y.q,neuron.synapses,0) for neuron in layer.layer],0)/len(layer.layer)
etrack = 0
avx = 0
etrackcounter = 0
esquaretrack = 0
erav = 0
eravcount = 0
layer.CorrectedUpdate(eta,regularization)
# layer.finalUpdate(eta)
if(display):
pltcount += 1
savename = ("figs/savedgraph_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr_"+str(pltcount)).replace(".","p")
plt.clf()
plt.title("xvalue = "+str(x)+" target = "+str(t))
v = "1p0"
if (x==0.4):
|
plt.plot(tvals,xhatvals,label="decoded")
# plt.plot(tvals,ervals,label ="error")
plt.plot(tvals,avvals,label="a vals")
plt.legend()
# plt.show()
# plt.savefig("savedfig_allpoints_normalized_300neurons_etap05_woverallplots_"+str(c))
# plt.savefig("savedfig_both_"+v+"_wsigmoid_m3_"+str(c))
print "saving to: "+savename+".png"
#plt.show()
plotavs(layer,-400,400,1,savename,display = False)
# plt.show()
savename = ("dumps/dump_frequencies_allpoints_correctedupdates_"+str(Error.grace)+"grace_"+str(presolve)+"presolve_"+str(displaytime)+"displaytime_"+str(time)+"perval_"+str(updaterate)+"updaterate_"+str(samplefrac)+"samplefrac_"+targetname+"_"+str(layersize)+"neurons_feedbackrate"+str(feedbackrate)+"_eta"+str(eta)+"_weight"+str(weight_val)+"_aver_clearerr").replace(".","p")
fp = open(savename,"w")#"neflayer_5points_id_doublerange_morevariation","w")
dump(layer,fp)
fp.close()
# x = choice([-2,-1,1,2])*0.2#random()*2.0-1.0
# x = random()*4.0-2.0
# x = choice([-2,-1,0,1,2])
# x = targetx
# t = target(x)
# tvals = []
# xhatvals = []
# ervals = []
# for a in range(int(0.5/deltaT)):
# tvals.append(a*deltaT)
# val = layer.Process(x,deltaT)
# xhatvals.append(val)
# ervals.append(eta*Error(val,t,deltaT))
# plt.clf()
# plt.title("xvalue = "+str(x)+" target = "+str(t))
# plt.plot(tvals,xhatvals)
# plt.plot(tvals,ervals)
# plt.show()
# plt.savefig("dataplot_"+"5points_id_doublerange_morevatiation"+str(c))
| v = "0p4" | conditional_block |
main.py | import email
import json
import logging
import os
import random
import re
import string
from collections import defaultdict
from typing import Dict
import numpy as np
from bs4 import BeautifulSoup
from nltk import SnowballStemmer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from scipy.sparse import csr_matrix
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from HW_1.es_utils import EsUtils
from constants.constants import Constants
from utils.decorators import timing
from utils.utils import Utils
class Email:
def __init__(self) -> None:
self.subject = ''
self.body = ''
self.cleaned_subject_tokens = None
self.cleaned_body_tokens = None
self.file_name = None
def __repr__(self) -> str:
return "File: {file_name}, Subject: {subject}, Body: {body}" \
"C_Subject:{cleaned_subject_tokens}, C_Body:{cleaned_body_tokens}".format(**self.__dict__)
def __str__(self):
return self.__repr__()
class HW7:
_SPAM_EMAIL_DATA_DIR_PATH = '{}/SPAM_DATA/trec07p/data'.format("/media/sumeet/147A710C7A70EBBC")
_SPAM_EMAIL_LABELS_PATH = '{}/SPAM_DATA/trec07p/full/index'.format("/media/sumeet/147A710C7A70EBBC")
_CACHED_FEATURE_INDEX_NAME_TEMPLATE = 'feature_matrix_cache/{}-{}-feature_index.json'
_CACHED_FEATURES_FILE_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-features.txt'
_CACHED_FILENAME_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-filename_index.txt'
_SPLIT_REGEX = re.compile("\\s+")
_PUNCTUATION_TABLE = str.maketrans('', '', string.punctuation)
_STOPWORDS_SET = set(stopwords.words('english'))
_STEMMER = SnowballStemmer('english')
_PART_1_TRIAL_A_TOKENS_SET = None
_PART_1_TRIAL_B_TOKENS_SET = None
@classmethod
@timing
def _parse_labels(cls) -> Dict[str, int]:
logging.info("Parsing labels")
labels_dict = {}
with open(cls._SPAM_EMAIL_LABELS_PATH, 'r') as file:
for line in file:
parts = re.split(cls._SPLIT_REGEX, line)
if parts[0] == 'spam':
label = 1
elif parts[0] == 'ham':
label = 0
else:
raise ValueError("Invalid label")
file_name = parts[1].split("/")[-1]
labels_dict[file_name] = label
logging.info("{} Labels parsed".format(len(labels_dict)))
return labels_dict
@classmethod
def _text_cleaning_helper(cls, text_to_clean):
cleaned_tokens = []
tokens = word_tokenize(text_to_clean)
for token in tokens:
lowered_token = token.lower()
stripped_token = lowered_token.translate(cls._PUNCTUATION_TABLE)
if stripped_token.isalpha() and stripped_token not in cls._STOPWORDS_SET:
cleaned_tokens.append(cls._STEMMER.stem(stripped_token))
return cleaned_tokens
@classmethod
def _clean_email(cls, raw_email: Email) -> Email:
raw_email.cleaned_subject_tokens = cls._text_cleaning_helper(raw_email.subject)
raw_email.cleaned_body_tokens = cls._text_cleaning_helper(raw_email.body)
return raw_email
@classmethod
def _get_emails(cls, email_files):
for email_file in email_files:
email_file_path = '{}/{}'.format(cls._SPAM_EMAIL_DATA_DIR_PATH, email_file)
with open(email_file_path, 'r', encoding='ISO-8859-1') as email_file_fp:
parsed_raw_email = cls._parse_raw_email(email_file_fp)
parsed_raw_email.file_name = email_file
cleaned_email = cls._clean_email(parsed_raw_email)
yield cleaned_email
@classmethod
def _parse_email_payload_from_html(cls, raw_html) -> str:
bs = BeautifulSoup(raw_html, 'html.parser')
return bs.get_text().strip()
@classmethod
def _parse_raw_email(cls, email_file_fp) -> Email:
def _helper(email_body):
content_type = str(email_body.get_content_type())
content_disposition = str(email_body.get_content_disposition())
if content_type == 'text/plain' and 'attachment' not in content_disposition:
parsed_email.body += str(email_body.get_payload())
elif content_type == 'text/html' and 'attachment' not in content_disposition:
parsed_email.body += cls._parse_email_payload_from_html(str(email_body.get_payload()))
body = email.message_from_file(email_file_fp)
parsed_email = Email()
if body['subject']:
parsed_email.subject = body['subject']
if body.is_multipart():
for part in body.walk():
_helper(part)
else:
_helper(body)
return parsed_email
@classmethod
def _get_email_contents_and_labels(cls, email_files, labels_dict, token_filter):
ix = 1
email_contents = []
labels = []
for cleaned_email in cls._get_emails(email_files):
ix += 1
text = " ".join(filter(token_filter,
cleaned_email.cleaned_subject_tokens + cleaned_email.cleaned_body_tokens))
if text:
email_contents.append(text)
file_name = cleaned_email.file_name
labels.append((labels_dict[file_name], file_name))
if ix % 1000 == 0:
logging.info("Emails Read :{}".format(ix))
return email_contents, labels
@classmethod
def _generate_ngrams_using_ES(cls, corpus, all_labels):
def _mtermvector_query_helper(text_chunks):
return {
"docs": [
{"doc": {"text": text}} for text in text_chunks
]
}
def | (min_df=0.02, max_df=0.95):
min_df_value = int(min_df * len(corpus))
max_df_value = int(max_df * len(corpus))
_valid_ngrams = set()
for ngram, no_of_documents in all_ngrams.items():
if min_df_value < no_of_documents < max_df_value:
_valid_ngrams.add(ngram)
return _valid_ngrams
es_client = EsUtils.get_es_client()
all_ngrams = defaultdict(int)
docs = []
indptr = [0]
indices = []
data = []
vocabulary = {}
labels = []
ix = 0
for email_content_chunks in Utils.split_list_into_sub_lists(corpus, sub_list_size=5000):
response = es_client.mtermvectors(index=Constants.AP_DATA_INDEX_NAME,
body=_mtermvector_query_helper(email_content_chunks))
for response_obj in response['docs']:
ngrams = {}
termvectors = response_obj['term_vectors']
if 'text' in termvectors:
for term, term_info in termvectors['text']['terms'].items():
ngrams[term] = term_info['term_freq']
all_ngrams[term] += 1
docs.append(ngrams)
labels.append(all_labels[ix])
ix += 1
valid_ngrams = _get_valid_ngrams()
for d in docs:
for term, count in d.items():
if term in valid_ngrams:
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(count)
indptr.append(len(indices))
features = csr_matrix((data, indices, indptr), dtype=int)
np.testing.assert_equal(features.shape[0], len(labels))
return features, labels, list(vocabulary.keys())
@classmethod
@timing
def _generate_features(cls, token_filter, use_cached=True, ngram_range=(1, 1), virgil_replies_yes=False):
feature_file_path = cls._CACHED_FEATURES_FILE_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
feature_name_index_file_path = cls._CACHED_FEATURE_INDEX_NAME_TEMPLATE.format(token_filter.__name__,
ngram_range)
filename_index_path = cls._CACHED_FILENAME_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
if use_cached:
X, y = load_svmlight_file(feature_file_path)
with open(feature_name_index_file_path, 'r') as file, open(filename_index_path, 'r') as filename_index_file:
feature_name_index = json.load(file)
filename_index = json.load(filename_index_file)
else:
labels_dict = cls._parse_labels()
all_email_files = os.listdir(cls._SPAM_EMAIL_DATA_DIR_PATH)
results = Utils.run_tasks_parallelly_in_chunks(cls._get_email_contents_and_labels, all_email_files, 12,
# multi_process=False,
labels_dict=labels_dict,
token_filter=token_filter)
corpus = []
all_labels = []
for email_contents, labels in results:
corpus.extend(email_contents)
all_labels.extend(labels)
if virgil_replies_yes:
vectorizer = CountVectorizer(ngram_range=ngram_range, min_df=0.02, max_df=0.95)
X = vectorizer.fit_transform(corpus)
feature_name_index = vectorizer.get_feature_names()
else:
X, all_labels, feature_name_index = cls._generate_ngrams_using_ES(corpus, all_labels)
y = np.array([label[0] for label in all_labels])
filename_index = [label[1] for label in all_labels]
dump_svmlight_file(X, y, f=feature_file_path)
with open(feature_name_index_file_path, 'w') as file, open(filename_index_path, 'w') as filename_index_file:
json.dump(feature_name_index, file)
json.dump(filename_index, filename_index_file)
indices = np.arange(len(y))
train_ix, test_ix = train_test_split(indices, test_size=0.2, shuffle=True)
filename_index = np.array(filename_index)
X_train, X_test, Y_train, Y_test, test_filename_index = \
X[train_ix, :], X[test_ix, :], y[train_ix], y[test_ix], filename_index[test_ix]
return X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index
@classmethod
def _run_model(cls, model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index):
model.fit(X_train, Y_train)
def _run_prediction_phase(phase_name, X, Y_true):
Y_predict = model.predict(X)
Y_probs = model.predict_proba(X)[:, 1]
auc_score = roc_auc_score(Y_true, Y_probs)
logging.info("AUC score for {} for {} phase:{}".format(model_name, phase_name, auc_score))
return Y_probs
# _run_prediction_phase('training', X_train, Y_train)
scores = _run_prediction_phase('testing', X_test, Y_test)
logging.info("Top 10 spam documents:{}".format(test_filename_index[np.argsort(scores)[::-1][:10]]))
@classmethod
def _part_1_trial_a_filter(cls, token):
return token in cls._PART_1_TRIAL_A_TOKENS_SET
@classmethod
def _part_1_trial_b_filter(cls, token):
return token in cls._PART_1_TRIAL_B_TOKENS_SET
@classmethod
def _part_2_token_filter(cls, token):
return True
@classmethod
def main(cls):
cls._PART_1_TRIAL_A_TOKENS_SET = cls._text_cleaning_helper(
"free win porn click here hookups lottery trip tickets clearance meet singles biz credit fast cash off "
"prize Congratulations urgent nudes money vacation penis boobs enlargement")
cls._PART_1_TRIAL_B_TOKENS_SET = cls._text_cleaning_helper(
"free spam click buy clearance shopper order earn cash extra money double collect credit check affordable "
"fast price loans profit refinance hidden freedom chance miracle lose home remove success virus malware ad "
"subscribe sales performance viagra valium medicine diagnostics million join deal unsolicited trial prize "
"now legal bonus limited instant luxury legal celebrity only compare win viagra $$$ $discount click here "
"meet singles incredible deal lose weight act now 100% free fast cash million dollars lower interest rate "
"visit our website no credit check")
for token_filter in [cls._part_1_trial_a_filter, cls._part_1_trial_b_filter, cls._part_2_token_filter]:
logging.info("Using token filter:{}".format(token_filter.__name__))
X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index = cls._generate_features(
token_filter=token_filter,
ngram_range=(1, 1))
for model, model_name in [
(LogisticRegression(solver='newton-cg', fit_intercept=True), "LogisticRegression"),
(DecisionTreeClassifier(), "DecisionTree"),
# (DecisionTreeClassifier(max_depth=5), "DecisionTree-5"),
# (DecisionTreeClassifier(max_depth=10), "DecisionTree-10"),
# (DecisionTreeClassifier(max_depth=15), "DecisionTree-15"),
(BernoulliNB(), "BernoulliNB")
]:
cls._run_model(model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index,
test_filename_index)
if __name__ == '__main__':
Utils.configure_logging()
seed = 1234
np.random.seed(seed)
random.seed(seed)
HW7.main()
| _get_valid_ngrams | identifier_name |
main.py | import email
import json
import logging
import os
import random
import re
import string
from collections import defaultdict
from typing import Dict
import numpy as np
from bs4 import BeautifulSoup
from nltk import SnowballStemmer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from scipy.sparse import csr_matrix
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from HW_1.es_utils import EsUtils
from constants.constants import Constants
from utils.decorators import timing
from utils.utils import Utils
class Email:
def __init__(self) -> None:
self.subject = ''
self.body = ''
self.cleaned_subject_tokens = None
self.cleaned_body_tokens = None
self.file_name = None
def __repr__(self) -> str:
return "File: {file_name}, Subject: {subject}, Body: {body}" \
"C_Subject:{cleaned_subject_tokens}, C_Body:{cleaned_body_tokens}".format(**self.__dict__)
def __str__(self):
return self.__repr__()
class HW7:
_SPAM_EMAIL_DATA_DIR_PATH = '{}/SPAM_DATA/trec07p/data'.format("/media/sumeet/147A710C7A70EBBC")
_SPAM_EMAIL_LABELS_PATH = '{}/SPAM_DATA/trec07p/full/index'.format("/media/sumeet/147A710C7A70EBBC")
_CACHED_FEATURE_INDEX_NAME_TEMPLATE = 'feature_matrix_cache/{}-{}-feature_index.json'
_CACHED_FEATURES_FILE_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-features.txt'
_CACHED_FILENAME_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-filename_index.txt'
_SPLIT_REGEX = re.compile("\\s+")
_PUNCTUATION_TABLE = str.maketrans('', '', string.punctuation)
_STOPWORDS_SET = set(stopwords.words('english'))
_STEMMER = SnowballStemmer('english')
_PART_1_TRIAL_A_TOKENS_SET = None
_PART_1_TRIAL_B_TOKENS_SET = None
@classmethod
@timing
def _parse_labels(cls) -> Dict[str, int]:
logging.info("Parsing labels")
labels_dict = {}
with open(cls._SPAM_EMAIL_LABELS_PATH, 'r') as file:
for line in file:
parts = re.split(cls._SPLIT_REGEX, line)
if parts[0] == 'spam':
label = 1
elif parts[0] == 'ham':
label = 0
else:
raise ValueError("Invalid label")
file_name = parts[1].split("/")[-1]
labels_dict[file_name] = label
logging.info("{} Labels parsed".format(len(labels_dict)))
return labels_dict
@classmethod
def _text_cleaning_helper(cls, text_to_clean):
cleaned_tokens = []
tokens = word_tokenize(text_to_clean)
for token in tokens:
lowered_token = token.lower()
stripped_token = lowered_token.translate(cls._PUNCTUATION_TABLE)
if stripped_token.isalpha() and stripped_token not in cls._STOPWORDS_SET:
cleaned_tokens.append(cls._STEMMER.stem(stripped_token))
return cleaned_tokens
@classmethod
def _clean_email(cls, raw_email: Email) -> Email:
raw_email.cleaned_subject_tokens = cls._text_cleaning_helper(raw_email.subject)
raw_email.cleaned_body_tokens = cls._text_cleaning_helper(raw_email.body)
return raw_email
@classmethod
def _get_emails(cls, email_files):
for email_file in email_files:
email_file_path = '{}/{}'.format(cls._SPAM_EMAIL_DATA_DIR_PATH, email_file)
with open(email_file_path, 'r', encoding='ISO-8859-1') as email_file_fp:
parsed_raw_email = cls._parse_raw_email(email_file_fp)
parsed_raw_email.file_name = email_file
cleaned_email = cls._clean_email(parsed_raw_email)
yield cleaned_email
@classmethod
def _parse_email_payload_from_html(cls, raw_html) -> str:
bs = BeautifulSoup(raw_html, 'html.parser')
return bs.get_text().strip()
@classmethod
def _parse_raw_email(cls, email_file_fp) -> Email:
def _helper(email_body):
content_type = str(email_body.get_content_type())
content_disposition = str(email_body.get_content_disposition())
if content_type == 'text/plain' and 'attachment' not in content_disposition:
parsed_email.body += str(email_body.get_payload())
elif content_type == 'text/html' and 'attachment' not in content_disposition:
parsed_email.body += cls._parse_email_payload_from_html(str(email_body.get_payload()))
body = email.message_from_file(email_file_fp)
parsed_email = Email()
if body['subject']:
parsed_email.subject = body['subject']
if body.is_multipart():
for part in body.walk():
|
else:
_helper(body)
return parsed_email
@classmethod
def _get_email_contents_and_labels(cls, email_files, labels_dict, token_filter):
ix = 1
email_contents = []
labels = []
for cleaned_email in cls._get_emails(email_files):
ix += 1
text = " ".join(filter(token_filter,
cleaned_email.cleaned_subject_tokens + cleaned_email.cleaned_body_tokens))
if text:
email_contents.append(text)
file_name = cleaned_email.file_name
labels.append((labels_dict[file_name], file_name))
if ix % 1000 == 0:
logging.info("Emails Read :{}".format(ix))
return email_contents, labels
@classmethod
def _generate_ngrams_using_ES(cls, corpus, all_labels):
def _mtermvector_query_helper(text_chunks):
return {
"docs": [
{"doc": {"text": text}} for text in text_chunks
]
}
def _get_valid_ngrams(min_df=0.02, max_df=0.95):
min_df_value = int(min_df * len(corpus))
max_df_value = int(max_df * len(corpus))
_valid_ngrams = set()
for ngram, no_of_documents in all_ngrams.items():
if min_df_value < no_of_documents < max_df_value:
_valid_ngrams.add(ngram)
return _valid_ngrams
es_client = EsUtils.get_es_client()
all_ngrams = defaultdict(int)
docs = []
indptr = [0]
indices = []
data = []
vocabulary = {}
labels = []
ix = 0
for email_content_chunks in Utils.split_list_into_sub_lists(corpus, sub_list_size=5000):
response = es_client.mtermvectors(index=Constants.AP_DATA_INDEX_NAME,
body=_mtermvector_query_helper(email_content_chunks))
for response_obj in response['docs']:
ngrams = {}
termvectors = response_obj['term_vectors']
if 'text' in termvectors:
for term, term_info in termvectors['text']['terms'].items():
ngrams[term] = term_info['term_freq']
all_ngrams[term] += 1
docs.append(ngrams)
labels.append(all_labels[ix])
ix += 1
valid_ngrams = _get_valid_ngrams()
for d in docs:
for term, count in d.items():
if term in valid_ngrams:
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(count)
indptr.append(len(indices))
features = csr_matrix((data, indices, indptr), dtype=int)
np.testing.assert_equal(features.shape[0], len(labels))
return features, labels, list(vocabulary.keys())
@classmethod
@timing
def _generate_features(cls, token_filter, use_cached=True, ngram_range=(1, 1), virgil_replies_yes=False):
feature_file_path = cls._CACHED_FEATURES_FILE_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
feature_name_index_file_path = cls._CACHED_FEATURE_INDEX_NAME_TEMPLATE.format(token_filter.__name__,
ngram_range)
filename_index_path = cls._CACHED_FILENAME_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
if use_cached:
X, y = load_svmlight_file(feature_file_path)
with open(feature_name_index_file_path, 'r') as file, open(filename_index_path, 'r') as filename_index_file:
feature_name_index = json.load(file)
filename_index = json.load(filename_index_file)
else:
labels_dict = cls._parse_labels()
all_email_files = os.listdir(cls._SPAM_EMAIL_DATA_DIR_PATH)
results = Utils.run_tasks_parallelly_in_chunks(cls._get_email_contents_and_labels, all_email_files, 12,
# multi_process=False,
labels_dict=labels_dict,
token_filter=token_filter)
corpus = []
all_labels = []
for email_contents, labels in results:
corpus.extend(email_contents)
all_labels.extend(labels)
if virgil_replies_yes:
vectorizer = CountVectorizer(ngram_range=ngram_range, min_df=0.02, max_df=0.95)
X = vectorizer.fit_transform(corpus)
feature_name_index = vectorizer.get_feature_names()
else:
X, all_labels, feature_name_index = cls._generate_ngrams_using_ES(corpus, all_labels)
y = np.array([label[0] for label in all_labels])
filename_index = [label[1] for label in all_labels]
dump_svmlight_file(X, y, f=feature_file_path)
with open(feature_name_index_file_path, 'w') as file, open(filename_index_path, 'w') as filename_index_file:
json.dump(feature_name_index, file)
json.dump(filename_index, filename_index_file)
indices = np.arange(len(y))
train_ix, test_ix = train_test_split(indices, test_size=0.2, shuffle=True)
filename_index = np.array(filename_index)
X_train, X_test, Y_train, Y_test, test_filename_index = \
X[train_ix, :], X[test_ix, :], y[train_ix], y[test_ix], filename_index[test_ix]
return X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index
@classmethod
def _run_model(cls, model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index):
model.fit(X_train, Y_train)
def _run_prediction_phase(phase_name, X, Y_true):
Y_predict = model.predict(X)
Y_probs = model.predict_proba(X)[:, 1]
auc_score = roc_auc_score(Y_true, Y_probs)
logging.info("AUC score for {} for {} phase:{}".format(model_name, phase_name, auc_score))
return Y_probs
# _run_prediction_phase('training', X_train, Y_train)
scores = _run_prediction_phase('testing', X_test, Y_test)
logging.info("Top 10 spam documents:{}".format(test_filename_index[np.argsort(scores)[::-1][:10]]))
@classmethod
def _part_1_trial_a_filter(cls, token):
return token in cls._PART_1_TRIAL_A_TOKENS_SET
@classmethod
def _part_1_trial_b_filter(cls, token):
return token in cls._PART_1_TRIAL_B_TOKENS_SET
@classmethod
def _part_2_token_filter(cls, token):
return True
@classmethod
def main(cls):
cls._PART_1_TRIAL_A_TOKENS_SET = cls._text_cleaning_helper(
"free win porn click here hookups lottery trip tickets clearance meet singles biz credit fast cash off "
"prize Congratulations urgent nudes money vacation penis boobs enlargement")
cls._PART_1_TRIAL_B_TOKENS_SET = cls._text_cleaning_helper(
"free spam click buy clearance shopper order earn cash extra money double collect credit check affordable "
"fast price loans profit refinance hidden freedom chance miracle lose home remove success virus malware ad "
"subscribe sales performance viagra valium medicine diagnostics million join deal unsolicited trial prize "
"now legal bonus limited instant luxury legal celebrity only compare win viagra $$$ $discount click here "
"meet singles incredible deal lose weight act now 100% free fast cash million dollars lower interest rate "
"visit our website no credit check")
for token_filter in [cls._part_1_trial_a_filter, cls._part_1_trial_b_filter, cls._part_2_token_filter]:
logging.info("Using token filter:{}".format(token_filter.__name__))
X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index = cls._generate_features(
token_filter=token_filter,
ngram_range=(1, 1))
for model, model_name in [
(LogisticRegression(solver='newton-cg', fit_intercept=True), "LogisticRegression"),
(DecisionTreeClassifier(), "DecisionTree"),
# (DecisionTreeClassifier(max_depth=5), "DecisionTree-5"),
# (DecisionTreeClassifier(max_depth=10), "DecisionTree-10"),
# (DecisionTreeClassifier(max_depth=15), "DecisionTree-15"),
(BernoulliNB(), "BernoulliNB")
]:
cls._run_model(model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index,
test_filename_index)
if __name__ == '__main__':
Utils.configure_logging()
seed = 1234
np.random.seed(seed)
random.seed(seed)
HW7.main()
| _helper(part) | conditional_block |
main.py | import email
import json
import logging | import random
import re
import string
from collections import defaultdict
from typing import Dict
import numpy as np
from bs4 import BeautifulSoup
from nltk import SnowballStemmer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from scipy.sparse import csr_matrix
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from HW_1.es_utils import EsUtils
from constants.constants import Constants
from utils.decorators import timing
from utils.utils import Utils
class Email:
def __init__(self) -> None:
self.subject = ''
self.body = ''
self.cleaned_subject_tokens = None
self.cleaned_body_tokens = None
self.file_name = None
def __repr__(self) -> str:
return "File: {file_name}, Subject: {subject}, Body: {body}" \
"C_Subject:{cleaned_subject_tokens}, C_Body:{cleaned_body_tokens}".format(**self.__dict__)
def __str__(self):
return self.__repr__()
class HW7:
_SPAM_EMAIL_DATA_DIR_PATH = '{}/SPAM_DATA/trec07p/data'.format("/media/sumeet/147A710C7A70EBBC")
_SPAM_EMAIL_LABELS_PATH = '{}/SPAM_DATA/trec07p/full/index'.format("/media/sumeet/147A710C7A70EBBC")
_CACHED_FEATURE_INDEX_NAME_TEMPLATE = 'feature_matrix_cache/{}-{}-feature_index.json'
_CACHED_FEATURES_FILE_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-features.txt'
_CACHED_FILENAME_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-filename_index.txt'
_SPLIT_REGEX = re.compile("\\s+")
_PUNCTUATION_TABLE = str.maketrans('', '', string.punctuation)
_STOPWORDS_SET = set(stopwords.words('english'))
_STEMMER = SnowballStemmer('english')
_PART_1_TRIAL_A_TOKENS_SET = None
_PART_1_TRIAL_B_TOKENS_SET = None
@classmethod
@timing
def _parse_labels(cls) -> Dict[str, int]:
logging.info("Parsing labels")
labels_dict = {}
with open(cls._SPAM_EMAIL_LABELS_PATH, 'r') as file:
for line in file:
parts = re.split(cls._SPLIT_REGEX, line)
if parts[0] == 'spam':
label = 1
elif parts[0] == 'ham':
label = 0
else:
raise ValueError("Invalid label")
file_name = parts[1].split("/")[-1]
labels_dict[file_name] = label
logging.info("{} Labels parsed".format(len(labels_dict)))
return labels_dict
@classmethod
def _text_cleaning_helper(cls, text_to_clean):
cleaned_tokens = []
tokens = word_tokenize(text_to_clean)
for token in tokens:
lowered_token = token.lower()
stripped_token = lowered_token.translate(cls._PUNCTUATION_TABLE)
if stripped_token.isalpha() and stripped_token not in cls._STOPWORDS_SET:
cleaned_tokens.append(cls._STEMMER.stem(stripped_token))
return cleaned_tokens
@classmethod
def _clean_email(cls, raw_email: Email) -> Email:
raw_email.cleaned_subject_tokens = cls._text_cleaning_helper(raw_email.subject)
raw_email.cleaned_body_tokens = cls._text_cleaning_helper(raw_email.body)
return raw_email
@classmethod
def _get_emails(cls, email_files):
for email_file in email_files:
email_file_path = '{}/{}'.format(cls._SPAM_EMAIL_DATA_DIR_PATH, email_file)
with open(email_file_path, 'r', encoding='ISO-8859-1') as email_file_fp:
parsed_raw_email = cls._parse_raw_email(email_file_fp)
parsed_raw_email.file_name = email_file
cleaned_email = cls._clean_email(parsed_raw_email)
yield cleaned_email
@classmethod
def _parse_email_payload_from_html(cls, raw_html) -> str:
bs = BeautifulSoup(raw_html, 'html.parser')
return bs.get_text().strip()
@classmethod
def _parse_raw_email(cls, email_file_fp) -> Email:
def _helper(email_body):
content_type = str(email_body.get_content_type())
content_disposition = str(email_body.get_content_disposition())
if content_type == 'text/plain' and 'attachment' not in content_disposition:
parsed_email.body += str(email_body.get_payload())
elif content_type == 'text/html' and 'attachment' not in content_disposition:
parsed_email.body += cls._parse_email_payload_from_html(str(email_body.get_payload()))
body = email.message_from_file(email_file_fp)
parsed_email = Email()
if body['subject']:
parsed_email.subject = body['subject']
if body.is_multipart():
for part in body.walk():
_helper(part)
else:
_helper(body)
return parsed_email
@classmethod
def _get_email_contents_and_labels(cls, email_files, labels_dict, token_filter):
ix = 1
email_contents = []
labels = []
for cleaned_email in cls._get_emails(email_files):
ix += 1
text = " ".join(filter(token_filter,
cleaned_email.cleaned_subject_tokens + cleaned_email.cleaned_body_tokens))
if text:
email_contents.append(text)
file_name = cleaned_email.file_name
labels.append((labels_dict[file_name], file_name))
if ix % 1000 == 0:
logging.info("Emails Read :{}".format(ix))
return email_contents, labels
@classmethod
def _generate_ngrams_using_ES(cls, corpus, all_labels):
def _mtermvector_query_helper(text_chunks):
return {
"docs": [
{"doc": {"text": text}} for text in text_chunks
]
}
def _get_valid_ngrams(min_df=0.02, max_df=0.95):
min_df_value = int(min_df * len(corpus))
max_df_value = int(max_df * len(corpus))
_valid_ngrams = set()
for ngram, no_of_documents in all_ngrams.items():
if min_df_value < no_of_documents < max_df_value:
_valid_ngrams.add(ngram)
return _valid_ngrams
es_client = EsUtils.get_es_client()
all_ngrams = defaultdict(int)
docs = []
indptr = [0]
indices = []
data = []
vocabulary = {}
labels = []
ix = 0
for email_content_chunks in Utils.split_list_into_sub_lists(corpus, sub_list_size=5000):
response = es_client.mtermvectors(index=Constants.AP_DATA_INDEX_NAME,
body=_mtermvector_query_helper(email_content_chunks))
for response_obj in response['docs']:
ngrams = {}
termvectors = response_obj['term_vectors']
if 'text' in termvectors:
for term, term_info in termvectors['text']['terms'].items():
ngrams[term] = term_info['term_freq']
all_ngrams[term] += 1
docs.append(ngrams)
labels.append(all_labels[ix])
ix += 1
valid_ngrams = _get_valid_ngrams()
for d in docs:
for term, count in d.items():
if term in valid_ngrams:
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(count)
indptr.append(len(indices))
features = csr_matrix((data, indices, indptr), dtype=int)
np.testing.assert_equal(features.shape[0], len(labels))
return features, labels, list(vocabulary.keys())
@classmethod
@timing
def _generate_features(cls, token_filter, use_cached=True, ngram_range=(1, 1), virgil_replies_yes=False):
feature_file_path = cls._CACHED_FEATURES_FILE_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
feature_name_index_file_path = cls._CACHED_FEATURE_INDEX_NAME_TEMPLATE.format(token_filter.__name__,
ngram_range)
filename_index_path = cls._CACHED_FILENAME_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
if use_cached:
X, y = load_svmlight_file(feature_file_path)
with open(feature_name_index_file_path, 'r') as file, open(filename_index_path, 'r') as filename_index_file:
feature_name_index = json.load(file)
filename_index = json.load(filename_index_file)
else:
labels_dict = cls._parse_labels()
all_email_files = os.listdir(cls._SPAM_EMAIL_DATA_DIR_PATH)
results = Utils.run_tasks_parallelly_in_chunks(cls._get_email_contents_and_labels, all_email_files, 12,
# multi_process=False,
labels_dict=labels_dict,
token_filter=token_filter)
corpus = []
all_labels = []
for email_contents, labels in results:
corpus.extend(email_contents)
all_labels.extend(labels)
if virgil_replies_yes:
vectorizer = CountVectorizer(ngram_range=ngram_range, min_df=0.02, max_df=0.95)
X = vectorizer.fit_transform(corpus)
feature_name_index = vectorizer.get_feature_names()
else:
X, all_labels, feature_name_index = cls._generate_ngrams_using_ES(corpus, all_labels)
y = np.array([label[0] for label in all_labels])
filename_index = [label[1] for label in all_labels]
dump_svmlight_file(X, y, f=feature_file_path)
with open(feature_name_index_file_path, 'w') as file, open(filename_index_path, 'w') as filename_index_file:
json.dump(feature_name_index, file)
json.dump(filename_index, filename_index_file)
indices = np.arange(len(y))
train_ix, test_ix = train_test_split(indices, test_size=0.2, shuffle=True)
filename_index = np.array(filename_index)
X_train, X_test, Y_train, Y_test, test_filename_index = \
X[train_ix, :], X[test_ix, :], y[train_ix], y[test_ix], filename_index[test_ix]
return X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index
@classmethod
def _run_model(cls, model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index):
model.fit(X_train, Y_train)
def _run_prediction_phase(phase_name, X, Y_true):
Y_predict = model.predict(X)
Y_probs = model.predict_proba(X)[:, 1]
auc_score = roc_auc_score(Y_true, Y_probs)
logging.info("AUC score for {} for {} phase:{}".format(model_name, phase_name, auc_score))
return Y_probs
# _run_prediction_phase('training', X_train, Y_train)
scores = _run_prediction_phase('testing', X_test, Y_test)
logging.info("Top 10 spam documents:{}".format(test_filename_index[np.argsort(scores)[::-1][:10]]))
@classmethod
def _part_1_trial_a_filter(cls, token):
return token in cls._PART_1_TRIAL_A_TOKENS_SET
@classmethod
def _part_1_trial_b_filter(cls, token):
return token in cls._PART_1_TRIAL_B_TOKENS_SET
@classmethod
def _part_2_token_filter(cls, token):
return True
@classmethod
def main(cls):
cls._PART_1_TRIAL_A_TOKENS_SET = cls._text_cleaning_helper(
"free win porn click here hookups lottery trip tickets clearance meet singles biz credit fast cash off "
"prize Congratulations urgent nudes money vacation penis boobs enlargement")
cls._PART_1_TRIAL_B_TOKENS_SET = cls._text_cleaning_helper(
"free spam click buy clearance shopper order earn cash extra money double collect credit check affordable "
"fast price loans profit refinance hidden freedom chance miracle lose home remove success virus malware ad "
"subscribe sales performance viagra valium medicine diagnostics million join deal unsolicited trial prize "
"now legal bonus limited instant luxury legal celebrity only compare win viagra $$$ $discount click here "
"meet singles incredible deal lose weight act now 100% free fast cash million dollars lower interest rate "
"visit our website no credit check")
for token_filter in [cls._part_1_trial_a_filter, cls._part_1_trial_b_filter, cls._part_2_token_filter]:
logging.info("Using token filter:{}".format(token_filter.__name__))
X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index = cls._generate_features(
token_filter=token_filter,
ngram_range=(1, 1))
for model, model_name in [
(LogisticRegression(solver='newton-cg', fit_intercept=True), "LogisticRegression"),
(DecisionTreeClassifier(), "DecisionTree"),
# (DecisionTreeClassifier(max_depth=5), "DecisionTree-5"),
# (DecisionTreeClassifier(max_depth=10), "DecisionTree-10"),
# (DecisionTreeClassifier(max_depth=15), "DecisionTree-15"),
(BernoulliNB(), "BernoulliNB")
]:
cls._run_model(model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index,
test_filename_index)
if __name__ == '__main__':
Utils.configure_logging()
seed = 1234
np.random.seed(seed)
random.seed(seed)
HW7.main() | import os | random_line_split |
main.py | import email
import json
import logging
import os
import random
import re
import string
from collections import defaultdict
from typing import Dict
import numpy as np
from bs4 import BeautifulSoup
from nltk import SnowballStemmer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from scipy.sparse import csr_matrix
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from HW_1.es_utils import EsUtils
from constants.constants import Constants
from utils.decorators import timing
from utils.utils import Utils
class Email:
def __init__(self) -> None:
self.subject = ''
self.body = ''
self.cleaned_subject_tokens = None
self.cleaned_body_tokens = None
self.file_name = None
def __repr__(self) -> str:
return "File: {file_name}, Subject: {subject}, Body: {body}" \
"C_Subject:{cleaned_subject_tokens}, C_Body:{cleaned_body_tokens}".format(**self.__dict__)
def __str__(self):
return self.__repr__()
class HW7:
_SPAM_EMAIL_DATA_DIR_PATH = '{}/SPAM_DATA/trec07p/data'.format("/media/sumeet/147A710C7A70EBBC")
_SPAM_EMAIL_LABELS_PATH = '{}/SPAM_DATA/trec07p/full/index'.format("/media/sumeet/147A710C7A70EBBC")
_CACHED_FEATURE_INDEX_NAME_TEMPLATE = 'feature_matrix_cache/{}-{}-feature_index.json'
_CACHED_FEATURES_FILE_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-features.txt'
_CACHED_FILENAME_PATH_TEMPLATE = 'feature_matrix_cache/{}-{}-filename_index.txt'
_SPLIT_REGEX = re.compile("\\s+")
_PUNCTUATION_TABLE = str.maketrans('', '', string.punctuation)
_STOPWORDS_SET = set(stopwords.words('english'))
_STEMMER = SnowballStemmer('english')
_PART_1_TRIAL_A_TOKENS_SET = None
_PART_1_TRIAL_B_TOKENS_SET = None
@classmethod
@timing
def _parse_labels(cls) -> Dict[str, int]:
logging.info("Parsing labels")
labels_dict = {}
with open(cls._SPAM_EMAIL_LABELS_PATH, 'r') as file:
for line in file:
parts = re.split(cls._SPLIT_REGEX, line)
if parts[0] == 'spam':
label = 1
elif parts[0] == 'ham':
label = 0
else:
raise ValueError("Invalid label")
file_name = parts[1].split("/")[-1]
labels_dict[file_name] = label
logging.info("{} Labels parsed".format(len(labels_dict)))
return labels_dict
@classmethod
def _text_cleaning_helper(cls, text_to_clean):
|
@classmethod
def _clean_email(cls, raw_email: Email) -> Email:
raw_email.cleaned_subject_tokens = cls._text_cleaning_helper(raw_email.subject)
raw_email.cleaned_body_tokens = cls._text_cleaning_helper(raw_email.body)
return raw_email
@classmethod
def _get_emails(cls, email_files):
for email_file in email_files:
email_file_path = '{}/{}'.format(cls._SPAM_EMAIL_DATA_DIR_PATH, email_file)
with open(email_file_path, 'r', encoding='ISO-8859-1') as email_file_fp:
parsed_raw_email = cls._parse_raw_email(email_file_fp)
parsed_raw_email.file_name = email_file
cleaned_email = cls._clean_email(parsed_raw_email)
yield cleaned_email
@classmethod
def _parse_email_payload_from_html(cls, raw_html) -> str:
bs = BeautifulSoup(raw_html, 'html.parser')
return bs.get_text().strip()
@classmethod
def _parse_raw_email(cls, email_file_fp) -> Email:
def _helper(email_body):
content_type = str(email_body.get_content_type())
content_disposition = str(email_body.get_content_disposition())
if content_type == 'text/plain' and 'attachment' not in content_disposition:
parsed_email.body += str(email_body.get_payload())
elif content_type == 'text/html' and 'attachment' not in content_disposition:
parsed_email.body += cls._parse_email_payload_from_html(str(email_body.get_payload()))
body = email.message_from_file(email_file_fp)
parsed_email = Email()
if body['subject']:
parsed_email.subject = body['subject']
if body.is_multipart():
for part in body.walk():
_helper(part)
else:
_helper(body)
return parsed_email
@classmethod
def _get_email_contents_and_labels(cls, email_files, labels_dict, token_filter):
ix = 1
email_contents = []
labels = []
for cleaned_email in cls._get_emails(email_files):
ix += 1
text = " ".join(filter(token_filter,
cleaned_email.cleaned_subject_tokens + cleaned_email.cleaned_body_tokens))
if text:
email_contents.append(text)
file_name = cleaned_email.file_name
labels.append((labels_dict[file_name], file_name))
if ix % 1000 == 0:
logging.info("Emails Read :{}".format(ix))
return email_contents, labels
@classmethod
def _generate_ngrams_using_ES(cls, corpus, all_labels):
def _mtermvector_query_helper(text_chunks):
return {
"docs": [
{"doc": {"text": text}} for text in text_chunks
]
}
def _get_valid_ngrams(min_df=0.02, max_df=0.95):
min_df_value = int(min_df * len(corpus))
max_df_value = int(max_df * len(corpus))
_valid_ngrams = set()
for ngram, no_of_documents in all_ngrams.items():
if min_df_value < no_of_documents < max_df_value:
_valid_ngrams.add(ngram)
return _valid_ngrams
es_client = EsUtils.get_es_client()
all_ngrams = defaultdict(int)
docs = []
indptr = [0]
indices = []
data = []
vocabulary = {}
labels = []
ix = 0
for email_content_chunks in Utils.split_list_into_sub_lists(corpus, sub_list_size=5000):
response = es_client.mtermvectors(index=Constants.AP_DATA_INDEX_NAME,
body=_mtermvector_query_helper(email_content_chunks))
for response_obj in response['docs']:
ngrams = {}
termvectors = response_obj['term_vectors']
if 'text' in termvectors:
for term, term_info in termvectors['text']['terms'].items():
ngrams[term] = term_info['term_freq']
all_ngrams[term] += 1
docs.append(ngrams)
labels.append(all_labels[ix])
ix += 1
valid_ngrams = _get_valid_ngrams()
for d in docs:
for term, count in d.items():
if term in valid_ngrams:
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(count)
indptr.append(len(indices))
features = csr_matrix((data, indices, indptr), dtype=int)
np.testing.assert_equal(features.shape[0], len(labels))
return features, labels, list(vocabulary.keys())
@classmethod
@timing
def _generate_features(cls, token_filter, use_cached=True, ngram_range=(1, 1), virgil_replies_yes=False):
feature_file_path = cls._CACHED_FEATURES_FILE_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
feature_name_index_file_path = cls._CACHED_FEATURE_INDEX_NAME_TEMPLATE.format(token_filter.__name__,
ngram_range)
filename_index_path = cls._CACHED_FILENAME_PATH_TEMPLATE.format(token_filter.__name__, ngram_range)
if use_cached:
X, y = load_svmlight_file(feature_file_path)
with open(feature_name_index_file_path, 'r') as file, open(filename_index_path, 'r') as filename_index_file:
feature_name_index = json.load(file)
filename_index = json.load(filename_index_file)
else:
labels_dict = cls._parse_labels()
all_email_files = os.listdir(cls._SPAM_EMAIL_DATA_DIR_PATH)
results = Utils.run_tasks_parallelly_in_chunks(cls._get_email_contents_and_labels, all_email_files, 12,
# multi_process=False,
labels_dict=labels_dict,
token_filter=token_filter)
corpus = []
all_labels = []
for email_contents, labels in results:
corpus.extend(email_contents)
all_labels.extend(labels)
if virgil_replies_yes:
vectorizer = CountVectorizer(ngram_range=ngram_range, min_df=0.02, max_df=0.95)
X = vectorizer.fit_transform(corpus)
feature_name_index = vectorizer.get_feature_names()
else:
X, all_labels, feature_name_index = cls._generate_ngrams_using_ES(corpus, all_labels)
y = np.array([label[0] for label in all_labels])
filename_index = [label[1] for label in all_labels]
dump_svmlight_file(X, y, f=feature_file_path)
with open(feature_name_index_file_path, 'w') as file, open(filename_index_path, 'w') as filename_index_file:
json.dump(feature_name_index, file)
json.dump(filename_index, filename_index_file)
indices = np.arange(len(y))
train_ix, test_ix = train_test_split(indices, test_size=0.2, shuffle=True)
filename_index = np.array(filename_index)
X_train, X_test, Y_train, Y_test, test_filename_index = \
X[train_ix, :], X[test_ix, :], y[train_ix], y[test_ix], filename_index[test_ix]
return X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index
@classmethod
def _run_model(cls, model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index):
model.fit(X_train, Y_train)
def _run_prediction_phase(phase_name, X, Y_true):
Y_predict = model.predict(X)
Y_probs = model.predict_proba(X)[:, 1]
auc_score = roc_auc_score(Y_true, Y_probs)
logging.info("AUC score for {} for {} phase:{}".format(model_name, phase_name, auc_score))
return Y_probs
# _run_prediction_phase('training', X_train, Y_train)
scores = _run_prediction_phase('testing', X_test, Y_test)
logging.info("Top 10 spam documents:{}".format(test_filename_index[np.argsort(scores)[::-1][:10]]))
@classmethod
def _part_1_trial_a_filter(cls, token):
return token in cls._PART_1_TRIAL_A_TOKENS_SET
@classmethod
def _part_1_trial_b_filter(cls, token):
return token in cls._PART_1_TRIAL_B_TOKENS_SET
@classmethod
def _part_2_token_filter(cls, token):
return True
@classmethod
def main(cls):
cls._PART_1_TRIAL_A_TOKENS_SET = cls._text_cleaning_helper(
"free win porn click here hookups lottery trip tickets clearance meet singles biz credit fast cash off "
"prize Congratulations urgent nudes money vacation penis boobs enlargement")
cls._PART_1_TRIAL_B_TOKENS_SET = cls._text_cleaning_helper(
"free spam click buy clearance shopper order earn cash extra money double collect credit check affordable "
"fast price loans profit refinance hidden freedom chance miracle lose home remove success virus malware ad "
"subscribe sales performance viagra valium medicine diagnostics million join deal unsolicited trial prize "
"now legal bonus limited instant luxury legal celebrity only compare win viagra $$$ $discount click here "
"meet singles incredible deal lose weight act now 100% free fast cash million dollars lower interest rate "
"visit our website no credit check")
for token_filter in [cls._part_1_trial_a_filter, cls._part_1_trial_b_filter, cls._part_2_token_filter]:
logging.info("Using token filter:{}".format(token_filter.__name__))
X_train, X_test, Y_train, Y_test, feature_name_index, test_filename_index = cls._generate_features(
token_filter=token_filter,
ngram_range=(1, 1))
for model, model_name in [
(LogisticRegression(solver='newton-cg', fit_intercept=True), "LogisticRegression"),
(DecisionTreeClassifier(), "DecisionTree"),
# (DecisionTreeClassifier(max_depth=5), "DecisionTree-5"),
# (DecisionTreeClassifier(max_depth=10), "DecisionTree-10"),
# (DecisionTreeClassifier(max_depth=15), "DecisionTree-15"),
(BernoulliNB(), "BernoulliNB")
]:
cls._run_model(model, model_name, X_train, X_test, Y_train, Y_test, feature_name_index,
test_filename_index)
if __name__ == '__main__':
Utils.configure_logging()
seed = 1234
np.random.seed(seed)
random.seed(seed)
HW7.main()
| cleaned_tokens = []
tokens = word_tokenize(text_to_clean)
for token in tokens:
lowered_token = token.lower()
stripped_token = lowered_token.translate(cls._PUNCTUATION_TABLE)
if stripped_token.isalpha() and stripped_token not in cls._STOPWORDS_SET:
cleaned_tokens.append(cls._STEMMER.stem(stripped_token))
return cleaned_tokens | identifier_body |
func.py | import math
import warnings
import numpy as np
import scipy.sparse as sp
__all__ = ['median', 'nanmedian', 'nansum', 'nanmean', 'nanvar', 'nanstd',
'nanmin', 'nanmax', 'nanargmin', 'nanargmax', 'rankdata',
'nanrankdata', 'ss', 'nn', 'partsort', 'argpartsort', 'replace',
'anynan', 'allnan',
'bincount', 'valuecount', 'countnans', 'stats',
'contingency', 'nanequal']
def median(arr, axis=None):
"Slow median function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.median(arr, axis=axis)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nansum(arr, axis=None):
"Slow nansum function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.nansum(arr, axis=axis)
if not hasattr(y, "dtype"):
y = arr.dtype.type(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nanmedian(arr, axis=None):
"Slow nanmedian function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = scipy_nanmedian(arr, axis=axis)
if not hasattr(y, "dtype"):
if issubclass(arr.dtype.type, np.inexact):
y = arr.dtype.type(y)
else:
y = np.float64(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
if (y.size == 1) and (y.ndim == 0):
y = y[()]
return y
def nanmean(arr, axis=None):
"Slow nanmean function used for unaccelerated ndim/dtype combinations."
return np.nanmean(arr, axis=axis)
def nanvar(arr, axis=None, ddof=0):
"Slow nanvar function used for unaccelerated ndim/dtype combinations."
return np.nanvar(arr, axis=axis, ddof=ddof)
def nanstd(arr, axis=None, ddof=0):
"Slow nanstd function used for unaccelerated ndim/dtype combinations."
return np.nanstd(arr, axis=axis, ddof=ddof)
def nanmin(arr, axis=None):
"Slow nanmin function used for unaccelerated ndim/dtype combinations."
y = np.nanmin(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanmax(arr, axis=None):
"Slow nanmax function used for unaccelerated ndim/dtype combinations."
y = np.nanmax(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanargmin(arr, axis=None):
"Slow nanargmin function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmin(arr, axis=axis)
def nanargmax(arr, axis=None):
|
def rankdata(arr, axis=None):
"Slow rankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
y[ijslice] = scipy_rankdata(arr[ijslice].astype('float'))
return y
def nanrankdata(arr, axis=None):
"Slow nanrankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
y.fill(np.nan)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
x1d = arr[ijslice].astype(float)
mask1d = ~np.isnan(x1d)
x1d[mask1d] = scipy_rankdata(x1d[mask1d])
y[ijslice] = x1d
return y
def ss(arr, axis=0):
"Slow sum of squares used for unaccelerated ndim/dtype combinations."
return scipy_ss(arr, axis)
def nn(arr, arr0, axis=1):
"Slow nearest neighbor used for unaccelerated ndim/dtype combinations."
arr = np.array(arr, copy=False)
arr0 = np.array(arr0, copy=False)
if arr.ndim != 2:
raise ValueError("`arr` must be 2d")
if arr0.ndim != 1:
raise ValueError("`arr0` must be 1d")
if axis == 1:
d = (arr - arr0) ** 2
elif axis == 0:
d = (arr - arr0.reshape(-1,1)) ** 2
else:
raise ValueError("`axis` must be 0 or 1.")
d = d.sum(axis)
idx = np.argmin(d)
return np.sqrt(d[idx]), idx
def partsort(arr, n, axis=-1):
"Slow partial sort used for unaccelerated ndim/dtype combinations."
return np.sort(arr, axis)
def argpartsort(arr, n, axis=-1):
"Slow partial argsort used for unaccelerated ndim/dtype combinations."
return np.argsort(arr, axis)
def replace(arr, old, new):
"Slow replace (inplace) used for unaccelerated ndim/dtype combinations."
if type(arr) is not np.ndarray:
raise TypeError("`arr` must be a numpy array.")
if not issubclass(arr.dtype.type, np.inexact):
if old != old:
# int arrays do not contain NaN
return
if int(old) != old:
raise ValueError("Cannot safely cast `old` to int.")
if int(new) != new:
raise ValueError("Cannot safely cast `new` to int.")
if old != old:
mask = np.isnan(arr)
else:
mask = arr == old
np.putmask(arr, mask, new)
def anynan(arr, axis=None):
"Slow check for Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).any(axis)
def allnan(arr, axis=None):
"Slow check for all Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).all(axis)
def nanequal(arr1, arr2, axis=None):
"Slow check for equality that ignores NaNs"
if axis is None:
nans = np.isnan(arr1) | np.isnan(arr2)
return np.array_equal(arr1[~nans], arr2[~nans])
if arr1.size == 0:
if axis < 0:
axis += arr1.ndim
return np.ones(arr1.shape[:axis]+arr1.shape[axis+1:], np.bool)
if arr1.size == 1:
return arr1 == arr2 or arr1 != arr1 or arr2 != arr2
return np.apply_along_axis(lambda x:nanequal(x["f0"], x["f1"]), axis,
np.core.records.fromarrays([arr1, arr2]))
def bincount(arr, max_val, weights=None, mask=None):
"Slow bincount"
if arr.ndim == 1:
out = np.zeros((max_val+1, ), float)
nans = 0.0
if mask is None or mask[0]:
for i, ai in enumerate(arr):
if ai != ai:
nans += 1 if weights is None else weights[i]
continue
ain = int(ai+0.1)
if abs(ain - ai) > 1e-6:
raise ValueError("%f is not integer" % ain)
if ain < 0:
raise ValueError("negative value in bincount")
if ain > max_val:
raise ValueError("value %i is greater than max_val (%i)" %
(ain, max_val))
out[ain] += 1 if weights is None else weights[i]
elif arr.ndim == 2:
out = np.zeros((arr.shape[1], max_val+1), float)
nans = np.zeros((arr.shape[1], ), float)
if sp.issparse(arr):
indptr, indices, data = arr.indptr, arr.indices, arr.data
for ri in range(arr.shape[0]):
wt = 1 if weights is None else weights[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ci = indices[i]
if mask is None or mask[ci]:
out[ci, data[i]] += wt
else:
for i in range(arr.shape[1]):
if mask is None or mask[i]:
out[i, :], nans[i] = bincount(arr[:, i], max_val, weights)
else:
raise ValueError("bincount expects 1- or 2-dimensional array")
return out, nans
def contingency(arr, b, max_val, max_val2, weights=None, mask=None):
raise NotImplemented("bottlechest.slow.contingency is not implemented yet")
def valuecount(arr):
"slow valuecount"
if arr.ndim != 2 or arr.shape[0] != 2:
raise ValueError("valuecount expects an array with shape (2, N)")
N = arr.shape[1]
dst = 0
for src in range(1, N):
if math.isnan(arr[0, src]):
break
if arr[0, src] == arr[0, dst]:
arr[1, dst] += arr[1, src]
else:
dst += 1
arr[:, dst] = arr[:, src]
return arr[:, :dst + 1]
def countnans(arr, weights=None, axis=None):
if axis is None:
if weights is None:
return np.sum(np.isnan(arr))
else:
return np.sum(np.isnan(arr)*weights)
else:
if weights is not None:
if arr.shape[axis] != len(weights):
raise ValueError("shape of weights does not match the data")
return np.apply_along_axis(lambda a: np.sum(np.isnan(a)*weights),
axis, arr)
else:
return np.sum(np.isnan(arr), axis=axis)
def stats_object(arr, weights=None):
pinf, minf = float("inf"), float("-inf")
if arr.ndim == 1:
if weights is None:
nones = sum(np.equal(arr, None))
return pinf, minf, 0, 0, nones, len(arr) - nones
else:
nones = sum(np.equal(arr, None) * weights)
return pinf, minf, 0, 0, nones, sum(weights) - nones
if sp.issparse(arr) and weights is not None:
raise NotImplementedError("counting of missing values for"
"weighted arrays of type 'object' is not implemented")
y = np.zeros((arr.shape[1], 6), float)
y[:, 0] = pinf
y[:, 1] = minf
if sp.issparse(arr):
y[:, 4] = np.bincount(arr.indices, minlength=arr.shape[1])
elif weights is None:
y[:, 4] = np.sum(np.equal(arr, None), axis=0)
else:
y[:, 4] = np.sum(np.equal(arr, None) * weights[:, np.newaxis], axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
return y
def stats(arr, weights=None, compute_variance=False):
if not 1 <= arr.ndim <= 2:
raise ValueError("bottlechest.stats handles only 1-d and 2-d arrays")
if arr.dtype == object:
# can't compute min and max, but we can count 'nans'
return stats_object(arr, weights)
if arr.ndim == 1:
a_min, a_max = np.nanmin(arr), np.nanmax(arr)
if weights is None:
nans = np.sum(np.isnan(arr))
non_nans = len(arr) - nans
mean = np.nansum(arr) / non_nans
var = np.nansum((arr - mean) ** 2) / (non_nans - 1)
else:
tot_w = np.sum(weights)
nans = np.sum(np.isnan(arr) * weights)
non_nans = tot_w - nans
mean = np.nansum(arr * weights) / non_nans
var = np.nansum(weights * (arr - mean) ** 2)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = non_nans ** 2 - tot_w2
if d > 1e-6:
var *= non_nans / d
return a_min, a_max, mean, var, nans, non_nans
if sp.issparse(arr):
arr = arr.todense()
y = np.zeros((arr.shape[1], 6), dtype=float)
y[:, 0] = nanmin(arr, 0)
y[:, 1] = nanmax(arr, 0)
if weights:
tot_w = np.sum(weights)
y[:, 4] = countnans(arr, weights, 0)
y[:, 5] = tot_w - y[:, 4]
y[:, 2] = nanmean(arr * weights, 0) / y[:, 4]
y[:, 3] = nansum(weights * (arr - y[:, 2]) ** 2, 0)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = y[:, 5] ** 2 - tot_w2
if d > 1e-6:
y[:, 3] *= y[:, 5] / d
else:
y[:, 4] = countnans(arr, axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
y[:, 2] = nanmean(arr, 0) / y[:, 4]
y[:, 3] = nansum((arr - y[:, 2]) ** 2, 0) / (y[:, 4] - 1)
y[:, 2][np.isinf(y[:, 2])] = 0
return y
# ---------------------------------------------------------------------------
#
# SciPy
#
# Local copy of scipy.stats functions to avoid (by popular demand) a SciPy
# dependency. The SciPy license is included in the Bottleneck license file,
# which is distributed with Bottleneck.
#
# Code taken from scipy trunk on Dec 16, 2010.
# nanmedian taken from scipy trunk on Dec 17, 2010.
# rankdata taken from scipy HEAD on Mar 16, 2011.
def scipy_nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int, optional
Axis along which the mean is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
factor = 1.0-np.sum(np.isnan(x),axis)*1.0/Norig
x[np.isnan(x)] = 0
return np.mean(x,axis)/factor
def scipy_nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
Nnan = np.sum(np.isnan(x),axis)*1.0
n = Norig - Nnan
x[np.isnan(x)] = 0.
m1 = np.sum(x,axis)/n
if axis:
d = (x - np.expand_dims(m1, axis))**2.0
else:
d = (x - m1)**2.0
m2 = np.sum(d,axis)-(m1*m1)*Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
cond = 1-np.isnan(arr1d)
x = np.sort(np.compress(cond,arr1d,axis=-1))
if x.size == 0:
return np.nan
return np.median(x)
# Feb 2011: patched nanmedian to handle nanmedian(a, 1) with a = np.ones((2,0))
def scipy_nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int, optional
Axis along which the median is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
shape = list(x.shape)
shape.pop(axis)
if 0 in shape:
x = np.empty(shape)
else:
x = x.copy()
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
fastsort : ndarray of type int
sorted indices into the original array
"""
# TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
def scipy_rankdata(a):
"""
Ranks the data, dealing with ties appropriately.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that set.
Ranks begin at 1, not 0.
Parameters
----------
a : array_like
This array is first flattened.
Returns
-------
rankdata : ndarray
An array of length equal to the size of `a`, containing rank scores.
Examples
--------
>>> stats.rankdata([0, 2, 2, 3])
array([ 1. , 2.5, 2.5, 4. ])
"""
a = np.ravel(a)
n = len(a)
svec, ivec = fastsort(a)
sumranks = 0
dupcount = 0
newarray = np.zeros(n, float)
for i in range(n):
sumranks += i
dupcount += 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
def scipy_ss(a, axis=0):
"""
Squares each element of the input array, and returns the square(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
The axis along which to calculate. If None, use whole array.
Default is 0, i.e. along the first axis.
Returns
-------
ss : ndarray
The sum along the given axis for (a**2).
See also
--------
square_of_sums : The square(s) of the sum(s) (the opposite of `ss`).
Examples
--------
>>> from scipy import stats
>>> a = np.array([1., 2., 5.])
>>> stats.ss(a)
30.0
And calculating along an axis:
>>> b = np.array([[1., 2., 5.], [2., 5., 6.]])
>>> stats.ss(b, axis=1)
array([ 30., 65.])
"""
a, axis = _chk_asarray(a, axis)
if 'int' in str(a.dtype):
a = a.astype('int64')
return np.sum(a*a, axis)
| "Slow nanargmax function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmax(arr, axis=axis) | identifier_body |
func.py | import math
import warnings
import numpy as np
import scipy.sparse as sp
__all__ = ['median', 'nanmedian', 'nansum', 'nanmean', 'nanvar', 'nanstd',
'nanmin', 'nanmax', 'nanargmin', 'nanargmax', 'rankdata',
'nanrankdata', 'ss', 'nn', 'partsort', 'argpartsort', 'replace',
'anynan', 'allnan',
'bincount', 'valuecount', 'countnans', 'stats',
'contingency', 'nanequal']
def median(arr, axis=None):
"Slow median function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.median(arr, axis=axis)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nansum(arr, axis=None):
"Slow nansum function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.nansum(arr, axis=axis)
if not hasattr(y, "dtype"):
y = arr.dtype.type(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nanmedian(arr, axis=None):
"Slow nanmedian function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = scipy_nanmedian(arr, axis=axis)
if not hasattr(y, "dtype"):
if issubclass(arr.dtype.type, np.inexact):
y = arr.dtype.type(y)
else:
y = np.float64(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
if (y.size == 1) and (y.ndim == 0):
y = y[()]
return y
def nanmean(arr, axis=None):
"Slow nanmean function used for unaccelerated ndim/dtype combinations."
return np.nanmean(arr, axis=axis)
def nanvar(arr, axis=None, ddof=0):
"Slow nanvar function used for unaccelerated ndim/dtype combinations."
return np.nanvar(arr, axis=axis, ddof=ddof)
def nanstd(arr, axis=None, ddof=0):
"Slow nanstd function used for unaccelerated ndim/dtype combinations."
return np.nanstd(arr, axis=axis, ddof=ddof)
def nanmin(arr, axis=None):
"Slow nanmin function used for unaccelerated ndim/dtype combinations."
y = np.nanmin(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanmax(arr, axis=None):
"Slow nanmax function used for unaccelerated ndim/dtype combinations."
y = np.nanmax(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanargmin(arr, axis=None):
"Slow nanargmin function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmin(arr, axis=axis)
def nanargmax(arr, axis=None):
"Slow nanargmax function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmax(arr, axis=axis)
def rankdata(arr, axis=None):
"Slow rankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
y[ijslice] = scipy_rankdata(arr[ijslice].astype('float'))
return y
def nanrankdata(arr, axis=None):
"Slow nanrankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
y.fill(np.nan)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
x1d = arr[ijslice].astype(float)
mask1d = ~np.isnan(x1d)
x1d[mask1d] = scipy_rankdata(x1d[mask1d])
y[ijslice] = x1d
return y
def ss(arr, axis=0):
"Slow sum of squares used for unaccelerated ndim/dtype combinations."
return scipy_ss(arr, axis)
def nn(arr, arr0, axis=1):
"Slow nearest neighbor used for unaccelerated ndim/dtype combinations."
arr = np.array(arr, copy=False)
arr0 = np.array(arr0, copy=False)
if arr.ndim != 2:
raise ValueError("`arr` must be 2d")
if arr0.ndim != 1:
raise ValueError("`arr0` must be 1d")
if axis == 1:
d = (arr - arr0) ** 2
elif axis == 0:
d = (arr - arr0.reshape(-1,1)) ** 2
else:
raise ValueError("`axis` must be 0 or 1.")
d = d.sum(axis)
idx = np.argmin(d)
return np.sqrt(d[idx]), idx
def partsort(arr, n, axis=-1):
"Slow partial sort used for unaccelerated ndim/dtype combinations."
return np.sort(arr, axis)
def argpartsort(arr, n, axis=-1):
"Slow partial argsort used for unaccelerated ndim/dtype combinations."
return np.argsort(arr, axis)
def replace(arr, old, new):
"Slow replace (inplace) used for unaccelerated ndim/dtype combinations."
if type(arr) is not np.ndarray:
raise TypeError("`arr` must be a numpy array.")
if not issubclass(arr.dtype.type, np.inexact):
if old != old:
# int arrays do not contain NaN
return
if int(old) != old:
raise ValueError("Cannot safely cast `old` to int.")
if int(new) != new:
raise ValueError("Cannot safely cast `new` to int.")
if old != old:
mask = np.isnan(arr)
else:
mask = arr == old
np.putmask(arr, mask, new)
def anynan(arr, axis=None):
"Slow check for Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).any(axis)
def allnan(arr, axis=None):
"Slow check for all Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).all(axis)
def nanequal(arr1, arr2, axis=None):
"Slow check for equality that ignores NaNs"
if axis is None:
nans = np.isnan(arr1) | np.isnan(arr2)
return np.array_equal(arr1[~nans], arr2[~nans])
if arr1.size == 0:
if axis < 0:
axis += arr1.ndim
return np.ones(arr1.shape[:axis]+arr1.shape[axis+1:], np.bool)
if arr1.size == 1:
return arr1 == arr2 or arr1 != arr1 or arr2 != arr2
return np.apply_along_axis(lambda x:nanequal(x["f0"], x["f1"]), axis,
np.core.records.fromarrays([arr1, arr2]))
def bincount(arr, max_val, weights=None, mask=None):
"Slow bincount"
if arr.ndim == 1:
out = np.zeros((max_val+1, ), float)
nans = 0.0
if mask is None or mask[0]:
for i, ai in enumerate(arr):
if ai != ai:
nans += 1 if weights is None else weights[i]
continue
ain = int(ai+0.1)
if abs(ain - ai) > 1e-6:
raise ValueError("%f is not integer" % ain)
if ain < 0:
raise ValueError("negative value in bincount")
if ain > max_val:
raise ValueError("value %i is greater than max_val (%i)" %
(ain, max_val))
out[ain] += 1 if weights is None else weights[i]
elif arr.ndim == 2:
out = np.zeros((arr.shape[1], max_val+1), float)
nans = np.zeros((arr.shape[1], ), float)
if sp.issparse(arr):
indptr, indices, data = arr.indptr, arr.indices, arr.data
for ri in range(arr.shape[0]):
wt = 1 if weights is None else weights[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ci = indices[i]
if mask is None or mask[ci]:
out[ci, data[i]] += wt
else:
for i in range(arr.shape[1]):
if mask is None or mask[i]:
out[i, :], nans[i] = bincount(arr[:, i], max_val, weights)
else:
raise ValueError("bincount expects 1- or 2-dimensional array")
return out, nans
def contingency(arr, b, max_val, max_val2, weights=None, mask=None):
raise NotImplemented("bottlechest.slow.contingency is not implemented yet")
def valuecount(arr):
"slow valuecount"
if arr.ndim != 2 or arr.shape[0] != 2:
raise ValueError("valuecount expects an array with shape (2, N)")
N = arr.shape[1]
dst = 0
for src in range(1, N):
if math.isnan(arr[0, src]):
break
if arr[0, src] == arr[0, dst]:
arr[1, dst] += arr[1, src]
else:
dst += 1
arr[:, dst] = arr[:, src]
return arr[:, :dst + 1]
def countnans(arr, weights=None, axis=None):
if axis is None:
if weights is None:
return np.sum(np.isnan(arr))
else:
return np.sum(np.isnan(arr)*weights)
else:
if weights is not None:
if arr.shape[axis] != len(weights):
raise ValueError("shape of weights does not match the data")
return np.apply_along_axis(lambda a: np.sum(np.isnan(a)*weights),
axis, arr)
else:
return np.sum(np.isnan(arr), axis=axis)
def stats_object(arr, weights=None):
pinf, minf = float("inf"), float("-inf")
if arr.ndim == 1:
if weights is None:
nones = sum(np.equal(arr, None))
return pinf, minf, 0, 0, nones, len(arr) - nones
else:
nones = sum(np.equal(arr, None) * weights)
return pinf, minf, 0, 0, nones, sum(weights) - nones
if sp.issparse(arr) and weights is not None:
raise NotImplementedError("counting of missing values for"
"weighted arrays of type 'object' is not implemented")
y = np.zeros((arr.shape[1], 6), float)
y[:, 0] = pinf
y[:, 1] = minf
if sp.issparse(arr):
y[:, 4] = np.bincount(arr.indices, minlength=arr.shape[1])
elif weights is None:
y[:, 4] = np.sum(np.equal(arr, None), axis=0)
else:
y[:, 4] = np.sum(np.equal(arr, None) * weights[:, np.newaxis], axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
return y
def stats(arr, weights=None, compute_variance=False):
if not 1 <= arr.ndim <= 2:
raise ValueError("bottlechest.stats handles only 1-d and 2-d arrays")
if arr.dtype == object:
# can't compute min and max, but we can count 'nans'
return stats_object(arr, weights)
if arr.ndim == 1:
a_min, a_max = np.nanmin(arr), np.nanmax(arr)
if weights is None:
nans = np.sum(np.isnan(arr))
non_nans = len(arr) - nans
mean = np.nansum(arr) / non_nans
var = np.nansum((arr - mean) ** 2) / (non_nans - 1)
else:
tot_w = np.sum(weights)
nans = np.sum(np.isnan(arr) * weights)
non_nans = tot_w - nans
mean = np.nansum(arr * weights) / non_nans
var = np.nansum(weights * (arr - mean) ** 2)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = non_nans ** 2 - tot_w2
if d > 1e-6:
|
return a_min, a_max, mean, var, nans, non_nans
if sp.issparse(arr):
arr = arr.todense()
y = np.zeros((arr.shape[1], 6), dtype=float)
y[:, 0] = nanmin(arr, 0)
y[:, 1] = nanmax(arr, 0)
if weights:
tot_w = np.sum(weights)
y[:, 4] = countnans(arr, weights, 0)
y[:, 5] = tot_w - y[:, 4]
y[:, 2] = nanmean(arr * weights, 0) / y[:, 4]
y[:, 3] = nansum(weights * (arr - y[:, 2]) ** 2, 0)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = y[:, 5] ** 2 - tot_w2
if d > 1e-6:
y[:, 3] *= y[:, 5] / d
else:
y[:, 4] = countnans(arr, axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
y[:, 2] = nanmean(arr, 0) / y[:, 4]
y[:, 3] = nansum((arr - y[:, 2]) ** 2, 0) / (y[:, 4] - 1)
y[:, 2][np.isinf(y[:, 2])] = 0
return y
# ---------------------------------------------------------------------------
#
# SciPy
#
# Local copy of scipy.stats functions to avoid (by popular demand) a SciPy
# dependency. The SciPy license is included in the Bottleneck license file,
# which is distributed with Bottleneck.
#
# Code taken from scipy trunk on Dec 16, 2010.
# nanmedian taken from scipy trunk on Dec 17, 2010.
# rankdata taken from scipy HEAD on Mar 16, 2011.
def scipy_nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int, optional
Axis along which the mean is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
factor = 1.0-np.sum(np.isnan(x),axis)*1.0/Norig
x[np.isnan(x)] = 0
return np.mean(x,axis)/factor
def scipy_nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
Nnan = np.sum(np.isnan(x),axis)*1.0
n = Norig - Nnan
x[np.isnan(x)] = 0.
m1 = np.sum(x,axis)/n
if axis:
d = (x - np.expand_dims(m1, axis))**2.0
else:
d = (x - m1)**2.0
m2 = np.sum(d,axis)-(m1*m1)*Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
cond = 1-np.isnan(arr1d)
x = np.sort(np.compress(cond,arr1d,axis=-1))
if x.size == 0:
return np.nan
return np.median(x)
# Feb 2011: patched nanmedian to handle nanmedian(a, 1) with a = np.ones((2,0))
def scipy_nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int, optional
Axis along which the median is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
shape = list(x.shape)
shape.pop(axis)
if 0 in shape:
x = np.empty(shape)
else:
x = x.copy()
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
fastsort : ndarray of type int
sorted indices into the original array
"""
# TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
def scipy_rankdata(a):
"""
Ranks the data, dealing with ties appropriately.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that set.
Ranks begin at 1, not 0.
Parameters
----------
a : array_like
This array is first flattened.
Returns
-------
rankdata : ndarray
An array of length equal to the size of `a`, containing rank scores.
Examples
--------
>>> stats.rankdata([0, 2, 2, 3])
array([ 1. , 2.5, 2.5, 4. ])
"""
a = np.ravel(a)
n = len(a)
svec, ivec = fastsort(a)
sumranks = 0
dupcount = 0
newarray = np.zeros(n, float)
for i in range(n):
sumranks += i
dupcount += 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
def scipy_ss(a, axis=0):
"""
Squares each element of the input array, and returns the square(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
The axis along which to calculate. If None, use whole array.
Default is 0, i.e. along the first axis.
Returns
-------
ss : ndarray
The sum along the given axis for (a**2).
See also
--------
square_of_sums : The square(s) of the sum(s) (the opposite of `ss`).
Examples
--------
>>> from scipy import stats
>>> a = np.array([1., 2., 5.])
>>> stats.ss(a)
30.0
And calculating along an axis:
>>> b = np.array([[1., 2., 5.], [2., 5., 6.]])
>>> stats.ss(b, axis=1)
array([ 30., 65.])
"""
a, axis = _chk_asarray(a, axis)
if 'int' in str(a.dtype):
a = a.astype('int64')
return np.sum(a*a, axis)
| var *= non_nans / d | conditional_block |
func.py | import math
import warnings
import numpy as np
import scipy.sparse as sp
__all__ = ['median', 'nanmedian', 'nansum', 'nanmean', 'nanvar', 'nanstd',
'nanmin', 'nanmax', 'nanargmin', 'nanargmax', 'rankdata',
'nanrankdata', 'ss', 'nn', 'partsort', 'argpartsort', 'replace',
'anynan', 'allnan',
'bincount', 'valuecount', 'countnans', 'stats',
'contingency', 'nanequal']
def median(arr, axis=None):
"Slow median function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.median(arr, axis=axis)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nansum(arr, axis=None):
"Slow nansum function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.nansum(arr, axis=axis)
if not hasattr(y, "dtype"):
y = arr.dtype.type(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nanmedian(arr, axis=None):
"Slow nanmedian function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = scipy_nanmedian(arr, axis=axis)
if not hasattr(y, "dtype"):
if issubclass(arr.dtype.type, np.inexact):
y = arr.dtype.type(y)
else:
y = np.float64(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
if (y.size == 1) and (y.ndim == 0):
y = y[()]
return y
def nanmean(arr, axis=None):
"Slow nanmean function used for unaccelerated ndim/dtype combinations."
return np.nanmean(arr, axis=axis)
def nanvar(arr, axis=None, ddof=0):
"Slow nanvar function used for unaccelerated ndim/dtype combinations."
return np.nanvar(arr, axis=axis, ddof=ddof)
def nanstd(arr, axis=None, ddof=0):
"Slow nanstd function used for unaccelerated ndim/dtype combinations."
return np.nanstd(arr, axis=axis, ddof=ddof)
def nanmin(arr, axis=None):
"Slow nanmin function used for unaccelerated ndim/dtype combinations."
y = np.nanmin(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanmax(arr, axis=None):
"Slow nanmax function used for unaccelerated ndim/dtype combinations."
y = np.nanmax(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanargmin(arr, axis=None):
"Slow nanargmin function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmin(arr, axis=axis)
def nanargmax(arr, axis=None):
"Slow nanargmax function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmax(arr, axis=axis)
def rankdata(arr, axis=None):
"Slow rankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
y[ijslice] = scipy_rankdata(arr[ijslice].astype('float'))
return y
def nanrankdata(arr, axis=None):
"Slow nanrankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
y.fill(np.nan)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
x1d = arr[ijslice].astype(float)
mask1d = ~np.isnan(x1d)
x1d[mask1d] = scipy_rankdata(x1d[mask1d])
y[ijslice] = x1d
return y
def ss(arr, axis=0):
"Slow sum of squares used for unaccelerated ndim/dtype combinations."
return scipy_ss(arr, axis)
def nn(arr, arr0, axis=1):
"Slow nearest neighbor used for unaccelerated ndim/dtype combinations."
arr = np.array(arr, copy=False)
arr0 = np.array(arr0, copy=False)
if arr.ndim != 2:
raise ValueError("`arr` must be 2d")
if arr0.ndim != 1:
raise ValueError("`arr0` must be 1d")
if axis == 1:
d = (arr - arr0) ** 2
elif axis == 0:
d = (arr - arr0.reshape(-1,1)) ** 2
else:
raise ValueError("`axis` must be 0 or 1.")
d = d.sum(axis)
idx = np.argmin(d)
return np.sqrt(d[idx]), idx
def partsort(arr, n, axis=-1):
"Slow partial sort used for unaccelerated ndim/dtype combinations."
return np.sort(arr, axis)
def argpartsort(arr, n, axis=-1):
"Slow partial argsort used for unaccelerated ndim/dtype combinations."
return np.argsort(arr, axis)
def replace(arr, old, new):
"Slow replace (inplace) used for unaccelerated ndim/dtype combinations."
if type(arr) is not np.ndarray:
raise TypeError("`arr` must be a numpy array.")
if not issubclass(arr.dtype.type, np.inexact):
if old != old:
# int arrays do not contain NaN
return
if int(old) != old:
raise ValueError("Cannot safely cast `old` to int.")
if int(new) != new:
raise ValueError("Cannot safely cast `new` to int.")
if old != old:
mask = np.isnan(arr)
else:
mask = arr == old
np.putmask(arr, mask, new)
def anynan(arr, axis=None):
"Slow check for Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).any(axis)
def allnan(arr, axis=None):
"Slow check for all Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).all(axis)
def nanequal(arr1, arr2, axis=None):
"Slow check for equality that ignores NaNs"
if axis is None:
nans = np.isnan(arr1) | np.isnan(arr2)
return np.array_equal(arr1[~nans], arr2[~nans])
if arr1.size == 0:
if axis < 0:
axis += arr1.ndim
return np.ones(arr1.shape[:axis]+arr1.shape[axis+1:], np.bool)
if arr1.size == 1:
return arr1 == arr2 or arr1 != arr1 or arr2 != arr2
return np.apply_along_axis(lambda x:nanequal(x["f0"], x["f1"]), axis,
np.core.records.fromarrays([arr1, arr2]))
def bincount(arr, max_val, weights=None, mask=None):
"Slow bincount"
if arr.ndim == 1:
out = np.zeros((max_val+1, ), float)
nans = 0.0
if mask is None or mask[0]:
for i, ai in enumerate(arr):
if ai != ai:
nans += 1 if weights is None else weights[i]
continue
ain = int(ai+0.1)
if abs(ain - ai) > 1e-6:
raise ValueError("%f is not integer" % ain)
if ain < 0:
raise ValueError("negative value in bincount")
if ain > max_val:
raise ValueError("value %i is greater than max_val (%i)" %
(ain, max_val))
out[ain] += 1 if weights is None else weights[i]
elif arr.ndim == 2:
out = np.zeros((arr.shape[1], max_val+1), float)
nans = np.zeros((arr.shape[1], ), float)
if sp.issparse(arr):
indptr, indices, data = arr.indptr, arr.indices, arr.data
for ri in range(arr.shape[0]):
wt = 1 if weights is None else weights[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ci = indices[i]
if mask is None or mask[ci]:
out[ci, data[i]] += wt
else:
for i in range(arr.shape[1]):
if mask is None or mask[i]:
out[i, :], nans[i] = bincount(arr[:, i], max_val, weights)
else:
raise ValueError("bincount expects 1- or 2-dimensional array")
return out, nans
def contingency(arr, b, max_val, max_val2, weights=None, mask=None):
raise NotImplemented("bottlechest.slow.contingency is not implemented yet")
def valuecount(arr):
"slow valuecount"
if arr.ndim != 2 or arr.shape[0] != 2:
raise ValueError("valuecount expects an array with shape (2, N)")
N = arr.shape[1]
dst = 0
for src in range(1, N):
if math.isnan(arr[0, src]):
break
if arr[0, src] == arr[0, dst]:
arr[1, dst] += arr[1, src]
else:
dst += 1
arr[:, dst] = arr[:, src]
return arr[:, :dst + 1]
def countnans(arr, weights=None, axis=None):
if axis is None:
if weights is None:
return np.sum(np.isnan(arr))
else:
return np.sum(np.isnan(arr)*weights)
else:
if weights is not None:
if arr.shape[axis] != len(weights):
raise ValueError("shape of weights does not match the data")
return np.apply_along_axis(lambda a: np.sum(np.isnan(a)*weights),
axis, arr)
else:
return np.sum(np.isnan(arr), axis=axis)
def stats_object(arr, weights=None):
pinf, minf = float("inf"), float("-inf")
if arr.ndim == 1:
if weights is None:
nones = sum(np.equal(arr, None))
return pinf, minf, 0, 0, nones, len(arr) - nones
else:
nones = sum(np.equal(arr, None) * weights)
return pinf, minf, 0, 0, nones, sum(weights) - nones
if sp.issparse(arr) and weights is not None:
raise NotImplementedError("counting of missing values for"
"weighted arrays of type 'object' is not implemented")
y = np.zeros((arr.shape[1], 6), float)
y[:, 0] = pinf
y[:, 1] = minf
if sp.issparse(arr):
y[:, 4] = np.bincount(arr.indices, minlength=arr.shape[1])
elif weights is None:
y[:, 4] = np.sum(np.equal(arr, None), axis=0)
else:
y[:, 4] = np.sum(np.equal(arr, None) * weights[:, np.newaxis], axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
return y
def stats(arr, weights=None, compute_variance=False):
if not 1 <= arr.ndim <= 2:
raise ValueError("bottlechest.stats handles only 1-d and 2-d arrays")
if arr.dtype == object:
# can't compute min and max, but we can count 'nans'
return stats_object(arr, weights)
if arr.ndim == 1:
a_min, a_max = np.nanmin(arr), np.nanmax(arr)
if weights is None:
nans = np.sum(np.isnan(arr))
non_nans = len(arr) - nans
mean = np.nansum(arr) / non_nans
var = np.nansum((arr - mean) ** 2) / (non_nans - 1)
else:
tot_w = np.sum(weights)
nans = np.sum(np.isnan(arr) * weights)
non_nans = tot_w - nans
mean = np.nansum(arr * weights) / non_nans
var = np.nansum(weights * (arr - mean) ** 2)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = non_nans ** 2 - tot_w2
if d > 1e-6:
var *= non_nans / d
return a_min, a_max, mean, var, nans, non_nans
if sp.issparse(arr):
arr = arr.todense()
y = np.zeros((arr.shape[1], 6), dtype=float)
y[:, 0] = nanmin(arr, 0)
y[:, 1] = nanmax(arr, 0)
if weights:
tot_w = np.sum(weights)
y[:, 4] = countnans(arr, weights, 0)
y[:, 5] = tot_w - y[:, 4]
y[:, 2] = nanmean(arr * weights, 0) / y[:, 4]
y[:, 3] = nansum(weights * (arr - y[:, 2]) ** 2, 0)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = y[:, 5] ** 2 - tot_w2
if d > 1e-6:
y[:, 3] *= y[:, 5] / d
else:
y[:, 4] = countnans(arr, axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
y[:, 2] = nanmean(arr, 0) / y[:, 4]
y[:, 3] = nansum((arr - y[:, 2]) ** 2, 0) / (y[:, 4] - 1)
y[:, 2][np.isinf(y[:, 2])] = 0
return y
# ---------------------------------------------------------------------------
#
# SciPy
#
# Local copy of scipy.stats functions to avoid (by popular demand) a SciPy
# dependency. The SciPy license is included in the Bottleneck license file,
# which is distributed with Bottleneck.
#
# Code taken from scipy trunk on Dec 16, 2010.
# nanmedian taken from scipy trunk on Dec 17, 2010.
# rankdata taken from scipy HEAD on Mar 16, 2011.
def scipy_nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int, optional
Axis along which the mean is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
factor = 1.0-np.sum(np.isnan(x),axis)*1.0/Norig
x[np.isnan(x)] = 0
return np.mean(x,axis)/factor
def scipy_nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
Nnan = np.sum(np.isnan(x),axis)*1.0
n = Norig - Nnan
x[np.isnan(x)] = 0.
m1 = np.sum(x,axis)/n
if axis:
d = (x - np.expand_dims(m1, axis))**2.0
else:
d = (x - m1)**2.0
m2 = np.sum(d,axis)-(m1*m1)*Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
cond = 1-np.isnan(arr1d)
x = np.sort(np.compress(cond,arr1d,axis=-1))
if x.size == 0:
return np.nan
return np.median(x)
# Feb 2011: patched nanmedian to handle nanmedian(a, 1) with a = np.ones((2,0))
def scipy_nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int, optional
Axis along which the median is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
shape = list(x.shape)
shape.pop(axis)
if 0 in shape:
x = np.empty(shape)
else:
x = x.copy()
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
def | (a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
fastsort : ndarray of type int
sorted indices into the original array
"""
# TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
def scipy_rankdata(a):
"""
Ranks the data, dealing with ties appropriately.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that set.
Ranks begin at 1, not 0.
Parameters
----------
a : array_like
This array is first flattened.
Returns
-------
rankdata : ndarray
An array of length equal to the size of `a`, containing rank scores.
Examples
--------
>>> stats.rankdata([0, 2, 2, 3])
array([ 1. , 2.5, 2.5, 4. ])
"""
a = np.ravel(a)
n = len(a)
svec, ivec = fastsort(a)
sumranks = 0
dupcount = 0
newarray = np.zeros(n, float)
for i in range(n):
sumranks += i
dupcount += 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
def scipy_ss(a, axis=0):
"""
Squares each element of the input array, and returns the square(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
The axis along which to calculate. If None, use whole array.
Default is 0, i.e. along the first axis.
Returns
-------
ss : ndarray
The sum along the given axis for (a**2).
See also
--------
square_of_sums : The square(s) of the sum(s) (the opposite of `ss`).
Examples
--------
>>> from scipy import stats
>>> a = np.array([1., 2., 5.])
>>> stats.ss(a)
30.0
And calculating along an axis:
>>> b = np.array([[1., 2., 5.], [2., 5., 6.]])
>>> stats.ss(b, axis=1)
array([ 30., 65.])
"""
a, axis = _chk_asarray(a, axis)
if 'int' in str(a.dtype):
a = a.astype('int64')
return np.sum(a*a, axis)
| _chk_asarray | identifier_name |
func.py | import math
import warnings
import numpy as np
import scipy.sparse as sp
__all__ = ['median', 'nanmedian', 'nansum', 'nanmean', 'nanvar', 'nanstd',
'nanmin', 'nanmax', 'nanargmin', 'nanargmax', 'rankdata',
'nanrankdata', 'ss', 'nn', 'partsort', 'argpartsort', 'replace',
'anynan', 'allnan',
'bincount', 'valuecount', 'countnans', 'stats',
'contingency', 'nanequal']
def median(arr, axis=None):
"Slow median function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.median(arr, axis=axis)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nansum(arr, axis=None):
"Slow nansum function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.nansum(arr, axis=axis)
if not hasattr(y, "dtype"):
y = arr.dtype.type(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nanmedian(arr, axis=None):
"Slow nanmedian function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = scipy_nanmedian(arr, axis=axis)
if not hasattr(y, "dtype"):
if issubclass(arr.dtype.type, np.inexact):
y = arr.dtype.type(y)
else:
y = np.float64(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
if (y.size == 1) and (y.ndim == 0):
y = y[()]
return y
def nanmean(arr, axis=None):
"Slow nanmean function used for unaccelerated ndim/dtype combinations."
return np.nanmean(arr, axis=axis)
def nanvar(arr, axis=None, ddof=0):
"Slow nanvar function used for unaccelerated ndim/dtype combinations."
return np.nanvar(arr, axis=axis, ddof=ddof)
def nanstd(arr, axis=None, ddof=0):
"Slow nanstd function used for unaccelerated ndim/dtype combinations."
return np.nanstd(arr, axis=axis, ddof=ddof)
def nanmin(arr, axis=None):
"Slow nanmin function used for unaccelerated ndim/dtype combinations."
y = np.nanmin(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanmax(arr, axis=None):
"Slow nanmax function used for unaccelerated ndim/dtype combinations."
y = np.nanmax(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanargmin(arr, axis=None):
"Slow nanargmin function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmin(arr, axis=axis)
def nanargmax(arr, axis=None):
"Slow nanargmax function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmax(arr, axis=axis)
def rankdata(arr, axis=None):
"Slow rankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
y[ijslice] = scipy_rankdata(arr[ijslice].astype('float'))
return y
def nanrankdata(arr, axis=None):
"Slow nanrankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
y.fill(np.nan)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
x1d = arr[ijslice].astype(float)
mask1d = ~np.isnan(x1d)
x1d[mask1d] = scipy_rankdata(x1d[mask1d])
y[ijslice] = x1d
return y
def ss(arr, axis=0):
"Slow sum of squares used for unaccelerated ndim/dtype combinations."
return scipy_ss(arr, axis)
def nn(arr, arr0, axis=1):
"Slow nearest neighbor used for unaccelerated ndim/dtype combinations."
arr = np.array(arr, copy=False)
arr0 = np.array(arr0, copy=False)
if arr.ndim != 2:
raise ValueError("`arr` must be 2d")
if arr0.ndim != 1:
raise ValueError("`arr0` must be 1d")
if axis == 1:
d = (arr - arr0) ** 2
elif axis == 0:
d = (arr - arr0.reshape(-1,1)) ** 2
else:
raise ValueError("`axis` must be 0 or 1.") | d = d.sum(axis)
idx = np.argmin(d)
return np.sqrt(d[idx]), idx
def partsort(arr, n, axis=-1):
"Slow partial sort used for unaccelerated ndim/dtype combinations."
return np.sort(arr, axis)
def argpartsort(arr, n, axis=-1):
"Slow partial argsort used for unaccelerated ndim/dtype combinations."
return np.argsort(arr, axis)
def replace(arr, old, new):
"Slow replace (inplace) used for unaccelerated ndim/dtype combinations."
if type(arr) is not np.ndarray:
raise TypeError("`arr` must be a numpy array.")
if not issubclass(arr.dtype.type, np.inexact):
if old != old:
# int arrays do not contain NaN
return
if int(old) != old:
raise ValueError("Cannot safely cast `old` to int.")
if int(new) != new:
raise ValueError("Cannot safely cast `new` to int.")
if old != old:
mask = np.isnan(arr)
else:
mask = arr == old
np.putmask(arr, mask, new)
def anynan(arr, axis=None):
"Slow check for Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).any(axis)
def allnan(arr, axis=None):
"Slow check for all Nans used for unaccelerated ndim/dtype combinations."
return np.isnan(arr).all(axis)
def nanequal(arr1, arr2, axis=None):
"Slow check for equality that ignores NaNs"
if axis is None:
nans = np.isnan(arr1) | np.isnan(arr2)
return np.array_equal(arr1[~nans], arr2[~nans])
if arr1.size == 0:
if axis < 0:
axis += arr1.ndim
return np.ones(arr1.shape[:axis]+arr1.shape[axis+1:], np.bool)
if arr1.size == 1:
return arr1 == arr2 or arr1 != arr1 or arr2 != arr2
return np.apply_along_axis(lambda x:nanequal(x["f0"], x["f1"]), axis,
np.core.records.fromarrays([arr1, arr2]))
def bincount(arr, max_val, weights=None, mask=None):
"Slow bincount"
if arr.ndim == 1:
out = np.zeros((max_val+1, ), float)
nans = 0.0
if mask is None or mask[0]:
for i, ai in enumerate(arr):
if ai != ai:
nans += 1 if weights is None else weights[i]
continue
ain = int(ai+0.1)
if abs(ain - ai) > 1e-6:
raise ValueError("%f is not integer" % ain)
if ain < 0:
raise ValueError("negative value in bincount")
if ain > max_val:
raise ValueError("value %i is greater than max_val (%i)" %
(ain, max_val))
out[ain] += 1 if weights is None else weights[i]
elif arr.ndim == 2:
out = np.zeros((arr.shape[1], max_val+1), float)
nans = np.zeros((arr.shape[1], ), float)
if sp.issparse(arr):
indptr, indices, data = arr.indptr, arr.indices, arr.data
for ri in range(arr.shape[0]):
wt = 1 if weights is None else weights[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ci = indices[i]
if mask is None or mask[ci]:
out[ci, data[i]] += wt
else:
for i in range(arr.shape[1]):
if mask is None or mask[i]:
out[i, :], nans[i] = bincount(arr[:, i], max_val, weights)
else:
raise ValueError("bincount expects 1- or 2-dimensional array")
return out, nans
def contingency(arr, b, max_val, max_val2, weights=None, mask=None):
raise NotImplemented("bottlechest.slow.contingency is not implemented yet")
def valuecount(arr):
"slow valuecount"
if arr.ndim != 2 or arr.shape[0] != 2:
raise ValueError("valuecount expects an array with shape (2, N)")
N = arr.shape[1]
dst = 0
for src in range(1, N):
if math.isnan(arr[0, src]):
break
if arr[0, src] == arr[0, dst]:
arr[1, dst] += arr[1, src]
else:
dst += 1
arr[:, dst] = arr[:, src]
return arr[:, :dst + 1]
def countnans(arr, weights=None, axis=None):
if axis is None:
if weights is None:
return np.sum(np.isnan(arr))
else:
return np.sum(np.isnan(arr)*weights)
else:
if weights is not None:
if arr.shape[axis] != len(weights):
raise ValueError("shape of weights does not match the data")
return np.apply_along_axis(lambda a: np.sum(np.isnan(a)*weights),
axis, arr)
else:
return np.sum(np.isnan(arr), axis=axis)
def stats_object(arr, weights=None):
pinf, minf = float("inf"), float("-inf")
if arr.ndim == 1:
if weights is None:
nones = sum(np.equal(arr, None))
return pinf, minf, 0, 0, nones, len(arr) - nones
else:
nones = sum(np.equal(arr, None) * weights)
return pinf, minf, 0, 0, nones, sum(weights) - nones
if sp.issparse(arr) and weights is not None:
raise NotImplementedError("counting of missing values for"
"weighted arrays of type 'object' is not implemented")
y = np.zeros((arr.shape[1], 6), float)
y[:, 0] = pinf
y[:, 1] = minf
if sp.issparse(arr):
y[:, 4] = np.bincount(arr.indices, minlength=arr.shape[1])
elif weights is None:
y[:, 4] = np.sum(np.equal(arr, None), axis=0)
else:
y[:, 4] = np.sum(np.equal(arr, None) * weights[:, np.newaxis], axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
return y
def stats(arr, weights=None, compute_variance=False):
if not 1 <= arr.ndim <= 2:
raise ValueError("bottlechest.stats handles only 1-d and 2-d arrays")
if arr.dtype == object:
# can't compute min and max, but we can count 'nans'
return stats_object(arr, weights)
if arr.ndim == 1:
a_min, a_max = np.nanmin(arr), np.nanmax(arr)
if weights is None:
nans = np.sum(np.isnan(arr))
non_nans = len(arr) - nans
mean = np.nansum(arr) / non_nans
var = np.nansum((arr - mean) ** 2) / (non_nans - 1)
else:
tot_w = np.sum(weights)
nans = np.sum(np.isnan(arr) * weights)
non_nans = tot_w - nans
mean = np.nansum(arr * weights) / non_nans
var = np.nansum(weights * (arr - mean) ** 2)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = non_nans ** 2 - tot_w2
if d > 1e-6:
var *= non_nans / d
return a_min, a_max, mean, var, nans, non_nans
if sp.issparse(arr):
arr = arr.todense()
y = np.zeros((arr.shape[1], 6), dtype=float)
y[:, 0] = nanmin(arr, 0)
y[:, 1] = nanmax(arr, 0)
if weights:
tot_w = np.sum(weights)
y[:, 4] = countnans(arr, weights, 0)
y[:, 5] = tot_w - y[:, 4]
y[:, 2] = nanmean(arr * weights, 0) / y[:, 4]
y[:, 3] = nansum(weights * (arr - y[:, 2]) ** 2, 0)
tot_w2 = np.sum((1 - np.isnan(arr)) * weights ** 2)
d = y[:, 5] ** 2 - tot_w2
if d > 1e-6:
y[:, 3] *= y[:, 5] / d
else:
y[:, 4] = countnans(arr, axis=0)
y[:, 5] = arr.shape[0] - y[:, 4]
y[:, 2] = nanmean(arr, 0) / y[:, 4]
y[:, 3] = nansum((arr - y[:, 2]) ** 2, 0) / (y[:, 4] - 1)
y[:, 2][np.isinf(y[:, 2])] = 0
return y
# ---------------------------------------------------------------------------
#
# SciPy
#
# Local copy of scipy.stats functions to avoid (by popular demand) a SciPy
# dependency. The SciPy license is included in the Bottleneck license file,
# which is distributed with Bottleneck.
#
# Code taken from scipy trunk on Dec 16, 2010.
# nanmedian taken from scipy trunk on Dec 17, 2010.
# rankdata taken from scipy HEAD on Mar 16, 2011.
def scipy_nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int, optional
Axis along which the mean is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
factor = 1.0-np.sum(np.isnan(x),axis)*1.0/Norig
x[np.isnan(x)] = 0
return np.mean(x,axis)/factor
def scipy_nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x,axis)
x = x.copy()
Norig = x.shape[axis]
Nnan = np.sum(np.isnan(x),axis)*1.0
n = Norig - Nnan
x[np.isnan(x)] = 0.
m1 = np.sum(x,axis)/n
if axis:
d = (x - np.expand_dims(m1, axis))**2.0
else:
d = (x - m1)**2.0
m2 = np.sum(d,axis)-(m1*m1)*Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
cond = 1-np.isnan(arr1d)
x = np.sort(np.compress(cond,arr1d,axis=-1))
if x.size == 0:
return np.nan
return np.median(x)
# Feb 2011: patched nanmedian to handle nanmedian(a, 1) with a = np.ones((2,0))
def scipy_nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int, optional
Axis along which the median is computed. Default is 0, i.e. the
first axis.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
shape = list(x.shape)
shape.pop(axis)
if 0 in shape:
x = np.empty(shape)
else:
x = x.copy()
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
fastsort : ndarray of type int
sorted indices into the original array
"""
# TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
def scipy_rankdata(a):
"""
Ranks the data, dealing with ties appropriately.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that set.
Ranks begin at 1, not 0.
Parameters
----------
a : array_like
This array is first flattened.
Returns
-------
rankdata : ndarray
An array of length equal to the size of `a`, containing rank scores.
Examples
--------
>>> stats.rankdata([0, 2, 2, 3])
array([ 1. , 2.5, 2.5, 4. ])
"""
a = np.ravel(a)
n = len(a)
svec, ivec = fastsort(a)
sumranks = 0
dupcount = 0
newarray = np.zeros(n, float)
for i in range(n):
sumranks += i
dupcount += 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
def scipy_ss(a, axis=0):
"""
Squares each element of the input array, and returns the square(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
The axis along which to calculate. If None, use whole array.
Default is 0, i.e. along the first axis.
Returns
-------
ss : ndarray
The sum along the given axis for (a**2).
See also
--------
square_of_sums : The square(s) of the sum(s) (the opposite of `ss`).
Examples
--------
>>> from scipy import stats
>>> a = np.array([1., 2., 5.])
>>> stats.ss(a)
30.0
And calculating along an axis:
>>> b = np.array([[1., 2., 5.], [2., 5., 6.]])
>>> stats.ss(b, axis=1)
array([ 30., 65.])
"""
a, axis = _chk_asarray(a, axis)
if 'int' in str(a.dtype):
a = a.astype('int64')
return np.sum(a*a, axis) | random_line_split | |
yintai.js | window.onload=function(){
var box=getClass('bannera',document);
// console.dir(box);
var as=box[0].getElementsByTagName('a');
// console.dir(as);
var bodians=getClass('lunbodian',document);
// console.dir(bodians);
var bodiandiv=bodians[0].getElementsByTagName('div');
// console.dir(bodiandiv);
var lrclickbox=getClass("xuanzeanniu",document)[0]
var leftclick=getClass('left',box[0]);
// console.dir(leftclick);
var rightclick=getClass('right',box[0]);
// console.dir(rightclick);
// 设置第一张为默认图片 第一个轮播点为默认选中颜色
as[0].style.zIndex=10;
bodiandiv[0].style.background='#e5004f';
// 声明下标为0
var index=0;
// 调用函数
t=setInterval(move,2000);
// 封装函数
function move(){
index++;
// 判断下标如果等于图片的个数,就重新给下标赋值为零
if (index==as.length) {index=0};
// 循环遍历
for (var i = 0; i < as.length; i++) {
// 先把所有照片层级调低,轮播点的颜色为空
as[i].style.zIndex=0;
bodiandiv[i].style.background='';
};
as[index].style.zIndex=10;
bodiandiv[index].style.background='#e5004f';
}
box[0].onmouseover=function(){
clearInterval(t);
lrclickbox.style.zIndex=15;
};
box[0].onmouseout=function(){
t=setInterval(move,2000);
lrclickbox.style.zIndex=0;
};
for (var i = 0; i < bodiandiv.length; i++) {
bodiandiv[i].index=i;
bodiandiv[i].onmouseover=function(){
for (var j = 0; j < as.length; j++) {
bodiandiv[j].style.background='';
as[j].style.zIndex=0;
}
as[this.index].style.zIndex=10;
bodiandiv[this.index].style.background='#e5004f';
}
};
rightclick[0].onclick=function(){
move();
};
rightclick[0].onmouseover=function(){
rightclick[0].style.background='#cc477a';
}
rightclick[0].onmouseout=function(){
rightclick[0].style.background='';
}
leftclick[0].onmouseover=function(){
leftclick[0].style.background='#cc477a';
}
leftclick[0].onmouseout=function(){
leftclick[0].style.background='';
}
leftclick[0].onclick=function(){
index--
if (index<0) {index=as.length-1};
for (var i = 0; i < as.length; i++) {
as[i].style.zIndex=0;
bodiandiv[i].style.background=""
};
as[index].style.zIndex=10;
bodiandiv[index].style.background="#e5004f"
}
// 选项卡开始
var xxk=getClass("link",document);
var inner=xxk[0].getElementsByTagName("a");
var hongdian=xxk[0].getElementsByTagName("span");
var shows=getClass("zw3",document);
for (var i = 0; i < inner.length; i++) {
inner[i].aa=i;
shows[0].style.display='block';
hongdian[0].style.display="block";
inner[0].style.borderBottom="4px solid red";
inner[i].onmouseover=function(){
for (var j = 0; j <shows.length; j++) {
shows[j].style.display='none';
inner[j].style.borderBottom="";
hongdian[j].style.display="none";
};
shows[this.aa].style.display='block';
this.style.borderBottom="4px solid red";
hongdian[this.aa].style.display="block";
};
};
// 图片四边的动画效果
var zw4s=getClass('zw4',document);
// paomaxian(zw4s[0])
// paomaxian(zw4s[1])
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],220,263);
};
function paomaxian(zw4s,x,y){
var zuoshangs=getClass("zuoshang",zw4s);
var youxias=getClass("youxia",zw4s);
var shangzuos=getClass("shangzuo",zw4s);
var xiayous=getClass("xiayou",zw4s);
zw4s.onmouseover=function(){
animate(zuoshangs[0],{height:y},400);
animate(youxias[0],{height:y},400);
animate(shangzuos[0],{width:x},400);
animate(xiayous[0],{width:x},400);
}
zw4s.onmouseout=function(){
animate(zuoshangs[0],{height:0},400);
animate(youxias[0],{height:0},400);
animate(shangzuos[0],{width:0},400);
animate(xiayous[0],{width:0},400);
}
}
// 热门品牌动画
var zw4s=$('.zhengwen4')
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],198,250)
};
// 时尚名品动画
var zw4s=$('.shishang8a');
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],272,182)
};
// 热门品牌选项卡
var anniu=getClass('zhengwen3a',document);
var ab=anniu[0].getElementsByTagName('a');
var span1=anniu[0].getElementsByTagName('span');
var zhenwen=getClass('zhengwen4box',document);
for (var i = 0; i < ab.length; i++) {
ab[i].aa=i;
ab[0].style.borderBottom='3px solid red';
span1[0].style.display='block';
zhenwen[0].style.display='block';
ab[i].onmouseover=function(){
for (var j = 0; j < zhenwen.length; j++) {
ab[j].style.borderBottom='';
span1[j].style.display='none';
zhenwen[j].style.display='none';
};
span1[this.aa].style.display='block';
zhenwen[this.aa].style.display='block';
this.style.borderBottom='3px solid red';
};
};
// 时尚名品无缝轮播模式图模式
var box=$('.shishang7');
for(var i=0;i<box.length;i++){
fengzhuang(box[i])
}
function fengzhuang(box){
var n=0;
var next=0;
var boximg=$('.imgbox',box)[0]
var img=$("a",box);
img[0].style.left='0';
var anniu=$('.sh7a',box);
anniu[0].style.background='red';
var btn=$('.shanniu',box)[0];
var left=$('.shleft',btn);
var right=$('.shright',btn);
var iw=parseInt(getStyle(img[0],"width"));
//向左轮播
function lunbo(){
next=n+1;
if (next==img.length) {
return
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=iw+'px';
animate(img[n],{left:-iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
};
//向右轮播
function fanxiang(){
next=n-1;
if (next<0) {
return;
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=-iw+'px';
animate(img[n],{left:iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
}
anniu[0].onmouseover=function(){
fanxiang();
};
anniu[1].onmouseover=function(){
lunbo();
}
box.onmouseover=function(){
btn.style.display="block"
box.style.opacity='0.8';
}
box.onmouseout=function(){
btn.style.display="none"
box.style.opacity='1';
}
btn.onmousedown=function(){
return false;
}
right[0].onclick=function(){
lunbo();
}
left[0].onclick=function(){
fanxiang();
}
// 无缝轮播
var shishang5=$('.shishang5')
for (var i = 0; i < shishang5.length; i++) {
diaoyong(shishang5[i])
};
function diaoyong(shishang5){
var tupianbox=$('.shishang6',shishang5);
// alert(tupianbox.length)
var leftbtn=$('.shishang6b',shishang5);
var rightbtn=$('.shishang6c',shishang5);
var h=document.documentElement.clientHeight;
var zw=tupianbox[0].offsetWidth;
tupianbox[0].style.left="0"
var indexs=0;
var nexts=0;
// console.log(h)
// t=setInterval(aar,3000)
function aar(){
nexts++
if (nexts==tupianbox.length) {
nexts=0
};
tupianbox[nexts].style.left=zw+"px"
animate(tupianbox[indexs],{left:-zw},500);
animate(tupianbox[nexts],{left:0},500)
indexs=nexts
}
// shishang5.onmouseover=function(){
// clearInterval(t);
// };
// shishang5.onmouseout=function(){
// t=setInterval(aar,3000)
// };
// 左右按钮
leftbtn[0].onclick=function(){
aar()
}
rightbtn[0].onclick=function(){
// clearInterval(t);
nexts--
if (nexts<0) {
nexts=tupianbox.length-1
};
tupianbox[nexts].style.left=-zw+"px"
animate(tupianbox[indexs],{left:zw},1000);
animate(tupianbox[nexts],{left:0},1000)
indexs=nexts;
}
}
// 按需加载
var youhua=$('.youhua');
var hs=document.documentElement.clientHeight;
var boximg=[];
var flags=[];
var fc=$('a',$('.fc')[0]);
var aa=true
for (var i = 0; i < youhua.length; i++) {
boximg.push(youhua[i].offsetTop);
flags.push(true);
};
window.onscroll=function(){
var top=document.body.scrollTop||document.documentElement.scrollTop;
// console.log(h)
for (var i = 0; i < boximg.length; i++) {
if(boximg[i]<=top+hs&&flags[i]){
flags[i]=false;
var imgs=$('img',youhua[i]);
for (var j = 0; j < imgs.length; j++) {
imgs[j].src=imgs[j].getAttribute('asrc')
};
}
};
//浮窗导航高光
// 浮窗导航
var fca=$('.fc')[0];
// var h=document.documentElement.clientHeight
// document.onscroll=function(){
// var Ih=document.body.scrollTop||document.documentElement.scrollTop;
if(top<1000&&aa){
aa=false;
// fca.style.display='none'
animate(fca,{bottom:-545},150)
// console.log(hs)
}
if(top>=1000&&aa==false){
aa=true;
// fca.style.display='block'
animate(fca,{bottom:(hs-545)/2},150)
};
}
//楼层跳转
// var youhuatop=
// console.log(top)
// for(var i=0;i<fc.length;i++){
// for(var j=0;j<youhua.length;j++){
// var youhuatop=youhua[0].offsetTop
// console.log(youhuatop)
// if(top>youhuatop){
// fc[j].className="youhua tianjialei"
// }
// }
}
// 头部隐藏卡片
var wx=$('.wx')[0];
var hy=$('.wx-erweima')[0]
yidong(wx)
var tp=$('.ks4a')[0]
var aa=$('a',tp)[0]
var b=tp.getElementsByTagName('b')[0];
yidong(wx,tp,hy,aa,b)
| var tps=$(".weixin")[0];
var aas=$("a",tps)[0];
var bs=$("b",tps)[0];
yidong(wxs,tps,hys,aas,bs);
var kst=$(".ks3t")[0];
var ks=$(".ks33")[0];
var aar=$("a",ks)[0];
var ksb=$(".ks3b")[0];
var bb=$("b",ks)[0];
yidong(kst,ks,ksb,aar,bb);
function yidong(wx,tp,hy,aa,b){
hover(wx,function(){
tp.style.background="#fff"
hy.style.display="block"
aa.style.color="red"
b.style.background=" url(jpg/tubiao05.jpg) 0 -17px no-repeat"
},function(){
tp.style.background=""
hy.style.display="none"
aa.style.color=""
b.style.background=""
})
}
// banner右边详情页
var furongqi=$(".banner1c");
for (var i = 0; i < furongqi.length; i++) {cedao(furongqi[i])
// console.log(furongqi[i])
};
function cedao(sd){
var bosxq=$(".xiangqin",sd)[0];
hover(sd,function(){
bosxq.style.display='block';
},function(){
bosxq.style.display='none';
})
}
// banner上的右侧图标动画
var boxs=$(".banner1a")[0];
var imgy=$("img",boxs)[0];
imgy.onmouseover=function(){
animate(imgy,{left:583},300)
}
imgy.onmouseout=function(){
animate(imgy,{left:590},300)
}
// 页脚的三个标志
var yj=$(".yj")[0];
var yjimg=$("a",yj);
for (var i = 0; i < yjimg.length; i++) {
yejiao(yjimg[i])
};
function yejiao(aa){
hover(aa,function(){
aa.style.opacity='0.7'
},function(){
aa.style.opacity="1"
})
}
}; | var wxs=$('.shouji')[0];
var hys=$(".shoujji")[0]; | random_line_split |
yintai.js |
window.onload=function(){
var box=getClass('bannera',document);
// console.dir(box);
var as=box[0].getElementsByTagName('a');
// console.dir(as);
var bodians=getClass('lunbodian',document);
// console.dir(bodians);
var bodiandiv=bodians[0].getElementsByTagName('div');
// console.dir(bodiandiv);
var lrclickbox=getClass("xuanzeanniu",document)[0]
var leftclick=getClass('left',box[0]);
// console.dir(leftclick);
var rightclick=getClass('right',box[0]);
// console.dir(rightclick);
// 设置第一张为默认图片 第一个轮播点为默认选中颜色
as[0].style.zIndex=10;
bodiandiv[0].style.background='#e5004f';
// 声明下标为0
var index=0;
// 调用函数
t=setInterval(move,2000);
// 封装函数
function move(){
index++;
// 判断下标如果等于图片的个数,就重新给下标赋值为零
if (index==as.leng | {index=0};
// 循环遍历
for (var i = 0; i < as.length; i++) {
// 先把所有照片层级调低,轮播点的颜色为空
as[i].style.zIndex=0;
bodiandiv[i].style.background='';
};
as[index].style.zIndex=10;
bodiandiv[index].style.background='#e5004f';
}
box[0].onmouseover=function(){
clearInterval(t);
lrclickbox.style.zIndex=15;
};
box[0].onmouseout=function(){
t=setInterval(move,2000);
lrclickbox.style.zIndex=0;
};
for (var i = 0; i < bodiandiv.length; i++) {
bodiandiv[i].index=i;
bodiandiv[i].onmouseover=function(){
for (var j = 0; j < as.length; j++) {
bodiandiv[j].style.background='';
as[j].style.zIndex=0;
}
as[this.index].style.zIndex=10;
bodiandiv[this.index].style.background='#e5004f';
}
};
rightclick[0].onclick=function(){
move();
};
rightclick[0].onmouseover=function(){
rightclick[0].style.background='#cc477a';
}
rightclick[0].onmouseout=function(){
rightclick[0].style.background='';
}
leftclick[0].onmouseover=function(){
leftclick[0].style.background='#cc477a';
}
leftclick[0].onmouseout=function(){
leftclick[0].style.background='';
}
leftclick[0].onclick=function(){
index--
if (index<0) {index=as.length-1};
for (var i = 0; i < as.length; i++) {
as[i].style.zIndex=0;
bodiandiv[i].style.background=""
};
as[index].style.zIndex=10;
bodiandiv[index].style.background="#e5004f"
}
// 选项卡开始
var xxk=getClass("link",document);
var inner=xxk[0].getElementsByTagName("a");
var hongdian=xxk[0].getElementsByTagName("span");
var shows=getClass("zw3",document);
for (var i = 0; i < inner.length; i++) {
inner[i].aa=i;
shows[0].style.display='block';
hongdian[0].style.display="block";
inner[0].style.borderBottom="4px solid red";
inner[i].onmouseover=function(){
for (var j = 0; j <shows.length; j++) {
shows[j].style.display='none';
inner[j].style.borderBottom="";
hongdian[j].style.display="none";
};
shows[this.aa].style.display='block';
this.style.borderBottom="4px solid red";
hongdian[this.aa].style.display="block";
};
};
// 图片四边的动画效果
var zw4s=getClass('zw4',document);
// paomaxian(zw4s[0])
// paomaxian(zw4s[1])
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],220,263);
};
function paomaxian(zw4s,x,y){
var zuoshangs=getClass("zuoshang",zw4s);
var youxias=getClass("youxia",zw4s);
var shangzuos=getClass("shangzuo",zw4s);
var xiayous=getClass("xiayou",zw4s);
zw4s.onmouseover=function(){
animate(zuoshangs[0],{height:y},400);
animate(youxias[0],{height:y},400);
animate(shangzuos[0],{width:x},400);
animate(xiayous[0],{width:x},400);
}
zw4s.onmouseout=function(){
animate(zuoshangs[0],{height:0},400);
animate(youxias[0],{height:0},400);
animate(shangzuos[0],{width:0},400);
animate(xiayous[0],{width:0},400);
}
}
// 热门品牌动画
var zw4s=$('.zhengwen4')
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],198,250)
};
// 时尚名品动画
var zw4s=$('.shishang8a');
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],272,182)
};
// 热门品牌选项卡
var anniu=getClass('zhengwen3a',document);
var ab=anniu[0].getElementsByTagName('a');
var span1=anniu[0].getElementsByTagName('span');
var zhenwen=getClass('zhengwen4box',document);
for (var i = 0; i < ab.length; i++) {
ab[i].aa=i;
ab[0].style.borderBottom='3px solid red';
span1[0].style.display='block';
zhenwen[0].style.display='block';
ab[i].onmouseover=function(){
for (var j = 0; j < zhenwen.length; j++) {
ab[j].style.borderBottom='';
span1[j].style.display='none';
zhenwen[j].style.display='none';
};
span1[this.aa].style.display='block';
zhenwen[this.aa].style.display='block';
this.style.borderBottom='3px solid red';
};
};
// 时尚名品无缝轮播模式图模式
var box=$('.shishang7');
for(var i=0;i<box.length;i++){
fengzhuang(box[i])
}
function fengzhuang(box){
var n=0;
var next=0;
var boximg=$('.imgbox',box)[0]
var img=$("a",box);
img[0].style.left='0';
var anniu=$('.sh7a',box);
anniu[0].style.background='red';
var btn=$('.shanniu',box)[0];
var left=$('.shleft',btn);
var right=$('.shright',btn);
var iw=parseInt(getStyle(img[0],"width"));
//向左轮播
function lunbo(){
next=n+1;
if (next==img.length) {
return
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=iw+'px';
animate(img[n],{left:-iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
};
//向右轮播
function fanxiang(){
next=n-1;
if (next<0) {
return;
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=-iw+'px';
animate(img[n],{left:iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
}
anniu[0].onmouseover=function(){
fanxiang();
};
anniu[1].onmouseover=function(){
lunbo();
}
box.onmouseover=function(){
btn.style.display="block"
box.style.opacity='0.8';
}
box.onmouseout=function(){
btn.style.display="none"
box.style.opacity='1';
}
btn.onmousedown=function(){
return false;
}
right[0].onclick=function(){
lunbo();
}
left[0].onclick=function(){
fanxiang();
}
// 无缝轮播
var shishang5=$('.shishang5')
for (var i = 0; i < shishang5.length; i++) {
diaoyong(shishang5[i])
};
function diaoyong(shishang5){
var tupianbox=$('.shishang6',shishang5);
// alert(tupianbox.length)
var leftbtn=$('.shishang6b',shishang5);
var rightbtn=$('.shishang6c',shishang5);
var h=document.documentElement.clientHeight;
var zw=tupianbox[0].offsetWidth;
tupianbox[0].style.left="0"
var indexs=0;
var nexts=0;
// console.log(h)
// t=setInterval(aar,3000)
function aar(){
nexts++
if (nexts==tupianbox.length) {
nexts=0
};
tupianbox[nexts].style.left=zw+"px"
animate(tupianbox[indexs],{left:-zw},500);
animate(tupianbox[nexts],{left:0},500)
indexs=nexts
}
// shishang5.onmouseover=function(){
// clearInterval(t);
// };
// shishang5.onmouseout=function(){
// t=setInterval(aar,3000)
// };
// 左右按钮
leftbtn[0].onclick=function(){
aar()
}
rightbtn[0].onclick=function(){
// clearInterval(t);
nexts--
if (nexts<0) {
nexts=tupianbox.length-1
};
tupianbox[nexts].style.left=-zw+"px"
animate(tupianbox[indexs],{left:zw},1000);
animate(tupianbox[nexts],{left:0},1000)
indexs=nexts;
}
}
// 按需加载
var youhua=$('.youhua');
var hs=document.documentElement.clientHeight;
var boximg=[];
var flags=[];
var fc=$('a',$('.fc')[0]);
var aa=true
for (var i = 0; i < youhua.length; i++) {
boximg.push(youhua[i].offsetTop);
flags.push(true);
};
window.onscroll=function(){
var top=document.body.scrollTop||document.documentElement.scrollTop;
// console.log(h)
for (var i = 0; i < boximg.length; i++) {
if(boximg[i]<=top+hs&&flags[i]){
flags[i]=false;
var imgs=$('img',youhua[i]);
for (var j = 0; j < imgs.length; j++) {
imgs[j].src=imgs[j].getAttribute('asrc')
};
}
};
//浮窗导航高光
// 浮窗导航
var fca=$('.fc')[0];
// var h=document.documentElement.clientHeight
// document.onscroll=function(){
// var Ih=document.body.scrollTop||document.documentElement.scrollTop;
if(top<1000&&aa){
aa=false;
// fca.style.display='none'
animate(fca,{bottom:-545},150)
// console.log(hs)
}
if(top>=1000&&aa==false){
aa=true;
// fca.style.display='block'
animate(fca,{bottom:(hs-545)/2},150)
};
}
//楼层跳转
// var youhuatop=
// console.log(top)
// for(var i=0;i<fc.length;i++){
// for(var j=0;j<youhua.length;j++){
// var youhuatop=youhua[0].offsetTop
// console.log(youhuatop)
// if(top>youhuatop){
// fc[j].className="youhua tianjialei"
// }
// }
}
// 头部隐藏卡片
var wx=$('.wx')[0];
var hy=$('.wx-erweima')[0]
yidong(wx)
var tp=$('.ks4a')[0]
var aa=$('a',tp)[0]
var b=tp.getElementsByTagName('b')[0];
yidong(wx,tp,hy,aa,b)
var wxs=$('.shouji')[0];
var hys=$(".shoujji")[0];
var tps=$(".weixin")[0];
var aas=$("a",tps)[0];
var bs=$("b",tps)[0];
yidong(wxs,tps,hys,aas,bs);
var kst=$(".ks3t")[0];
var ks=$(".ks33")[0];
var aar=$("a",ks)[0];
var ksb=$(".ks3b")[0];
var bb=$("b",ks)[0];
yidong(kst,ks,ksb,aar,bb);
function yidong(wx,tp,hy,aa,b){
hover(wx,function(){
tp.style.background="#fff"
hy.style.display="block"
aa.style.color="red"
b.style.background=" url(jpg/tubiao05.jpg) 0 -17px no-repeat"
},function(){
tp.style.background=""
hy.style.display="none"
aa.style.color=""
b.style.background=""
})
}
// banner右边详情页
var furongqi=$(".banner1c");
for (var i = 0; i < furongqi.length; i++) {cedao(furongqi[i])
// console.log(furongqi[i])
};
function cedao(sd){
var bosxq=$(".xiangqin",sd)[0];
hover(sd,function(){
bosxq.style.display='block';
},function(){
bosxq.style.display='none';
})
}
// banner上的右侧图标动画
var boxs=$(".banner1a")[0];
var imgy=$("img",boxs)[0];
imgy.onmouseover=function(){
animate(imgy,{left:583},300)
}
imgy.onmouseout=function(){
animate(imgy,{left:590},300)
}
// 页脚的三个标志
var yj=$(".yj")[0];
var yjimg=$("a",yj);
for (var i = 0; i < yjimg.length; i++) {
yejiao(yjimg[i])
};
function yejiao(aa){
hover(aa,function(){
aa.style.opacity='0.7'
},function(){
aa.style.opacity="1"
})
}
}; | th) | identifier_name |
yintai.js |
window.onload=function(){
var box=getClass('bannera',document);
// console.dir(box);
var as=box[0].getElementsByTagName('a');
// console.dir(as);
var bodians=getClass('lunbodian',document);
// console.dir(bodians);
var bodiandiv=bodians[0].getElementsByTagName('div');
// console.dir(bodiandiv);
var lrclickbox=getClass("xuanzeanniu",document)[0]
var leftclick=getClass('left',box[0]);
// console.dir(leftclick);
var rightclick=getClass('right',box[0]);
// console.dir(rightclick);
// 设置第一张为默认图片 第一个轮播点为默认选中颜色
as[0].style.zIndex=10;
bodiandiv[0].style.background='#e5004f';
// 声明下标为0
var index=0;
// 调用函数
t=setInterval(move,2000);
// 封装函数
function move(){
index++;
// 判断下标如果等于图片的个数,就重新给下标赋值为零
if (index==as.length) {index=0};
// 循环遍历
for (var i = 0; i < as.length; i++) {
// 先把所有照片层级调低,轮播点的颜色为空
as[i].style.zIndex=0;
bodiandiv[i].style.background='';
};
as[index].style.zIndex=10;
bodiandiv[index].style.background='#e5004f';
}
box[0].onmouseover=function(){
clearInterval(t);
lrclickbox.style.zIndex=15;
};
box[0].onmouseout=function(){
t=setInterval(move,2000);
lrclickbox.style.zIndex=0;
};
for (var i = 0; i < bodiandiv.length; i++) {
bodiandiv[i].index=i;
bodiandiv[i].onmouseover=function(){
for (var j = 0; j < as.length; j++) {
bodiandiv[j].style.background='';
as[j].style.zIndex=0;
}
as[this.index].style.zIndex=10;
bodiandiv[this.index].style.background='#e5004f';
}
};
rightclick[0].onclick=function(){
move();
};
rightclick[0].onmouseover=function(){
rightclick[0].style.background='#cc477a';
}
rightclick[0].onmouseout=function(){
rightclick[0].style.background='';
}
leftclick[0].onmouseover=function(){
leftclick[0].style.background='#cc477a';
}
leftclick[0].onmouseout=function(){
leftclick[0].style.background='';
}
leftclick[0].onclick=function(){
index--
if (index<0) {index=as.length-1};
for (var i = 0; i < as.length; i++) {
as[i].style.zIndex=0;
bodiandiv[i].style.background=""
};
as[index].style.zIndex=10;
bodiandiv[index].style.background="#e5004f"
}
// 选项卡开始
var xxk=getClass("link",document);
var inner=xxk[0].getElementsByTagName("a");
var hongdian=xxk[0].getElementsByTagName("span");
var shows=getClass("zw3",document);
for (var i = 0; i < inner.length; i++) {
inner[i].aa=i;
shows[0].style.display='block';
hongdian[0].style.display="block";
inner[0].style.borderBottom="4px solid red";
inner[i].onmouseover=function(){
for (var j = 0; j <shows.length; j++) {
shows[j].style.display='none';
inner[j].style.borderBottom="";
hongdian[j].style.display="none";
};
shows[this.aa].style.display='block';
this.style.borderBottom="4px solid red";
hongdian[this.aa].style.display="block";
};
};
// 图片四边的动画效果
var zw4s=getClass('zw4',document);
// paomaxian(zw4s[0])
// paomaxian(zw4s[1])
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],220,263);
};
function paomaxian(zw4s,x,y){
var zuoshangs=getClass("zuoshang",zw4s);
var youxias=getClass("youxia",zw4s);
var shangzuos=getClass("shangzuo",zw4s);
var xiayous=getClass("xiayou",zw4s);
zw4s.onmouseover=function(){
animate(zuoshangs[0],{height:y},400);
animate(youxias[0],{height:y},400);
animate(shangzuos[0],{width:x},400);
animate(xiayous[0],{width:x},400);
}
zw4s.onmouseout=function(){
animate(zuoshangs[0],{height:0},400);
animate(youxias[0],{height:0},400);
animate(shangzuos[0],{width:0},400);
animate(xiayous[0],{width:0},400);
}
}
// 热门品牌动画
var zw4s=$('.zhengwen4')
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],198,250)
};
// 时尚名品动画
var zw4s=$('.shishang8a');
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],272,182)
};
// 热门品牌选项卡
var anniu=getClass('zhengwen3a',document);
var ab=anniu[0].getElementsByTagName('a');
var span1=anniu[0].getElementsByTagName('span');
var zhenwen=getClass('zhengwen4box',document);
for (var i = 0; i < ab.length; i++) {
ab[i].aa=i;
ab[0].style.borderBottom='3px solid red';
span1[0].style.display='block';
zhenwen[0].style.display='block';
ab[i].onmouseover=function(){
for (var j = 0; j < zhenwen.length; j++) {
ab[j].style.borderBottom='';
span1[j].style.display='none';
zhenwen[j].style.display='none';
};
span1[this.aa].style.display='block';
zhenwen[this.aa].style.display='block';
this.style.borderBottom='3px solid red';
};
};
// 时尚名品无缝轮播模式图模式
var box=$('.shishang7');
for(var i=0;i<box.length;i++){
fengzhuang(box[i])
}
function fengzhuang(box){
var n=0;
var next=0;
var boximg=$('.imgbox',box)[0]
var img=$("a",box);
img[0].style.left='0';
var anniu=$('.sh7a',box);
anniu[0].style.background='red';
var btn=$('.shanniu',box)[0];
var left=$('.shleft',btn);
var right=$('.shright',btn);
var iw=parseInt(getStyle(img[0],"width"));
//向左轮播
function lunbo(){
next=n+1;
if (next==img.length) {
return
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=iw+'px';
animate(img[n],{left:-iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
};
//向右轮播
function fanxiang(){
next=n-1;
if (next<0) {
return;
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=-iw+'px';
animate(img[n],{left:iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
}
anniu[0].onmouseover=function(){
fanxiang();
};
anniu[1].onmouseover=function(){
lunbo();
}
box.onmouseover=function(){
btn.style.display="block"
box.style.opacity='0.8';
}
box.onmouseout=function(){
btn.style.display="none"
box.style.opacity='1';
}
btn.onmousedown=function(){
return false;
}
right[0].onclick=function(){
lunbo();
}
left[0].onclick=function(){
fanxiang();
}
// 无缝轮播
var shishang5=$('.shishang5')
for (var i = 0; i < shishang5.length; i++) {
diaoyong(shishang5[i])
};
function diaoyong(shishang5){
var tupianbox=$('.shishang6',shishang5);
// alert(tupianbox.length)
var leftbtn=$('.shishang6b',shishang5);
var rightbtn=$('.shishang6c',shishang5);
var h=document.documentElement.clientHeight;
var zw=tupianbox[0].offsetWidth;
tupianbox[0].style.left="0"
var indexs=0;
var nexts=0;
// console.log(h)
// t=setInterval(aar,3000)
function aar(){
nexts++
if (nexts==tupianbox.length) {
nexts=0
};
tupianbox[nexts].style.left=zw+"px"
animate(tupianbox[indexs],{left:-zw},500);
animate(tupianbox[nexts],{left:0},500)
indexs=nexts
}
// shishang5.onmouseover=function(){
// clearInterval(t);
// };
// shishang5.onmouseout=function(){
// t=setInterval(aar,3000)
// };
// 左右按钮
leftbtn[0].onclick=function(){
aar()
}
rightbtn[0].onclick=function(){
// clearInterval(t);
nexts--
if (nexts<0) {
nexts=tupianbox.length-1
};
tupianbox[nexts].style.left=-zw+"px"
animate(tupianbox[indexs],{left:zw},1000);
animate(tupianbox[nexts],{left:0},1000)
indexs=nexts;
}
}
// 按需加载
var youhua=$('.youhua');
var hs=document.documentElement.clientHeight;
var boximg=[];
var flags=[];
var fc=$('a',$('.fc')[0]);
var aa=true
for (var i = 0; i < youhua.length; i++) {
boximg.push(youhua[i].offsetTop);
flags.push(true);
};
window.onscroll=function(){
var top=document.body.scrollTop||document.documentElement.scrollTop;
// console.log(h)
for (var i = 0; i < boximg.length; i++) {
if(boximg[i]<=top+hs&&flags[i]){
flags[i]=false;
var imgs=$('img',youhua[i]);
for (var j = 0; j < imgs.length; j++) {
imgs[j].src=imgs[j].getAttribute('asrc')
};
}
};
//浮窗导航高光
// 浮窗导航
var fca=$('.fc')[0];
// var h=document.documentElement.clientHeight
// document.onscroll=function(){
// var Ih=document.body.scrollTop||document.documentElement.scrollTop;
if(top<1000&&aa){
aa=false;
// fca.style.display='none'
animate(fca,{bottom:-545},150)
// console.log(hs)
}
if(top>=1000&&aa==false){
aa=true;
// fca.style.display='block'
animate(fca,{bottom:(hs-545)/2},150)
};
}
//楼层跳转
// var youhuatop=
// console.log(top)
// for(var i=0;i<fc.length;i++){
// for(var j=0;j<youhua.length;j++){
// var youhuatop=youhua[0].offsetTop
// console.log(youhuatop)
// if(top>youhuatop){
// fc[j].className="youhua tianjialei"
// }
// }
}
// 头部隐藏卡片
var wx=$('.wx')[0];
var hy=$('.wx-erweima')[0]
yidong(wx)
var tp=$('.ks4a')[0]
var aa=$('a',tp)[0]
var b=tp.getElementsByTagName('b')[0];
yidong(wx,tp,hy,aa,b)
var wxs=$('.shouji')[0];
var hys=$(".shoujji")[0];
var tps=$(".weixin")[0];
var aas=$("a",tps)[0];
var bs=$("b",tps)[0];
yidong(wxs,tps,hys,aas,bs);
var kst=$(".ks3t")[0];
var ks=$(".ks33")[0];
var aar=$("a",ks)[0];
var ksb=$(".ks3b")[0];
var bb=$("b",ks)[0];
yidong(kst,ks,ksb,aar,bb);
function yidong(wx,tp,hy,aa,b){
hover(wx,function(){
tp.style.background="#fff"
hy.style.display="block"
aa.style.color="red"
b.style.background=" url(jpg/tubiao05.jpg) 0 -17px no-repeat"
},function(){
tp.style.background=""
hy.style.display="none"
aa.style.color=""
b.style.background=""
})
}
// banner右边详情页
var furongqi=$(".banner1c");
for (var i = 0; i < furongqi.length; i++) {cedao(furongqi[i])
// console.log(furongqi[i])
};
function cedao(sd){
var bosxq=$(".xiangqin",sd)[0];
hover(sd,function(){
bosxq.style.display='block';
},function(){
bosxq.style.display='none';
})
}
// banner上的右侧图标动画
var boxs=$(".banner1a")[0];
var imgy=$("img",boxs)[0];
imgy.onmouseover=function(){
animate(imgy,{left:583},300)
}
imgy.onmouseout=function(){
animate(imgy,{left:590},300)
}
// | aa,function(){
aa.style.opacity='0.7'
},function(){
aa.style.opacity="1"
})
}
}; | 页脚的三个标志
var yj=$(".yj")[0];
var yjimg=$("a",yj);
for (var i = 0; i < yjimg.length; i++) {
yejiao(yjimg[i])
};
function yejiao(aa){
hover( | identifier_body |
yintai.js |
window.onload=function(){
var box=getClass('bannera',document);
// console.dir(box);
var as=box[0].getElementsByTagName('a');
// console.dir(as);
var bodians=getClass('lunbodian',document);
// console.dir(bodians);
var bodiandiv=bodians[0].getElementsByTagName('div');
// console.dir(bodiandiv);
var lrclickbox=getClass("xuanzeanniu",document)[0]
var leftclick=getClass('left',box[0]);
// console.dir(leftclick);
var rightclick=getClass('right',box[0]);
// console.dir(rightclick);
// 设置第一张为默认图片 第一个轮播点为默认选中颜色
as[0].style.zIndex=10;
bodiandiv[0].style.background='#e5004f';
// 声明下标为0
var index=0;
// 调用函数
t=setInterval(move,2000);
// 封装函数
function move(){
index++;
// 判断下标如果等于图片的个数,就重新给下标赋值为零
if (index==as.length) {index=0};
// 循环遍历
for (var i = 0; i < as.length; i++) {
// 先把所有照片层级调低,轮播点的颜色为空
as[i].style.zIndex=0;
bodiandiv[i].style.background='';
};
as[index].style.zIndex=10;
bodiandiv[index].style.background='#e5004f';
}
box[0].onmouseover=function(){
clearInterval(t);
lrclickbox.style.zIndex=15;
};
box[0].onmouseout=function(){
t=setInterval(move,2000);
lrclickbox.style.zIndex=0;
};
for (var i = 0; i < bodiandiv.length; i++) {
bodiandiv[i].index=i;
bodiandiv[i].onmouseover=function(){
for (var j = 0; j < as.length; j++) {
bodiandiv[j].style.background='';
as[j].style.zIndex=0;
}
as[this.index].style.zIndex=10;
bodiandiv[this.index].style.background='#e5004f';
| 0].onmouseover=function(){
rightclick[0].style.background='#cc477a';
}
rightclick[0].onmouseout=function(){
rightclick[0].style.background='';
}
leftclick[0].onmouseover=function(){
leftclick[0].style.background='#cc477a';
}
leftclick[0].onmouseout=function(){
leftclick[0].style.background='';
}
leftclick[0].onclick=function(){
index--
if (index<0) {index=as.length-1};
for (var i = 0; i < as.length; i++) {
as[i].style.zIndex=0;
bodiandiv[i].style.background=""
};
as[index].style.zIndex=10;
bodiandiv[index].style.background="#e5004f"
}
// 选项卡开始
var xxk=getClass("link",document);
var inner=xxk[0].getElementsByTagName("a");
var hongdian=xxk[0].getElementsByTagName("span");
var shows=getClass("zw3",document);
for (var i = 0; i < inner.length; i++) {
inner[i].aa=i;
shows[0].style.display='block';
hongdian[0].style.display="block";
inner[0].style.borderBottom="4px solid red";
inner[i].onmouseover=function(){
for (var j = 0; j <shows.length; j++) {
shows[j].style.display='none';
inner[j].style.borderBottom="";
hongdian[j].style.display="none";
};
shows[this.aa].style.display='block';
this.style.borderBottom="4px solid red";
hongdian[this.aa].style.display="block";
};
};
// 图片四边的动画效果
var zw4s=getClass('zw4',document);
// paomaxian(zw4s[0])
// paomaxian(zw4s[1])
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],220,263);
};
function paomaxian(zw4s,x,y){
var zuoshangs=getClass("zuoshang",zw4s);
var youxias=getClass("youxia",zw4s);
var shangzuos=getClass("shangzuo",zw4s);
var xiayous=getClass("xiayou",zw4s);
zw4s.onmouseover=function(){
animate(zuoshangs[0],{height:y},400);
animate(youxias[0],{height:y},400);
animate(shangzuos[0],{width:x},400);
animate(xiayous[0],{width:x},400);
}
zw4s.onmouseout=function(){
animate(zuoshangs[0],{height:0},400);
animate(youxias[0],{height:0},400);
animate(shangzuos[0],{width:0},400);
animate(xiayous[0],{width:0},400);
}
}
// 热门品牌动画
var zw4s=$('.zhengwen4')
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],198,250)
};
// 时尚名品动画
var zw4s=$('.shishang8a');
for (var i = 0; i < zw4s.length; i++) {
paomaxian(zw4s[i],272,182)
};
// 热门品牌选项卡
var anniu=getClass('zhengwen3a',document);
var ab=anniu[0].getElementsByTagName('a');
var span1=anniu[0].getElementsByTagName('span');
var zhenwen=getClass('zhengwen4box',document);
for (var i = 0; i < ab.length; i++) {
ab[i].aa=i;
ab[0].style.borderBottom='3px solid red';
span1[0].style.display='block';
zhenwen[0].style.display='block';
ab[i].onmouseover=function(){
for (var j = 0; j < zhenwen.length; j++) {
ab[j].style.borderBottom='';
span1[j].style.display='none';
zhenwen[j].style.display='none';
};
span1[this.aa].style.display='block';
zhenwen[this.aa].style.display='block';
this.style.borderBottom='3px solid red';
};
};
// 时尚名品无缝轮播模式图模式
var box=$('.shishang7');
for(var i=0;i<box.length;i++){
fengzhuang(box[i])
}
function fengzhuang(box){
var n=0;
var next=0;
var boximg=$('.imgbox',box)[0]
var img=$("a",box);
img[0].style.left='0';
var anniu=$('.sh7a',box);
anniu[0].style.background='red';
var btn=$('.shanniu',box)[0];
var left=$('.shleft',btn);
var right=$('.shright',btn);
var iw=parseInt(getStyle(img[0],"width"));
//向左轮播
function lunbo(){
next=n+1;
if (next==img.length) {
return
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=iw+'px';
animate(img[n],{left:-iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
};
//向右轮播
function fanxiang(){
next=n-1;
if (next<0) {
return;
};
for (var i = 0; i < img.length; i++) {
anniu[i].style.background=''
};
img[next].style.left=-iw+'px';
animate(img[n],{left:iw},700);
animate(img[next],{left:0},700);
anniu[n].style.background=""
anniu[next].style.background='red'
n=next;
}
anniu[0].onmouseover=function(){
fanxiang();
};
anniu[1].onmouseover=function(){
lunbo();
}
box.onmouseover=function(){
btn.style.display="block"
box.style.opacity='0.8';
}
box.onmouseout=function(){
btn.style.display="none"
box.style.opacity='1';
}
btn.onmousedown=function(){
return false;
}
right[0].onclick=function(){
lunbo();
}
left[0].onclick=function(){
fanxiang();
}
// 无缝轮播
var shishang5=$('.shishang5')
for (var i = 0; i < shishang5.length; i++) {
diaoyong(shishang5[i])
};
function diaoyong(shishang5){
var tupianbox=$('.shishang6',shishang5);
// alert(tupianbox.length)
var leftbtn=$('.shishang6b',shishang5);
var rightbtn=$('.shishang6c',shishang5);
var h=document.documentElement.clientHeight;
var zw=tupianbox[0].offsetWidth;
tupianbox[0].style.left="0"
var indexs=0;
var nexts=0;
// console.log(h)
// t=setInterval(aar,3000)
function aar(){
nexts++
if (nexts==tupianbox.length) {
nexts=0
};
tupianbox[nexts].style.left=zw+"px"
animate(tupianbox[indexs],{left:-zw},500);
animate(tupianbox[nexts],{left:0},500)
indexs=nexts
}
// shishang5.onmouseover=function(){
// clearInterval(t);
// };
// shishang5.onmouseout=function(){
// t=setInterval(aar,3000)
// };
// 左右按钮
leftbtn[0].onclick=function(){
aar()
}
rightbtn[0].onclick=function(){
// clearInterval(t);
nexts--
if (nexts<0) {
nexts=tupianbox.length-1
};
tupianbox[nexts].style.left=-zw+"px"
animate(tupianbox[indexs],{left:zw},1000);
animate(tupianbox[nexts],{left:0},1000)
indexs=nexts;
}
}
// 按需加载
var youhua=$('.youhua');
var hs=document.documentElement.clientHeight;
var boximg=[];
var flags=[];
var fc=$('a',$('.fc')[0]);
var aa=true
for (var i = 0; i < youhua.length; i++) {
boximg.push(youhua[i].offsetTop);
flags.push(true);
};
window.onscroll=function(){
var top=document.body.scrollTop||document.documentElement.scrollTop;
// console.log(h)
for (var i = 0; i < boximg.length; i++) {
if(boximg[i]<=top+hs&&flags[i]){
flags[i]=false;
var imgs=$('img',youhua[i]);
for (var j = 0; j < imgs.length; j++) {
imgs[j].src=imgs[j].getAttribute('asrc')
};
}
};
//浮窗导航高光
// 浮窗导航
var fca=$('.fc')[0];
// var h=document.documentElement.clientHeight
// document.onscroll=function(){
// var Ih=document.body.scrollTop||document.documentElement.scrollTop;
if(top<1000&&aa){
aa=false;
// fca.style.display='none'
animate(fca,{bottom:-545},150)
// console.log(hs)
}
if(top>=1000&&aa==false){
aa=true;
// fca.style.display='block'
animate(fca,{bottom:(hs-545)/2},150)
};
}
//楼层跳转
// var youhuatop=
// console.log(top)
// for(var i=0;i<fc.length;i++){
// for(var j=0;j<youhua.length;j++){
// var youhuatop=youhua[0].offsetTop
// console.log(youhuatop)
// if(top>youhuatop){
// fc[j].className="youhua tianjialei"
// }
// }
}
// 头部隐藏卡片
var wx=$('.wx')[0];
var hy=$('.wx-erweima')[0]
yidong(wx)
var tp=$('.ks4a')[0]
var aa=$('a',tp)[0]
var b=tp.getElementsByTagName('b')[0];
yidong(wx,tp,hy,aa,b)
var wxs=$('.shouji')[0];
var hys=$(".shoujji")[0];
var tps=$(".weixin")[0];
var aas=$("a",tps)[0];
var bs=$("b",tps)[0];
yidong(wxs,tps,hys,aas,bs);
var kst=$(".ks3t")[0];
var ks=$(".ks33")[0];
var aar=$("a",ks)[0];
var ksb=$(".ks3b")[0];
var bb=$("b",ks)[0];
yidong(kst,ks,ksb,aar,bb);
function yidong(wx,tp,hy,aa,b){
hover(wx,function(){
tp.style.background="#fff"
hy.style.display="block"
aa.style.color="red"
b.style.background=" url(jpg/tubiao05.jpg) 0 -17px no-repeat"
},function(){
tp.style.background=""
hy.style.display="none"
aa.style.color=""
b.style.background=""
})
}
// banner右边详情页
var furongqi=$(".banner1c");
for (var i = 0; i < furongqi.length; i++) {cedao(furongqi[i])
// console.log(furongqi[i])
};
function cedao(sd){
var bosxq=$(".xiangqin",sd)[0];
hover(sd,function(){
bosxq.style.display='block';
},function(){
bosxq.style.display='none';
})
}
// banner上的右侧图标动画
var boxs=$(".banner1a")[0];
var imgy=$("img",boxs)[0];
imgy.onmouseover=function(){
animate(imgy,{left:583},300)
}
imgy.onmouseout=function(){
animate(imgy,{left:590},300)
}
// 页脚的三个标志
var yj=$(".yj")[0];
var yjimg=$("a",yj);
for (var i = 0; i < yjimg.length; i++) {
yejiao(yjimg[i])
};
function yejiao(aa){
hover(aa,function(){
aa.style.opacity='0.7'
},function(){
aa.style.opacity="1"
})
}
}; | }
};
rightclick[0].onclick=function(){
move();
};
rightclick[ | conditional_block |
manager.py | import logging
import re
from typing import List
import numpy as np
import pandas as pd
from . import note, split
from .note import Note, Link, Category
from .split import SplitNote
LOGGER = logging.getLogger(__name__)
NOTE_PARSE_REGEX = re.compile('id=\'([\d\w]+)\', note=\'([\d\w :,]+)\'')
class NoteManager:
"""Class to handle higher-level :class:`~budget.Note` manipulation
Attributes
----------
notes : :class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the :class:`~budget.Note` objects. `Index` is the :class:`str` ID of the
transaction that each :class:`~budget.Note` is linked to
"""
SQL_NOTE_TABLE = 'notes'
def __init__(self):
self.notes = pd.Series(name='note', dtype='object')
self.logger = logging.getLogger(__name__)
def load_notes(self, con) -> pd.Series:
"""Loads the :class:`~budget.Note` :class:`~pandas.Series` using a connection to a SQL database using
Parameters
----------
con : SQLAlchemy connectable, :class:`str`, or :mod:`sqlite3` connection
SQL connection
Returns
-------
:class:`~pandas.Series`
"""
# Read the whole table of notes
notes = pd.read_sql_query(sql=f'select * from {self.SQL_NOTE_TABLE}', con=con)
self.logger.debug(f'{notes.shape[0]} notes loaded from \'{self.SQL_NOTE_TABLE}\'')
# Set up the index, which will be the ID of the transaction the note is attached to
notes.set_index(notes.columns[0], inplace=True)
try:
# Select only the first column (should only be one)
notes = notes.iloc[:, 0].map(NoteManager.eval_note)
except NameError:
self.logger.debug('No notes loaded')
pass
# Assign to attribute
self.notes = notes
return notes
@staticmethod
def eval_note(input: str) -> note.Note:
"""Evaluates the :func:`repr` string, which reconstructs a :class:`~budget.Note` object
Parameters
----------
input : :class:`str`
:class:`str` produced by the :func:`repr` of that object
Returns
-------
:class:`~budget.Note`
"""
try:
return eval(input)
except (NameError, SyntaxError):
m = NOTE_PARSE_REGEX.search(input)
return NoteManager.parse_note(m.group(1), m.group(2))
@staticmethod
def parse_note(id: str, input: str, add_note_types=None) -> note.Note:
"""Looks for the `tag` of each type of :class:`~budget.Note` in the `input` string, then constructs a new
:class:`~budget.Note` object when it finds one
Parameters
----------
id : :class:`str`
id of the `Note` to create
input : :class:`str`
input :class:`str` to look in
add_note_types :
additional :class:`~budget.Note` types to parse
Returns
-------
:class:`~budget.Note`
"""
note_types = [SplitNote, Link, Category]
if add_note_types is not None:
try:
note_types.append(add_note_types)
except:
note_types.extend(add_note_types)
if isinstance(input, str):
for nt in note_types:
try:
if nt._tag in input:
res = nt(id, input)
break
except AttributeError:
raise AttributeError('Notes must have a _tag attribute')
try:
return res
except NameError as e:
# res won't be set if none of the tags match
return Note(id, input)
else:
if isinstance(input, Note):
raise TypeError(f'\'{input}\' is already a {type(input)}')
else:
raise TypeError(f'unknown type of note: {type(input)}')
def validate_notes(self, ids: pd.Series) -> bool:
"""Checks to make sure that all of the :class:`~budget.Note`s are contained in the ids
Parameters
----------
ids : set or like-like
:class:`list` or something that can be used in :meth:`~pandas.Series.isin`
Returns
-------
bool
`True` if all of the `Notes` in the :class:`~budget.notes.NoteManager` are in the given list of IDs
"""
return self.notes.map(lambda n: n.id).isin(ids).all()
def | (self, id: str, note: str, drop_dups: bool = True):
"""Parses a string into a :class:`~budget.Note` object and adds it to the :class:`~budget.notes.NoteManager` using
:class:`~pandas.Series.append` and optionally uses :class:`~pandas.Series.drop_duplicates`
Parameters
----------
id : str
id of the transaction to attach the :class:`~budget.Note` to
note : str
input string used to create the :class:`~budget.Note` object
drop_dups : bool
Whether to drop the duplicate notes
"""
n = self.parse_note(id, note)
self.notes = self.notes.append([pd.Series([n], index=[n.id])])
if drop_dups:
self.drop_duplicates()
def drop(self, id: str, note_text: str):
"""Drops a specific :class:`~budget.Note` using its ID and text
Parameters
----------
id : str
id of the note to drop
note_text : str
text of the note to drop
"""
print(f'Dropping note from {id}: {note_text}')
self.notes = self.notes[~self.notes.apply(
lambda n: (n.note == note_text) and (n.id == id)
)]
def drop_duplicates(self):
"""Removes duplicate `Notes` in the :class:`~budget.notes.NoteManager`
"""
self.notes = self.notes[~self.notes.map(repr).duplicated()]
def save_notes(self, con):
self.notes.map(repr).to_sql(name=self.SQL_NOTE_TABLE, con=con, if_exists='replace')
def get_notes_by_id(self, ids: List[str]) -> pd.Series:
"""Gets the notes that match the IDs in the given list
Parameters
----------
ids : List[str]
list of ids to get the notes
Returns
-------
:class:`~pandas.Series`
"""
return self.notes[self.notes.apply(lambda n: n.id in ids)]
def get_notes_by_type(self, typ: type) -> pd.Series:
"""Gets the notes that match the given type
Parameters
----------
typ : type
type of :class:`~budget.Note` to get
Returns
-------
:class:`~pandas.Series`
"""
# doesn't use isinstance() to prevent subtypes from being selected
return self.notes[self.notes.apply(lambda n: type(n) is typ)]
def manual_ids(self, cat: str) -> np.ndarray:
"""Gets ids of transactions that have been manually categorized as the given category
Parameters
----------
cat : str
category of transactions to get IDs for
Returns
-------
:class:`~numpy.ndarray`
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Category type and have a matching categorization
lambda n: isinstance(n, note.Category) and n.category == cat
)
].apply(lambda n: n.id).values
def split_ids(self, cat: str) -> pd.Series:
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a SplitNote type and have the category in one of its parts
lambda n: isinstance(n, split.SplitNote) and cat in n.parts
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id)
def linked_ids(self, df: pd.DataFrame) -> np.ndarray:
"""Gets ids of transactions that target those in the given DataFrame
Example
Transactions A and B are both linked to transaction C, which appears in the given DataFrame
Returns a Series of ids that include the ids of A and B
Returns
-------
:class:`~numpy.ndarray`: str
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Link type and have a target id in the given DataFrame
lambda n: isinstance(n, note.Link) and n.target in df['id'].values
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id).values
def apply_linked(self, df: pd.DataFrame) -> pd.DataFrame:
"""Applies Link notes in the given DataFrame, adding the value of each linked transaction onto the one it targets
The DataFrame needs to include both the original transactions and the ones linked to them. The values of the linked
transactions will be set to 0 as they are added onto the target transaction
Parameters
----------
df : :class:`~pandas.DataFrame`
transactions to apply the linked notes to
Returns
-------
:class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the modified transactions
"""
link_notes = self.get_notes_by_type(note.Link)
source_in_df = link_notes.apply(lambda n: n.id in df['id'].values)
target_in_df = link_notes.apply(lambda n: n.target in df['id'].values)
# assert (target_in_df & ~source_in_df).any()
df = df.reset_index().set_index('id')
try:
# if both source and target exist in the DataFrame, add the source Amount to the target Amount
for n in link_notes[source_in_df & target_in_df]:
df.loc[n.target, 'Amount'] += df.loc[n.id, 'Amount']
# set the values of all source transactions to 0
for n in link_notes[source_in_df]:
df.loc[n.id, 'Amount'] = 0
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_split(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
split_notes = self.get_notes_by_type(split.SplitNote)
for_this_cat = split_notes.apply(lambda n: cat in n.parts)
trans_in_df = split_notes.apply(lambda n: n.id in df['id'].values)
df = df.reset_index().set_index('id')
try:
# If the split is for this category, set the Amount equal to the modified value
for n in split_notes[trans_in_df & for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
df.loc[n.id, 'Amount'] = n.parts[cat].modify(orig_val)
# If the split is not for this category, then subtract all the other modified values
for n in split_notes[trans_in_df & ~for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
for target_cat, split_obj in n.parts.items():
df.loc[n.id, 'Amount'] -= split_obj.modify(orig_val)
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_notes(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
df = self.apply_linked(df)
df = self.apply_split(df, cat)
return df
def re_parse(self):
self.notes = self.notes.map(lambda n: self.parse_note(n.id, n.note))
@property
def note_text(self) -> pd.Series:
res = self.notes.apply(lambda n: n.note)
res.name = 'note text'
return res
def contains(self, input: str, case: bool = False, text: bool = False) -> pd.Series:
res = self.notes[self.note_text.str.contains(input, case=case)]
if text:
res = res.apply(lambda n: n.note)
return res
@property
def tagged_categories(self) -> pd.Series:
# returns a Series of the unique categories in Category notes
return self.get_notes_by_type(Category).apply(lambda n: n.category).drop_duplicates()
def drop_orphans(self, ids):
orphans = self.notes[~self.notes.index.isin(ids)]
self.notes = self.notes.drop(orphans.index)
LOGGER.debug(f'Dropped {orphans.shape[0]} orphaned messages')
| add_note | identifier_name |
manager.py | import logging
import re
from typing import List
import numpy as np
import pandas as pd
from . import note, split
from .note import Note, Link, Category
from .split import SplitNote
LOGGER = logging.getLogger(__name__)
NOTE_PARSE_REGEX = re.compile('id=\'([\d\w]+)\', note=\'([\d\w :,]+)\'')
class NoteManager:
"""Class to handle higher-level :class:`~budget.Note` manipulation
Attributes
----------
notes : :class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the :class:`~budget.Note` objects. `Index` is the :class:`str` ID of the
transaction that each :class:`~budget.Note` is linked to
"""
SQL_NOTE_TABLE = 'notes'
def __init__(self):
self.notes = pd.Series(name='note', dtype='object')
self.logger = logging.getLogger(__name__)
def load_notes(self, con) -> pd.Series:
"""Loads the :class:`~budget.Note` :class:`~pandas.Series` using a connection to a SQL database using
Parameters
----------
con : SQLAlchemy connectable, :class:`str`, or :mod:`sqlite3` connection
SQL connection
Returns
-------
:class:`~pandas.Series`
"""
# Read the whole table of notes
notes = pd.read_sql_query(sql=f'select * from {self.SQL_NOTE_TABLE}', con=con)
self.logger.debug(f'{notes.shape[0]} notes loaded from \'{self.SQL_NOTE_TABLE}\'')
# Set up the index, which will be the ID of the transaction the note is attached to
notes.set_index(notes.columns[0], inplace=True)
try:
# Select only the first column (should only be one)
notes = notes.iloc[:, 0].map(NoteManager.eval_note)
except NameError:
self.logger.debug('No notes loaded')
pass
# Assign to attribute
self.notes = notes
return notes
@staticmethod
def eval_note(input: str) -> note.Note:
"""Evaluates the :func:`repr` string, which reconstructs a :class:`~budget.Note` object
Parameters
----------
input : :class:`str`
:class:`str` produced by the :func:`repr` of that object
Returns
-------
:class:`~budget.Note`
"""
try:
return eval(input)
except (NameError, SyntaxError):
m = NOTE_PARSE_REGEX.search(input)
return NoteManager.parse_note(m.group(1), m.group(2))
@staticmethod
def parse_note(id: str, input: str, add_note_types=None) -> note.Note:
"""Looks for the `tag` of each type of :class:`~budget.Note` in the `input` string, then constructs a new
:class:`~budget.Note` object when it finds one
Parameters
----------
id : :class:`str`
id of the `Note` to create
input : :class:`str`
input :class:`str` to look in
add_note_types :
additional :class:`~budget.Note` types to parse
Returns
-------
:class:`~budget.Note`
"""
note_types = [SplitNote, Link, Category]
if add_note_types is not None:
|
if isinstance(input, str):
for nt in note_types:
try:
if nt._tag in input:
res = nt(id, input)
break
except AttributeError:
raise AttributeError('Notes must have a _tag attribute')
try:
return res
except NameError as e:
# res won't be set if none of the tags match
return Note(id, input)
else:
if isinstance(input, Note):
raise TypeError(f'\'{input}\' is already a {type(input)}')
else:
raise TypeError(f'unknown type of note: {type(input)}')
def validate_notes(self, ids: pd.Series) -> bool:
"""Checks to make sure that all of the :class:`~budget.Note`s are contained in the ids
Parameters
----------
ids : set or like-like
:class:`list` or something that can be used in :meth:`~pandas.Series.isin`
Returns
-------
bool
`True` if all of the `Notes` in the :class:`~budget.notes.NoteManager` are in the given list of IDs
"""
return self.notes.map(lambda n: n.id).isin(ids).all()
def add_note(self, id: str, note: str, drop_dups: bool = True):
"""Parses a string into a :class:`~budget.Note` object and adds it to the :class:`~budget.notes.NoteManager` using
:class:`~pandas.Series.append` and optionally uses :class:`~pandas.Series.drop_duplicates`
Parameters
----------
id : str
id of the transaction to attach the :class:`~budget.Note` to
note : str
input string used to create the :class:`~budget.Note` object
drop_dups : bool
Whether to drop the duplicate notes
"""
n = self.parse_note(id, note)
self.notes = self.notes.append([pd.Series([n], index=[n.id])])
if drop_dups:
self.drop_duplicates()
def drop(self, id: str, note_text: str):
"""Drops a specific :class:`~budget.Note` using its ID and text
Parameters
----------
id : str
id of the note to drop
note_text : str
text of the note to drop
"""
print(f'Dropping note from {id}: {note_text}')
self.notes = self.notes[~self.notes.apply(
lambda n: (n.note == note_text) and (n.id == id)
)]
def drop_duplicates(self):
"""Removes duplicate `Notes` in the :class:`~budget.notes.NoteManager`
"""
self.notes = self.notes[~self.notes.map(repr).duplicated()]
def save_notes(self, con):
self.notes.map(repr).to_sql(name=self.SQL_NOTE_TABLE, con=con, if_exists='replace')
def get_notes_by_id(self, ids: List[str]) -> pd.Series:
"""Gets the notes that match the IDs in the given list
Parameters
----------
ids : List[str]
list of ids to get the notes
Returns
-------
:class:`~pandas.Series`
"""
return self.notes[self.notes.apply(lambda n: n.id in ids)]
def get_notes_by_type(self, typ: type) -> pd.Series:
"""Gets the notes that match the given type
Parameters
----------
typ : type
type of :class:`~budget.Note` to get
Returns
-------
:class:`~pandas.Series`
"""
# doesn't use isinstance() to prevent subtypes from being selected
return self.notes[self.notes.apply(lambda n: type(n) is typ)]
def manual_ids(self, cat: str) -> np.ndarray:
"""Gets ids of transactions that have been manually categorized as the given category
Parameters
----------
cat : str
category of transactions to get IDs for
Returns
-------
:class:`~numpy.ndarray`
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Category type and have a matching categorization
lambda n: isinstance(n, note.Category) and n.category == cat
)
].apply(lambda n: n.id).values
def split_ids(self, cat: str) -> pd.Series:
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a SplitNote type and have the category in one of its parts
lambda n: isinstance(n, split.SplitNote) and cat in n.parts
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id)
def linked_ids(self, df: pd.DataFrame) -> np.ndarray:
"""Gets ids of transactions that target those in the given DataFrame
Example
Transactions A and B are both linked to transaction C, which appears in the given DataFrame
Returns a Series of ids that include the ids of A and B
Returns
-------
:class:`~numpy.ndarray`: str
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Link type and have a target id in the given DataFrame
lambda n: isinstance(n, note.Link) and n.target in df['id'].values
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id).values
def apply_linked(self, df: pd.DataFrame) -> pd.DataFrame:
"""Applies Link notes in the given DataFrame, adding the value of each linked transaction onto the one it targets
The DataFrame needs to include both the original transactions and the ones linked to them. The values of the linked
transactions will be set to 0 as they are added onto the target transaction
Parameters
----------
df : :class:`~pandas.DataFrame`
transactions to apply the linked notes to
Returns
-------
:class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the modified transactions
"""
link_notes = self.get_notes_by_type(note.Link)
source_in_df = link_notes.apply(lambda n: n.id in df['id'].values)
target_in_df = link_notes.apply(lambda n: n.target in df['id'].values)
# assert (target_in_df & ~source_in_df).any()
df = df.reset_index().set_index('id')
try:
# if both source and target exist in the DataFrame, add the source Amount to the target Amount
for n in link_notes[source_in_df & target_in_df]:
df.loc[n.target, 'Amount'] += df.loc[n.id, 'Amount']
# set the values of all source transactions to 0
for n in link_notes[source_in_df]:
df.loc[n.id, 'Amount'] = 0
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_split(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
split_notes = self.get_notes_by_type(split.SplitNote)
for_this_cat = split_notes.apply(lambda n: cat in n.parts)
trans_in_df = split_notes.apply(lambda n: n.id in df['id'].values)
df = df.reset_index().set_index('id')
try:
# If the split is for this category, set the Amount equal to the modified value
for n in split_notes[trans_in_df & for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
df.loc[n.id, 'Amount'] = n.parts[cat].modify(orig_val)
# If the split is not for this category, then subtract all the other modified values
for n in split_notes[trans_in_df & ~for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
for target_cat, split_obj in n.parts.items():
df.loc[n.id, 'Amount'] -= split_obj.modify(orig_val)
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_notes(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
df = self.apply_linked(df)
df = self.apply_split(df, cat)
return df
def re_parse(self):
self.notes = self.notes.map(lambda n: self.parse_note(n.id, n.note))
@property
def note_text(self) -> pd.Series:
res = self.notes.apply(lambda n: n.note)
res.name = 'note text'
return res
def contains(self, input: str, case: bool = False, text: bool = False) -> pd.Series:
res = self.notes[self.note_text.str.contains(input, case=case)]
if text:
res = res.apply(lambda n: n.note)
return res
@property
def tagged_categories(self) -> pd.Series:
# returns a Series of the unique categories in Category notes
return self.get_notes_by_type(Category).apply(lambda n: n.category).drop_duplicates()
def drop_orphans(self, ids):
orphans = self.notes[~self.notes.index.isin(ids)]
self.notes = self.notes.drop(orphans.index)
LOGGER.debug(f'Dropped {orphans.shape[0]} orphaned messages')
| try:
note_types.append(add_note_types)
except:
note_types.extend(add_note_types) | conditional_block |
manager.py | import logging
import re
from typing import List
import numpy as np
import pandas as pd
from . import note, split
from .note import Note, Link, Category
from .split import SplitNote
LOGGER = logging.getLogger(__name__)
NOTE_PARSE_REGEX = re.compile('id=\'([\d\w]+)\', note=\'([\d\w :,]+)\'')
class NoteManager:
"""Class to handle higher-level :class:`~budget.Note` manipulation
Attributes
----------
notes : :class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the :class:`~budget.Note` objects. `Index` is the :class:`str` ID of the
transaction that each :class:`~budget.Note` is linked to
"""
SQL_NOTE_TABLE = 'notes'
def __init__(self):
self.notes = pd.Series(name='note', dtype='object')
self.logger = logging.getLogger(__name__)
def load_notes(self, con) -> pd.Series:
"""Loads the :class:`~budget.Note` :class:`~pandas.Series` using a connection to a SQL database using
Parameters
----------
con : SQLAlchemy connectable, :class:`str`, or :mod:`sqlite3` connection
SQL connection
Returns
-------
:class:`~pandas.Series`
"""
# Read the whole table of notes
notes = pd.read_sql_query(sql=f'select * from {self.SQL_NOTE_TABLE}', con=con)
self.logger.debug(f'{notes.shape[0]} notes loaded from \'{self.SQL_NOTE_TABLE}\'')
# Set up the index, which will be the ID of the transaction the note is attached to
notes.set_index(notes.columns[0], inplace=True)
try:
# Select only the first column (should only be one)
notes = notes.iloc[:, 0].map(NoteManager.eval_note)
except NameError:
self.logger.debug('No notes loaded')
pass
# Assign to attribute
self.notes = notes
return notes
@staticmethod
def eval_note(input: str) -> note.Note:
"""Evaluates the :func:`repr` string, which reconstructs a :class:`~budget.Note` object
Parameters
----------
input : :class:`str`
:class:`str` produced by the :func:`repr` of that object
Returns
-------
:class:`~budget.Note`
"""
try:
return eval(input)
except (NameError, SyntaxError):
m = NOTE_PARSE_REGEX.search(input)
return NoteManager.parse_note(m.group(1), m.group(2))
@staticmethod
def parse_note(id: str, input: str, add_note_types=None) -> note.Note:
"""Looks for the `tag` of each type of :class:`~budget.Note` in the `input` string, then constructs a new
:class:`~budget.Note` object when it finds one
Parameters
----------
id : :class:`str`
id of the `Note` to create
input : :class:`str`
input :class:`str` to look in
add_note_types :
additional :class:`~budget.Note` types to parse
Returns
-------
:class:`~budget.Note`
"""
note_types = [SplitNote, Link, Category]
if add_note_types is not None:
try:
note_types.append(add_note_types)
except:
note_types.extend(add_note_types)
if isinstance(input, str):
for nt in note_types:
try:
if nt._tag in input:
res = nt(id, input)
break
except AttributeError:
raise AttributeError('Notes must have a _tag attribute')
try:
return res
except NameError as e:
# res won't be set if none of the tags match
return Note(id, input)
else:
if isinstance(input, Note):
raise TypeError(f'\'{input}\' is already a {type(input)}')
else: | def validate_notes(self, ids: pd.Series) -> bool:
"""Checks to make sure that all of the :class:`~budget.Note`s are contained in the ids
Parameters
----------
ids : set or like-like
:class:`list` or something that can be used in :meth:`~pandas.Series.isin`
Returns
-------
bool
`True` if all of the `Notes` in the :class:`~budget.notes.NoteManager` are in the given list of IDs
"""
return self.notes.map(lambda n: n.id).isin(ids).all()
def add_note(self, id: str, note: str, drop_dups: bool = True):
"""Parses a string into a :class:`~budget.Note` object and adds it to the :class:`~budget.notes.NoteManager` using
:class:`~pandas.Series.append` and optionally uses :class:`~pandas.Series.drop_duplicates`
Parameters
----------
id : str
id of the transaction to attach the :class:`~budget.Note` to
note : str
input string used to create the :class:`~budget.Note` object
drop_dups : bool
Whether to drop the duplicate notes
"""
n = self.parse_note(id, note)
self.notes = self.notes.append([pd.Series([n], index=[n.id])])
if drop_dups:
self.drop_duplicates()
def drop(self, id: str, note_text: str):
"""Drops a specific :class:`~budget.Note` using its ID and text
Parameters
----------
id : str
id of the note to drop
note_text : str
text of the note to drop
"""
print(f'Dropping note from {id}: {note_text}')
self.notes = self.notes[~self.notes.apply(
lambda n: (n.note == note_text) and (n.id == id)
)]
def drop_duplicates(self):
"""Removes duplicate `Notes` in the :class:`~budget.notes.NoteManager`
"""
self.notes = self.notes[~self.notes.map(repr).duplicated()]
def save_notes(self, con):
self.notes.map(repr).to_sql(name=self.SQL_NOTE_TABLE, con=con, if_exists='replace')
def get_notes_by_id(self, ids: List[str]) -> pd.Series:
"""Gets the notes that match the IDs in the given list
Parameters
----------
ids : List[str]
list of ids to get the notes
Returns
-------
:class:`~pandas.Series`
"""
return self.notes[self.notes.apply(lambda n: n.id in ids)]
def get_notes_by_type(self, typ: type) -> pd.Series:
"""Gets the notes that match the given type
Parameters
----------
typ : type
type of :class:`~budget.Note` to get
Returns
-------
:class:`~pandas.Series`
"""
# doesn't use isinstance() to prevent subtypes from being selected
return self.notes[self.notes.apply(lambda n: type(n) is typ)]
def manual_ids(self, cat: str) -> np.ndarray:
"""Gets ids of transactions that have been manually categorized as the given category
Parameters
----------
cat : str
category of transactions to get IDs for
Returns
-------
:class:`~numpy.ndarray`
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Category type and have a matching categorization
lambda n: isinstance(n, note.Category) and n.category == cat
)
].apply(lambda n: n.id).values
def split_ids(self, cat: str) -> pd.Series:
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a SplitNote type and have the category in one of its parts
lambda n: isinstance(n, split.SplitNote) and cat in n.parts
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id)
def linked_ids(self, df: pd.DataFrame) -> np.ndarray:
"""Gets ids of transactions that target those in the given DataFrame
Example
Transactions A and B are both linked to transaction C, which appears in the given DataFrame
Returns a Series of ids that include the ids of A and B
Returns
-------
:class:`~numpy.ndarray`: str
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Link type and have a target id in the given DataFrame
lambda n: isinstance(n, note.Link) and n.target in df['id'].values
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id).values
def apply_linked(self, df: pd.DataFrame) -> pd.DataFrame:
"""Applies Link notes in the given DataFrame, adding the value of each linked transaction onto the one it targets
The DataFrame needs to include both the original transactions and the ones linked to them. The values of the linked
transactions will be set to 0 as they are added onto the target transaction
Parameters
----------
df : :class:`~pandas.DataFrame`
transactions to apply the linked notes to
Returns
-------
:class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the modified transactions
"""
link_notes = self.get_notes_by_type(note.Link)
source_in_df = link_notes.apply(lambda n: n.id in df['id'].values)
target_in_df = link_notes.apply(lambda n: n.target in df['id'].values)
# assert (target_in_df & ~source_in_df).any()
df = df.reset_index().set_index('id')
try:
# if both source and target exist in the DataFrame, add the source Amount to the target Amount
for n in link_notes[source_in_df & target_in_df]:
df.loc[n.target, 'Amount'] += df.loc[n.id, 'Amount']
# set the values of all source transactions to 0
for n in link_notes[source_in_df]:
df.loc[n.id, 'Amount'] = 0
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_split(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
split_notes = self.get_notes_by_type(split.SplitNote)
for_this_cat = split_notes.apply(lambda n: cat in n.parts)
trans_in_df = split_notes.apply(lambda n: n.id in df['id'].values)
df = df.reset_index().set_index('id')
try:
# If the split is for this category, set the Amount equal to the modified value
for n in split_notes[trans_in_df & for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
df.loc[n.id, 'Amount'] = n.parts[cat].modify(orig_val)
# If the split is not for this category, then subtract all the other modified values
for n in split_notes[trans_in_df & ~for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
for target_cat, split_obj in n.parts.items():
df.loc[n.id, 'Amount'] -= split_obj.modify(orig_val)
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_notes(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
df = self.apply_linked(df)
df = self.apply_split(df, cat)
return df
def re_parse(self):
self.notes = self.notes.map(lambda n: self.parse_note(n.id, n.note))
@property
def note_text(self) -> pd.Series:
res = self.notes.apply(lambda n: n.note)
res.name = 'note text'
return res
def contains(self, input: str, case: bool = False, text: bool = False) -> pd.Series:
res = self.notes[self.note_text.str.contains(input, case=case)]
if text:
res = res.apply(lambda n: n.note)
return res
@property
def tagged_categories(self) -> pd.Series:
# returns a Series of the unique categories in Category notes
return self.get_notes_by_type(Category).apply(lambda n: n.category).drop_duplicates()
def drop_orphans(self, ids):
orphans = self.notes[~self.notes.index.isin(ids)]
self.notes = self.notes.drop(orphans.index)
LOGGER.debug(f'Dropped {orphans.shape[0]} orphaned messages') | raise TypeError(f'unknown type of note: {type(input)}')
| random_line_split |
manager.py | import logging
import re
from typing import List
import numpy as np
import pandas as pd
from . import note, split
from .note import Note, Link, Category
from .split import SplitNote
LOGGER = logging.getLogger(__name__)
NOTE_PARSE_REGEX = re.compile('id=\'([\d\w]+)\', note=\'([\d\w :,]+)\'')
class NoteManager:
| """Class to handle higher-level :class:`~budget.Note` manipulation
Attributes
----------
notes : :class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the :class:`~budget.Note` objects. `Index` is the :class:`str` ID of the
transaction that each :class:`~budget.Note` is linked to
"""
SQL_NOTE_TABLE = 'notes'
def __init__(self):
self.notes = pd.Series(name='note', dtype='object')
self.logger = logging.getLogger(__name__)
def load_notes(self, con) -> pd.Series:
"""Loads the :class:`~budget.Note` :class:`~pandas.Series` using a connection to a SQL database using
Parameters
----------
con : SQLAlchemy connectable, :class:`str`, or :mod:`sqlite3` connection
SQL connection
Returns
-------
:class:`~pandas.Series`
"""
# Read the whole table of notes
notes = pd.read_sql_query(sql=f'select * from {self.SQL_NOTE_TABLE}', con=con)
self.logger.debug(f'{notes.shape[0]} notes loaded from \'{self.SQL_NOTE_TABLE}\'')
# Set up the index, which will be the ID of the transaction the note is attached to
notes.set_index(notes.columns[0], inplace=True)
try:
# Select only the first column (should only be one)
notes = notes.iloc[:, 0].map(NoteManager.eval_note)
except NameError:
self.logger.debug('No notes loaded')
pass
# Assign to attribute
self.notes = notes
return notes
@staticmethod
def eval_note(input: str) -> note.Note:
"""Evaluates the :func:`repr` string, which reconstructs a :class:`~budget.Note` object
Parameters
----------
input : :class:`str`
:class:`str` produced by the :func:`repr` of that object
Returns
-------
:class:`~budget.Note`
"""
try:
return eval(input)
except (NameError, SyntaxError):
m = NOTE_PARSE_REGEX.search(input)
return NoteManager.parse_note(m.group(1), m.group(2))
@staticmethod
def parse_note(id: str, input: str, add_note_types=None) -> note.Note:
"""Looks for the `tag` of each type of :class:`~budget.Note` in the `input` string, then constructs a new
:class:`~budget.Note` object when it finds one
Parameters
----------
id : :class:`str`
id of the `Note` to create
input : :class:`str`
input :class:`str` to look in
add_note_types :
additional :class:`~budget.Note` types to parse
Returns
-------
:class:`~budget.Note`
"""
note_types = [SplitNote, Link, Category]
if add_note_types is not None:
try:
note_types.append(add_note_types)
except:
note_types.extend(add_note_types)
if isinstance(input, str):
for nt in note_types:
try:
if nt._tag in input:
res = nt(id, input)
break
except AttributeError:
raise AttributeError('Notes must have a _tag attribute')
try:
return res
except NameError as e:
# res won't be set if none of the tags match
return Note(id, input)
else:
if isinstance(input, Note):
raise TypeError(f'\'{input}\' is already a {type(input)}')
else:
raise TypeError(f'unknown type of note: {type(input)}')
def validate_notes(self, ids: pd.Series) -> bool:
"""Checks to make sure that all of the :class:`~budget.Note`s are contained in the ids
Parameters
----------
ids : set or like-like
:class:`list` or something that can be used in :meth:`~pandas.Series.isin`
Returns
-------
bool
`True` if all of the `Notes` in the :class:`~budget.notes.NoteManager` are in the given list of IDs
"""
return self.notes.map(lambda n: n.id).isin(ids).all()
def add_note(self, id: str, note: str, drop_dups: bool = True):
"""Parses a string into a :class:`~budget.Note` object and adds it to the :class:`~budget.notes.NoteManager` using
:class:`~pandas.Series.append` and optionally uses :class:`~pandas.Series.drop_duplicates`
Parameters
----------
id : str
id of the transaction to attach the :class:`~budget.Note` to
note : str
input string used to create the :class:`~budget.Note` object
drop_dups : bool
Whether to drop the duplicate notes
"""
n = self.parse_note(id, note)
self.notes = self.notes.append([pd.Series([n], index=[n.id])])
if drop_dups:
self.drop_duplicates()
def drop(self, id: str, note_text: str):
"""Drops a specific :class:`~budget.Note` using its ID and text
Parameters
----------
id : str
id of the note to drop
note_text : str
text of the note to drop
"""
print(f'Dropping note from {id}: {note_text}')
self.notes = self.notes[~self.notes.apply(
lambda n: (n.note == note_text) and (n.id == id)
)]
def drop_duplicates(self):
"""Removes duplicate `Notes` in the :class:`~budget.notes.NoteManager`
"""
self.notes = self.notes[~self.notes.map(repr).duplicated()]
def save_notes(self, con):
self.notes.map(repr).to_sql(name=self.SQL_NOTE_TABLE, con=con, if_exists='replace')
def get_notes_by_id(self, ids: List[str]) -> pd.Series:
"""Gets the notes that match the IDs in the given list
Parameters
----------
ids : List[str]
list of ids to get the notes
Returns
-------
:class:`~pandas.Series`
"""
return self.notes[self.notes.apply(lambda n: n.id in ids)]
def get_notes_by_type(self, typ: type) -> pd.Series:
"""Gets the notes that match the given type
Parameters
----------
typ : type
type of :class:`~budget.Note` to get
Returns
-------
:class:`~pandas.Series`
"""
# doesn't use isinstance() to prevent subtypes from being selected
return self.notes[self.notes.apply(lambda n: type(n) is typ)]
def manual_ids(self, cat: str) -> np.ndarray:
"""Gets ids of transactions that have been manually categorized as the given category
Parameters
----------
cat : str
category of transactions to get IDs for
Returns
-------
:class:`~numpy.ndarray`
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Category type and have a matching categorization
lambda n: isinstance(n, note.Category) and n.category == cat
)
].apply(lambda n: n.id).values
def split_ids(self, cat: str) -> pd.Series:
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a SplitNote type and have the category in one of its parts
lambda n: isinstance(n, split.SplitNote) and cat in n.parts
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id)
def linked_ids(self, df: pd.DataFrame) -> np.ndarray:
"""Gets ids of transactions that target those in the given DataFrame
Example
Transactions A and B are both linked to transaction C, which appears in the given DataFrame
Returns a Series of ids that include the ids of A and B
Returns
-------
:class:`~numpy.ndarray`: str
"""
return self.notes[
# select from notes
self.notes.apply(
# the ones which are both a Link type and have a target id in the given DataFrame
lambda n: isinstance(n, note.Link) and n.target in df['id'].values
)
# convert to the value of the id attribute of each note
].apply(lambda n: n.id).values
def apply_linked(self, df: pd.DataFrame) -> pd.DataFrame:
"""Applies Link notes in the given DataFrame, adding the value of each linked transaction onto the one it targets
The DataFrame needs to include both the original transactions and the ones linked to them. The values of the linked
transactions will be set to 0 as they are added onto the target transaction
Parameters
----------
df : :class:`~pandas.DataFrame`
transactions to apply the linked notes to
Returns
-------
:class:`~pandas.DataFrame`
:class:`~pandas.DataFrame` of the modified transactions
"""
link_notes = self.get_notes_by_type(note.Link)
source_in_df = link_notes.apply(lambda n: n.id in df['id'].values)
target_in_df = link_notes.apply(lambda n: n.target in df['id'].values)
# assert (target_in_df & ~source_in_df).any()
df = df.reset_index().set_index('id')
try:
# if both source and target exist in the DataFrame, add the source Amount to the target Amount
for n in link_notes[source_in_df & target_in_df]:
df.loc[n.target, 'Amount'] += df.loc[n.id, 'Amount']
# set the values of all source transactions to 0
for n in link_notes[source_in_df]:
df.loc[n.id, 'Amount'] = 0
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_split(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
split_notes = self.get_notes_by_type(split.SplitNote)
for_this_cat = split_notes.apply(lambda n: cat in n.parts)
trans_in_df = split_notes.apply(lambda n: n.id in df['id'].values)
df = df.reset_index().set_index('id')
try:
# If the split is for this category, set the Amount equal to the modified value
for n in split_notes[trans_in_df & for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
df.loc[n.id, 'Amount'] = n.parts[cat].modify(orig_val)
# If the split is not for this category, then subtract all the other modified values
for n in split_notes[trans_in_df & ~for_this_cat]:
orig_val = df.loc[n.id, 'Amount']
for target_cat, split_obj in n.parts.items():
df.loc[n.id, 'Amount'] -= split_obj.modify(orig_val)
except Exception as e:
raise
df = df.reset_index().set_index(df.columns[0])
return df
def apply_notes(self, df: pd.DataFrame, cat: str) -> pd.DataFrame:
df = self.apply_linked(df)
df = self.apply_split(df, cat)
return df
def re_parse(self):
self.notes = self.notes.map(lambda n: self.parse_note(n.id, n.note))
@property
def note_text(self) -> pd.Series:
res = self.notes.apply(lambda n: n.note)
res.name = 'note text'
return res
def contains(self, input: str, case: bool = False, text: bool = False) -> pd.Series:
res = self.notes[self.note_text.str.contains(input, case=case)]
if text:
res = res.apply(lambda n: n.note)
return res
@property
def tagged_categories(self) -> pd.Series:
# returns a Series of the unique categories in Category notes
return self.get_notes_by_type(Category).apply(lambda n: n.category).drop_duplicates()
def drop_orphans(self, ids):
orphans = self.notes[~self.notes.index.isin(ids)]
self.notes = self.notes.drop(orphans.index)
LOGGER.debug(f'Dropped {orphans.shape[0]} orphaned messages') | identifier_body | |
payment.component.ts | import { Component, OnInit, Input, Output, ViewEncapsulation, EventEmitter, NgZone } from '@angular/core';
import { Http, Response, RequestOptions, Headers } from '@angular/http';
import { Router, ActivatedRoute } from '@angular/router';
import { AppService } from '../app-service.service';
import { AlertService } from '../alert.service';
import { LoaderService } from '../loader.service';
import { LoggerService } from '../logger.service';
import { environment } from '../../environments/environment';
import { NotificationsService } from 'angular2-notifications';
import { Title } from '@angular/platform-browser';
declare var Razorpay: any;
declare var $: any;
@Component({
selector: 'app-payment',
templateUrl: './payment.component.html',
styleUrls: ['./payment.component.scss'],
encapsulation: ViewEncapsulation.None
})
export class PaymentComponent implements OnInit {
// @Input() tab: string;
// @Input() user;
@Output() renewalDateUpdated: EventEmitter<any> = new EventEmitter();
@Output() planTypeUpdated: EventEmitter<any> = new EventEmitter();
httpOptions: RequestOptions;
session: any;
billingAmount: any;
invoices: any;
environment: any;
subscription: any;
isPlanActive: Boolean;
payments: any;
selectedDate: any;
selectedCurrency: string;
settingsTab: string;
paymentMethod: string;
step1: any;
activeAccordion: string;
durations:Array<any>;
planDuration:string;
currentInvoice = null;
constructor(private http: Http, private router: Router, private route: ActivatedRoute,
private _appService: AppService, private loadingService: LoaderService,
private alertService: AlertService, private notify: NotificationsService,
private loggerService: LoggerService, private ngZone: NgZone, private titleService: Title) { }
ngOnInit() {
this.titleService.setTitle('Payments and Subscription | OCLAVI');
this.loadingService.show('Getting your payment details...');
this.environment = environment;
this.httpOptions = new RequestOptions({ withCredentials: true });
this.session = JSON.parse(localStorage.getItem('user'));
this.selectedCurrency = 'USD';
this.paymentMethod = 'razorpay';
this.isPlanActive = true;
if (this.session.USER_TYPE == environment.USER_TYPE.TEAM.NAME) {
this.router.navigate(['profile']);
}
if (this.session.USER_TYPE == environment.USER_TYPE.ADMIN.NAME || this.session.USER_TYPE == environment.USER_TYPE.SELF.NAME) {
if (this.session.PLAN_END_DATE < (new Date().getTime())) {
this.alertService.show('error', 'Your subscription has expired. Please upgrade');
this.isPlanActive = false;
}
}
this.subscription = {
startDate: '',
nextDueDate: '',
subscriptionPlan: '',
lastPaymentStatus: 'Successfull'
}
this.durations = [{
durationLabel: '1 Month',
durationValue: 1,
}, {
durationLabel: '3 Months',
durationValue: 3,
}, {
durationLabel: '6 Months',
durationValue: 6,
}, {
durationLabel: '1 Year',
durationValue: 12,
}]
this.step1 = {
step: 1,
title: 'upgrage_plan_payment',
content: `Payments Section`,
class: '',
status: 'inactive',
methods: [{
name: 'razorpay',
description: 'For Indian Credit / Debit Cards'
}, {
name: 'paypal',
description: 'For international cards'
}],
location: '',
selectedMethod: 'razorpay'
};
this.http.get(environment.oclaviServer + 'subscriptionDetails', this.httpOptions).subscribe(res => {
this.payments = res.json();
this.loadingService.hide();
}, err => {
this.errorHandler(err, 'Error feteching subscription details.');
});
}
durationChanged(duration) {
this.planDuration = duration.durationValue;
}
cancelSubscription() {
this.alertService.show('warn', 'Your active subscription would be cancelled.<br /><br />Are you sure you want to cancel your subscription?');
this.alertService.positiveCallback = (() => {
this.alertService.hide();
this.loadingService.show('Cancelling your subscription details...');
this.session = JSON.parse(localStorage.getItem('user'));
this.http.post(environment.oclaviServer + 'cancelSubscription', { purpose: 'CANCEL_SUBSCRIPTION' }, this.httpOptions).subscribe(res => {
this.notify.success('Your subscription has been successfully cancelled.');
if (this.session.USER_TYPE == environment.USER_TYPE.ADMIN.NAME)
this.session.USER_TYPE = environment.USER_TYPE.STUDENT_ADMIN.NAME;
else if (this.session.USER_TYPE == environment.USER_TYPE.SELF.NAME)
this.session.USER_TYPE = environment.USER_TYPE.STUDENT_SELF.NAME;
this.session.STATUS = 'PENDING_FOR_CANCELLATION';
localStorage.setItem('user', JSON.stringify(this.session));
this.planTypeUpdated.emit(this.session.USER_TYPE);
this.loadingService.hide();
}, err => {
this.errorHandler(err, 'Error cancelling your subscription.');
});
});
}
openModal(id) {
for (let i = 1; i <= 1; i++) {
if (i === 1) {
this['step' + i].status = 'active';
this['step' + i].class = 'show';
this.activeAccordion = this['step' + i].title.replace(/_/g, ' ');
} else {
this['step' + i].status = 'inactive';
this['step' + i].class = '';
}
}
$('#' + id).modal(open);
}
payNow(vm, paymentMethod) {
$('#upgradePaymentModal').modal('hide');
if (paymentMethod == 'paypal')
vm.paypalCheckout(vm);
else if (paymentMethod == 'razorpay')
vm.razorpayCheckout(vm);
}
changeSettingsTab(tab) {
this.settingsTab = tab;
}
showPaymentModal() {
if(!this.planDuration || this.planDuration == '') {
this.notify.error('Please select plan duration.');
return;
}
this.loadingService.show('Loading payment information. Please wait...');
this.http.post(environment.oclaviServer + 'getBillingAmount', { type: 'upgrade', planDuration: this.planDuration }, this.httpOptions).subscribe(res => {
this.loadingService.hide();
this.openModal('upgradePaymentModal');
this.billingAmount = res.json();
this.billingAmount.planEndDate += ((new Date()).getTimezoneOffset() * 60 * 1000);
}, err => {
this.errorHandler(err, 'Error upgrading your subscription.');
});
}
razorpayCheckout(vm) {
vm.http.post(environment.oclaviServer + 'razorpay/getModalData', { type: 'upgrade', planDuration: this.planDuration }, vm.httpOptions).subscribe(res => {
vm.loadingService.hide();
let data = res.json();
if(data.AMOUNT < 100)
data.AMOUNT = 100;
var options = {
key: data.KEY,
name: data.MERCHANT_NAME,
amount: data.AMOUNT,
description: data.DESCRIPTION,
image: '../assets/images/Oclavi_Logo@2x.png',
prefill: {
name: data.EMAIL_ID,
email: data.EMAIL_ID | },
handler: (response) => {
vm.ngZone.run(() => {
data.PAYMENT_ID = response.razorpay_payment_id;
data.PAYMENT_SOURCE = 'RAZOR_PAY';
data.PLAN_START_DATE = vm.billingAmount.planStartDate;
data.PLAN_END_DATE = vm.billingAmount.planEndDate;
vm.http.post(environment.oclaviServer + 'upgradePlan', data, vm.httpOptions).subscribe(res => {
vm.router.navigate(['/payment-status/razorpay/success/upgrade'], {
queryParams: {
razorpay_payment_id: response.razorpay_payment_id
}
});
});
});
}
}
var razorpay = new Razorpay(options);
razorpay.open();
}, err => {
vm.errorHandler(err, 'Error upgrading your subscription.');
});
}
paymentMethodChanged($event) {
this.paymentMethod = $event.target.value;
if (this.paymentMethod == 'paypal')
this.selectedCurrency = 'USD';
else if (this.paymentMethod == 'razorpay')
this.selectedCurrency = 'INR';
}
paypalCheckout(vm) {
vm.loadingService.show('Creating your paypal transaction...');
vm.http.post(environment.oclaviServer + 'upgradePlan', { PAYMENT_SOURCE: 'PAYPAL', planDuration: this.planDuration }, vm.httpOptions).subscribe(res => {
vm.loadingService.show('Redirecting to payment page...');
var body = res.json();
window.location.href = body.approval_url;
}, err => {
vm.errorHandler(err, 'Error while buying more seats...');
});
}
selectInvoiceDate(newSelectedDate) {
this.selectedDate = newSelectedDate;
}
errorHandler(response, message) {
this.loadingService.hide();
if (response.status == 401) {
this.router.navigate(['login']);
localStorage.removeItem('user');
}
else {
var text = JSON.parse(response._body).message;
if (!text || text == '')
this.notify.error(null, message);
else
this.notify.error(null, text);
}
}
setCurrentInvoice (index) {
this.currentInvoice = this.payments[index];
}
} | },
theme: {
color: '#3D78E0' | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.