file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
response.rs | // rust imports
use std::io::Read;
use std::ffi::OsStr;
use std::path::{PathBuf, Path};
use std::fs::{self, File};
use std::collections::HashMap;
// 3rd-party imports
use rusqlite::Connection;
use rusqlite::types::ToSql;
use hyper::http::h1::HttpReader;
use hyper::buffer::BufReader;
use hyper::net::NetworkStream;
use hyper::header::{Headers, ContentType};
use hyper::mime::{Mime, TopLevel, SubLevel};
use multipart::server::{Multipart, Entries, SaveResult};
use url::percent_encoding::percent_decode;
use mime_types;
use csv;
use chrono::naive::date::NaiveDate;
use chrono::Datelike;
use serde::ser::Serialize;
use serde_json;
// local imports
use route::{Route, HumanError, APIError};
use database::Database;
// statics
lazy_static! {
static ref MIME_TYPES: mime_types::Types = mime_types::Types::new().unwrap();
}
// enums
pub enum Component {
Home,
NotFound,
}
#[derive(Serialize, Debug)]
pub struct JSONResponse {
pub error: Option<String>,
pub payload: Option<serde_json::Value>,
}
impl JSONResponse {
fn error(reason: Option<String>) -> Self {
JSONResponse {
error: reason,
payload: None,
}
}
fn payload<T: Serialize>(value: T) -> Self {
use serde_json::to_value;
JSONResponse {
error: None,
payload: Some(to_value(value).unwrap()),
}
}
}
pub enum AppResponse {
Component(Component),
Asset(ContentType, Vec<u8> /* content */),
MethodNotAllowed,
NotFound,
BadRequest,
InternalServerError,
JSONResponse(JSONResponse),
}
impl AppResponse {
pub fn process(db_conn: Database,
route: Route,
headers: Headers,
http_reader: HttpReader<&mut BufReader<&mut NetworkStream>>)
-> Self {
match route {
Route::Home => AppResponse::Component(Component::Home),
Route::FileUpload => handle_file_upload(db_conn, headers, http_reader),
Route::Asset(path_to_asset) => handle_asset(path_to_asset),
Route::HumanError(human_error) => {
match human_error {
HumanError::NotFound => AppResponse::Component(Component::NotFound),
}
}
Route::APIError(api_error) => {
match api_error {
APIError::MethodNotAllowed => AppResponse::MethodNotAllowed,
APIError::NotFound => AppResponse::NotFound,
}
}
}
}
}
fn handle_asset(path_to_asset: String) -> AppResponse {
#[inline]
fn decode_percents(string: &OsStr) -> String {
let string = format!("{}", string.to_string_lossy());
format!("{}", percent_decode(string.as_bytes()).decode_utf8_lossy())
}
// TODO: inlined resources here
// URL decode
let decoded_req_path = Path::new(&path_to_asset).iter().map(decode_percents);
let starts_with = match Path::new("./assets/").to_path_buf().canonicalize() {
Err(_) => {
return AppResponse::Component(Component::NotFound);
}
Ok(x) => x,
};
let mut req_path = starts_with.clone();
req_path.extend(decoded_req_path);
let req_path: PathBuf = req_path;
// TODO: this is a security bottle-neck
let req_path = match req_path.canonicalize() {
Err(_) => {
return AppResponse::Component(Component::NotFound);
}
Ok(req_path) => {
if !req_path.starts_with(starts_with.as_path()) {
return AppResponse::Component(Component::NotFound);
}
req_path
}
};
match fs::metadata(&req_path) {
Ok(metadata) => {
if !metadata.is_file() {
return AppResponse::Component(Component::NotFound);
}
// TODO: better way?
let path_str = format!("{}", &req_path.to_string_lossy());
// Set the content type based on the file extension
let mime_str = MIME_TYPES.mime_for_path(req_path.as_path());
let mut content_type = ContentType(Mime(TopLevel::Application, SubLevel::Json, vec![]));
let _ = mime_str.parse().map(|mime: Mime| {
content_type = ContentType(mime);
});
let mut file = File::open(req_path)
.ok()
.expect(&format!("No such file: {:?}", path_str));
let mut content = Vec::new();
file.read_to_end(&mut content).unwrap();
return AppResponse::Asset(content_type, content);
}
Err(_err) => {
return AppResponse::Component(Component::NotFound);
}
}
}
fn handle_file_upload(db_conn: Database,
headers: Headers,
http_reader: HttpReader<&mut BufReader<&mut NetworkStream>>)
-> AppResponse {
match process_multipart(headers, http_reader) {
None => AppResponse::BadRequest,
Some(mut multipart) => {
match multipart.save().temp() {
SaveResult::Full(entries) => process_entries(db_conn, entries),
SaveResult::Partial(_entries, error) => {
println!("Errors saving multipart:\n{:?}", error);
// TODO: fix
// process_entries(entries.into())
AppResponse::BadRequest
}
SaveResult::Error(error) => {
println!("Errors saving multipart:\n{:?}", error);
// Err(error)
AppResponse::BadRequest
}
}
}
}
}
fn process_entries(db_conn: Database, entries: Entries) -> AppResponse {
let files = match entries.files.get("uploads[]") {
Some(files) => {
if files.len() <= 0 {
return AppResponse::BadRequest;
}
files
}
None => {
return AppResponse::BadRequest;
}
};
let mut expense_tracker = ExpenseTracker::new();
let mut records = vec![];
for file in files {
let mut reader = match csv::Reader::from_file(file.path.clone()) {
Ok(reader) => reader.has_headers(true),
Err(error) => {
// TODO: error
println!("error: {}", error);
return AppResponse::InternalServerError;
}
};
for record in reader.decode() {
let (date,
category,
employee_name,
employee_address,
expense_description,
pre_tax_amount,
tax_name,
tax_amount): (String,
String,
String,
String,
String,
String,
String,
String) = match record {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
};
let pre_tax_amount: f64 = {
let pre_tax_amount = pre_tax_amount.trim().replace(",", "");
match pre_tax_amount.parse::<f64>() {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
}
};
let tax_amount: f64 = {
let tax_amount = tax_amount.trim().replace(",", "");
match tax_amount.parse::<f64>() {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
}
};
let new_date = match NaiveDate::parse_from_str(&date, "%_m/%e/%Y") {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
};
let record = Record(date,
category,
employee_name,
employee_address,
expense_description,
pre_tax_amount,
tax_name,
tax_amount);
records.push(record);
expense_tracker.add(new_date, pre_tax_amount + tax_amount);
}
}
add_to_database(db_conn, records);
return AppResponse::JSONResponse(JSONResponse::payload(expense_tracker));
}
fn add_to_database(db_connnection: Database, records: Vec<Record>) {
for record in records {
let Record(date,
category,
employee_name,
employee_address,
expense_description,
pre_tax_amount,
tax_name,
tax_amount) = record;
let query = format!("
INSERT INTO ExpenseHistory(date, category, \
employee_name, employee_address, expense_description, \
pre_tax_amount, tax_name, tax_amount)
VALUES (:date, \
:category, :employee_name, :employee_address, :expense_description, \
:pre_tax_amount, :tax_name, :tax_amount);
");
let params: &[(&str, &ToSql)] = &[(":date", &date),
(":category", &category),
(":employee_name", &employee_name),
(":employee_address", &employee_address),
(":expense_description", &expense_description),
(":pre_tax_amount", &pre_tax_amount),
(":tax_name", &tax_name),
(":tax_amount", &tax_amount)];
db_write_lock!(db_conn; db_connnection.clone());
let db_conn: &Connection = db_conn;
match db_conn.execute_named(&query, params) {
Err(sqlite_error) => {
panic!("{:?}", sqlite_error);
}
_ => {
/* query sucessfully executed */ | }
}
fn process_multipart<R: Read>(headers: Headers, http_reader: R) -> Option<Multipart<R>> {
let boundary = headers.get::<ContentType>().and_then(|ct| {
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
let ContentType(ref mime) = *ct;
let params = match *mime {
Mime(TopLevel::Multipart, SubLevel::FormData, ref params) => params,
_ => return None,
};
params.iter()
.find(|&&(ref name, _)| match *name {
Attr::Boundary => true,
_ => false,
})
.and_then(|&(_, ref val)| match *val {
Value::Ext(ref val) => Some(&**val),
_ => None,
})
});
match boundary.map(String::from) {
Some(boundary) => Some(Multipart::with_body(http_reader, boundary)),
None => None,
}
}
#[derive(Eq, PartialEq, Hash, Serialize)]
enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
#[derive(Serialize)]
struct ExpenseTracker(HashMap<Month, f64>);
impl ExpenseTracker {
fn new() -> Self {
ExpenseTracker(HashMap::new())
}
fn add(&mut self, date: NaiveDate, expenses: f64) {
let month = match date.month() {
1 => Month::January,
2 => Month::February,
3 => Month::March,
4 => Month::April,
5 => Month::May,
6 => Month::June,
7 => Month::July,
8 => Month::August,
9 => Month::September,
10 => Month::October,
11 => Month::November,
12 => Month::December,
_ => unreachable!(),
};
if self.0.contains_key(&month) {
let entry = self.0.get_mut(&month).unwrap();
*entry = *entry + expenses;
return;
}
self.0.insert(month, expenses);
}
}
struct Record(String, String, String, String, String, f64, String, f64); | }
}
| random_line_split |
common.js |
module.exports = {
// yuming:"http://47.89.18.53:8082",
// onekey:'wancll2',
// yuming:"http://192.168.0.136:8081",
// onekey:"wancll2017072301",
// yuming:"http://120.77.177.87:8082",
// onekey:"wancll2017071902",
// yuming: "http://wancll.55555.io",
// onekey: "wancll2017080701",
yuming: "http://www.quanminzhishu.com",
onekey: "wancll2017082601",
loginurl: "/game/g_user.ashx?action=login&usercode={usercode}&pwd={pwd}",
regurl: '/game/g_user.ashx?action=reg&username={username}&tjuser={tjuser}&usercode={usercode}&pwd1={pwd1}&pwd2={pwd2}',
regcode: "/game/g_sendsms.ashx?action=regcheck&tel={tel}&yzm={yzm}",
backcode: "/game/g_sendsms.ashx?action=backcheck&tel={tel}&yzm={yzm}",
ruser: "/game/g_user.ashx?action=ruser&uid={uid}",//获取会员余额信息
geturl: '/game/g_user.ashx?action=getuser&uid={uid}', //获得会员信息
hzhuan: "/game/g_user.ashx?action=hzhuan&usercode={usercode}&pwd={pwd}&uid={uid}&accounttype={accounttype}&price={price}",//转账 1树呗 4地呗
donwuser: "/game/g_user.ashx?action=donwuser&type={type}&page={page}&uid={uid}",//)获取下面指定层数人员(如 一 级好友 二级好友)
changpwd: "/game/g_user.ashx?action=changpwd&pwd0={pwd0}&pwd1={pwd1}&pwd2={pwd2}&pwd3={pwd3}&pwd4={pwd4}&pwd5={pwd5}&usercode={usercode}",//修改密码
backpwd: "/game/g_user.ashx?action=backpwd&pwd={pwd}&usercode={usercode}",//找回密码
changname: "/game/g_user.ashx?action=changname&name={name}&uid={uid}",//修改姓名
jhuo: "/game/g_user.ashx?action=jhuo&uid={uid}",//激活
photo: "/game/g_user.ashx?action=photo&uid={uid}&extion={extion}&img={img}",//设置头像
//------------明细接口 -------------------------
account: "/game/g_accoun.ashx?action=account&page={page}&uid={uid}&accountid={accountid}",//获取指定会员账户明细
account_zx: "/game/g_accoun.ashx?action=account_zx&page={page}&uid={uid}",////获取指定会员转账明细
account_dh: "/game/g_accoun.ashx?action=account_dh&page={page}&uid={uid}",//获取指定会员兑换明细
//-----------游戏主界面接口--------------
treelst: "/game/g_game.ashx?action=gettreelst&grade={grade}&uid={uid}", //获取指定场景 所有树信息(电话,回调函数(返回 json 值))
caozuo: "/game/g_game.ashx?action=caozuo&grade={grade}&uid={uid}&no={no}&actype={actype}",//游戏操作(1 种树 2浇水,3施肥 4采摘 11一键施肥 22一键采摘 可以不传No参数) (grade 等级,no 编号,actype 操作类型)-
flnum: "/game/g_game.ashx?action=getnum&grade={grade}&uid={uid}", //获取指定等级的肥料数量
shouhuo: "/game/g_game.ashx?action=shouhuo&uid={uid}&grade={grade}", //收获租借收益
zhujie: "/game/g_game.ashx?action=zhujie&uid={uid}&grade={grade}", //获取租借信息 操作编号(租橡胶树 :20 租风力发电 :30)
buygamegood: "/game/g_shop.ashx?action=buygamegood&grade={grade}&type={type}&uid={uid}",//游戏商城购买 (肥料,树苗 ,地)grade等级,type大类1肥料2树苗3土地
getwareh: "/game/g_shop.ashx?action=getwareh&uid={uid}", //库存
shopgood: "/game/g_shop.ashx?action=shopgood", //兑换商城普通商品列表
hieghgood: "/game/g_shop.ashx?action=hieghgood", //兑换商城高级商品信息
byshopgood: "/game/g_shop.ashx?action=byshopgood&name={name}&usertel={usertel}&pwd={pwd}&province={province}&Area={Area}&City={City}&Addr={Addr}&num={num}&uid={uid}&goodid={goodid}",//购买普通商品
byhieghgood: "/game/g_shop.ashx?action=byhieghgood&bankname={bankname}&bankno={bankno}&bankname={bankname}&pwd={pwd}&num={num}&goodid={goodid}&uid={uid}", //购买高级商品
zhu: "/game/g_shop.ashx?action=zhu&uid={uid}&grade={grade}&pwd={pwd}", //租橡胶林 grade
//-----------------公告信息----------------------------
newlist: "/game/g_new.ashx?action=newlist",//公告信息
newinfo: "/game/g_new.ashx?action=newinfo&id={id}",//获取新闻公告详细信息
//签到
goSign: '/game/g_user.ashx?action=sign&uid={uid}',
chongzhi: '/game/qmzsgame.ashx?action=xjconvert&uid={uid}&money={money}&password={password}&type={type}&signcode={signcode}',
//棋牌
getyb:'/game/qmzsgame.ashx?action=getyb&uid={uid}',//获取元宝数量
czqp:'/game/qmzsgame.ashx?action=czqp&uid={uid}&money={money}&pwd={pwd}',//充值元宝
//确认转账
tranzr:'/game/qp_game.ashx?action=tranzr&uid={uid}&page={page}',//待确认转入
tranzc:'/game/qp_game.ashx?action=tranzc&uid={uid}&page={page}',//待确认转出
confirm:'/game/qp_game.ashx?action=confirm&uid={uid}&Id={Id}',//确认
cancel:'/game/qp_game.ashx?action=cancel&uid={uid}&Id={Id}',//取消
//--------------------------二维码--------------------------------
qrcode: '/game/qrcode.aspx?id={id}',
//------------------------获取指定长度随机数-获取-------------------//
getrandom: function (length) { var Num = ""; for (var i = 0; i < length; i++) { Num += Math.floor(Math.random() * 10); } return Num; },
//--------------------------参数替换(url,参数数组)----------------------------//
urlreplace: function (url, lst) {
var newurl = url; for (var name in lst) { newurl = newurl.replace("{" + name + "}", lst[name]); }
return newurl + '&key=' + this.onekey;
},
//-------------------------异步回调-------------------
async: function (url, fun, panm) {
var url1 = this.urlreplace(url, panm)
console.log('执行url:' + url1)
//网络加载中
var scene_ = cc.director.getScene();
var loadingNode = new cc.Node();
loadingNode.name = 'loadingNode';
loadingNode.addComponent(cc.Label);
loadingNode.getComponent(cc.Label).string = '网络加载中,请稍后...';
loadingNode.getComponent(cc.Label).fontSize = 25;
loadingNode.setPosition(600, 400);
loadingNode.color = new cc.Color(255, 0, 0);
scene_.addChild(loadingNode);
var xhr = new XMLHttpRequest();
xhr.timeout = 5000;
xhr.onreadystatechange = function () {
if (xhr.readyState == 4) {
if (xhr.status >= 200 && xhr.status < 400) {
loadingNode.destroy();
var response = xhr.responseText;
// console.log(response);
response = JSON.parse(response);
if (response.msg2 == '非法操作') {
window.url = response.msg3;
cc.director.loadScene('update');
} else {
fun(response);
}
} else {
loadingNode.destroy();
var response = { 'msg1': 'error', 'msg2': '网络连接错误', 'msg3': '' };
fun(response)
}
}
loadingNode.destroy();
};
xhr.ontimeout = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '连接超时,网络繁忙,请稍后再试', 'msg3': '' };
fun(response)
}
xhr.onerror = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '网路错误,请检查网络设置', 'msg3': '' };
fun(response)
}
xhr.open("GET", url1, true);
xhr.send();
},
//-------------------------调取数据--------------------------------------------
geturldata: function (url, lst, fun) {
var url2 = this.yuming + url;
var user = cc.sys.localStorage.getItem('current_user');
lst["uid"] = user.ID;
com.async(url2, fun, lst);
},
//--------------------------------------------会员信息 ---------------------------------------
getUser: function () {
if (!cc.sys.localStorage.getItem('current_user')) {
cc.director.loadScene('index');
} else {
return JSON.parse(cc.sys.localStorage.getItem("current_user"));
}
},
updateUser: function (fun) {
var userinfo = this.getUser();
var uid = userinfo.ID;
var url = this.yuming + this.geturl;
this.async(url, function (resp) {
if (resp.msg1 == 'success') {
cc.sys.localStorage.setItem('current_user', JSON.stringify(resp.msg3));
if (fun) {
fun(resp);
}
}
}, { 'uid': uid });
},
logout: function () {
cc.sys.localStorage.setItem('current_user', null);
},
//-----------------------------------------------原有接口函数----------------------------------------------------------------------------------//
//-------------- | dregcode: function (tel, fun) {
if (tel !== null && tel !== "") {
var code2 = this.getrandom(6);
cc.sys.localStorage.setItem('regcode', code2);
var url = this.yuming + this.regcode;
this.async(url, fun, { "tel": tel, "yzm": code2 });
}
},
//-------------------------找回密码(电话,回调函数(返回 json 值))--------------------------------------------
sendBackcode: function (tel, fun) {
if (tel !== null && tel !== "") {
var code2 = this.getrandom(6);
cc.sys.localStorage.setItem('regcode', code2);
var url = this.yuming + this.backcode;
this.async(url, fun, { "tel": tel, "yzm": code2 });
}
},
//-------------------------登陆--------------------------------------------
login: function (lst, fun) {
if (lst["usercode"] !== null && lst["usercode"] !== "" && lst["pwd"] !== null && lst["pwd"] !== "") {
var url = this.yuming + this.loginurl;
this.async(url, fun, lst);
}
},
//-------------------------注册--------------------------------------------
reg: function (lst, fun) {
if (lst.tjuser && lst.usercode && lst.pwd1) {
lst.username = lst.usercode;
lst.pwd2 = lst.pwd1;
var url = this.yuming + this.regurl;
this.async(url, fun, lst);
}
},
//----------------------------激活--------------------------
activeDebei: function (uid, fun) {
if (uid != '') {
var url = this.yuming + this.jhuo;
var list = [];
list.uid = uid;
this.async(url, fun, list);
}
},
//---------------------------------商城购买-------------------------------
doShop: function (uid, type, grade, fun) {
var list = {};
list.uid = uid;
list.type = type;
list.grade = grade;
var url = this.yuming + this.buygamegood;
this.async(url, fun, list);
},
treeAction: function (list, fun) {
var url = this.yuming + this.caozuo;
this.async(url, fun, list);
}
};
| -----------注册短信(电话,回调函数(返回 json 值))--------------------------------------------
sen | conditional_block |
common.js | module.exports = {
// yuming:"http://47.89.18.53:8082",
// onekey:'wancll2',
// yuming:"http://192.168.0.136:8081",
// onekey:"wancll2017072301",
// yuming:"http://120.77.177.87:8082",
// onekey:"wancll2017071902",
// yuming: "http://wancll.55555.io",
// onekey: "wancll2017080701",
yuming: "http://www.quanminzhishu.com",
onekey: "wancll2017082601",
loginurl: "/game/g_user.ashx?action=login&usercode={usercode}&pwd={pwd}",
regurl: '/game/g_user.ashx?action=reg&username={username}&tjuser={tjuser}&usercode={usercode}&pwd1={pwd1}&pwd2={pwd2}',
regcode: "/game/g_sendsms.ashx?action=regcheck&tel={tel}&yzm={yzm}",
backcode: "/game/g_sendsms.ashx?action=backcheck&tel={tel}&yzm={yzm}",
ruser: "/game/g_user.ashx?action=ruser&uid={uid}",//获取会员余额信息
geturl: '/game/g_user.ashx?action=getuser&uid={uid}', //获得会员信息
hzhuan: "/game/g_user.ashx?action=hzhuan&usercode={usercode}&pwd={pwd}&uid={uid}&accounttype={accounttype}&price={price}",//转账 1树呗 4地呗
donwuser: "/game/g_user.ashx?action=donwuser&type={type}&page={page}&uid={uid}",//)获取下面指定层数人员(如 一 级好友 二级好友)
changpwd: "/game/g_user.ashx?action=changpwd&pwd0={pwd0}&pwd1={pwd1}&pwd2={pwd2}&pwd3={pwd3}&pwd4={pwd4}&pwd5={pwd5}&usercode={usercode}",//修改密码
backpwd: "/game/g_user.ashx?action=backpwd&pwd={pwd}&usercode={usercode}",//找回密码
changname: "/game/g_user.ashx?action=changname&name={name}&uid={uid}",//修改姓名
jhuo: "/game/g_user.ashx?action=jhuo&uid={uid}",//激活
photo: "/game/g_user.ashx?action=photo&uid={uid}&extion={extion}&img={img}",//设置头像
//------------明细接口 -------------------------
account: "/game/g_accoun.ashx?action=account&page={page}&uid={uid}&accountid={accountid}",//获取指定会员账户明细
account_zx: "/game/g_accoun.ashx?action=account_zx&page={page}&uid={uid}",////获取指定会员转账明细
account_dh: "/game/g_accoun.ashx?action=account_dh&page={page}&uid={uid}",//获取指定会员兑换明细
//-----------游戏主界面接口--------------
treelst: "/game/g_game.ashx?action=gettreelst&grade={grade}&uid={uid}", //获取指定场景 所有树信息(电话,回调函数(返回 json 值))
caozuo: "/game/g_game.ashx?action=caozuo&grade={grade}&uid={uid}&no={no}&actype={actype}",//游戏操作(1 种树 2浇水,3施肥 4采摘 11一键施肥 22一键采摘 可以不传No参数) (grade 等级,no 编号,actype 操作类型)-
flnum: "/game/g_game.ashx?action=getnum&grade={grade}&uid={uid}", //获取指定等级的肥料数量
shouhuo: "/game/g_game.ashx?action=shouhuo&uid={uid}&grade={grade}", //收获租借收益
zhujie: "/game/g_game.ashx?action=zhujie&uid={uid}&grade={grade}", //获取租借信息 操作编号(租橡胶树 :20 租风力发电 :30)
buygamegood: "/game/g_shop.ashx?action=buygamegood&grade={grade}&type={type}&uid={uid}",//游戏商城购买 (肥料,树苗 ,地)grade等级,type大类1肥料2树苗3土地
getwareh: "/game/g_shop.ashx?action=getwareh&uid={uid}", //库存
shopgood: "/game/g_shop.ashx?action=shopgood", //兑换商城普通商品列表
hieghgood: "/game/g_shop.ashx?action=hieghgood", //兑换商城高级商品信息
byshopgood: "/game/g_shop.ashx?action=byshopgood&name={name}&usertel={usertel}&pwd={pwd}&province={province}&Area={Area}&City={City}&Addr={Addr}&num={num}&uid={uid}&goodid={goodid}",//购买普通商品
byhieghgood: "/game/g_shop.ashx?action=byhieghgood&bankname={bankname}&bankno={bankno}&bankname={bankname}&pwd={pwd}&num={num}&goodid={goodid}&uid={uid}", //购买高级商品
zhu: "/game/g_shop.ashx?action=zhu&uid={uid}&grade={grade}&pwd={pwd}", //租橡胶林 grade
//-----------------公告信息----------------------------
newlist: "/game/g_new.ashx?action=newlist",//公告信息
newinfo: "/game/g_new.ashx?action=newinfo&id={id}",//获取新闻公告详细信息
//签到
goSign: '/game/g_user.ashx?action=sign&uid={uid}',
chongzhi: '/game/qmzsgame.ashx?action=xjconvert&uid={uid}&money={money}&password={password}&type={type}&signcode={signcode}',
//棋牌
getyb:'/game/qmzsgame.ashx?action=getyb&uid={uid}',//获取元宝数量
czqp:'/game/qmzsgame.ashx?action=czqp&uid={uid}&money={money}&pwd={pwd}',//充值元宝
//确认转账
tranzr:'/game/qp_game.ashx?action=tranzr&uid={uid}&page={page}',//待确认转入
tranzc:'/game/qp_game.ashx?action=tranzc&uid={uid}&page={page}',//待确认转出
confirm:'/game/qp_game.ashx?action=confirm&uid={uid}&Id={Id}',//确认
cancel:'/game/qp_game.ashx?action=cancel&uid={uid}&Id={Id}',//取消
//--------------------------二维码--------------------------------
qrcode: '/game/qrcode.aspx?id={id}',
//------------------------获取指定长度随机数-获取-------------------//
getrandom: function (length) { var Num = ""; for (var i = 0; i < length; i++) { Num += Math.floor(Math.random() * 10); } return Num; },
//--------------------------参数替换(url,参数数组)----------------------------//
urlreplace: function (url, lst) {
var newurl = url; for (var name in lst) { newurl = newurl.replace("{" + name + "}", lst[name]); }
return newurl + '&key=' + this.onekey;
},
//-------------------------异步回调-------------------
async: function (url, fun, panm) {
var url1 = this.urlreplace(url, panm)
console.log('执行url:' + url1)
//网络加载中
var scene_ = cc.director.getScene();
var loadingNode = new cc.Node();
loadingNode.name = 'loadingNode';
loadingNode.addComponent(cc.Label);
loadingNode.getComponent(cc.Label).string = '网络加载中,请稍后...';
loadingNode.getComponent(cc.Label).fontSize = 25;
loadingNode.setPosition(600, 400);
loadingNode.color = new cc.Color(255, 0, 0);
scene_.addChild(loadingNode);
var xhr = new XMLHttpRequest();
xhr.timeout = 5000;
xhr.onreadystatechange = function () {
if (xhr.readyState == 4) {
if (xhr.status >= 200 && xhr.status < 400) {
loadingNode.destroy(); | // console.log(response);
response = JSON.parse(response);
if (response.msg2 == '非法操作') {
window.url = response.msg3;
cc.director.loadScene('update');
} else {
fun(response);
}
} else {
loadingNode.destroy();
var response = { 'msg1': 'error', 'msg2': '网络连接错误', 'msg3': '' };
fun(response)
}
}
loadingNode.destroy();
};
xhr.ontimeout = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '连接超时,网络繁忙,请稍后再试', 'msg3': '' };
fun(response)
}
xhr.onerror = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '网路错误,请检查网络设置', 'msg3': '' };
fun(response)
}
xhr.open("GET", url1, true);
xhr.send();
},
//-------------------------调取数据--------------------------------------------
geturldata: function (url, lst, fun) {
var url2 = this.yuming + url;
var user = cc.sys.localStorage.getItem('current_user');
lst["uid"] = user.ID;
com.async(url2, fun, lst);
},
//--------------------------------------------会员信息 ---------------------------------------
getUser: function () {
if (!cc.sys.localStorage.getItem('current_user')) {
cc.director.loadScene('index');
} else {
return JSON.parse(cc.sys.localStorage.getItem("current_user"));
}
},
updateUser: function (fun) {
var userinfo = this.getUser();
var uid = userinfo.ID;
var url = this.yuming + this.geturl;
this.async(url, function (resp) {
if (resp.msg1 == 'success') {
cc.sys.localStorage.setItem('current_user', JSON.stringify(resp.msg3));
if (fun) {
fun(resp);
}
}
}, { 'uid': uid });
},
logout: function () {
cc.sys.localStorage.setItem('current_user', null);
},
//-----------------------------------------------原有接口函数----------------------------------------------------------------------------------//
//-------------------------注册短信(电话,回调函数(返回 json 值))--------------------------------------------
sendregcode: function (tel, fun) {
if (tel !== null && tel !== "") {
var code2 = this.getrandom(6);
cc.sys.localStorage.setItem('regcode', code2);
var url = this.yuming + this.regcode;
this.async(url, fun, { "tel": tel, "yzm": code2 });
}
},
//-------------------------找回密码(电话,回调函数(返回 json 值))--------------------------------------------
sendBackcode: function (tel, fun) {
if (tel !== null && tel !== "") {
var code2 = this.getrandom(6);
cc.sys.localStorage.setItem('regcode', code2);
var url = this.yuming + this.backcode;
this.async(url, fun, { "tel": tel, "yzm": code2 });
}
},
//-------------------------登陆--------------------------------------------
login: function (lst, fun) {
if (lst["usercode"] !== null && lst["usercode"] !== "" && lst["pwd"] !== null && lst["pwd"] !== "") {
var url = this.yuming + this.loginurl;
this.async(url, fun, lst);
}
},
//-------------------------注册--------------------------------------------
reg: function (lst, fun) {
if (lst.tjuser && lst.usercode && lst.pwd1) {
lst.username = lst.usercode;
lst.pwd2 = lst.pwd1;
var url = this.yuming + this.regurl;
this.async(url, fun, lst);
}
},
//----------------------------激活--------------------------
activeDebei: function (uid, fun) {
if (uid != '') {
var url = this.yuming + this.jhuo;
var list = [];
list.uid = uid;
this.async(url, fun, list);
}
},
//---------------------------------商城购买-------------------------------
doShop: function (uid, type, grade, fun) {
var list = {};
list.uid = uid;
list.type = type;
list.grade = grade;
var url = this.yuming + this.buygamegood;
this.async(url, fun, list);
},
treeAction: function (list, fun) {
var url = this.yuming + this.caozuo;
this.async(url, fun, list);
}
}; |
var response = xhr.responseText; | random_line_split |
read_archive.rs | //! read-archive
use backup_cli::storage::{FileHandle, FileHandleRef};
use libra_types::access_path::AccessPath;
use libra_types::account_config::AccountResource;
use libra_types::account_state::AccountState;
use libra_types::write_set::{WriteOp, WriteSetMut};
use move_core_types::move_resource::MoveResource;
use ol_fixtures::get_persona_mnem;
use ol_keys::wallet::get_account_from_mnem;
use serde::de::DeserializeOwned;
use std::convert::TryFrom;
use std::path::PathBuf;
use std::fs::File;
use std::io::Read;
use libra_config::utils::get_available_port;
use libra_crypto::HashValue;
use libra_types::{
account_state_blob::AccountStateBlob, ledger_info::LedgerInfoWithSignatures,
proof::TransactionInfoWithProof,
account_config::BalanceResource,
validator_config::ValidatorConfigResource,
};
use libra_types::{
transaction::{Transaction, WriteSetPayload},
trusted_state::TrustedState,
waypoint::Waypoint,
};
use ol_types::miner_state::MinerStateResource;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::Arc,
};
use backup_cli::backup_types::state_snapshot::manifest::StateSnapshotBackup;
use anyhow::{bail, ensure, Error, Result};
use tokio::{fs::OpenOptions, io::AsyncRead};
use libra_temppath::TempPath;
use libradb::LibraDB;
use backup_cli::utils::read_record_bytes::ReadRecordBytes;
use backup_service::start_backup_service;
use tokio::runtime::Runtime;
use executor::db_bootstrapper::{generate_waypoint, maybe_bootstrap};
use libra_vm::LibraVM;
use storage_interface::DbReaderWriter;
use crate::generate_genesis;
use crate::recover::{accounts_into_recovery, LegacyRecovery};
fn get_runtime() -> (Runtime, u16) {
let port = get_available_port();
let path = TempPath::new();
let rt = start_backup_service(
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
Arc::new(LibraDB::new_for_test(&path)),
);
(rt, port)
}
async fn open_for_read(file_handle: &FileHandleRef) -> Result<Box<dyn AsyncRead + Send + Unpin>> {
let file = OpenOptions::new().read(true).open(file_handle).await?;
Ok(Box::new(file))
}
fn read_from_file(path: &str) -> Result<Vec<u8>> {
let mut data = Vec::<u8>::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
Ok(data)
}
fn read_from_json(path: &PathBuf) -> Result<StateSnapshotBackup> {
let config = std::fs::read_to_string(path)?;
let map: StateSnapshotBackup = serde_json::from_str(&config)?;
Ok(map)
}
fn load_lcs_file<T: DeserializeOwned>(file_handle: &str) -> Result<T> {
let x = read_from_file(&file_handle)?;
Ok(lcs::from_bytes(&x)?)
}
async fn read_account_state_chunk(
file_handle: FileHandle,
archive_path: &PathBuf,
) -> Result<Vec<(HashValue, AccountStateBlob)>> {
let full_handle = archive_path.parent().unwrap().join(file_handle);
let handle_str = full_handle.to_str().unwrap();
let mut file = open_for_read(handle_str).await?;
let mut chunk = vec![];
while let Some(record_bytes) = file.read_record_bytes().await? {
chunk.push(lcs::from_bytes(&record_bytes)?);
}
Ok(chunk)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_swarm_writeset(
archive_path: PathBuf,
) -> Result<WriteSetMut, Error> {
let backup = read_from_json(&archive_path)?;
let account_blobs = accounts_from_snapshot_backup(backup, &archive_path).await?;
accounts_into_writeset_swarm(&account_blobs)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_recovery(archive_path: &PathBuf) -> Result<Vec<LegacyRecovery>, Error> {
let manifest_json = archive_path.join("state.manifest");
let backup = read_from_json(&manifest_json)?;
let account_blobs = accounts_from_snapshot_backup(backup, archive_path).await?;
let r = accounts_into_recovery(&account_blobs)?;
Ok(r)
}
/// Tokio async parsing of state snapshot into blob
async fn accounts_from_snapshot_backup(
manifest: StateSnapshotBackup,
archive_path: &PathBuf
) -> Result<Vec<AccountStateBlob>> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, archive_path).await?;
// println!("{:?}", blobs);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
Ok(account_state_blobs)
}
fn get_alice_authkey_for_swarm() -> Vec<u8> {
let mnemonic_string = get_persona_mnem("alice");
let account_details = get_account_from_mnem(mnemonic_string);
account_details.0.to_vec()
}
/// cases that we need to create a genesis from backup.
pub enum GenesisCase {
/// a network upgrade or fork
Fork,
/// simulate state in a local swarm.
Test,
}
/// make the writeset for the genesis case. Starts with an unmodified account state and make into a writeset.
pub fn accounts_into_writeset_swarm(
account_state_blobs: &Vec<AccountStateBlob>,
) -> Result<WriteSetMut, Error> |
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_genesis_from_blob(&account_state_blobs, db_rw)?;
Ok(())
}
/// given a path to state archive, produce a genesis.blob
pub fn genesis_from_path(path: PathBuf) -> Result<()> {
let path_man = path.clone().join("state.manifest");
dbg!(&path_man);
let path_proof = path.join("state.proof");
dbg!(&path_proof);
let manifest = read_from_json(&path_man).unwrap();
// Tokio runtime
let (mut rt, _port) = get_runtime();
let (txn_info_with_proof, li): (TransactionInfoWithProof, LedgerInfoWithSignatures) =
load_lcs_file(&path_proof.into_os_string().into_string().unwrap()).unwrap();
txn_info_with_proof.verify(li.ledger_info(), manifest.version)?;
ensure!(
txn_info_with_proof.transaction_info().state_root_hash() == manifest.root_hash,
"Root hash mismatch with that in proof. root hash: {}, expected: {}",
manifest.root_hash,
txn_info_with_proof.transaction_info().state_root_hash(),
);
let future = run_impl(manifest, &path); // Nothing is printed
rt.block_on(future)?;
Ok(())
}
#[cfg(test)]
#[test]
fn test_main() -> Result<()> {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
genesis_from_path(buf)
}
#[test]
pub fn test_accounts_into_recovery() {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
let path_man = buf.clone().join("state.manifest");
println!("Running.....");
let backup = read_from_json(&path_man).unwrap();
let (mut rt, _port) = get_runtime();
let account_blobs_futures = accounts_from_snapshot_backup(backup);
let account_blobs = rt.block_on(account_blobs_futures).unwrap();
let genesis_recovery_list = accounts_into_recovery(&account_blobs).unwrap();
println!("Total GenesisRecovery objects: {}", &genesis_recovery_list.len());
for blob in account_blobs {
let account_state = AccountState::try_from(&blob).unwrap();
if let Some(address) = account_state.get_account_address().unwrap() {
let mut address_processed = false;
for gr in &genesis_recovery_list {
if gr.address != address {
continue;
}
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
// extract the validator config resource
if k.clone() == BalanceResource::resource_path() {
match &gr.balance {
Some(balance) => {
if lcs::to_bytes(&balance).unwrap() != v.clone() {
panic!("Balance resource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("Balance not found");
}
}
}
if k.clone() == ValidatorConfigResource::resource_path() {
match &gr.val_cfg {
Some(val_cfg) => {
if lcs::to_bytes(&val_cfg).unwrap() != v.clone() {
panic!("ValidatorConfigResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("ValidatorConfigResource not found");
}
}
}
if k.clone() == MinerStateResource::resource_path() {
match &gr.miner_state {
Some(miner_state) => {
if lcs::to_bytes(&miner_state).unwrap() != v.clone() {
panic!("MinerStateResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("MinerStateResource not found");
}
}
}
}
println!("processed account: {:?}", address);
address_processed = true;
break;
};
if !address_processed {
panic!("Address not found for {} in recovery list", &address);
}
};
};
} | {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
} | identifier_body |
read_archive.rs | //! read-archive
use backup_cli::storage::{FileHandle, FileHandleRef};
use libra_types::access_path::AccessPath;
use libra_types::account_config::AccountResource;
use libra_types::account_state::AccountState;
use libra_types::write_set::{WriteOp, WriteSetMut};
use move_core_types::move_resource::MoveResource;
use ol_fixtures::get_persona_mnem;
use ol_keys::wallet::get_account_from_mnem;
use serde::de::DeserializeOwned;
use std::convert::TryFrom;
use std::path::PathBuf;
use std::fs::File;
use std::io::Read;
use libra_config::utils::get_available_port;
use libra_crypto::HashValue;
use libra_types::{
account_state_blob::AccountStateBlob, ledger_info::LedgerInfoWithSignatures,
proof::TransactionInfoWithProof,
account_config::BalanceResource,
validator_config::ValidatorConfigResource,
};
use libra_types::{
transaction::{Transaction, WriteSetPayload},
trusted_state::TrustedState,
waypoint::Waypoint,
};
use ol_types::miner_state::MinerStateResource;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::Arc,
};
use backup_cli::backup_types::state_snapshot::manifest::StateSnapshotBackup;
use anyhow::{bail, ensure, Error, Result};
use tokio::{fs::OpenOptions, io::AsyncRead};
use libra_temppath::TempPath;
use libradb::LibraDB;
use backup_cli::utils::read_record_bytes::ReadRecordBytes;
use backup_service::start_backup_service;
use tokio::runtime::Runtime;
use executor::db_bootstrapper::{generate_waypoint, maybe_bootstrap};
use libra_vm::LibraVM;
use storage_interface::DbReaderWriter;
use crate::generate_genesis;
use crate::recover::{accounts_into_recovery, LegacyRecovery};
fn get_runtime() -> (Runtime, u16) {
let port = get_available_port();
let path = TempPath::new();
let rt = start_backup_service(
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
Arc::new(LibraDB::new_for_test(&path)),
);
(rt, port)
}
async fn open_for_read(file_handle: &FileHandleRef) -> Result<Box<dyn AsyncRead + Send + Unpin>> {
let file = OpenOptions::new().read(true).open(file_handle).await?;
Ok(Box::new(file)) |
fn read_from_file(path: &str) -> Result<Vec<u8>> {
let mut data = Vec::<u8>::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
Ok(data)
}
fn read_from_json(path: &PathBuf) -> Result<StateSnapshotBackup> {
let config = std::fs::read_to_string(path)?;
let map: StateSnapshotBackup = serde_json::from_str(&config)?;
Ok(map)
}
fn load_lcs_file<T: DeserializeOwned>(file_handle: &str) -> Result<T> {
let x = read_from_file(&file_handle)?;
Ok(lcs::from_bytes(&x)?)
}
async fn read_account_state_chunk(
file_handle: FileHandle,
archive_path: &PathBuf,
) -> Result<Vec<(HashValue, AccountStateBlob)>> {
let full_handle = archive_path.parent().unwrap().join(file_handle);
let handle_str = full_handle.to_str().unwrap();
let mut file = open_for_read(handle_str).await?;
let mut chunk = vec![];
while let Some(record_bytes) = file.read_record_bytes().await? {
chunk.push(lcs::from_bytes(&record_bytes)?);
}
Ok(chunk)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_swarm_writeset(
archive_path: PathBuf,
) -> Result<WriteSetMut, Error> {
let backup = read_from_json(&archive_path)?;
let account_blobs = accounts_from_snapshot_backup(backup, &archive_path).await?;
accounts_into_writeset_swarm(&account_blobs)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_recovery(archive_path: &PathBuf) -> Result<Vec<LegacyRecovery>, Error> {
let manifest_json = archive_path.join("state.manifest");
let backup = read_from_json(&manifest_json)?;
let account_blobs = accounts_from_snapshot_backup(backup, archive_path).await?;
let r = accounts_into_recovery(&account_blobs)?;
Ok(r)
}
/// Tokio async parsing of state snapshot into blob
async fn accounts_from_snapshot_backup(
manifest: StateSnapshotBackup,
archive_path: &PathBuf
) -> Result<Vec<AccountStateBlob>> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, archive_path).await?;
// println!("{:?}", blobs);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
Ok(account_state_blobs)
}
fn get_alice_authkey_for_swarm() -> Vec<u8> {
let mnemonic_string = get_persona_mnem("alice");
let account_details = get_account_from_mnem(mnemonic_string);
account_details.0.to_vec()
}
/// cases that we need to create a genesis from backup.
pub enum GenesisCase {
/// a network upgrade or fork
Fork,
/// simulate state in a local swarm.
Test,
}
/// make the writeset for the genesis case. Starts with an unmodified account state and make into a writeset.
pub fn accounts_into_writeset_swarm(
account_state_blobs: &Vec<AccountStateBlob>,
) -> Result<WriteSetMut, Error> {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
}
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_genesis_from_blob(&account_state_blobs, db_rw)?;
Ok(())
}
/// given a path to state archive, produce a genesis.blob
pub fn genesis_from_path(path: PathBuf) -> Result<()> {
let path_man = path.clone().join("state.manifest");
dbg!(&path_man);
let path_proof = path.join("state.proof");
dbg!(&path_proof);
let manifest = read_from_json(&path_man).unwrap();
// Tokio runtime
let (mut rt, _port) = get_runtime();
let (txn_info_with_proof, li): (TransactionInfoWithProof, LedgerInfoWithSignatures) =
load_lcs_file(&path_proof.into_os_string().into_string().unwrap()).unwrap();
txn_info_with_proof.verify(li.ledger_info(), manifest.version)?;
ensure!(
txn_info_with_proof.transaction_info().state_root_hash() == manifest.root_hash,
"Root hash mismatch with that in proof. root hash: {}, expected: {}",
manifest.root_hash,
txn_info_with_proof.transaction_info().state_root_hash(),
);
let future = run_impl(manifest, &path); // Nothing is printed
rt.block_on(future)?;
Ok(())
}
#[cfg(test)]
#[test]
fn test_main() -> Result<()> {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
genesis_from_path(buf)
}
#[test]
pub fn test_accounts_into_recovery() {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
let path_man = buf.clone().join("state.manifest");
println!("Running.....");
let backup = read_from_json(&path_man).unwrap();
let (mut rt, _port) = get_runtime();
let account_blobs_futures = accounts_from_snapshot_backup(backup);
let account_blobs = rt.block_on(account_blobs_futures).unwrap();
let genesis_recovery_list = accounts_into_recovery(&account_blobs).unwrap();
println!("Total GenesisRecovery objects: {}", &genesis_recovery_list.len());
for blob in account_blobs {
let account_state = AccountState::try_from(&blob).unwrap();
if let Some(address) = account_state.get_account_address().unwrap() {
let mut address_processed = false;
for gr in &genesis_recovery_list {
if gr.address != address {
continue;
}
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
// extract the validator config resource
if k.clone() == BalanceResource::resource_path() {
match &gr.balance {
Some(balance) => {
if lcs::to_bytes(&balance).unwrap() != v.clone() {
panic!("Balance resource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("Balance not found");
}
}
}
if k.clone() == ValidatorConfigResource::resource_path() {
match &gr.val_cfg {
Some(val_cfg) => {
if lcs::to_bytes(&val_cfg).unwrap() != v.clone() {
panic!("ValidatorConfigResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("ValidatorConfigResource not found");
}
}
}
if k.clone() == MinerStateResource::resource_path() {
match &gr.miner_state {
Some(miner_state) => {
if lcs::to_bytes(&miner_state).unwrap() != v.clone() {
panic!("MinerStateResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("MinerStateResource not found");
}
}
}
}
println!("processed account: {:?}", address);
address_processed = true;
break;
};
if !address_processed {
panic!("Address not found for {} in recovery list", &address);
}
};
};
} | } | random_line_split |
read_archive.rs | //! read-archive
use backup_cli::storage::{FileHandle, FileHandleRef};
use libra_types::access_path::AccessPath;
use libra_types::account_config::AccountResource;
use libra_types::account_state::AccountState;
use libra_types::write_set::{WriteOp, WriteSetMut};
use move_core_types::move_resource::MoveResource;
use ol_fixtures::get_persona_mnem;
use ol_keys::wallet::get_account_from_mnem;
use serde::de::DeserializeOwned;
use std::convert::TryFrom;
use std::path::PathBuf;
use std::fs::File;
use std::io::Read;
use libra_config::utils::get_available_port;
use libra_crypto::HashValue;
use libra_types::{
account_state_blob::AccountStateBlob, ledger_info::LedgerInfoWithSignatures,
proof::TransactionInfoWithProof,
account_config::BalanceResource,
validator_config::ValidatorConfigResource,
};
use libra_types::{
transaction::{Transaction, WriteSetPayload},
trusted_state::TrustedState,
waypoint::Waypoint,
};
use ol_types::miner_state::MinerStateResource;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::Arc,
};
use backup_cli::backup_types::state_snapshot::manifest::StateSnapshotBackup;
use anyhow::{bail, ensure, Error, Result};
use tokio::{fs::OpenOptions, io::AsyncRead};
use libra_temppath::TempPath;
use libradb::LibraDB;
use backup_cli::utils::read_record_bytes::ReadRecordBytes;
use backup_service::start_backup_service;
use tokio::runtime::Runtime;
use executor::db_bootstrapper::{generate_waypoint, maybe_bootstrap};
use libra_vm::LibraVM;
use storage_interface::DbReaderWriter;
use crate::generate_genesis;
use crate::recover::{accounts_into_recovery, LegacyRecovery};
fn get_runtime() -> (Runtime, u16) {
let port = get_available_port();
let path = TempPath::new();
let rt = start_backup_service(
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
Arc::new(LibraDB::new_for_test(&path)),
);
(rt, port)
}
async fn open_for_read(file_handle: &FileHandleRef) -> Result<Box<dyn AsyncRead + Send + Unpin>> {
let file = OpenOptions::new().read(true).open(file_handle).await?;
Ok(Box::new(file))
}
fn read_from_file(path: &str) -> Result<Vec<u8>> {
let mut data = Vec::<u8>::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
Ok(data)
}
fn read_from_json(path: &PathBuf) -> Result<StateSnapshotBackup> {
let config = std::fs::read_to_string(path)?;
let map: StateSnapshotBackup = serde_json::from_str(&config)?;
Ok(map)
}
fn load_lcs_file<T: DeserializeOwned>(file_handle: &str) -> Result<T> {
let x = read_from_file(&file_handle)?;
Ok(lcs::from_bytes(&x)?)
}
async fn read_account_state_chunk(
file_handle: FileHandle,
archive_path: &PathBuf,
) -> Result<Vec<(HashValue, AccountStateBlob)>> {
let full_handle = archive_path.parent().unwrap().join(file_handle);
let handle_str = full_handle.to_str().unwrap();
let mut file = open_for_read(handle_str).await?;
let mut chunk = vec![];
while let Some(record_bytes) = file.read_record_bytes().await? {
chunk.push(lcs::from_bytes(&record_bytes)?);
}
Ok(chunk)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_swarm_writeset(
archive_path: PathBuf,
) -> Result<WriteSetMut, Error> {
let backup = read_from_json(&archive_path)?;
let account_blobs = accounts_from_snapshot_backup(backup, &archive_path).await?;
accounts_into_writeset_swarm(&account_blobs)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_recovery(archive_path: &PathBuf) -> Result<Vec<LegacyRecovery>, Error> {
let manifest_json = archive_path.join("state.manifest");
let backup = read_from_json(&manifest_json)?;
let account_blobs = accounts_from_snapshot_backup(backup, archive_path).await?;
let r = accounts_into_recovery(&account_blobs)?;
Ok(r)
}
/// Tokio async parsing of state snapshot into blob
async fn accounts_from_snapshot_backup(
manifest: StateSnapshotBackup,
archive_path: &PathBuf
) -> Result<Vec<AccountStateBlob>> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, archive_path).await?;
// println!("{:?}", blobs);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
Ok(account_state_blobs)
}
fn get_alice_authkey_for_swarm() -> Vec<u8> {
let mnemonic_string = get_persona_mnem("alice");
let account_details = get_account_from_mnem(mnemonic_string);
account_details.0.to_vec()
}
/// cases that we need to create a genesis from backup.
pub enum GenesisCase {
/// a network upgrade or fork
Fork,
/// simulate state in a local swarm.
Test,
}
/// make the writeset for the genesis case. Starts with an unmodified account state and make into a writeset.
pub fn accounts_into_writeset_swarm(
account_state_blobs: &Vec<AccountStateBlob>,
) -> Result<WriteSetMut, Error> {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
}
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_genesis_from_blob(&account_state_blobs, db_rw)?;
Ok(())
}
/// given a path to state archive, produce a genesis.blob
pub fn genesis_from_path(path: PathBuf) -> Result<()> {
let path_man = path.clone().join("state.manifest");
dbg!(&path_man);
let path_proof = path.join("state.proof");
dbg!(&path_proof);
let manifest = read_from_json(&path_man).unwrap();
// Tokio runtime
let (mut rt, _port) = get_runtime();
let (txn_info_with_proof, li): (TransactionInfoWithProof, LedgerInfoWithSignatures) =
load_lcs_file(&path_proof.into_os_string().into_string().unwrap()).unwrap();
txn_info_with_proof.verify(li.ledger_info(), manifest.version)?;
ensure!(
txn_info_with_proof.transaction_info().state_root_hash() == manifest.root_hash,
"Root hash mismatch with that in proof. root hash: {}, expected: {}",
manifest.root_hash,
txn_info_with_proof.transaction_info().state_root_hash(),
);
let future = run_impl(manifest, &path); // Nothing is printed
rt.block_on(future)?;
Ok(())
}
#[cfg(test)]
#[test]
fn test_main() -> Result<()> {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
genesis_from_path(buf)
}
#[test]
pub fn test_accounts_into_recovery() {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
let path_man = buf.clone().join("state.manifest");
println!("Running.....");
let backup = read_from_json(&path_man).unwrap();
let (mut rt, _port) = get_runtime();
let account_blobs_futures = accounts_from_snapshot_backup(backup);
let account_blobs = rt.block_on(account_blobs_futures).unwrap();
let genesis_recovery_list = accounts_into_recovery(&account_blobs).unwrap();
println!("Total GenesisRecovery objects: {}", &genesis_recovery_list.len());
for blob in account_blobs {
let account_state = AccountState::try_from(&blob).unwrap();
if let Some(address) = account_state.get_account_address().unwrap() {
let mut address_processed = false;
for gr in &genesis_recovery_list {
if gr.address != address {
continue;
}
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
// extract the validator config resource
if k.clone() == BalanceResource::resource_path() {
match &gr.balance {
Some(balance) => {
if lcs::to_bytes(&balance).unwrap() != v.clone() {
panic!("Balance resource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("Balance not found");
}
}
}
if k.clone() == ValidatorConfigResource::resource_path() {
match &gr.val_cfg {
Some(val_cfg) => {
if lcs::to_bytes(&val_cfg).unwrap() != v.clone() {
panic!("ValidatorConfigResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("ValidatorConfigResource not found");
}
}
}
if k.clone() == MinerStateResource::resource_path() {
match &gr.miner_state {
Some(miner_state) => {
if lcs::to_bytes(&miner_state).unwrap() != v.clone() {
panic!("MinerStateResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => |
}
}
}
println!("processed account: {:?}", address);
address_processed = true;
break;
};
if !address_processed {
panic!("Address not found for {} in recovery list", &address);
}
};
};
} | {
panic!("MinerStateResource not found");
} | conditional_block |
read_archive.rs | //! read-archive
use backup_cli::storage::{FileHandle, FileHandleRef};
use libra_types::access_path::AccessPath;
use libra_types::account_config::AccountResource;
use libra_types::account_state::AccountState;
use libra_types::write_set::{WriteOp, WriteSetMut};
use move_core_types::move_resource::MoveResource;
use ol_fixtures::get_persona_mnem;
use ol_keys::wallet::get_account_from_mnem;
use serde::de::DeserializeOwned;
use std::convert::TryFrom;
use std::path::PathBuf;
use std::fs::File;
use std::io::Read;
use libra_config::utils::get_available_port;
use libra_crypto::HashValue;
use libra_types::{
account_state_blob::AccountStateBlob, ledger_info::LedgerInfoWithSignatures,
proof::TransactionInfoWithProof,
account_config::BalanceResource,
validator_config::ValidatorConfigResource,
};
use libra_types::{
transaction::{Transaction, WriteSetPayload},
trusted_state::TrustedState,
waypoint::Waypoint,
};
use ol_types::miner_state::MinerStateResource;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::Arc,
};
use backup_cli::backup_types::state_snapshot::manifest::StateSnapshotBackup;
use anyhow::{bail, ensure, Error, Result};
use tokio::{fs::OpenOptions, io::AsyncRead};
use libra_temppath::TempPath;
use libradb::LibraDB;
use backup_cli::utils::read_record_bytes::ReadRecordBytes;
use backup_service::start_backup_service;
use tokio::runtime::Runtime;
use executor::db_bootstrapper::{generate_waypoint, maybe_bootstrap};
use libra_vm::LibraVM;
use storage_interface::DbReaderWriter;
use crate::generate_genesis;
use crate::recover::{accounts_into_recovery, LegacyRecovery};
fn get_runtime() -> (Runtime, u16) {
let port = get_available_port();
let path = TempPath::new();
let rt = start_backup_service(
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
Arc::new(LibraDB::new_for_test(&path)),
);
(rt, port)
}
async fn open_for_read(file_handle: &FileHandleRef) -> Result<Box<dyn AsyncRead + Send + Unpin>> {
let file = OpenOptions::new().read(true).open(file_handle).await?;
Ok(Box::new(file))
}
fn read_from_file(path: &str) -> Result<Vec<u8>> {
let mut data = Vec::<u8>::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
Ok(data)
}
fn read_from_json(path: &PathBuf) -> Result<StateSnapshotBackup> {
let config = std::fs::read_to_string(path)?;
let map: StateSnapshotBackup = serde_json::from_str(&config)?;
Ok(map)
}
fn load_lcs_file<T: DeserializeOwned>(file_handle: &str) -> Result<T> {
let x = read_from_file(&file_handle)?;
Ok(lcs::from_bytes(&x)?)
}
async fn read_account_state_chunk(
file_handle: FileHandle,
archive_path: &PathBuf,
) -> Result<Vec<(HashValue, AccountStateBlob)>> {
let full_handle = archive_path.parent().unwrap().join(file_handle);
let handle_str = full_handle.to_str().unwrap();
let mut file = open_for_read(handle_str).await?;
let mut chunk = vec![];
while let Some(record_bytes) = file.read_record_bytes().await? {
chunk.push(lcs::from_bytes(&record_bytes)?);
}
Ok(chunk)
}
/// take an archive file path and parse into a writeset
pub async fn | (
archive_path: PathBuf,
) -> Result<WriteSetMut, Error> {
let backup = read_from_json(&archive_path)?;
let account_blobs = accounts_from_snapshot_backup(backup, &archive_path).await?;
accounts_into_writeset_swarm(&account_blobs)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_recovery(archive_path: &PathBuf) -> Result<Vec<LegacyRecovery>, Error> {
let manifest_json = archive_path.join("state.manifest");
let backup = read_from_json(&manifest_json)?;
let account_blobs = accounts_from_snapshot_backup(backup, archive_path).await?;
let r = accounts_into_recovery(&account_blobs)?;
Ok(r)
}
/// Tokio async parsing of state snapshot into blob
async fn accounts_from_snapshot_backup(
manifest: StateSnapshotBackup,
archive_path: &PathBuf
) -> Result<Vec<AccountStateBlob>> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, archive_path).await?;
// println!("{:?}", blobs);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
Ok(account_state_blobs)
}
fn get_alice_authkey_for_swarm() -> Vec<u8> {
let mnemonic_string = get_persona_mnem("alice");
let account_details = get_account_from_mnem(mnemonic_string);
account_details.0.to_vec()
}
/// cases that we need to create a genesis from backup.
pub enum GenesisCase {
/// a network upgrade or fork
Fork,
/// simulate state in a local swarm.
Test,
}
/// make the writeset for the genesis case. Starts with an unmodified account state and make into a writeset.
pub fn accounts_into_writeset_swarm(
account_state_blobs: &Vec<AccountStateBlob>,
) -> Result<WriteSetMut, Error> {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
}
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_genesis_from_blob(&account_state_blobs, db_rw)?;
Ok(())
}
/// given a path to state archive, produce a genesis.blob
pub fn genesis_from_path(path: PathBuf) -> Result<()> {
let path_man = path.clone().join("state.manifest");
dbg!(&path_man);
let path_proof = path.join("state.proof");
dbg!(&path_proof);
let manifest = read_from_json(&path_man).unwrap();
// Tokio runtime
let (mut rt, _port) = get_runtime();
let (txn_info_with_proof, li): (TransactionInfoWithProof, LedgerInfoWithSignatures) =
load_lcs_file(&path_proof.into_os_string().into_string().unwrap()).unwrap();
txn_info_with_proof.verify(li.ledger_info(), manifest.version)?;
ensure!(
txn_info_with_proof.transaction_info().state_root_hash() == manifest.root_hash,
"Root hash mismatch with that in proof. root hash: {}, expected: {}",
manifest.root_hash,
txn_info_with_proof.transaction_info().state_root_hash(),
);
let future = run_impl(manifest, &path); // Nothing is printed
rt.block_on(future)?;
Ok(())
}
#[cfg(test)]
#[test]
fn test_main() -> Result<()> {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
genesis_from_path(buf)
}
#[test]
pub fn test_accounts_into_recovery() {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
let path_man = buf.clone().join("state.manifest");
println!("Running.....");
let backup = read_from_json(&path_man).unwrap();
let (mut rt, _port) = get_runtime();
let account_blobs_futures = accounts_from_snapshot_backup(backup);
let account_blobs = rt.block_on(account_blobs_futures).unwrap();
let genesis_recovery_list = accounts_into_recovery(&account_blobs).unwrap();
println!("Total GenesisRecovery objects: {}", &genesis_recovery_list.len());
for blob in account_blobs {
let account_state = AccountState::try_from(&blob).unwrap();
if let Some(address) = account_state.get_account_address().unwrap() {
let mut address_processed = false;
for gr in &genesis_recovery_list {
if gr.address != address {
continue;
}
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
// extract the validator config resource
if k.clone() == BalanceResource::resource_path() {
match &gr.balance {
Some(balance) => {
if lcs::to_bytes(&balance).unwrap() != v.clone() {
panic!("Balance resource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("Balance not found");
}
}
}
if k.clone() == ValidatorConfigResource::resource_path() {
match &gr.val_cfg {
Some(val_cfg) => {
if lcs::to_bytes(&val_cfg).unwrap() != v.clone() {
panic!("ValidatorConfigResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("ValidatorConfigResource not found");
}
}
}
if k.clone() == MinerStateResource::resource_path() {
match &gr.miner_state {
Some(miner_state) => {
if lcs::to_bytes(&miner_state).unwrap() != v.clone() {
panic!("MinerStateResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("MinerStateResource not found");
}
}
}
}
println!("processed account: {:?}", address);
address_processed = true;
break;
};
if !address_processed {
panic!("Address not found for {} in recovery list", &address);
}
};
};
} | archive_into_swarm_writeset | identifier_name |
manager.go | package monitor
import (
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"time"
"domeagent/monitor/container"
"domeagent/monitor/container/docker"
"domeagent/monitor/container/raw"
"domeagent/monitor/events"
"domeagent/monitor/fs"
"domeagent/monitor/info"
"domeagent/monitor/storage"
"domeagent/monitor/storage/influxdb"
"domeagent/monitor/utils/cpuload"
"domeagent/monitor/utils/oomparser"
"domeagent/monitor/utils/sysfs"
"github.com/docker/libcontainer/cgroups"
)
var globalHousekeepingInterval = 1 * time.Minute
// change enableLoadReader from true to false, to avoid "error failed to open cgroup path" error
var enableLoadReader = false
// The Manager interface defines operations for starting a manager and getting
// container and machine information & uploading to influxdb
type Manager interface {
// Start the manager. Calling other manager methods before this returns
// may produce undefined behavior.
Start() error
// Stops the manager.
Stop() error
// Get information about a container.
GetContainerInfo(containerName string) (*info.ContainerInfo, error)
// Gets all the Docker containers. Return is a map from full container name to ContainerInfo.
AllDockerContainers() (map[string]*info.ContainerInfo, error)
// Gets information about a specific Docker container. The specified name is within the Docker namespace.
DockerContainer(containerName string) (*info.ContainerInfo, error)
// Returns true if the named container exists.
Exists(containerName string) bool
// Get information about the machine.
GetMachineInfo() (*info.MachineInfo, error)
// Get version information about different components we depend on.
GetVersionInfo() (*info.VersionInfo, error)
// Get events streamed through passedChannel that fit the request.
WatchForEvents(request *events.Request) (*events.EventChannel, error)
// Get past events that have been detected and that fit the request.
GetPastEvents(request *events.Request) ([]*info.Event, error)
CloseEventChannel(watch_id int)
// Get status information about docker.
DockerInfo() (DockerStatus, error)
// Get details about interesting docker images.
DockerImages() ([]DockerImage, error)
}
type DockerStatus struct {
Version string `json:"version"`
KernelVersion string `json:"kernel_version"`
OS string `json:"os"`
Hostname string `json:"hostname"`
RootDir string `json:"root_dir"`
Driver string `json:"driver"`
DriverStatus map[string]string `json:"driver_status"`
ExecDriver string `json:"exec_driver"`
NumImages int `json:"num_images"`
NumContainers int `json:"num_containers"`
}
type DockerImage struct {
ID string `json:"id"`
RepoTags []string `json:"repo_tags"`
Created int64 `json:"created"`
VirtualSize int64 `json:"virtual_size"`
Size int64 `json:"size"`
}
type InfluxConfig struct {
Table string `json:"table,omitempty"`
Database string `json:"dababase,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Host string `json:"host,omitempty"`
BufferDuration time.Duration `json:"buffer_duration,omitempty"`
FilterPrefix string `json:"filter_prefix,omitempty"`
}
// A namespaced container name.
type namespacedContainerName struct {
// The namespace of the container. Can be empty for the root namespace.
Namespace string
// The name of the container in this namespace.
Name string
}
type manager struct {
containers map[namespacedContainerName]*containerData
containersLock sync.RWMutex
backendStorage storage.StorageDriver
fsInfo fs.FsInfo
machineInfo info.MachineInfo
versionInfo info.VersionInfo
quitChannels []chan error
selfContainer string
loadReader cpuload.CpuLoadReader
eventHandler events.EventManager
startupTime time.Time
housekeepingInterval time.Duration
inHostNamespace bool
}
// New returns a new manager.
func New(housekeepingInterval time.Duration, config *InfluxConfig) (Manager, error) {
// Initialize influxdb
hostname, err := os.Hostname() // Agent's host name
if err != nil {
return nil, err
}
influxdbStorage, err := influxdb.New(hostname,
config.Table,
config.Database,
config.Username,
config.Password,
config.Host,
config.BufferDuration,
config.FilterPrefix)
if err != nil {
return nil, err
}
//log.Printf("[Info] Connected to influxdb on: %q", config.Host)
sysfs, err := sysfs.NewRealSysFs()
if err != nil {
log.Printf("[Error] Failed to create a system interface: %s", err)
return nil, err
}
//log.Printf("[Info] Created a system interface)
// Detect the container we are running on.
selfContainer, err := cgroups.GetThisCgroupDir("cpu")
if err != nil {
return nil, err
}
//log.Printf("[Info] Running in container: %q", selfContainer)
dockerInfo, err := docker.DockerInfo()
if err != nil {
log.Printf("[Error] Unable to connect to Docker: %v", err)
}
context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
fsInfo, err := fs.NewFsInfo(context)
if err != nil {
return nil, err
}
// If started with host's rootfs mounted, assume that its running
// in its own namespaces.
inHostNamespace := false
if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
inHostNamespace = true
}
newManager := &manager{
containers: make(map[namespacedContainerName]*containerData),
backendStorage: influxdbStorage,
quitChannels: make([]chan error, 0, 2),
fsInfo: fsInfo,
selfContainer: selfContainer,
inHostNamespace: inHostNamespace,
startupTime: time.Now(),
housekeepingInterval: housekeepingInterval,
}
machineInfo, err := getMachineInfo(sysfs, fsInfo)
if err != nil {
return nil, err
}
newManager.machineInfo = *machineInfo
//log.Printf("[Info] Machine: %+v", newManager.machineInfo)
versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
}
newManager.versionInfo = *versionInfo
//log.Printf("[Info] Version: %+v", newManager.versionInfo)
newManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())
return newManager, nil
}
// Start the container manager.
func (self *manager) Start() error {
// Register Docker container factory.
err := docker.Register(self, self.fsInfo)
if err != nil {
log.Printf("{Error] Docker container factory registration failed: %v.", err)
return err
}
// Register the raw driver.
err = raw.Register(self, self.fsInfo)
if err != nil {
log.Printf("[Error] Registration of the raw container factory failed: %v", err)
return err
}
self.DockerInfo()
self.DockerImages()
if enableLoadReader {
// Create cpu load reader.
cpuLoadReader, err := cpuload.New()
if err != nil {
log.Printf("[Error] Could not initialize cpu load reader: %s", err)
} else {
err = cpuLoadReader.Start()
if err != nil {
log.Printf("[Error] Could not start cpu load stat collector: %s", err)
} else {
self.loadReader = cpuLoadReader
}
}
}
// Watch for OOMs.
err = self.watchForNewOoms()
if err != nil {
log.Printf("[Error] Could not configure a source for OOM detection, disabling OOM events: %v", err)
}
// If there are no factories, don't start any housekeeping and serve the information we do have.
if !container.HasFactories() {
return nil
}
// Create root and then recover all containers.
err = self.createContainer("/")
if err != nil {
return err
}
//log.Printf("[Info] Starting recovery of all containers")
err = self.detectSubcontainers("/")
if err != nil {
return err
}
//log.Printf("[Info] Recovery completed")
// Watch for new container.
quitWatcher := make(chan error)
err = self.watchForNewContainers(quitWatcher)
if err != nil {
return err
}
self.quitChannels = append(self.quitChannels, quitWatcher)
// Look for new containers in the main housekeeping thread.
quitGlobalHousekeeping := make(chan error)
self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
go self.globalHousekeeping(quitGlobalHousekeeping)
return nil
}
func (self *manager) Stop() error {
// Stop and wait on all quit channels.
for i, c := range self.quitChannels {
// Send the exit signal and wait on the thread to exit (by closing the channel).
c <- nil
err := <-c
if err != nil {
// Remove the channels that quit successfully.
self.quitChannels = self.quitChannels[i:]
return err
}
}
self.quitChannels = make([]chan error, 0, 2)
if self.loadReader != nil {
self.loadReader.Stop()
self.loadReader = nil
}
return nil
}
// Get a container by name.
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
cont, err := self.getContainerData(containerName)
if err != nil {
return nil, err
}
return self.containerDataToContainerInfo(cont)
}
func (self *manager) getContainerData(containerName string) (*containerData, error) {
var cont *containerData
var ok bool
func() {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Ensure we have the container.
cont, ok = self.containers[namespacedContainerName{
Name: containerName,
}]
}()
if !ok {
return nil, fmt.Errorf("unknown container %q", containerName)
}
return cont, nil
}
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
// Get the info from the container.
cinfo, err := cont.GetInfo()
if err != nil {
return nil, err
}
stats, err := cont.updateStats(false)
if err != nil {
return nil, err
}
// Make a copy of the info for the user.
ret := &info.ContainerInfo{
ContainerReference: cinfo.ContainerReference,
Subcontainers: cinfo.Subcontainers,
Spec: self.getAdjustedSpec(cinfo),
Stats: stats,
}
return ret, nil
}
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
spec := cinfo.Spec
// Set default value to an actual value
if spec.HasMemory {
// Memory.Limit is 0 means there's no limit
if spec.Memory.Limit == 0 {
spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
}
}
return spec
}
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
containers := self.getAllDockerContainers()
output := make(map[string]*info.ContainerInfo, len(containers))
for name, cont := range containers {
inf, err := self.containerDataToContainerInfo(cont)
if err != nil {
return nil, err
}
output[name] = inf
}
return output, nil
}
func (self *manager) getAllDockerContainers() map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containers := make(map[string]*containerData, len(self.containers))
// Get containers in the Docker namespace.
for name, cont := range self.containers {
if name.Namespace == docker.DockerNamespace {
containers[cont.info.Name] = cont
}
}
return containers
}
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
container, err := self.getDockerContainer(containerName)
if err != nil {
return &info.ContainerInfo{}, err
}
inf, err := self.containerDataToContainerInfo(container)
if err != nil {
return &info.ContainerInfo{}, err
}
return inf, nil
}
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Check for the container in the Docker container namespace.
cont, ok := self.containers[namespacedContainerName{
Namespace: docker.DockerNamespace,
Name: containerName,
}]
if !ok {
return nil, fmt.Errorf("unable to find Docker container %q", containerName)
}
return cont, nil
}
func (m *manager) Exists(containerName string) bool {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
_, ok := m.containers[namespacedName]
if ok {
return true
}
return false
}
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
// Copy and return the MachineInfo.
return &m.machineInfo, nil
}
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
return &m.versionInfo, nil
}
// can be called by the api which will take events returned on the channel
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return self.eventHandler.WatchEvents(request)
}
// can be called by the api which will return all events satisfying the request
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return self.eventHandler.GetEvents(request)
}
// called by the api when a client is no longer listening to the channel
func (self *manager) CloseEventChannel(watch_id int) |
func (m *manager) DockerInfo() (DockerStatus, error) {
info, err := docker.DockerInfo()
if err != nil {
return DockerStatus{}, err
}
out := DockerStatus{}
out.Version = m.versionInfo.DockerVersion
if val, ok := info["KernelVersion"]; ok {
out.KernelVersion = val
}
if val, ok := info["OperatingSystem"]; ok {
out.OS = val
}
if val, ok := info["Name"]; ok {
out.Hostname = val
}
if val, ok := info["DockerRootDir"]; ok {
out.RootDir = val
}
if val, ok := info["Driver"]; ok {
out.Driver = val
}
if val, ok := info["ExecutionDriver"]; ok {
out.ExecDriver = val
}
if val, ok := info["Images"]; ok {
n, err := strconv.Atoi(val)
if err == nil {
out.NumImages = n
}
}
if val, ok := info["Containers"]; ok {
n, err := strconv.Atoi(val)
if err == nil {
out.NumContainers = n
}
}
// cut, trim, cut - Example format:
// DriverStatus=[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirperm1 Supported","false"]]
if val, ok := info["DriverStatus"]; ok {
out.DriverStatus = make(map[string]string)
val = strings.TrimPrefix(val, "[[")
val = strings.TrimSuffix(val, "]]")
vals := strings.Split(val, "],[")
for _, v := range vals {
kv := strings.Split(v, "\",\"")
if len(kv) != 2 {
continue
} else {
out.DriverStatus[strings.Trim(kv[0], "\"")] = strings.Trim(kv[1], "\"")
}
}
}
return out, nil
}
func (m *manager) DockerImages() ([]DockerImage, error) {
images, err := docker.DockerImages()
if err != nil {
return nil, err
}
out := []DockerImage{}
const unknownTag = "<none>:<none>"
for _, image := range images {
if len(image.RepoTags) == 1 && image.RepoTags[0] == unknownTag {
// images with repo or tags are uninteresting.
continue
}
di := DockerImage{
ID: image.ID,
RepoTags: image.RepoTags,
Created: image.Created,
VirtualSize: image.VirtualSize,
Size: image.Size,
}
out = append(out, di)
}
return out, nil
}
func (self *manager) watchForNewOoms() error {
//log.Printf("[Info] Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New()
if err != nil {
return err
}
go oomLog.StreamOoms(outStream)
go func() {
for oomInstance := range outStream {
// Surface OOM and OOM kill events.
newEvent := &info.Event{
ContainerName: oomInstance.ContainerName,
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOom,
}
err := self.eventHandler.AddEvent(newEvent)
if err != nil {
log.Printf("[Error] failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
}
//log.Printf("[Info] Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath)
newEvent = &info.Event{
ContainerName: oomInstance.VictimContainerName,
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOomKill,
EventData: info.EventData{
OomKill: &info.OomKillEventData{
Pid: oomInstance.Pid,
ProcessName: oomInstance.ProcessName,
},
},
}
err = self.eventHandler.AddEvent(newEvent)
if err != nil {
log.Printf("[Error] failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
}
}
}()
return nil
}
// Create a container.
func (m *manager) createContainer(containerName string) error {
handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace)
if err != nil {
return err
}
if !accept {
// ignoring this container.
log.Printf("[Info] ignoring container %q", containerName)
return nil
}
cont, err := newContainerData(containerName, m.backendStorage, handler, m.loadReader, m.housekeepingInterval)
if err != nil {
return err
}
// Add to the containers map.
alreadyExists := func() bool {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
// Check that the container didn't already exist.
_, ok := m.containers[namespacedName]
if ok {
return true
}
// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
m.containers[namespacedName] = cont
for _, alias := range cont.info.Aliases {
m.containers[namespacedContainerName{
Namespace: cont.info.Namespace,
Name: alias,
}] = cont
}
return false
}()
if alreadyExists {
return nil
}
//log.Printf("[Info] Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
contSpec, err := cont.handler.GetSpec()
if err != nil {
return err
}
contRef, err := cont.handler.ContainerReference()
if err != nil {
return err
}
newEvent := &info.Event{
ContainerName: contRef.Name,
Timestamp: contSpec.CreationTime,
EventType: info.EventContainerCreation,
}
err = m.eventHandler.AddEvent(newEvent)
if err != nil {
return err
}
// Start the container's housekeeping.
cont.Start()
return nil
}
func (m *manager) destroyContainer(containerName string) error {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
cont, ok := m.containers[namespacedName]
if !ok {
// Already destroyed, done.
return nil
}
// Tell the container to stop.
err := cont.Stop()
if err != nil {
return err
}
// Remove the container from our records (and all its aliases).
delete(m.containers, namespacedName)
for _, alias := range cont.info.Aliases {
delete(m.containers, namespacedContainerName{
Namespace: cont.info.Namespace,
Name: alias,
})
}
//log.Printf("[Info] Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
contRef, err := cont.handler.ContainerReference()
if err != nil {
return err
}
newEvent := &info.Event{
ContainerName: contRef.Name,
Timestamp: time.Now(),
EventType: info.EventContainerDeletion,
}
err = m.eventHandler.AddEvent(newEvent)
if err != nil {
return err
}
return nil
}
// Detect all containers that have been added or deleted from the specified container.
func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
// Get all subcontainers recursively.
cont, ok := m.containers[namespacedContainerName{
Name: containerName,
}]
if !ok {
return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName)
}
allContainers, err := cont.handler.ListContainers(container.ListRecursive)
if err != nil {
return nil, nil, err
}
allContainers = append(allContainers, info.ContainerReference{Name: containerName})
// Determine which were added and which were removed.
allContainersSet := make(map[string]*containerData)
for name, d := range m.containers {
// Only add the canonical name.
if d.info.Name == name.Name {
allContainersSet[name.Name] = d
}
}
// Added containers
for _, c := range allContainers {
delete(allContainersSet, c.Name)
_, ok := m.containers[namespacedContainerName{
Name: c.Name,
}]
if !ok {
added = append(added, c)
}
}
// Removed ones are no longer in the container listing.
for _, d := range allContainersSet {
removed = append(removed, d.info.ContainerReference)
}
return
}
// Detect the existing subcontainers and reflect the setup here.
func (m *manager) detectSubcontainers(containerName string) error {
added, removed, err := m.getContainersDiff(containerName)
if err != nil {
return err
}
// Add the new containers.
for _, cont := range added {
err = m.createContainer(cont.Name)
if err != nil {
log.Printf("[Error] Failed to create existing container: %s: %s", cont.Name, err)
}
}
// Remove the old containers.
for _, cont := range removed {
err = m.destroyContainer(cont.Name)
if err != nil {
log.Printf("[Error] Failed to destroy existing container: %s: %s", cont.Name, err)
}
}
return nil
}
// Watches for new containers started in the system. Runs forever unless there is a setup error.
//
// Looks up the root container, subscribes to subcontainer add/delete events,
// runs one synchronous detection pass (to close the race between subscribing
// and creation), then processes events in a goroutine until quit is signalled.
func (self *manager) watchForNewContainers(quit chan error) error {
	var root *containerData
	var ok bool
	// The closure scopes the deferred unlock so the read lock is released
	// before we start watching.
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		root, ok = self.containers[namespacedContainerName{
			Name: "/",
		}]
	}()
	if !ok {
		return fmt.Errorf("[Error] Root container does not exist when watching for new containers")
	}
	// Register for new subcontainers.
	eventsChannel := make(chan container.SubcontainerEvent, 16)
	err := root.handler.WatchSubcontainers(eventsChannel)
	if err != nil {
		return err
	}
	// There is a race between starting the watch and new container creation so we do a detection before we read new containers.
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	// Listen to events from the container handler.
	go func() {
		for {
			select {
			case event := <-eventsChannel:
				switch {
				case event.EventType == container.SubcontainerAdd:
					err = self.createContainer(event.Name)
				case event.EventType == container.SubcontainerDelete:
					err = self.destroyContainer(event.Name)
				}
				// NOTE(review): `err` is the outer function's variable, shared
				// between setup and this goroutine — a local would be safer.
				if err != nil {
					log.Printf("[Error] Failed to process watch event: %v", err)
				}
			case <-quit:
				// Stop processing events if asked to quit; the result is sent
				// back to the requester on the same channel.
				err := root.handler.StopWatchingSubcontainers()
				quit <- err
				// NOTE(review): on StopWatching failure the goroutine keeps
				// looping instead of exiting — confirm this is intended.
				if err == nil {
					log.Printf("[Info] Exiting thread watching subcontainers")
					return
				}
			}
		}
	}()
	return nil
}
// globalHousekeeping periodically re-detects subcontainers under "/" until a
// value arrives on quit, then acknowledges with nil and returns.
func (self *manager) globalHousekeeping(quit chan error) {
	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if globalHousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = globalHousekeepingInterval / 2
	}
	// NOTE(review): time.Tick's underlying ticker is never stopped, so it
	// leaks after this goroutine returns; time.NewTicker + Stop would not.
	ticker := time.Tick(globalHousekeepingInterval)
	for {
		select {
		case <-ticker:
			start := time.Now()
			// Check for new containers.
			err := self.detectSubcontainers("/")
			if err != nil {
				log.Printf("[Error] Failed to detect containers: %s", err)
			}
			// Log if housekeeping took too long (logging currently disabled).
			duration := time.Since(start)
			if duration >= longHousekeeping {
				//log.Printf("[Info] Global Housekeeping(%d) took %s", t.Unix(), duration)
			}
		case <-quit:
			// Quit if asked to do so, acknowledging on the same channel.
			quit <- nil
			log.Printf("[Info] Exiting global housekeeping thread")
			return
		}
	}
}
| {
self.eventHandler.StopWatch(watch_id)
} | identifier_body |
manager.go | package monitor
import (
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"time"
"domeagent/monitor/container"
"domeagent/monitor/container/docker"
"domeagent/monitor/container/raw"
"domeagent/monitor/events"
"domeagent/monitor/fs"
"domeagent/monitor/info"
"domeagent/monitor/storage"
"domeagent/monitor/storage/influxdb"
"domeagent/monitor/utils/cpuload"
"domeagent/monitor/utils/oomparser"
"domeagent/monitor/utils/sysfs"
"github.com/docker/libcontainer/cgroups"
)
// globalHousekeepingInterval is how often the global housekeeping thread
// re-scans the machine for added or removed containers.
var globalHousekeepingInterval = 1 * time.Minute

// change enableLoadReader from true to false, to avoid "error failed to open cgroup path" error
var enableLoadReader = false
// The Manager interface defines operations for starting a manager and getting
// container and machine information & uploading to influxdb.
type Manager interface {
	// Start the manager. Calling other manager methods before this returns
	// may produce undefined behavior.
	Start() error
	// Stops the manager and its background goroutines.
	Stop() error
	// Get information about a container.
	GetContainerInfo(containerName string) (*info.ContainerInfo, error)
	// Gets all the Docker containers. Return is a map from full container name to ContainerInfo.
	AllDockerContainers() (map[string]*info.ContainerInfo, error)
	// Gets information about a specific Docker container. The specified name is within the Docker namespace.
	DockerContainer(containerName string) (*info.ContainerInfo, error)
	// Returns true if the named container exists.
	Exists(containerName string) bool
	// Get information about the machine.
	GetMachineInfo() (*info.MachineInfo, error)
	// Get version information about different components we depend on.
	GetVersionInfo() (*info.VersionInfo, error)
	// Get events streamed through passedChannel that fit the request.
	WatchForEvents(request *events.Request) (*events.EventChannel, error)
	// Get past events that have been detected and that fit the request.
	GetPastEvents(request *events.Request) ([]*info.Event, error)
	// Stop delivering events for the given watch id.
	CloseEventChannel(watch_id int)
	// Get status information about docker.
	DockerInfo() (DockerStatus, error)
	// Get details about interesting docker images.
	DockerImages() ([]DockerImage, error)
}
// DockerStatus mirrors selected fields of `docker info`; numeric fields are
// parsed from their string form and string fields are copied verbatim
// (see manager.DockerInfo).
type DockerStatus struct {
	Version       string            `json:"version"`
	KernelVersion string            `json:"kernel_version"`
	OS            string            `json:"os"`
	Hostname      string            `json:"hostname"`
	RootDir       string            `json:"root_dir"`
	Driver        string            `json:"driver"`
	DriverStatus  map[string]string `json:"driver_status"`
	ExecDriver    string            `json:"exec_driver"`
	NumImages     int               `json:"num_images"`
	NumContainers int               `json:"num_containers"`
}
// DockerImage describes one Docker image as reported by the docker package
// (sizes are in bytes, Created is a Unix timestamp — TODO confirm units).
type DockerImage struct {
	ID          string   `json:"id"`
	RepoTags    []string `json:"repo_tags"`
	Created     int64    `json:"created"`
	VirtualSize int64    `json:"virtual_size"`
	Size        int64    `json:"size"`
}
// InfluxConfig holds the influxdb connection settings consumed by New.
// NOTE(review): the json tag "dababase" looks like a typo for "database";
// renaming it would change the wire format, so confirm with existing
// producers/consumers before fixing.
type InfluxConfig struct {
	Table          string        `json:"table,omitempty"`
	Database       string        `json:"dababase,omitempty"`
	Username       string        `json:"username,omitempty"`
	Password       string        `json:"password,omitempty"`
	Host           string        `json:"host,omitempty"`
	BufferDuration time.Duration `json:"buffer_duration,omitempty"`
	FilterPrefix   string        `json:"filter_prefix,omitempty"`
}
// A namespaced container name. Used as the key of manager.containers; root
// ("/") and raw containers are keyed with an empty namespace.
type namespacedContainerName struct {
	// The namespace of the container. Can be empty for the root namespace.
	Namespace string
	// The name of the container in this namespace.
	Name string
}
// manager is the concrete Manager implementation.
type manager struct {
	// All tracked containers keyed by (namespace, name); alias keys share the
	// same *containerData value as the canonical name.
	containers     map[namespacedContainerName]*containerData
	containersLock sync.RWMutex // guards containers
	backendStorage storage.StorageDriver
	fsInfo         fs.FsInfo
	// machineInfo and versionInfo are collected once in New and served from
	// the cache thereafter.
	machineInfo info.MachineInfo
	versionInfo info.VersionInfo
	// Quit channels for the subcontainer watcher and global housekeeping
	// goroutines; drained by Stop.
	quitChannels         []chan error
	selfContainer        string
	loadReader           cpuload.CpuLoadReader
	eventHandler         events.EventManager
	startupTime          time.Time
	housekeepingInterval time.Duration
	inHostNamespace      bool
}
// New returns a new manager.
//
// Connects the influxdb storage backend, creates a sysfs interface, detects
// the cgroup this agent runs in, builds fs info from the Docker context, and
// caches machine/version info before wiring up the event handler. Any failure
// aborts construction, except that a Docker connection error is only logged.
func New(housekeepingInterval time.Duration, config *InfluxConfig) (Manager, error) {
	// Initialize influxdb
	hostname, err := os.Hostname() // Agent's host name
	if err != nil {
		return nil, err
	}
	influxdbStorage, err := influxdb.New(hostname,
		config.Table,
		config.Database,
		config.Username,
		config.Password,
		config.Host,
		config.BufferDuration,
		config.FilterPrefix)
	if err != nil {
		return nil, err
	}
	sysfs, err := sysfs.NewRealSysFs()
	if err != nil {
		log.Printf("[Error] Failed to create a system interface: %s", err)
		return nil, err
	}
	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	// Non-fatal: a Docker connection failure is logged and dockerInfo stays
	// at its zero value.
	dockerInfo, err := docker.DockerInfo()
	if err != nil {
		log.Printf("[Error] Unable to connect to Docker: %v", err)
	}
	context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}
	// If started with host's rootfs mounted, assume that its running
	// in its own namespaces.
	inHostNamespace := false
	if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
		inHostNamespace = true
	}
	newManager := &manager{
		containers:           make(map[namespacedContainerName]*containerData),
		backendStorage:       influxdbStorage,
		quitChannels:         make([]chan error, 0, 2),
		fsInfo:               fsInfo,
		selfContainer:        selfContainer,
		inHostNamespace:      inHostNamespace,
		startupTime:          time.Now(),
		housekeepingInterval: housekeepingInterval,
	}
	// Cache machine and version information once at startup.
	machineInfo, err := getMachineInfo(sysfs, fsInfo)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	newManager.versionInfo = *versionInfo
	newManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())
	return newManager, nil
}
// Start the container manager.
//
// Registers the Docker and raw container factories, optionally starts the CPU
// load reader, wires up OOM watching, creates the root container ("/"),
// recovers existing subcontainers, and launches the subcontainer watcher and
// global housekeeping goroutines (their quit channels are retained for Stop).
func (self *manager) Start() error {
	// Register Docker container factory.
	err := docker.Register(self, self.fsInfo)
	if err != nil {
		// Fix: log prefix was malformed as "{Error]".
		log.Printf("[Error] Docker container factory registration failed: %v.", err)
		return err
	}
	// Register the raw driver.
	err = raw.Register(self, self.fsInfo)
	if err != nil {
		log.Printf("[Error] Registration of the raw container factory failed: %v", err)
		return err
	}
	// Best-effort warm-up; results and errors are intentionally ignored here.
	self.DockerInfo()
	self.DockerImages()
	if enableLoadReader {
		// Create cpu load reader; failures are non-fatal and leave loadReader nil.
		cpuLoadReader, err := cpuload.New()
		if err != nil {
			log.Printf("[Error] Could not initialize cpu load reader: %s", err)
		} else {
			err = cpuLoadReader.Start()
			if err != nil {
				log.Printf("[Error] Could not start cpu load stat collector: %s", err)
			} else {
				self.loadReader = cpuLoadReader
			}
		}
	}
	// Watch for OOMs.
	err = self.watchForNewOoms()
	if err != nil {
		log.Printf("[Error] Could not configure a source for OOM detection, disabling OOM events: %v", err)
	}
	// If there are no factories, don't start any housekeeping and serve the information we do have.
	if !container.HasFactories() {
		return nil
	}
	// Create root and then recover all containers.
	err = self.createContainer("/")
	if err != nil {
		return err
	}
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	// Watch for new containers.
	quitWatcher := make(chan error)
	err = self.watchForNewContainers(quitWatcher)
	if err != nil {
		return err
	}
	self.quitChannels = append(self.quitChannels, quitWatcher)
	// Look for new containers in the main housekeeping thread.
	quitGlobalHousekeeping := make(chan error)
	self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
	go self.globalHousekeeping(quitGlobalHousekeeping)
	return nil
}
// Stop terminates all background goroutines started by Start and shuts down
// the load reader. On the first failure it returns immediately; channels that
// already quit are dropped so a retry only re-signals the remaining ones.
func (self *manager) Stop() error {
	// Stop and wait on all quit channels.
	for i, c := range self.quitChannels {
		// Send the exit signal and wait for the goroutine to acknowledge on
		// the same channel.
		c <- nil
		err := <-c
		if err != nil {
			// Remove the channels that quit successfully (indices < i); the
			// failing channel and any not yet signalled are kept for retry.
			self.quitChannels = self.quitChannels[i:]
			return err
		}
	}
	self.quitChannels = make([]chan error, 0, 2)
	if self.loadReader != nil {
		self.loadReader.Stop()
		self.loadReader = nil
	}
	return nil
}
// Get a container by name, returning its full info (spec + fresh stats).
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
	data, lookupErr := self.getContainerData(containerName)
	if lookupErr != nil {
		return nil, lookupErr
	}
	return self.containerDataToContainerInfo(data)
}
// getContainerData fetches the tracked state for a container in the root
// namespace, or an error if it is unknown.
func (self *manager) getContainerData(containerName string) (*containerData, error) {
	key := namespacedContainerName{Name: containerName}
	// Map lookup cannot panic, so an explicit unlock (no closure/defer) is safe.
	self.containersLock.RLock()
	cont, ok := self.containers[key]
	self.containersLock.RUnlock()
	if !ok {
		return nil, fmt.Errorf("unknown container %q", containerName)
	}
	return cont, nil
}
// containerDataToContainerInfo converts tracked container state into the
// public ContainerInfo form, refreshing stats first.
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
	// Get the info from the container.
	cinfo, err := cont.GetInfo()
	if err != nil {
		return nil, err
	}
	stats, err := cont.updateStats(false)
	if err != nil {
		return nil, err
	}
	// Make a copy of the info for the user; the spec is adjusted so defaults
	// (e.g. a zero memory limit) become concrete values.
	ret := &info.ContainerInfo{
		ContainerReference: cinfo.ContainerReference,
		Subcontainers:      cinfo.Subcontainers,
		Spec:               self.getAdjustedSpec(cinfo),
		Stats:              stats,
	}
	return ret, nil
}
// getAdjustedSpec returns the container spec with defaults resolved: a
// memory limit of 0 (meaning "no limit") is replaced by machine capacity.
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
	spec := cinfo.Spec
	if spec.HasMemory && spec.Memory.Limit == 0 {
		spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
	}
	return spec
}
// AllDockerContainers returns every Docker-namespace container, keyed by full
// container name; the first conversion error aborts the whole call.
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
	dockerContainers := self.getAllDockerContainers()
	result := make(map[string]*info.ContainerInfo, len(dockerContainers))
	for name, data := range dockerContainers {
		converted, convErr := self.containerDataToContainerInfo(data)
		if convErr != nil {
			return nil, convErr
		}
		result[name] = converted
	}
	return result, nil
}
// getAllDockerContainers snapshots, under the read lock, all containers
// registered in the Docker namespace, keyed by canonical container name.
func (self *manager) getAllDockerContainers() map[string]*containerData {
	self.containersLock.RLock()
	defer self.containersLock.RUnlock()
	result := make(map[string]*containerData, len(self.containers))
	for key, data := range self.containers {
		if key.Namespace == docker.DockerNamespace {
			result[data.info.Name] = data
		}
	}
	return result
}
// DockerContainer returns info for one container named within the Docker
// namespace; on any failure an empty ContainerInfo is returned with the error.
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
	data, lookupErr := self.getDockerContainer(containerName)
	if lookupErr != nil {
		return &info.ContainerInfo{}, lookupErr
	}
	converted, convErr := self.containerDataToContainerInfo(data)
	if convErr != nil {
		return &info.ContainerInfo{}, convErr
	}
	return converted, nil
}
// getDockerContainer looks a container up in the Docker namespace under the
// read lock.
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
	self.containersLock.RLock()
	defer self.containersLock.RUnlock()
	// Check for the container in the Docker container namespace.
	cont, ok := self.containers[namespacedContainerName{
		Namespace: docker.DockerNamespace,
		Name:      containerName,
	}]
	if !ok {
		return nil, fmt.Errorf("unable to find Docker container %q", containerName)
	}
	return cont, nil
}
func (m *manager) Exists(containerName string) bool {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
_, ok := m.containers[namespacedName]
if ok |
return false
}
// GetMachineInfo returns the machine information cached at construction.
// NOTE(review): despite the original "Copy and return" comment, this returns
// a pointer to the manager's own struct — callers must not mutate it.
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
	return &m.machineInfo, nil
}
// GetVersionInfo returns a pointer to the version info cached at
// construction; callers must not mutate it.
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
	return &m.versionInfo, nil
}
// WatchForEvents can be called by the API; it registers the request with the
// event handler, which streams matching events on the returned channel.
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
	return self.eventHandler.WatchEvents(request)
}
// GetPastEvents can be called by the API; it returns all stored events
// satisfying the request.
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
	return self.eventHandler.GetEvents(request)
}
// CloseEventChannel is called by the API when a client is no longer listening
// to the channel; it stops the watch with the given id.
func (self *manager) CloseEventChannel(watch_id int) {
	self.eventHandler.StopWatch(watch_id)
}
// DockerInfo fetches `docker info` key/value strings and maps the fields of
// interest onto a DockerStatus. Missing keys leave zero values; numeric
// fields that fail to parse are silently left at zero.
func (m *manager) DockerInfo() (DockerStatus, error) {
	// NOTE(review): the local `info` shadows the imported info package within
	// this function body.
	info, err := docker.DockerInfo()
	if err != nil {
		return DockerStatus{}, err
	}
	out := DockerStatus{}
	out.Version = m.versionInfo.DockerVersion
	if val, ok := info["KernelVersion"]; ok {
		out.KernelVersion = val
	}
	if val, ok := info["OperatingSystem"]; ok {
		out.OS = val
	}
	if val, ok := info["Name"]; ok {
		out.Hostname = val
	}
	if val, ok := info["DockerRootDir"]; ok {
		out.RootDir = val
	}
	if val, ok := info["Driver"]; ok {
		out.Driver = val
	}
	if val, ok := info["ExecutionDriver"]; ok {
		out.ExecDriver = val
	}
	// Counts arrive as strings; parse failures leave the zero value.
	if val, ok := info["Images"]; ok {
		n, err := strconv.Atoi(val)
		if err == nil {
			out.NumImages = n
		}
	}
	if val, ok := info["Containers"]; ok {
		n, err := strconv.Atoi(val)
		if err == nil {
			out.NumContainers = n
		}
	}
	// cut, trim, cut - Example format:
	// DriverStatus=[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirperm1 Supported","false"]]
	if val, ok := info["DriverStatus"]; ok {
		out.DriverStatus = make(map[string]string)
		val = strings.TrimPrefix(val, "[[")
		val = strings.TrimSuffix(val, "]]")
		vals := strings.Split(val, "],[")
		for _, v := range vals {
			// Each element should look like `"key","value"`; anything else is skipped.
			kv := strings.Split(v, "\",\"")
			if len(kv) != 2 {
				continue
			} else {
				out.DriverStatus[strings.Trim(kv[0], "\"")] = strings.Trim(kv[1], "\"")
			}
		}
	}
	return out, nil
}
// DockerImages lists Docker images, skipping fully untagged ones.
func (m *manager) DockerImages() ([]DockerImage, error) {
	images, err := docker.DockerImages()
	if err != nil {
		return nil, err
	}
	out := []DockerImage{}
	const unknownTag = "<none>:<none>"
	for _, image := range images {
		if len(image.RepoTags) == 1 && image.RepoTags[0] == unknownTag {
			// images WITHOUT repo tags are uninteresting (original comment
			// said "with", which contradicts the check above).
			continue
		}
		di := DockerImage{
			ID:          image.ID,
			RepoTags:    image.RepoTags,
			Created:     image.Created,
			VirtualSize: image.VirtualSize,
			Size:        image.Size,
		}
		out = append(out, di)
	}
	return out, nil
}
// watchForNewOoms starts streaming OOM notifications from the oomparser and,
// for each instance, records an EventOom for the container plus an
// EventOomKill for the victim container with the killed process's details.
func (self *manager) watchForNewOoms() error {
	outStream := make(chan *oomparser.OomInstance, 10)
	oomLog, err := oomparser.New()
	if err != nil {
		return err
	}
	go oomLog.StreamOoms(outStream)
	go func() {
		for oomInstance := range outStream {
			// Surface OOM and OOM kill events.
			newEvent := &info.Event{
				ContainerName: oomInstance.ContainerName,
				Timestamp:     oomInstance.TimeOfDeath,
				EventType:     info.EventOom,
			}
			err := self.eventHandler.AddEvent(newEvent)
			if err != nil {
				log.Printf("[Error] failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
			}
			newEvent = &info.Event{
				ContainerName: oomInstance.VictimContainerName,
				Timestamp:     oomInstance.TimeOfDeath,
				EventType:     info.EventOomKill,
				EventData: info.EventData{
					OomKill: &info.OomKillEventData{
						Pid:         oomInstance.Pid,
						ProcessName: oomInstance.ProcessName,
					},
				},
			}
			err = self.eventHandler.AddEvent(newEvent)
			if err != nil {
				// NOTE(review): this log line reports ContainerName although
				// the event was built for VictimContainerName — confirm intent.
				log.Printf("[Error] failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
			}
		}
	}()
	return nil
}
// Create a container.
//
// Builds a handler for containerName, registers the container (and all of its
// aliases) in the map unless it already exists, emits a creation event, and
// starts the container's housekeeping.
func (m *manager) createContainer(containerName string) error {
	handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace)
	if err != nil {
		return err
	}
	if !accept {
		// The factory declined this container; skip it without error.
		log.Printf("[Info] ignoring container %q", containerName)
		return nil
	}
	cont, err := newContainerData(containerName, m.backendStorage, handler, m.loadReader, m.housekeepingInterval)
	if err != nil {
		return err
	}
	// Add to the containers map. The closure bounds the write-lock scope so
	// the spec/event work below runs unlocked.
	alreadyExists := func() bool {
		m.containersLock.Lock()
		defer m.containersLock.Unlock()
		namespacedName := namespacedContainerName{
			Name: containerName,
		}
		// Check that the container didn't already exist.
		_, ok := m.containers[namespacedName]
		if ok {
			return true
		}
		// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
		m.containers[namespacedName] = cont
		for _, alias := range cont.info.Aliases {
			m.containers[namespacedContainerName{
				Namespace: cont.info.Namespace,
				Name:      alias,
			}] = cont
		}
		return false
	}()
	if alreadyExists {
		// Registered concurrently by someone else; nothing more to do.
		return nil
	}
	contSpec, err := cont.handler.GetSpec()
	if err != nil {
		return err
	}
	contRef, err := cont.handler.ContainerReference()
	if err != nil {
		return err
	}
	// Announce the creation, timestamped with the container's creation time.
	newEvent := &info.Event{
		ContainerName: contRef.Name,
		Timestamp:     contSpec.CreationTime,
		EventType:     info.EventContainerCreation,
	}
	err = m.eventHandler.AddEvent(newEvent)
	if err != nil {
		return err
	}
	// Start the container's housekeeping.
	cont.Start()
	return nil
}
// destroyContainer stops a tracked container, removes it (and all of its
// aliases) from the map, and emits a deletion event. Destroying an untracked
// container is a no-op. The write lock is held for the whole operation.
func (m *manager) destroyContainer(containerName string) error {
	m.containersLock.Lock()
	defer m.containersLock.Unlock()
	namespacedName := namespacedContainerName{
		Name: containerName,
	}
	cont, ok := m.containers[namespacedName]
	if !ok {
		// Already destroyed, done.
		return nil
	}
	// Tell the container to stop.
	err := cont.Stop()
	if err != nil {
		return err
	}
	// Remove the container from our records (and all its aliases).
	delete(m.containers, namespacedName)
	for _, alias := range cont.info.Aliases {
		delete(m.containers, namespacedContainerName{
			Namespace: cont.info.Namespace,
			Name:      alias,
		})
	}
	contRef, err := cont.handler.ContainerReference()
	if err != nil {
		return err
	}
	// Deletion events are timestamped with the current time.
	newEvent := &info.Event{
		ContainerName: contRef.Name,
		Timestamp:     time.Now(),
		EventType:     info.EventContainerDeletion,
	}
	err = m.eventHandler.AddEvent(newEvent)
	if err != nil {
		return err
	}
	return nil
}
// Detect all containers that have been added or deleted from the specified container.
//
// Returns the subcontainers listed by the handler but not yet tracked (added)
// and those tracked but no longer listed (removed). The containers read lock
// is held for the whole diff so the snapshot is consistent.
func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) {
	m.containersLock.RLock()
	defer m.containersLock.RUnlock()
	// Get all subcontainers recursively.
	cont, ok := m.containers[namespacedContainerName{
		Name: containerName,
	}]
	if !ok {
		return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName)
	}
	allContainers, err := cont.handler.ListContainers(container.ListRecursive)
	if err != nil {
		return nil, nil, err
	}
	// Append the starting container itself so the loop below deletes it from
	// allContainersSet and it is never classified as removed.
	allContainers = append(allContainers, info.ContainerReference{Name: containerName})
	// Determine which were added and which were removed.
	allContainersSet := make(map[string]*containerData)
	for name, d := range m.containers {
		// Only add the canonical name; alias keys map to the same *containerData.
		if d.info.Name == name.Name {
			allContainersSet[name.Name] = d
		}
	}
	// Added containers: listed by the handler but absent from our map.
	for _, c := range allContainers {
		// Seen in the listing, so it is not a removal candidate.
		delete(allContainersSet, c.Name)
		_, ok := m.containers[namespacedContainerName{
			Name: c.Name,
		}]
		if !ok {
			added = append(added, c)
		}
	}
	// Removed ones are no longer in the container listing.
	for _, d := range allContainersSet {
		removed = append(removed, d.info.ContainerReference)
	}
	return
}
// Detect the existing subcontainers and reflect the setup here.
// Failures on individual containers are logged and skipped so one bad
// container does not block processing of the rest.
func (m *manager) detectSubcontainers(containerName string) error {
	added, removed, diffErr := m.getContainersDiff(containerName)
	if diffErr != nil {
		return diffErr
	}
	// Register every newly discovered container.
	for _, ref := range added {
		if createErr := m.createContainer(ref.Name); createErr != nil {
			log.Printf("[Error] Failed to create existing container: %s: %s", ref.Name, createErr)
		}
	}
	// Drop containers that have disappeared from the machine.
	for _, ref := range removed {
		if destroyErr := m.destroyContainer(ref.Name); destroyErr != nil {
			log.Printf("[Error] Failed to destroy existing container: %s: %s", ref.Name, destroyErr)
		}
	}
	return nil
}
// Watches for new containers started in the system. Runs forever unless there is a setup error.
//
// Looks up the root container, subscribes to subcontainer add/delete events,
// runs one synchronous detection pass (to close the race between subscribing
// and creation), then processes events in a goroutine until quit is signalled.
func (self *manager) watchForNewContainers(quit chan error) error {
	var root *containerData
	var ok bool
	// The closure scopes the deferred unlock so the read lock is released
	// before we start watching.
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		root, ok = self.containers[namespacedContainerName{
			Name: "/",
		}]
	}()
	if !ok {
		return fmt.Errorf("[Error] Root container does not exist when watching for new containers")
	}
	// Register for new subcontainers.
	eventsChannel := make(chan container.SubcontainerEvent, 16)
	err := root.handler.WatchSubcontainers(eventsChannel)
	if err != nil {
		return err
	}
	// There is a race between starting the watch and new container creation so we do a detection before we read new containers.
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	// Listen to events from the container handler.
	go func() {
		for {
			select {
			case event := <-eventsChannel:
				switch {
				case event.EventType == container.SubcontainerAdd:
					err = self.createContainer(event.Name)
				case event.EventType == container.SubcontainerDelete:
					err = self.destroyContainer(event.Name)
				}
				// NOTE(review): `err` is the outer function's variable, shared
				// between setup and this goroutine — a local would be safer.
				if err != nil {
					log.Printf("[Error] Failed to process watch event: %v", err)
				}
			case <-quit:
				// Stop processing events if asked to quit; the result is sent
				// back to the requester on the same channel.
				err := root.handler.StopWatchingSubcontainers()
				quit <- err
				// NOTE(review): on StopWatching failure the goroutine keeps
				// looping instead of exiting — confirm this is intended.
				if err == nil {
					log.Printf("[Info] Exiting thread watching subcontainers")
					return
				}
			}
		}
	}()
	return nil
}
// globalHousekeeping periodically re-detects subcontainers under "/" until a
// value arrives on quit, then acknowledges with nil and returns.
func (self *manager) globalHousekeeping(quit chan error) {
	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if globalHousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = globalHousekeepingInterval / 2
	}
	// NOTE(review): time.Tick's underlying ticker is never stopped, so it
	// leaks after this goroutine returns; time.NewTicker + Stop would not.
	ticker := time.Tick(globalHousekeepingInterval)
	for {
		select {
		case <-ticker:
			start := time.Now()
			// Check for new containers.
			err := self.detectSubcontainers("/")
			if err != nil {
				log.Printf("[Error] Failed to detect containers: %s", err)
			}
			// Log if housekeeping took too long (logging currently disabled).
			duration := time.Since(start)
			if duration >= longHousekeeping {
				//log.Printf("[Info] Global Housekeeping(%d) took %s", t.Unix(), duration)
			}
		case <-quit:
			// Quit if asked to do so, acknowledging on the same channel.
			quit <- nil
			log.Printf("[Info] Exiting global housekeeping thread")
			return
		}
	}
}
| {
return true
} | conditional_block |
manager.go | package monitor
import (
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"time"
"domeagent/monitor/container"
"domeagent/monitor/container/docker"
"domeagent/monitor/container/raw"
"domeagent/monitor/events"
"domeagent/monitor/fs"
"domeagent/monitor/info"
"domeagent/monitor/storage"
"domeagent/monitor/storage/influxdb"
"domeagent/monitor/utils/cpuload"
"domeagent/monitor/utils/oomparser"
"domeagent/monitor/utils/sysfs"
"github.com/docker/libcontainer/cgroups"
)
var globalHousekeepingInterval = 1 * time.Minute
// change enableLoadReader from true to false, to avoid "error failed to open cgroup path" error
var enableLoadReader = false
// The Manager interface defines operations for starting a manager and getting
// container and machine information & uploading to influxdb
type Manager interface {
// Start the manager. Calling other manager methods before this returns
// may produce undefined behavior.
Start() error
// Stops the manager.
Stop() error
// Get information about a container.
GetContainerInfo(containerName string) (*info.ContainerInfo, error)
// Gets all the Docker containers. Return is a map from full container name to ContainerInfo.
AllDockerContainers() (map[string]*info.ContainerInfo, error)
// Gets information about a specific Docker container. The specified name is within the Docker namespace.
DockerContainer(containerName string) (*info.ContainerInfo, error)
// Returns true if the named container exists.
Exists(containerName string) bool
// Get information about the machine.
GetMachineInfo() (*info.MachineInfo, error)
// Get version information about different components we depend on.
GetVersionInfo() (*info.VersionInfo, error)
// Get events streamed through passedChannel that fit the request.
WatchForEvents(request *events.Request) (*events.EventChannel, error)
// Get past events that have been detected and that fit the request.
GetPastEvents(request *events.Request) ([]*info.Event, error)
CloseEventChannel(watch_id int)
// Get status information about docker.
DockerInfo() (DockerStatus, error)
// Get details about interesting docker images.
DockerImages() ([]DockerImage, error)
}
type DockerStatus struct {
Version string `json:"version"`
KernelVersion string `json:"kernel_version"`
OS string `json:"os"`
Hostname string `json:"hostname"`
RootDir string `json:"root_dir"`
Driver string `json:"driver"`
DriverStatus map[string]string `json:"driver_status"`
ExecDriver string `json:"exec_driver"`
NumImages int `json:"num_images"`
NumContainers int `json:"num_containers"`
}
type DockerImage struct {
ID string `json:"id"`
RepoTags []string `json:"repo_tags"`
Created int64 `json:"created"`
VirtualSize int64 `json:"virtual_size"`
Size int64 `json:"size"`
}
type InfluxConfig struct {
Table string `json:"table,omitempty"`
Database string `json:"dababase,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Host string `json:"host,omitempty"`
BufferDuration time.Duration `json:"buffer_duration,omitempty"`
FilterPrefix string `json:"filter_prefix,omitempty"`
}
// A namespaced container name.
type namespacedContainerName struct {
// The namespace of the container. Can be empty for the root namespace.
Namespace string
// The name of the container in this namespace.
Name string
}
type manager struct {
containers map[namespacedContainerName]*containerData
containersLock sync.RWMutex
backendStorage storage.StorageDriver
fsInfo fs.FsInfo
machineInfo info.MachineInfo
versionInfo info.VersionInfo
quitChannels []chan error
selfContainer string
loadReader cpuload.CpuLoadReader
eventHandler events.EventManager
startupTime time.Time
housekeepingInterval time.Duration
inHostNamespace bool
}
// New returns a new manager.
func New(housekeepingInterval time.Duration, config *InfluxConfig) (Manager, error) {
// Initialize influxdb
hostname, err := os.Hostname() // Agent's host name
if err != nil {
return nil, err
}
influxdbStorage, err := influxdb.New(hostname,
config.Table,
config.Database,
config.Username,
config.Password,
config.Host,
config.BufferDuration,
config.FilterPrefix)
if err != nil {
return nil, err
}
//log.Printf("[Info] Connected to influxdb on: %q", config.Host)
sysfs, err := sysfs.NewRealSysFs()
if err != nil {
log.Printf("[Error] Failed to create a system interface: %s", err)
return nil, err
}
//log.Printf("[Info] Created a system interface)
// Detect the container we are running on.
selfContainer, err := cgroups.GetThisCgroupDir("cpu")
if err != nil {
return nil, err
}
//log.Printf("[Info] Running in container: %q", selfContainer)
dockerInfo, err := docker.DockerInfo()
if err != nil {
log.Printf("[Error] Unable to connect to Docker: %v", err)
}
context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
fsInfo, err := fs.NewFsInfo(context)
if err != nil {
return nil, err
}
// If started with host's rootfs mounted, assume that its running
// in its own namespaces.
inHostNamespace := false
if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
inHostNamespace = true
}
newManager := &manager{
containers: make(map[namespacedContainerName]*containerData),
backendStorage: influxdbStorage,
quitChannels: make([]chan error, 0, 2),
fsInfo: fsInfo,
selfContainer: selfContainer,
inHostNamespace: inHostNamespace,
startupTime: time.Now(),
housekeepingInterval: housekeepingInterval,
}
machineInfo, err := getMachineInfo(sysfs, fsInfo)
if err != nil {
return nil, err
}
newManager.machineInfo = *machineInfo
//log.Printf("[Info] Machine: %+v", newManager.machineInfo)
versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
}
newManager.versionInfo = *versionInfo
//log.Printf("[Info] Version: %+v", newManager.versionInfo)
newManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())
return newManager, nil
}
// Start the container manager.
//
// Registers the Docker and raw container factories, optionally starts the CPU
// load reader, wires up OOM watching, creates the root container ("/"),
// recovers existing subcontainers, and launches the subcontainer watcher and
// global housekeeping goroutines (their quit channels are retained for Stop).
func (self *manager) Start() error {
	// Register Docker container factory.
	err := docker.Register(self, self.fsInfo)
	if err != nil {
		// Fix: log prefix was malformed as "{Error]".
		log.Printf("[Error] Docker container factory registration failed: %v.", err)
		return err
	}
	// Register the raw driver.
	err = raw.Register(self, self.fsInfo)
	if err != nil {
		log.Printf("[Error] Registration of the raw container factory failed: %v", err)
		return err
	}
	// Best-effort warm-up; results and errors are intentionally ignored here.
	self.DockerInfo()
	self.DockerImages()
	if enableLoadReader {
		// Create cpu load reader; failures are non-fatal and leave loadReader nil.
		cpuLoadReader, err := cpuload.New()
		if err != nil {
			log.Printf("[Error] Could not initialize cpu load reader: %s", err)
		} else {
			err = cpuLoadReader.Start()
			if err != nil {
				log.Printf("[Error] Could not start cpu load stat collector: %s", err)
			} else {
				self.loadReader = cpuLoadReader
			}
		}
	}
	// Watch for OOMs.
	err = self.watchForNewOoms()
	if err != nil {
		log.Printf("[Error] Could not configure a source for OOM detection, disabling OOM events: %v", err)
	}
	// If there are no factories, don't start any housekeeping and serve the information we do have.
	if !container.HasFactories() {
		return nil
	}
	// Create root and then recover all containers.
	err = self.createContainer("/")
	if err != nil {
		return err
	}
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	// Watch for new containers.
	quitWatcher := make(chan error)
	err = self.watchForNewContainers(quitWatcher)
	if err != nil {
		return err
	}
	self.quitChannels = append(self.quitChannels, quitWatcher)
	// Look for new containers in the main housekeeping thread.
	quitGlobalHousekeeping := make(chan error)
	self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
	go self.globalHousekeeping(quitGlobalHousekeeping)
	return nil
}
// Stop terminates all background goroutines started by Start and shuts down
// the load reader. On the first failure it returns immediately; channels that
// already quit are dropped so a retry only re-signals the remaining ones.
func (self *manager) Stop() error {
	// Stop and wait on all quit channels.
	for i, c := range self.quitChannels {
		// Send the exit signal and wait for the goroutine to acknowledge on
		// the same channel.
		c <- nil
		err := <-c
		if err != nil {
			// Remove the channels that quit successfully (indices < i); the
			// failing channel and any not yet signalled are kept for retry.
			self.quitChannels = self.quitChannels[i:]
			return err
		}
	}
	self.quitChannels = make([]chan error, 0, 2)
	if self.loadReader != nil {
		self.loadReader.Stop()
		self.loadReader = nil
	}
	return nil
}
// GetContainerInfo returns user-facing info (spec plus refreshed stats) for
// the container registered under containerName in the root namespace.
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
    cont, err := self.getContainerData(containerName)
    if err != nil {
        return nil, err
    }
    return self.containerDataToContainerInfo(cont)
}
// getContainerData looks up the raw containerData registered under
// containerName (root namespace), or returns an error if it is unknown.
func (self *manager) getContainerData(containerName string) (*containerData, error) {
    key := namespacedContainerName{Name: containerName}
    // Hold the read lock only around the map access.
    self.containersLock.RLock()
    cont, ok := self.containers[key]
    self.containersLock.RUnlock()
    if !ok {
        return nil, fmt.Errorf("unknown container %q", containerName)
    }
    return cont, nil
}
// containerDataToContainerInfo converts internal containerData into a
// user-facing info.ContainerInfo, refreshing stats and adjusting the spec.
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
    // Get the info from the container.
    cinfo, err := cont.GetInfo()
    if err != nil {
        return nil, err
    }
    // Refresh the stats; NOTE(review): the meaning of the boolean flag is
    // defined by containerData.updateStats — confirm there.
    stats, err := cont.updateStats(false)
    if err != nil {
        return nil, err
    }
    // Make a copy of the info for the user.
    ret := &info.ContainerInfo{
        ContainerReference: cinfo.ContainerReference,
        Subcontainers: cinfo.Subcontainers,
        Spec: self.getAdjustedSpec(cinfo),
        Stats: stats,
    }
    return ret, nil
}
// getAdjustedSpec returns the container's spec with defaults resolved to
// concrete values for presentation.
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
    spec := cinfo.Spec
    // A memory limit of 0 means "no limit"; surface the machine's total
    // memory capacity instead.
    if spec.HasMemory && spec.Memory.Limit == 0 {
        spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
    }
    return spec
}
// AllDockerContainers returns every Docker-namespace container as a map
// from full container name to its user-facing info.
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
    dockerContainers := self.getAllDockerContainers()
    result := make(map[string]*info.ContainerInfo, len(dockerContainers))
    for name, data := range dockerContainers {
        converted, err := self.containerDataToContainerInfo(data)
        if err != nil {
            return nil, err
        }
        result[name] = converted
    }
    return result, nil
}
func (self *manager) | () map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containers := make(map[string]*containerData, len(self.containers))
// Get containers in the Docker namespace.
for name, cont := range self.containers {
if name.Namespace == docker.DockerNamespace {
containers[cont.info.Name] = cont
}
}
return containers
}
// DockerContainer returns user-facing info for one container in the Docker
// namespace. On failure it returns an empty (non-nil) ContainerInfo and
// the error.
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
    data, err := self.getDockerContainer(containerName)
    if err != nil {
        return &info.ContainerInfo{}, err
    }
    converted, err := self.containerDataToContainerInfo(data)
    if err != nil {
        return &info.ContainerInfo{}, err
    }
    return converted, nil
}
// getDockerContainer looks up a container within the Docker namespace.
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
    key := namespacedContainerName{
        Namespace: docker.DockerNamespace,
        Name: containerName,
    }
    self.containersLock.RLock()
    defer self.containersLock.RUnlock()
    if cont, ok := self.containers[key]; ok {
        return cont, nil
    }
    return nil, fmt.Errorf("unable to find Docker container %q", containerName)
}
// Exists reports whether a container is registered under containerName in
// the root namespace.
func (m *manager) Exists(containerName string) bool {
    // A read lock suffices — this method only reads the containers map —
    // and avoids blocking concurrent readers the way the previous
    // exclusive Lock did.
    m.containersLock.RLock()
    defer m.containersLock.RUnlock()
    namespacedName := namespacedContainerName{
        Name: containerName,
    }
    _, ok := m.containers[namespacedName]
    return ok
}
// GetMachineInfo returns a copy of the machine information.
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
    // Actually copy the MachineInfo before handing it out: the previous
    // code returned a pointer into the manager, contradicting its own
    // "copy and return" comment and letting callers mutate shared state.
    // Note this is a shallow copy; any reference-typed fields are shared.
    machineInfo := m.machineInfo
    return &machineInfo, nil
}
// GetVersionInfo returns a copy of the version information, mirroring the
// copy-out behavior of GetMachineInfo so callers cannot mutate shared
// manager state through the returned pointer.
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
    versionInfo := m.versionInfo
    return &versionInfo, nil
}
// WatchForEvents registers an event watch; events matching request are
// delivered on the returned channel. Can be called by the api, which will
// take events returned on the channel.
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
    return self.eventHandler.WatchEvents(request)
}
// GetPastEvents returns all previously detected events satisfying the
// request. Can be called by the api.
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
    return self.eventHandler.GetEvents(request)
}
// CloseEventChannel tears down the watch identified by watch_id; called by
// the api when a client is no longer listening to the channel.
func (self *manager) CloseEventChannel(watch_id int) {
    self.eventHandler.StopWatch(watch_id)
}
// DockerInfo queries the Docker daemon and normalizes selected fields of
// its string-keyed response into a DockerStatus. Fields that are absent or
// fail to parse are silently left at their zero values.
func (m *manager) DockerInfo() (DockerStatus, error) {
    info, err := docker.DockerInfo()
    if err != nil {
        return DockerStatus{}, err
    }
    out := DockerStatus{}
    out.Version = m.versionInfo.DockerVersion
    if val, ok := info["KernelVersion"]; ok {
        out.KernelVersion = val
    }
    if val, ok := info["OperatingSystem"]; ok {
        out.OS = val
    }
    if val, ok := info["Name"]; ok {
        out.Hostname = val
    }
    if val, ok := info["DockerRootDir"]; ok {
        out.RootDir = val
    }
    if val, ok := info["Driver"]; ok {
        out.Driver = val
    }
    if val, ok := info["ExecutionDriver"]; ok {
        out.ExecDriver = val
    }
    if val, ok := info["Images"]; ok {
        n, err := strconv.Atoi(val)
        if err == nil {
            out.NumImages = n
        }
    }
    if val, ok := info["Containers"]; ok {
        n, err := strconv.Atoi(val)
        if err == nil {
            out.NumContainers = n
        }
    }
    // cut, trim, cut - Example format:
    // DriverStatus=[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirperm1 Supported","false"]]
    // Pairs that do not split into exactly key and value are skipped.
    if val, ok := info["DriverStatus"]; ok {
        out.DriverStatus = make(map[string]string)
        val = strings.TrimPrefix(val, "[[")
        val = strings.TrimSuffix(val, "]]")
        vals := strings.Split(val, "],[")
        for _, v := range vals {
            kv := strings.Split(v, "\",\"")
            if len(kv) != 2 {
                continue
            } else {
                out.DriverStatus[strings.Trim(kv[0], "\"")] = strings.Trim(kv[1], "\"")
            }
        }
    }
    return out, nil
}
// DockerImages lists the Docker images on this host, skipping untagged
// ("<none>:<none>") images, and converts them to DockerImage values.
func (m *manager) DockerImages() ([]DockerImage, error) {
    const unknownTag = "<none>:<none>"
    images, err := docker.DockerImages()
    if err != nil {
        return nil, err
    }
    out := []DockerImage{}
    for _, image := range images {
        // Images without a repo or tag are uninteresting.
        if len(image.RepoTags) == 1 && image.RepoTags[0] == unknownTag {
            continue
        }
        out = append(out, DockerImage{
            ID: image.ID,
            RepoTags: image.RepoTags,
            Created: image.Created,
            VirtualSize: image.VirtualSize,
            Size: image.Size,
        })
    }
    return out, nil
}
// watchForNewOoms starts streaming kernel OOM occurrences from oomparser
// and, in a background goroutine, converts each occurrence into two events
// on the event handler: an EventOom for the container that hit its limit
// and an EventOomKill for the victim process. Event-add failures are
// logged, never fatal. Returns an error only if oomparser setup fails.
func (self *manager) watchForNewOoms() error {
    outStream := make(chan *oomparser.OomInstance, 10)
    oomLog, err := oomparser.New()
    if err != nil {
        return err
    }
    go oomLog.StreamOoms(outStream)
    go func() {
        // Runs until outStream is closed by the producer.
        for oomInstance := range outStream {
            // Surface OOM and OOM kill events.
            newEvent := &info.Event{
                ContainerName: oomInstance.ContainerName,
                Timestamp: oomInstance.TimeOfDeath,
                EventType: info.EventOom,
            }
            err := self.eventHandler.AddEvent(newEvent)
            if err != nil {
                log.Printf("[Error] failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
            }
            // Second event: the OOM kill, attributed to the victim's container.
            newEvent = &info.Event{
                ContainerName: oomInstance.VictimContainerName,
                Timestamp: oomInstance.TimeOfDeath,
                EventType: info.EventOomKill,
                EventData: info.EventData{
                    OomKill: &info.OomKillEventData{
                        Pid: oomInstance.Pid,
                        ProcessName: oomInstance.ProcessName,
                    },
                },
            }
            err = self.eventHandler.AddEvent(newEvent)
            if err != nil {
                log.Printf("[Error] failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
            }
        }
    }()
    return nil
}
// createContainer starts tracking a new container: it builds a handler via
// the registered factories, creates its containerData, atomically inserts
// it (and all its aliases) into the containers map unless already present,
// emits a ContainerCreation event, and starts the container's housekeeping.
func (m *manager) createContainer(containerName string) error {
    handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace)
    if err != nil {
        return err
    }
    if !accept {
        // Every factory declined this container; ignore it.
        log.Printf("[Info] ignoring container %q", containerName)
        return nil
    }
    cont, err := newContainerData(containerName, m.backendStorage, handler, m.loadReader, m.housekeepingInterval)
    if err != nil {
        return err
    }
    // Add to the containers map. The closure scopes the lock so that the
    // existence check and the inserts happen under one critical section.
    alreadyExists := func() bool {
        m.containersLock.Lock()
        defer m.containersLock.Unlock()
        namespacedName := namespacedContainerName{
            Name: containerName,
        }
        // Check that the container didn't already exist.
        _, ok := m.containers[namespacedName]
        if ok {
            return true
        }
        // Add the container name and all its aliases. The aliases must be within the namespace of the factory.
        m.containers[namespacedName] = cont
        for _, alias := range cont.info.Aliases {
            m.containers[namespacedContainerName{
                Namespace: cont.info.Namespace,
                Name: alias,
            }] = cont
        }
        return false
    }()
    if alreadyExists {
        // Someone else registered it first; nothing more to do.
        return nil
    }
    contSpec, err := cont.handler.GetSpec()
    if err != nil {
        return err
    }
    contRef, err := cont.handler.ContainerReference()
    if err != nil {
        return err
    }
    // Record the creation as an event, timestamped with the container's
    // own creation time rather than "now".
    newEvent := &info.Event{
        ContainerName: contRef.Name,
        Timestamp: contSpec.CreationTime,
        EventType: info.EventContainerCreation,
    }
    err = m.eventHandler.AddEvent(newEvent)
    if err != nil {
        return err
    }
    // Start the container's housekeeping.
    cont.Start()
    return nil
}
// destroyContainer stops tracking containerName: it stops the container's
// housekeeping, removes it and all its aliases from the registry, and
// emits a ContainerDeletion event. Destroying an unknown container is a
// no-op. The write lock is held for the entire operation.
func (m *manager) destroyContainer(containerName string) error {
    m.containersLock.Lock()
    defer m.containersLock.Unlock()
    namespacedName := namespacedContainerName{
        Name: containerName,
    }
    cont, ok := m.containers[namespacedName]
    if !ok {
        // Already destroyed, done.
        return nil
    }
    // Tell the container to stop.
    err := cont.Stop()
    if err != nil {
        return err
    }
    // Remove the container from our records (and all its aliases).
    delete(m.containers, namespacedName)
    for _, alias := range cont.info.Aliases {
        delete(m.containers, namespacedContainerName{
            Namespace: cont.info.Namespace,
            Name: alias,
        })
    }
    contRef, err := cont.handler.ContainerReference()
    if err != nil {
        return err
    }
    // Deletion events are timestamped "now" (unlike creation events, which
    // use the container's creation time).
    newEvent := &info.Event{
        ContainerName: contRef.Name,
        Timestamp: time.Now(),
        EventType: info.EventContainerDeletion,
    }
    err = m.eventHandler.AddEvent(newEvent)
    if err != nil {
        return err
    }
    return nil
}
// getContainersDiff compares the subcontainers reported (recursively) by
// containerName's handler against the manager's registry. It returns the
// containers that are new (added) and those no longer listed (removed).
func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) {
    m.containersLock.RLock()
    defer m.containersLock.RUnlock()
    // Get all subcontainers recursively.
    cont, ok := m.containers[namespacedContainerName{
        Name: containerName,
    }]
    if !ok {
        return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName)
    }
    allContainers, err := cont.handler.ListContainers(container.ListRecursive)
    if err != nil {
        return nil, nil, err
    }
    // Include the root of the listing itself so it is never marked removed.
    allContainers = append(allContainers, info.ContainerReference{Name: containerName})
    // Determine which were added and which were removed.
    allContainersSet := make(map[string]*containerData)
    for name, d := range m.containers {
        // Only add the canonical name (skip alias entries, which map to
        // the same containerData).
        if d.info.Name == name.Name {
            allContainersSet[name.Name] = d
        }
    }
    // Added containers: listed by the handler but not in our registry.
    for _, c := range allContainers {
        delete(allContainersSet, c.Name)
        _, ok := m.containers[namespacedContainerName{
            Name: c.Name,
        }]
        if !ok {
            added = append(added, c)
        }
    }
    // Removed ones are no longer in the container listing.
    for _, d := range allContainersSet {
        removed = append(removed, d.info.ContainerReference)
    }
    return
}
// detectSubcontainers reconciles the manager's registry with the actual
// subcontainers of containerName: newly discovered containers are created
// and vanished ones destroyed. Per-container failures are logged rather
// than returned; only a diff failure aborts.
func (m *manager) detectSubcontainers(containerName string) error {
    added, removed, err := m.getContainersDiff(containerName)
    if err != nil {
        return err
    }
    // Register each newly discovered container.
    for _, ref := range added {
        if createErr := m.createContainer(ref.Name); createErr != nil {
            log.Printf("[Error] Failed to create existing container: %s: %s", ref.Name, createErr)
        }
    }
    // Drop each container that has disappeared.
    for _, ref := range removed {
        if destroyErr := m.destroyContainer(ref.Name); destroyErr != nil {
            log.Printf("[Error] Failed to destroy existing container: %s: %s", ref.Name, destroyErr)
        }
    }
    return nil
}
// watchForNewContainers subscribes to subcontainer add/delete events under
// the root container and applies them (create/destroy) in a background
// goroutine until a signal arrives on quit. Runs forever unless there is a
// setup error. The goroutine replies on quit with the result of stopping
// the watch; note that if StopWatchingSubcontainers fails, the goroutine
// keeps processing events instead of exiting.
func (self *manager) watchForNewContainers(quit chan error) error {
    var root *containerData
    var ok bool
    // Closure scopes the read lock around the single map lookup.
    func() {
        self.containersLock.RLock()
        defer self.containersLock.RUnlock()
        root, ok = self.containers[namespacedContainerName{
            Name: "/",
        }]
    }()
    if !ok {
        return fmt.Errorf("[Error] Root container does not exist when watching for new containers")
    }
    // Register for new subcontainers.
    eventsChannel := make(chan container.SubcontainerEvent, 16)
    err := root.handler.WatchSubcontainers(eventsChannel)
    if err != nil {
        return err
    }
    // There is a race between starting the watch and new container creation so we do a detection before we read new containers.
    err = self.detectSubcontainers("/")
    if err != nil {
        return err
    }
    // Listen to events from the container handler.
    go func() {
        for {
            select {
            case event := <-eventsChannel:
                switch {
                case event.EventType == container.SubcontainerAdd:
                    err = self.createContainer(event.Name)
                case event.EventType == container.SubcontainerDelete:
                    err = self.destroyContainer(event.Name)
                }
                if err != nil {
                    log.Printf("[Error] Failed to process watch event: %v", err)
                }
            case <-quit:
                // Stop processing events if asked to quit.
                err := root.handler.StopWatchingSubcontainers()
                quit <- err
                if err == nil {
                    log.Printf("[Info] Exiting thread watching subcontainers")
                    return
                }
            }
        }
    }()
    return nil
}
// globalHousekeeping periodically (every globalHousekeepingInterval)
// rescans "/" for added/removed subcontainers until told to quit, then
// acknowledges the quit by sending nil back on the channel.
func (self *manager) globalHousekeeping(quit chan error) {
    // Long housekeeping is either 100ms or half of the housekeeping interval.
    longHousekeeping := 100 * time.Millisecond
    if globalHousekeepingInterval/2 < longHousekeeping {
        longHousekeeping = globalHousekeepingInterval / 2
    }
    // NOTE(review): time.Tick's ticker is never released; acceptable only
    // because this goroutine is expected to live for the process lifetime.
    ticker := time.Tick(globalHousekeepingInterval)
    for {
        select {
        case <-ticker:
            start := time.Now()
            // Check for new containers.
            err := self.detectSubcontainers("/")
            if err != nil {
                log.Printf("[Error] Failed to detect containers: %s", err)
            }
            // Log if housekeeping took too long (logging currently disabled).
            duration := time.Since(start)
            if duration >= longHousekeeping {
                //log.Printf("[Info] Global Housekeeping(%d) took %s", t.Unix(), duration)
            }
        case <-quit:
            // Quit if asked to do so.
            quit <- nil
            log.Printf("[Info] Exiting global housekeeping thread")
            return
        }
    }
}
| getAllDockerContainers | identifier_name |
manager.go | package monitor
import (
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"time"
"domeagent/monitor/container"
"domeagent/monitor/container/docker"
"domeagent/monitor/container/raw"
"domeagent/monitor/events"
"domeagent/monitor/fs"
"domeagent/monitor/info"
"domeagent/monitor/storage"
"domeagent/monitor/storage/influxdb"
"domeagent/monitor/utils/cpuload"
"domeagent/monitor/utils/oomparser"
"domeagent/monitor/utils/sysfs"
"github.com/docker/libcontainer/cgroups"
)
// globalHousekeepingInterval is how often the manager rescans "/" for
// added/removed subcontainers in the global housekeeping loop.
var globalHousekeepingInterval = 1 * time.Minute
// enableLoadReader gates creation of the cpu load reader in Start.
// change enableLoadReader from true to false, to avoid "error failed to open cgroup path" error
var enableLoadReader = false
// The Manager interface defines operations for starting a manager and getting
// container and machine information & uploading to influxdb
type Manager interface {
    // Start the manager. Calling other manager methods before this returns
    // may produce undefined behavior.
    Start() error
    // Stops the manager.
    Stop() error
    // Get information about a container.
    GetContainerInfo(containerName string) (*info.ContainerInfo, error)
    // Gets all the Docker containers. Return is a map from full container name to ContainerInfo.
    AllDockerContainers() (map[string]*info.ContainerInfo, error)
    // Gets information about a specific Docker container. The specified name is within the Docker namespace.
    DockerContainer(containerName string) (*info.ContainerInfo, error)
    // Returns true if the named container exists.
    Exists(containerName string) bool
    // Get information about the machine.
    GetMachineInfo() (*info.MachineInfo, error)
    // Get version information about different components we depend on.
    GetVersionInfo() (*info.VersionInfo, error)
    // Get events streamed through passedChannel that fit the request.
    WatchForEvents(request *events.Request) (*events.EventChannel, error)
    // Get past events that have been detected and that fit the request.
    GetPastEvents(request *events.Request) ([]*info.Event, error)
    // Close the event channel identified by watch_id once the client stops listening.
    CloseEventChannel(watch_id int)
    // Get status information about docker.
    DockerInfo() (DockerStatus, error)
    // Get details about interesting docker images.
    DockerImages() ([]DockerImage, error)
}
// DockerStatus is a flattened, JSON-serializable snapshot of the Docker
// daemon's state as assembled by DockerInfo from the daemon's string map.
type DockerStatus struct {
    Version string `json:"version"`
    KernelVersion string `json:"kernel_version"`
    OS string `json:"os"`
    Hostname string `json:"hostname"`
    RootDir string `json:"root_dir"`
    Driver string `json:"driver"`
    // Key/value pairs parsed from the daemon's "DriverStatus" field.
    DriverStatus map[string]string `json:"driver_status"`
    ExecDriver string `json:"exec_driver"`
    NumImages int `json:"num_images"`
    NumContainers int `json:"num_containers"`
}
// DockerImage is a JSON-serializable summary of one Docker image, as
// produced by DockerImages.
type DockerImage struct {
    ID string `json:"id"`
    RepoTags []string `json:"repo_tags"`
    // Creation time; presumably a Unix timestamp — TODO confirm against
    // the docker package.
    Created int64 `json:"created"`
    VirtualSize int64 `json:"virtual_size"`
    Size int64 `json:"size"`
}
// InfluxConfig carries the connection/serialization settings passed to
// influxdb.New when the manager is created.
// NOTE(review): the json tag on Database is misspelled ("dababase").
// Fixing it would break any existing config files that use the misspelled
// key, so it is flagged here for a coordinated change rather than patched.
type InfluxConfig struct {
    Table string `json:"table,omitempty"`
    Database string `json:"dababase,omitempty"`
    Username string `json:"username,omitempty"`
    Password string `json:"password,omitempty"`
    Host string `json:"host,omitempty"`
    BufferDuration time.Duration `json:"buffer_duration,omitempty"`
    FilterPrefix string `json:"filter_prefix,omitempty"`
}
// A namespaced container name; used as the key of manager.containers so
// that the same short name can exist in multiple namespaces (e.g. raw vs
// Docker).
type namespacedContainerName struct {
    // The namespace of the container. Can be empty for the root namespace.
    Namespace string
    // The name of the container in this namespace.
    Name string
}
// manager is the concrete Manager implementation. The containers map is
// guarded by containersLock; the remaining fields are set once in New or
// Start and then read-only.
type manager struct {
    // Registry of tracked containers keyed by (namespace, name); alias
    // names map to the same *containerData as the canonical entry.
    containers map[namespacedContainerName]*containerData
    containersLock sync.RWMutex
    // Stats storage backend (influxdb); handed to every new containerData.
    backendStorage storage.StorageDriver
    fsInfo fs.FsInfo
    machineInfo info.MachineInfo
    versionInfo info.VersionInfo
    // One channel per background goroutine started by Start; Stop signals
    // each and waits for the reply.
    quitChannels []chan error
    // cgroup path of the container this process runs in.
    selfContainer string
    // Optional cpu load reader; nil when enableLoadReader is false or
    // initialization failed.
    loadReader cpuload.CpuLoadReader
    eventHandler events.EventManager
    startupTime time.Time
    housekeepingInterval time.Duration
    // True when the host's rootfs is not mounted at /rootfs (i.e. we are
    // running in the host's own namespaces).
    inHostNamespace bool
}
// New returns a new manager: it connects the influxdb storage backend from
// config, probes the system (sysfs, cgroups, Docker, filesystems, machine
// and version info), and wires up the event manager. The returned Manager
// still needs Start() to begin collecting.
func New(housekeepingInterval time.Duration, config *InfluxConfig) (Manager, error) {
    // Initialize influxdb
    hostname, err := os.Hostname() // Agent's host name
    if err != nil {
        return nil, err
    }
    influxdbStorage, err := influxdb.New(hostname,
        config.Table,
        config.Database,
        config.Username,
        config.Password,
        config.Host,
        config.BufferDuration,
        config.FilterPrefix)
    if err != nil {
        return nil, err
    }
    sysfs, err := sysfs.NewRealSysFs()
    if err != nil {
        log.Printf("[Error] Failed to create a system interface: %s", err)
        return nil, err
    }
    //log.Printf("[Info] Created a system interface")
    // Detect the container we are running on.
    selfContainer, err := cgroups.GetThisCgroupDir("cpu")
    if err != nil {
        return nil, err
    }
    // A Docker connection failure is tolerated; only filesystem-info
    // failure below is fatal.
    dockerInfo, err := docker.DockerInfo()
    if err != nil {
        log.Printf("[Error] Unable to connect to Docker: %v", err)
    }
    context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
    fsInfo, err := fs.NewFsInfo(context)
    if err != nil {
        return nil, err
    }
    // If started with host's rootfs mounted, assume that its running
    // in its own namespaces.
    inHostNamespace := false
    if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
        inHostNamespace = true
    }
    newManager := &manager{
        containers: make(map[namespacedContainerName]*containerData),
        backendStorage: influxdbStorage,
        quitChannels: make([]chan error, 0, 2),
        fsInfo: fsInfo,
        selfContainer: selfContainer,
        inHostNamespace: inHostNamespace,
        startupTime: time.Now(),
        housekeepingInterval: housekeepingInterval,
    }
    machineInfo, err := getMachineInfo(sysfs, fsInfo)
    if err != nil {
        return nil, err
    }
    newManager.machineInfo = *machineInfo
    versionInfo, err := getVersionInfo()
    if err != nil {
        return nil, err
    }
    newManager.versionInfo = *versionInfo
    newManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())
    return newManager, nil
}
// Start registers the Docker and raw container factories, optionally starts
// the cpu load reader, begins OOM watching, creates the root container and
// recovers existing ones, then launches the subcontainer watcher and global
// housekeeping goroutines.
//
// Returns an error when a required setup step fails; failures of optional
// components (cpu load reader, OOM watcher) are logged and tolerated.
func (self *manager) Start() error {
    // Register Docker container factory.
    err := docker.Register(self, self.fsInfo)
    if err != nil {
        // Fixed log prefix: was "{Error]" (typo) — now matches the
        // "[Error]" convention used everywhere else in this file.
        log.Printf("[Error] Docker container factory registration failed: %v.", err)
        return err
    }
    // Register the raw driver.
    err = raw.Register(self, self.fsInfo)
    if err != nil {
        log.Printf("[Error] Registration of the raw container factory failed: %v", err)
        return err
    }
    // Warm up Docker status/image information; results are ignored here.
    self.DockerInfo()
    self.DockerImages()
    if enableLoadReader {
        // Create cpu load reader. It is optional: on any failure we log
        // and continue without load statistics.
        cpuLoadReader, err := cpuload.New()
        if err != nil {
            log.Printf("[Error] Could not initialize cpu load reader: %s", err)
        } else {
            err = cpuLoadReader.Start()
            if err != nil {
                log.Printf("[Error] Could not start cpu load stat collector: %s", err)
            } else {
                self.loadReader = cpuLoadReader
            }
        }
    }
    // Watch for OOMs. Also optional: a failure only disables OOM events.
    err = self.watchForNewOoms()
    if err != nil {
        log.Printf("[Error] Could not configure a source for OOM detection, disabling OOM events: %v", err)
    }
    // If there are no factories, don't start any housekeeping and serve the information we do have.
    if !container.HasFactories() {
        return nil
    }
    // Create root and then recover all containers.
    err = self.createContainer("/")
    if err != nil {
        return err
    }
    err = self.detectSubcontainers("/")
    if err != nil {
        return err
    }
    // Watch for new containers.
    quitWatcher := make(chan error)
    err = self.watchForNewContainers(quitWatcher)
    if err != nil {
        return err
    }
    self.quitChannels = append(self.quitChannels, quitWatcher)
    // Look for new containers in the main housekeeping thread.
    quitGlobalHousekeeping := make(chan error)
    self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
    go self.globalHousekeeping(quitGlobalHousekeeping)
    return nil
}
// Stop shuts the manager down: it signals every registered quit channel,
// waits for each goroutine's acknowledgement, then stops the cpu load
// reader if one was started. Returns the first error reported by a
// quitting goroutine.
func (self *manager) Stop() error {
    // Stop and wait on all quit channels.
    for i, c := range self.quitChannels {
        // Send the exit signal and wait on the thread to exit (by closing the channel).
        c <- nil
        err := <-c
        if err != nil {
            // Remove the channels that quit successfully (indices < i);
            // the failing channel and any not yet signalled are kept so a
            // subsequent Stop can retry them.
            self.quitChannels = self.quitChannels[i:]
            return err
        }
    }
    self.quitChannels = make([]chan error, 0, 2)
    if self.loadReader != nil {
        self.loadReader.Stop()
        self.loadReader = nil
    }
    return nil
}
// GetContainerInfo returns user-facing info (spec plus refreshed stats) for
// the container registered under containerName in the root namespace.
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
    cont, err := self.getContainerData(containerName)
    if err != nil {
        return nil, err
    }
    return self.containerDataToContainerInfo(cont)
}
// getContainerData looks up the raw containerData registered under
// containerName (root namespace), or returns an error if it is unknown.
func (self *manager) getContainerData(containerName string) (*containerData, error) {
    key := namespacedContainerName{Name: containerName}
    // Hold the read lock only around the map access.
    self.containersLock.RLock()
    cont, ok := self.containers[key]
    self.containersLock.RUnlock()
    if !ok {
        return nil, fmt.Errorf("unknown container %q", containerName)
    }
    return cont, nil
}
// containerDataToContainerInfo converts internal containerData into a
// user-facing info.ContainerInfo, refreshing stats and adjusting the spec.
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
    // Get the info from the container.
    cinfo, err := cont.GetInfo()
    if err != nil {
        return nil, err
    }
    // Refresh the stats; NOTE(review): the meaning of the boolean flag is
    // defined by containerData.updateStats — confirm there.
    stats, err := cont.updateStats(false)
    if err != nil {
        return nil, err
    }
    // Make a copy of the info for the user.
    ret := &info.ContainerInfo{
        ContainerReference: cinfo.ContainerReference,
        Subcontainers: cinfo.Subcontainers,
        Spec: self.getAdjustedSpec(cinfo),
        Stats: stats,
    }
    return ret, nil
}
// getAdjustedSpec returns the container's spec with defaults resolved to
// concrete values for presentation.
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
    spec := cinfo.Spec
    // A memory limit of 0 means "no limit"; surface the machine's total
    // memory capacity instead.
    if spec.HasMemory && spec.Memory.Limit == 0 {
        spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
    }
    return spec
}
// AllDockerContainers returns every Docker-namespace container as a map
// from full container name to its user-facing info.
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
    dockerContainers := self.getAllDockerContainers()
    output := make(map[string]*info.ContainerInfo, len(dockerContainers))
    for name, data := range dockerContainers {
        converted, err := self.containerDataToContainerInfo(data)
        if err != nil {
            return nil, err
        }
        output[name] = converted
    }
    return output, nil
}
// getAllDockerContainers returns all containers registered in the Docker
// namespace, keyed by canonical container name. The map is fresh but the
// *containerData values are shared with the registry, not copied.
func (self *manager) getAllDockerContainers() map[string]*containerData {
    self.containersLock.RLock()
    defer self.containersLock.RUnlock()
    containers := make(map[string]*containerData, len(self.containers))
    // Get containers in the Docker namespace.
    for name, cont := range self.containers {
        if name.Namespace == docker.DockerNamespace {
            containers[cont.info.Name] = cont
        }
    }
    return containers
}
// DockerContainer returns user-facing info for one container in the Docker
// namespace. On failure it returns an empty (non-nil) ContainerInfo and
// the error.
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
    data, err := self.getDockerContainer(containerName)
    if err != nil {
        return &info.ContainerInfo{}, err
    }
    converted, err := self.containerDataToContainerInfo(data)
    if err != nil {
        return &info.ContainerInfo{}, err
    }
    return converted, nil
}
// getDockerContainer looks up a container within the Docker namespace.
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
    key := namespacedContainerName{
        Namespace: docker.DockerNamespace,
        Name: containerName,
    }
    self.containersLock.RLock()
    defer self.containersLock.RUnlock()
    if cont, ok := self.containers[key]; ok {
        return cont, nil
    }
    return nil, fmt.Errorf("unable to find Docker container %q", containerName)
}
// Exists reports whether a container is registered under containerName in
// the root namespace.
func (m *manager) Exists(containerName string) bool {
    // A read lock suffices — this method only reads the containers map —
    // and avoids blocking concurrent readers the way the previous
    // exclusive Lock did.
    m.containersLock.RLock()
    defer m.containersLock.RUnlock()
    namespacedName := namespacedContainerName{
        Name: containerName,
    }
    _, ok := m.containers[namespacedName]
    return ok
}
// GetMachineInfo returns a copy of the machine information.
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
    // Actually copy the MachineInfo before handing it out: the previous
    // code returned a pointer into the manager, contradicting its own
    // "copy and return" comment and letting callers mutate shared state.
    // Note this is a shallow copy; any reference-typed fields are shared.
    machineInfo := m.machineInfo
    return &machineInfo, nil
}
// GetVersionInfo returns a copy of the version information, mirroring the
// copy-out behavior of GetMachineInfo so callers cannot mutate shared
// manager state through the returned pointer.
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
    versionInfo := m.versionInfo
    return &versionInfo, nil
}
// WatchForEvents registers an event watch; events matching request are
// delivered on the returned channel. Can be called by the api, which will
// take events returned on the channel.
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
    return self.eventHandler.WatchEvents(request)
}
// GetPastEvents returns all previously detected events satisfying the
// request. Can be called by the api.
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
    return self.eventHandler.GetEvents(request)
}
// CloseEventChannel tears down the watch identified by watch_id; called by
// the api when a client is no longer listening to the channel.
func (self *manager) CloseEventChannel(watch_id int) {
    self.eventHandler.StopWatch(watch_id)
}
// DockerInfo queries the Docker daemon and normalizes selected fields of
// its string-keyed response into a DockerStatus. Fields that are absent or
// fail to parse are silently left at their zero values.
func (m *manager) DockerInfo() (DockerStatus, error) {
    info, err := docker.DockerInfo()
    if err != nil {
        return DockerStatus{}, err
    }
    out := DockerStatus{}
    out.Version = m.versionInfo.DockerVersion
    if val, ok := info["KernelVersion"]; ok {
        out.KernelVersion = val
    }
    if val, ok := info["OperatingSystem"]; ok {
        out.OS = val
    }
    if val, ok := info["Name"]; ok {
        out.Hostname = val
    }
    if val, ok := info["DockerRootDir"]; ok {
        out.RootDir = val
    }
    if val, ok := info["Driver"]; ok {
        out.Driver = val
    }
    if val, ok := info["ExecutionDriver"]; ok {
        out.ExecDriver = val
    }
    if val, ok := info["Images"]; ok {
        n, err := strconv.Atoi(val)
        if err == nil {
            out.NumImages = n
        }
    }
    if val, ok := info["Containers"]; ok {
        n, err := strconv.Atoi(val)
        if err == nil {
            out.NumContainers = n
        }
    }
    // cut, trim, cut - Example format:
    // DriverStatus=[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirperm1 Supported","false"]]
    // Pairs that do not split into exactly key and value are skipped.
    if val, ok := info["DriverStatus"]; ok {
        out.DriverStatus = make(map[string]string)
        val = strings.TrimPrefix(val, "[[")
        val = strings.TrimSuffix(val, "]]")
        vals := strings.Split(val, "],[")
        for _, v := range vals {
            kv := strings.Split(v, "\",\"")
            if len(kv) != 2 {
                continue
            } else {
                out.DriverStatus[strings.Trim(kv[0], "\"")] = strings.Trim(kv[1], "\"")
            }
        }
    }
    return out, nil
}
// DockerImages lists the Docker images on this host, skipping untagged
// ("<none>:<none>") images, and converts them to DockerImage values.
func (m *manager) DockerImages() ([]DockerImage, error) {
    const unknownTag = "<none>:<none>"
    images, err := docker.DockerImages()
    if err != nil {
        return nil, err
    }
    out := []DockerImage{}
    for _, image := range images {
        // Images without a repo or tag are uninteresting.
        if len(image.RepoTags) == 1 && image.RepoTags[0] == unknownTag {
            continue
        }
        out = append(out, DockerImage{
            ID: image.ID,
            RepoTags: image.RepoTags,
            Created: image.Created,
            VirtualSize: image.VirtualSize,
            Size: image.Size,
        })
    }
    return out, nil
}
func (self *manager) watchForNewOoms() error {
//log.Printf("[Info] Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New()
if err != nil {
return err
}
go oomLog.StreamOoms(outStream)
go func() {
for oomInstance := range outStream {
// Surface OOM and OOM kill events.
newEvent := &info.Event{
ContainerName: oomInstance.ContainerName,
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOom,
}
err := self.eventHandler.AddEvent(newEvent)
if err != nil {
log.Printf("[Error] failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
}
//log.Printf("[Info] Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath)
newEvent = &info.Event{
ContainerName: oomInstance.VictimContainerName,
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOomKill,
EventData: info.EventData{
OomKill: &info.OomKillEventData{
Pid: oomInstance.Pid,
ProcessName: oomInstance.ProcessName,
},
},
}
err = self.eventHandler.AddEvent(newEvent)
if err != nil {
log.Printf("[Error] failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
}
} | }
// createContainer starts tracking a new container: it builds a handler via
// the registered factories, creates its containerData, atomically inserts
// it (and all its aliases) into the containers map unless already present,
// emits a ContainerCreation event, and starts the container's housekeeping.
func (m *manager) createContainer(containerName string) error {
    handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace)
    if err != nil {
        return err
    }
    if !accept {
        // Every factory declined this container; ignore it.
        log.Printf("[Info] ignoring container %q", containerName)
        return nil
    }
    cont, err := newContainerData(containerName, m.backendStorage, handler, m.loadReader, m.housekeepingInterval)
    if err != nil {
        return err
    }
    // Add to the containers map. The closure scopes the lock so that the
    // existence check and the inserts happen under one critical section.
    alreadyExists := func() bool {
        m.containersLock.Lock()
        defer m.containersLock.Unlock()
        namespacedName := namespacedContainerName{
            Name: containerName,
        }
        // Check that the container didn't already exist.
        _, ok := m.containers[namespacedName]
        if ok {
            return true
        }
        // Add the container name and all its aliases. The aliases must be within the namespace of the factory.
        m.containers[namespacedName] = cont
        for _, alias := range cont.info.Aliases {
            m.containers[namespacedContainerName{
                Namespace: cont.info.Namespace,
                Name: alias,
            }] = cont
        }
        return false
    }()
    if alreadyExists {
        // Someone else registered it first; nothing more to do.
        return nil
    }
    contSpec, err := cont.handler.GetSpec()
    if err != nil {
        return err
    }
    contRef, err := cont.handler.ContainerReference()
    if err != nil {
        return err
    }
    // Record the creation as an event, timestamped with the container's
    // own creation time rather than "now".
    newEvent := &info.Event{
        ContainerName: contRef.Name,
        Timestamp: contSpec.CreationTime,
        EventType: info.EventContainerCreation,
    }
    err = m.eventHandler.AddEvent(newEvent)
    if err != nil {
        return err
    }
    // Start the container's housekeeping.
    cont.Start()
    return nil
}
// destroyContainer stops tracking containerName: it stops the container's
// housekeeping, removes it and all its aliases from the registry, and
// emits a ContainerDeletion event. Destroying an unknown container is a
// no-op. The write lock is held for the entire operation.
func (m *manager) destroyContainer(containerName string) error {
    m.containersLock.Lock()
    defer m.containersLock.Unlock()
    namespacedName := namespacedContainerName{
        Name: containerName,
    }
    cont, ok := m.containers[namespacedName]
    if !ok {
        // Already destroyed, done.
        return nil
    }
    // Tell the container to stop.
    err := cont.Stop()
    if err != nil {
        return err
    }
    // Remove the container from our records (and all its aliases).
    delete(m.containers, namespacedName)
    for _, alias := range cont.info.Aliases {
        delete(m.containers, namespacedContainerName{
            Namespace: cont.info.Namespace,
            Name: alias,
        })
    }
    contRef, err := cont.handler.ContainerReference()
    if err != nil {
        return err
    }
    // Deletion events are timestamped "now" (unlike creation events, which
    // use the container's creation time).
    newEvent := &info.Event{
        ContainerName: contRef.Name,
        Timestamp: time.Now(),
        EventType: info.EventContainerDeletion,
    }
    err = m.eventHandler.AddEvent(newEvent)
    if err != nil {
        return err
    }
    return nil
}
// getContainersDiff compares the subcontainers reported (recursively) by
// containerName's handler against the manager's registry. It returns the
// containers that are new (added) and those no longer listed (removed).
func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) {
    m.containersLock.RLock()
    defer m.containersLock.RUnlock()
    // Get all subcontainers recursively.
    cont, ok := m.containers[namespacedContainerName{
        Name: containerName,
    }]
    if !ok {
        return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName)
    }
    allContainers, err := cont.handler.ListContainers(container.ListRecursive)
    if err != nil {
        return nil, nil, err
    }
    // Include the root of the listing itself so it is never marked removed.
    allContainers = append(allContainers, info.ContainerReference{Name: containerName})
    // Determine which were added and which were removed.
    allContainersSet := make(map[string]*containerData)
    for name, d := range m.containers {
        // Only add the canonical name (skip alias entries, which map to
        // the same containerData).
        if d.info.Name == name.Name {
            allContainersSet[name.Name] = d
        }
    }
    // Added containers: listed by the handler but not in our registry.
    for _, c := range allContainers {
        delete(allContainersSet, c.Name)
        _, ok := m.containers[namespacedContainerName{
            Name: c.Name,
        }]
        if !ok {
            added = append(added, c)
        }
    }
    // Removed ones are no longer in the container listing.
    for _, d := range allContainersSet {
        removed = append(removed, d.info.ContainerReference)
    }
    return
}
// Detect the existing subcontainers and reflect the setup here.
func (m *manager) detectSubcontainers(containerName string) error {
added, removed, err := m.getContainersDiff(containerName)
if err != nil {
return err
}
// Add the new containers.
for _, cont := range added {
err = m.createContainer(cont.Name)
if err != nil {
log.Printf("[Error] Failed to create existing container: %s: %s", cont.Name, err)
}
}
// Remove the old containers.
for _, cont := range removed {
err = m.destroyContainer(cont.Name)
if err != nil {
log.Printf("[Error] Failed to destroy existing container: %s: %s", cont.Name, err)
}
}
return nil
}
// Watches for new containers started in the system. Runs forever unless there is a setup error.
func (self *manager) watchForNewContainers(quit chan error) error {
var root *containerData
var ok bool
func() {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
root, ok = self.containers[namespacedContainerName{
Name: "/",
}]
}()
if !ok {
return fmt.Errorf("[Error] Root container does not exist when watching for new containers")
}
// Register for new subcontainers.
eventsChannel := make(chan container.SubcontainerEvent, 16)
err := root.handler.WatchSubcontainers(eventsChannel)
if err != nil {
return err
}
// There is a race between starting the watch and new container creation so we do a detection before we read new containers.
err = self.detectSubcontainers("/")
if err != nil {
return err
}
// Listen to events from the container handler.
go func() {
for {
select {
case event := <-eventsChannel:
switch {
case event.EventType == container.SubcontainerAdd:
err = self.createContainer(event.Name)
case event.EventType == container.SubcontainerDelete:
err = self.destroyContainer(event.Name)
}
if err != nil {
log.Printf("[Error] Failed to process watch event: %v", err)
}
case <-quit:
// Stop processing events if asked to quit.
err := root.handler.StopWatchingSubcontainers()
quit <- err
if err == nil {
log.Printf("[Info] Exiting thread watching subcontainers")
return
}
}
}
}()
return nil
}
func (self *manager) globalHousekeeping(quit chan error) {
// Long housekeeping is either 100ms or half of the housekeeping interval.
longHousekeeping := 100 * time.Millisecond
if globalHousekeepingInterval/2 < longHousekeeping {
longHousekeeping = globalHousekeepingInterval / 2
}
ticker := time.Tick(globalHousekeepingInterval)
for {
select {
case <-ticker:
start := time.Now()
// Check for new containers.
err := self.detectSubcontainers("/")
if err != nil {
log.Printf("[Error] Failed to detect containers: %s", err)
}
// Log if housekeeping took too long.
duration := time.Since(start)
if duration >= longHousekeeping {
//log.Printf("[Info] Global Housekeeping(%d) took %s", t.Unix(), duration)
}
case <-quit:
// Quit if asked to do so.
quit <- nil
log.Printf("[Info] Exiting global housekeeping thread")
return
}
}
} | }()
return nil | random_line_split |
main.go | package main
//necessary imports
import (
"net/http"
"fmt"
"encoding/json"
"context"
"time"
"log"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
//error class
type Error struct{
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
}
//new meeting
type new_meet struct{
Meet_ID string `json:"Id"`
}
//participant shema
type participant struct{
Name string `json:"Name" bson:"name"`
Email string `json:"Email" bson:"email"`
RSVP string `json:"RSVP" bson:"rsvp"`
}
//meeting schema
type meeting struct{
Id primitive.ObjectID `bson:"_id"`
Title string `json:"Title" bson:"title"`
Part []participant `json:"Participants" bson:"participants" `
Start time.Time `json:"Start Time" bson:"start" `
End time.Time `json:"End Time" bson:"end"`
Stamp time.Time `bson:"stamp"`
}
//schema for results of conditional meetings
type conditional_meets struct{
Meetings []meeting `json:"meetings"`
}
//invalid request response writer function
func invalid_request(w http.ResponseWriter, statCode int, message string){
w.Header().Set("Content-Type", "application/json")
switch statCode {
case 400: w.WriteHeader(http.StatusBadRequest)
case 403: w.WriteHeader(http.StatusForbidden)
case 404: w.WriteHeader(http.StatusNotFound)
default: w.WriteHeader(http.StatusNotFound)
}
err := Error {
StatusCode: statCode,
ErrorMessage: message}
json.NewEncoder(w).Encode(err)
}
//helper function to coneect to DB
func connectdb(ctx context.Context) (*mongo.Collection){
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
if err != nil {
log.Fatal(err)
}
err = client.Connect(ctx)
if err != nil {
log.Fatal(err)
}
appointyDatabase := client.Database("appointy-task-ritvix")
meetingCollection := appointyDatabase.Collection("meetings")
//returns collection object
return meetingCollection
}
func main(){
fmt.Println("Server is up")
http.HandleFunc("/meetings" , meets_handler) // handler for /meetings end point
http.HandleFunc("/meeting/" , meeting_handler) // handler for rooted /meeting/
fmt.Println(http.ListenAndServe(":8082", nil)); // listen to port 8082
}
//handle requests at /meetings
func | (w http.ResponseWriter, r *http.Request){
switch r.Method{
//if method is POST
case "POST":
//disallow query strings with POST method
if keys := r.URL.Query(); len(keys)!=0{
invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
}else{
//error handling if request not JSON
if ua := r.Header.Get("Content-Type"); ua!="application/json"{
invalid_request(w, 400, "This end point accepts only JSON request body")
}else{
var m meeting
dec := json.NewDecoder(r.Body)
dec.DisallowUnknownFields()
err := dec.Decode(&m)
//error if meeting details are not in right format
if err != nil {
invalid_request(w, 400, "Please recheck the meeting information")
return
}
m.Stamp = time.Now() //assign Creation stamp
m.Id = primitive.NewObjectID() //assign unique ObjectID
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //meeting collection
//check for overlap of participants
final_check := false
//iterate over al participants and find clashes is db
for _, particip := range m.Part{
var check meeting
check1 := true
check2 := true
check3 := true
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err!=nil{
check1 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte",m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check2 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check3 = false
}
if check1 || check2 || check3 {
final_check =true
}
}
if final_check{
invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
}else{
insertResult, err := meetingCollection.InsertOne(ctx, m)
if err != nil {
log.Fatal(err)
return
}
//write back meeting id as JSON response
w.Header().Set("Content-Type", "application/json")
meet := new_meet{
Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
json.NewEncoder(w).Encode(meet)
}
}
}
//if method is GET
case "GET":
keys := r.URL.Query()
//cases to allow only valid queries
switch len(keys){
//no query string error
case 0:invalid_request(w, 400, "Not a valid query at this end point")
case 1:
//extract participant email
if email, ok := keys["participant"]; !ok || len(email[0])<1{
invalid_request(w, 400, "Not a valid query at this end point")
}else {
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //collection meetings
if len(email)>1{
invalid_request(w, 400, "Only one participant can be queried at a time")
return
}
//query the collection for the mail id
cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email":bson.M{"$eq":email[0]}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//write back all his/her meetings as an array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
case 2:
start, okStart := keys["start"]
end, okEnd := keys["end"]
//check both start and end time are provided, else error
if !okStart || !okEnd {invalid_request(w, 400, "Not a valid query at this end point")
}else{
start_time := start[0]
end_time := end[0]
// fmt.Println(start_time, end_time)
start_tim, err := time.Parse(time.RFC3339, start_time)
//check if the time format is valid
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
return
}
end_tim, err := time.Parse(time.RFC3339, end_time)
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
return
}
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
//query the DB for the time window
cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//return all such meetings as array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
default:invalid_request(w, 400, "Not a valid query at this end point")
}
//disallow any other method
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, err := primitive.ObjectIDFromHex(meet_id)
if err!=nil{
invalid_request(w, 400, "Not a valid Meeting ID")
return
}
var meet meeting
filter := bson.M{"_id": id}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
err = meetingCollection.FindOne(ctx, filter).Decode(&meet)
if err != nil {
invalid_request(w, 404, "No meeting found with this ID")
return
}
//write back the meeting info
w.Header().Set("Content-Type", "application/json")
// fmt.Println(meet)
json.NewEncoder(w).Encode(meet)
}
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
| meets_handler | identifier_name |
main.go | package main
//necessary imports
import (
"net/http"
"fmt"
"encoding/json"
"context"
"time"
"log"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
//error class
type Error struct{
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
}
//new meeting
type new_meet struct{
Meet_ID string `json:"Id"`
}
//participant shema
type participant struct{
Name string `json:"Name" bson:"name"`
Email string `json:"Email" bson:"email"`
RSVP string `json:"RSVP" bson:"rsvp"`
}
//meeting schema
type meeting struct{
Id primitive.ObjectID `bson:"_id"`
Title string `json:"Title" bson:"title"`
Part []participant `json:"Participants" bson:"participants" `
Start time.Time `json:"Start Time" bson:"start" `
End time.Time `json:"End Time" bson:"end"`
Stamp time.Time `bson:"stamp"`
}
//schema for results of conditional meetings
type conditional_meets struct{
Meetings []meeting `json:"meetings"`
}
//invalid request response writer function
func invalid_request(w http.ResponseWriter, statCode int, message string){
w.Header().Set("Content-Type", "application/json")
switch statCode {
case 400: w.WriteHeader(http.StatusBadRequest)
case 403: w.WriteHeader(http.StatusForbidden)
case 404: w.WriteHeader(http.StatusNotFound)
default: w.WriteHeader(http.StatusNotFound)
}
err := Error {
StatusCode: statCode,
ErrorMessage: message}
json.NewEncoder(w).Encode(err)
}
//helper function to coneect to DB
func connectdb(ctx context.Context) (*mongo.Collection){
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
if err != nil {
log.Fatal(err)
}
err = client.Connect(ctx)
if err != nil {
log.Fatal(err)
}
appointyDatabase := client.Database("appointy-task-ritvix")
meetingCollection := appointyDatabase.Collection("meetings")
//returns collection object
return meetingCollection
}
func main(){
fmt.Println("Server is up")
http.HandleFunc("/meetings" , meets_handler) // handler for /meetings end point
http.HandleFunc("/meeting/" , meeting_handler) // handler for rooted /meeting/
fmt.Println(http.ListenAndServe(":8082", nil)); // listen to port 8082
}
//handle requests at /meetings
func meets_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
//if method is POST
case "POST":
//disallow query strings with POST method
if keys := r.URL.Query(); len(keys)!=0{
invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
}else{
//error handling if request not JSON
if ua := r.Header.Get("Content-Type"); ua!="application/json"{
invalid_request(w, 400, "This end point accepts only JSON request body")
}else{
var m meeting
dec := json.NewDecoder(r.Body)
dec.DisallowUnknownFields()
err := dec.Decode(&m)
//error if meeting details are not in right format
if err != nil {
invalid_request(w, 400, "Please recheck the meeting information")
return
}
m.Stamp = time.Now() //assign Creation stamp
m.Id = primitive.NewObjectID() //assign unique ObjectID
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //meeting collection
//check for overlap of participants
final_check := false
//iterate over al participants and find clashes is db
for _, particip := range m.Part{
var check meeting
check1 := true
check2 := true
check3 := true
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err!=nil{
check1 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte",m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check2 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check3 = false
}
if check1 || check2 || check3 {
final_check =true
}
}
if final_check{
invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
}else{
insertResult, err := meetingCollection.InsertOne(ctx, m)
if err != nil {
log.Fatal(err)
return
}
//write back meeting id as JSON response
w.Header().Set("Content-Type", "application/json")
meet := new_meet{
Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
json.NewEncoder(w).Encode(meet)
}
}
}
//if method is GET
case "GET":
keys := r.URL.Query()
//cases to allow only valid queries
switch len(keys){
//no query string error
case 0:invalid_request(w, 400, "Not a valid query at this end point")
case 1:
//extract participant email
if email, ok := keys["participant"]; !ok || len(email[0])<1{
invalid_request(w, 400, "Not a valid query at this end point")
}else {
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //collection meetings
if len(email)>1{
invalid_request(w, 400, "Only one participant can be queried at a time")
return
}
//query the collection for the mail id
cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email":bson.M{"$eq":email[0]}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//write back all his/her meetings as an array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
case 2:
start, okStart := keys["start"]
end, okEnd := keys["end"]
//check both start and end time are provided, else error
if !okStart || !okEnd {invalid_request(w, 400, "Not a valid query at this end point")
}else{
start_time := start[0]
end_time := end[0]
// fmt.Println(start_time, end_time)
start_tim, err := time.Parse(time.RFC3339, start_time)
//check if the time format is valid
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
return
}
end_tim, err := time.Parse(time.RFC3339, end_time)
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
return
}
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
//query the DB for the time window
cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//return all such meetings as array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
default:invalid_request(w, 400, "Not a valid query at this end point")
}
//disallow any other method
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request) | {
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, err := primitive.ObjectIDFromHex(meet_id)
if err!=nil{
invalid_request(w, 400, "Not a valid Meeting ID")
return
}
var meet meeting
filter := bson.M{"_id": id}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
err = meetingCollection.FindOne(ctx, filter).Decode(&meet)
if err != nil {
invalid_request(w, 404, "No meeting found with this ID")
return
}
//write back the meeting info
w.Header().Set("Content-Type", "application/json")
// fmt.Println(meet)
json.NewEncoder(w).Encode(meet)
}
default:invalid_request(w, 403, "Not a valid method at this end point")
}
} | identifier_body | |
main.go | package main
//necessary imports
import (
"net/http"
"fmt"
"encoding/json"
"context"
"time"
"log"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
//error class
type Error struct{
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
}
//new meeting
type new_meet struct{
Meet_ID string `json:"Id"`
}
//participant shema
type participant struct{
Name string `json:"Name" bson:"name"`
Email string `json:"Email" bson:"email"`
RSVP string `json:"RSVP" bson:"rsvp"`
}
//meeting schema
type meeting struct{
Id primitive.ObjectID `bson:"_id"`
Title string `json:"Title" bson:"title"`
Part []participant `json:"Participants" bson:"participants" `
Start time.Time `json:"Start Time" bson:"start" `
End time.Time `json:"End Time" bson:"end"`
Stamp time.Time `bson:"stamp"`
}
//schema for results of conditional meetings
type conditional_meets struct{
Meetings []meeting `json:"meetings"`
}
//invalid request response writer function
func invalid_request(w http.ResponseWriter, statCode int, message string){
w.Header().Set("Content-Type", "application/json")
switch statCode {
case 400: w.WriteHeader(http.StatusBadRequest)
case 403: w.WriteHeader(http.StatusForbidden)
case 404: w.WriteHeader(http.StatusNotFound)
default: w.WriteHeader(http.StatusNotFound)
}
err := Error {
StatusCode: statCode,
ErrorMessage: message}
json.NewEncoder(w).Encode(err)
}
//helper function to coneect to DB
func connectdb(ctx context.Context) (*mongo.Collection){
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
if err != nil {
log.Fatal(err)
}
err = client.Connect(ctx)
if err != nil {
log.Fatal(err)
}
appointyDatabase := client.Database("appointy-task-ritvix")
meetingCollection := appointyDatabase.Collection("meetings")
//returns collection object
return meetingCollection
}
func main(){
fmt.Println("Server is up")
http.HandleFunc("/meetings" , meets_handler) // handler for /meetings end point
http.HandleFunc("/meeting/" , meeting_handler) // handler for rooted /meeting/
fmt.Println(http.ListenAndServe(":8082", nil)); // listen to port 8082
}
//handle requests at /meetings
func meets_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
//if method is POST
case "POST":
//disallow query strings with POST method
if keys := r.URL.Query(); len(keys)!=0{
invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
}else{
//error handling if request not JSON
if ua := r.Header.Get("Content-Type"); ua!="application/json"{
invalid_request(w, 400, "This end point accepts only JSON request body")
}else{
var m meeting
dec := json.NewDecoder(r.Body)
dec.DisallowUnknownFields()
err := dec.Decode(&m)
//error if meeting details are not in right format
if err != nil {
invalid_request(w, 400, "Please recheck the meeting information")
return
}
m.Stamp = time.Now() //assign Creation stamp
m.Id = primitive.NewObjectID() //assign unique ObjectID
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //meeting collection
//check for overlap of participants
final_check := false
//iterate over al participants and find clashes is db
for _, particip := range m.Part{
var check meeting
check1 := true
check2 := true
check3 := true
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err!=nil{
check1 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte",m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check2 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil |
if check1 || check2 || check3 {
final_check =true
}
}
if final_check{
invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
}else{
insertResult, err := meetingCollection.InsertOne(ctx, m)
if err != nil {
log.Fatal(err)
return
}
//write back meeting id as JSON response
w.Header().Set("Content-Type", "application/json")
meet := new_meet{
Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
json.NewEncoder(w).Encode(meet)
}
}
}
//if method is GET
case "GET":
keys := r.URL.Query()
//cases to allow only valid queries
switch len(keys){
//no query string error
case 0:invalid_request(w, 400, "Not a valid query at this end point")
case 1:
//extract participant email
if email, ok := keys["participant"]; !ok || len(email[0])<1{
invalid_request(w, 400, "Not a valid query at this end point")
}else {
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //collection meetings
if len(email)>1{
invalid_request(w, 400, "Only one participant can be queried at a time")
return
}
//query the collection for the mail id
cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email":bson.M{"$eq":email[0]}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//write back all his/her meetings as an array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
case 2:
start, okStart := keys["start"]
end, okEnd := keys["end"]
//check both start and end time are provided, else error
if !okStart || !okEnd {invalid_request(w, 400, "Not a valid query at this end point")
}else{
start_time := start[0]
end_time := end[0]
// fmt.Println(start_time, end_time)
start_tim, err := time.Parse(time.RFC3339, start_time)
//check if the time format is valid
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
return
}
end_tim, err := time.Parse(time.RFC3339, end_time)
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
return
}
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
//query the DB for the time window
cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//return all such meetings as array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
default:invalid_request(w, 400, "Not a valid query at this end point")
}
//disallow any other method
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, err := primitive.ObjectIDFromHex(meet_id)
if err!=nil{
invalid_request(w, 400, "Not a valid Meeting ID")
return
}
var meet meeting
filter := bson.M{"_id": id}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
err = meetingCollection.FindOne(ctx, filter).Decode(&meet)
if err != nil {
invalid_request(w, 404, "No meeting found with this ID")
return
}
//write back the meeting info
w.Header().Set("Content-Type", "application/json")
// fmt.Println(meet)
json.NewEncoder(w).Encode(meet)
}
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
| {
check3 = false
} | conditional_block |
main.go | package main
//necessary imports
import (
"net/http"
"fmt"
"encoding/json"
"context"
"time"
"log"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
//error class
type Error struct{
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
}
//new meeting
type new_meet struct{
Meet_ID string `json:"Id"`
}
//participant shema
type participant struct{
Name string `json:"Name" bson:"name"`
Email string `json:"Email" bson:"email"`
RSVP string `json:"RSVP" bson:"rsvp"`
}
//meeting schema
type meeting struct{
Id primitive.ObjectID `bson:"_id"`
Title string `json:"Title" bson:"title"`
Part []participant `json:"Participants" bson:"participants" `
Start time.Time `json:"Start Time" bson:"start" `
End time.Time `json:"End Time" bson:"end"`
Stamp time.Time `bson:"stamp"`
}
//schema for results of conditional meetings
type conditional_meets struct{
Meetings []meeting `json:"meetings"`
}
//invalid request response writer function
func invalid_request(w http.ResponseWriter, statCode int, message string){
w.Header().Set("Content-Type", "application/json")
switch statCode {
case 400: w.WriteHeader(http.StatusBadRequest)
case 403: w.WriteHeader(http.StatusForbidden)
case 404: w.WriteHeader(http.StatusNotFound)
default: w.WriteHeader(http.StatusNotFound)
}
err := Error {
StatusCode: statCode,
ErrorMessage: message}
json.NewEncoder(w).Encode(err)
}
//helper function to coneect to DB
func connectdb(ctx context.Context) (*mongo.Collection){
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
if err != nil {
log.Fatal(err)
}
err = client.Connect(ctx)
if err != nil {
log.Fatal(err)
}
appointyDatabase := client.Database("appointy-task-ritvix")
meetingCollection := appointyDatabase.Collection("meetings")
//returns collection object
return meetingCollection
}
func main(){
fmt.Println("Server is up")
http.HandleFunc("/meetings" , meets_handler) // handler for /meetings end point
http.HandleFunc("/meeting/" , meeting_handler) // handler for rooted /meeting/
fmt.Println(http.ListenAndServe(":8082", nil)); // listen to port 8082
}
//handle requests at /meetings
func meets_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
//if method is POST
case "POST":
//disallow query strings with POST method
if keys := r.URL.Query(); len(keys)!=0{
invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
}else{
//error handling if request not JSON
if ua := r.Header.Get("Content-Type"); ua!="application/json"{
invalid_request(w, 400, "This end point accepts only JSON request body")
}else{
var m meeting
dec := json.NewDecoder(r.Body)
dec.DisallowUnknownFields()
err := dec.Decode(&m)
//error if meeting details are not in right format
if err != nil {
invalid_request(w, 400, "Please recheck the meeting information")
return
}
m.Stamp = time.Now() //assign Creation stamp
m.Id = primitive.NewObjectID() //assign unique ObjectID
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //meeting collection
//check for overlap of participants
final_check := false
//iterate over al participants and find clashes is db
for _, particip := range m.Part{
var check meeting
check1 := true
check2 := true
check3 := true
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err!=nil{
check1 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte",m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check2 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check3 = false
}
if check1 || check2 || check3 {
final_check =true
}
}
if final_check{
invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
}else{
insertResult, err := meetingCollection.InsertOne(ctx, m)
if err != nil {
log.Fatal(err)
return
}
//write back meeting id as JSON response
w.Header().Set("Content-Type", "application/json")
meet := new_meet{
Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
json.NewEncoder(w).Encode(meet)
}
}
}
//if method is GET
case "GET":
keys := r.URL.Query()
//cases to allow only valid queries
switch len(keys){
//no query string error
case 0:invalid_request(w, 400, "Not a valid query at this end point")
case 1:
//extract participant email
if email, ok := keys["participant"]; !ok || len(email[0])<1{
invalid_request(w, 400, "Not a valid query at this end point")
}else {
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //collection meetings
if len(email)>1{
invalid_request(w, 400, "Only one participant can be queried at a time")
return
}
//query the collection for the mail id
cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email":bson.M{"$eq":email[0]}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//write back all his/her meetings as an array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json") | case 2:
start, okStart := keys["start"]
end, okEnd := keys["end"]
//check both start and end time are provided, else error
if !okStart || !okEnd {invalid_request(w, 400, "Not a valid query at this end point")
}else{
start_time := start[0]
end_time := end[0]
// fmt.Println(start_time, end_time)
start_tim, err := time.Parse(time.RFC3339, start_time)
//check if the time format is valid
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
return
}
end_tim, err := time.Parse(time.RFC3339, end_time)
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
return
}
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
//query the DB for the time window
cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//return all such meetings as array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
default:invalid_request(w, 400, "Not a valid query at this end point")
}
//disallow any other method
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, err := primitive.ObjectIDFromHex(meet_id)
if err!=nil{
invalid_request(w, 400, "Not a valid Meeting ID")
return
}
var meet meeting
filter := bson.M{"_id": id}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
err = meetingCollection.FindOne(ctx, filter).Decode(&meet)
if err != nil {
invalid_request(w, 404, "No meeting found with this ID")
return
}
//write back the meeting info
w.Header().Set("Content-Type", "application/json")
// fmt.Println(meet)
json.NewEncoder(w).Encode(meet)
}
default:invalid_request(w, 403, "Not a valid method at this end point")
}
} | json.NewEncoder(w).Encode(my_meets)
} | random_line_split |
column_chunk.go | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import (
"bytes"
"context"
"io"
"reflect"
"github.com/apache/arrow/go/v14/arrow/memory"
"github.com/apache/arrow/go/v14/parquet"
"github.com/apache/arrow/go/v14/parquet/compress"
"github.com/apache/arrow/go/v14/parquet/internal/encryption"
format "github.com/apache/arrow/go/v14/parquet/internal/gen-go/parquet"
"github.com/apache/arrow/go/v14/parquet/internal/thrift"
"github.com/apache/arrow/go/v14/parquet/schema"
"golang.org/x/xerrors"
)
// PageEncodingStats is used for counting the number of pages of specific
// types with the given internal encoding.
type PageEncodingStats struct {
Encoding parquet.Encoding
PageType format.PageType
}
type statvalues struct {
*format.Statistics
}
func (s *statvalues) GetMin() []byte { return s.GetMinValue() }
func (s *statvalues) GetMax() []byte { return s.GetMaxValue() }
func (s *statvalues) IsSetMin() bool { return s.IsSetMinValue() }
func (s *statvalues) IsSetMax() bool { return s.IsSetMaxValue() }
func makeColumnStats(metadata *format.ColumnMetaData, descr *schema.Column, mem memory.Allocator) TypedStatistics {
if descr.ColumnOrder() == parquet.ColumnOrders.TypeDefinedOrder {
return NewStatisticsFromEncoded(descr, mem,
metadata.NumValues-metadata.Statistics.GetNullCount(),
&statvalues{metadata.Statistics})
}
return NewStatisticsFromEncoded(descr, mem,
metadata.NumValues-metadata.Statistics.GetNullCount(),
metadata.Statistics)
}
// ColumnChunkMetaData is a proxy around format.ColumnChunkMetaData
// containing all of the information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
column *format.ColumnChunk
columnMeta *format.ColumnMetaData
decryptedMeta format.ColumnMetaData
descr *schema.Column
writerVersion *AppVersion
encodings []parquet.Encoding
encodingStats []format.PageEncodingStats
possibleStats TypedStatistics
mem memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil | else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
}
// CryptoMetadata returns the cryptographic metadata for how this column was
// encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
return c.column.GetCryptoMetadata()
}
// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }
// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }
// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }
// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }
// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) PathInSchema() parquet.ColumnPath {
return c.columnMeta.GetPathInSchema()
}
// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
return compress.Compression(c.columnMeta.Codec)
}
// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats connects the order of encodings based on the list of pages and types
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
ret := make([]PageEncodingStats, len(c.encodingStats))
for idx := range ret {
ret[idx].Encoding = parquet.Encoding(c.encodingStats[idx].Encoding)
ret[idx].PageType = c.encodingStats[idx].PageType
}
return ret
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
return c.columnMeta.IsSetDictionaryPageOffset()
}
// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
return c.columnMeta.GetDictionaryPageOffset()
}
// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }
// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }
// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }
// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
return c.columnMeta.GetTotalCompressedSize()
}
// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
return c.columnMeta.GetTotalUncompressedSize()
}
// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
return false, nil
}
if c.possibleStats == nil {
c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
}
encoded, err := c.possibleStats.Encode()
if err != nil {
return false, err
}
return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics can return nil if there are no stats in this metadata
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
ok, err := c.StatsSet()
if err != nil {
return nil, err
}
if ok {
return c.possibleStats, nil
}
return nil, nil
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
chunk *format.ColumnChunk
props *parquet.WriterProperties
column *schema.Column
compressedSize int64
}
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
return NewColumnChunkMetaDataBuilderWithContents(props, column, format.NewColumnChunk())
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
b := &ColumnChunkMetaDataBuilder{
props: props,
column: column,
chunk: chunk,
}
b.init(chunk)
return b
}
// Contents returns the underlying thrift ColumnChunk object so that it can be used
// for constructing or duplicating column metadata
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk { return c.chunk }
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
c.chunk = chunk
if !c.chunk.IsSetMetaData() {
c.chunk.MetaData = format.NewColumnMetaData()
}
c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
c.chunk.FilePath = &val
}
// Descr returns the associated column descriptor for this column chunk
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column { return c.column }
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
// if this column is encrypted, after Finish is called, the MetaData
// field is set to nil and we store the compressed size so return that
if c.chunk.MetaData == nil {
return c.compressedSize
}
return c.chunk.MetaData.GetTotalCompressedSize()
}
func (c *ColumnChunkMetaDataBuilder) SetStats(val EncodedStatistics) {
c.chunk.MetaData.Statistics = val.ToThrift()
}
// ChunkMetaInfo is a helper struct for passing the offset and size information
// for finishing the building of column chunk metadata
type ChunkMetaInfo struct {
NumValues int64
DictPageOffset int64
IndexPageOffset int64
DataPageOffset int64
CompressedSize int64
UncompressedSize int64
}
// EncodingStats is a helper struct for passing the encoding stat information
// for finishing up metadata for a column chunk.
type EncodingStats struct {
DictEncodingStats map[parquet.Encoding]int32
DataEncodingStats map[parquet.Encoding]int32
}
// Finish finalizes the metadata with the given offsets,
// flushes any compression that needs to be done, and performs
// any encryption if an encryptor is provided.
func (c *ColumnChunkMetaDataBuilder) Finish(info ChunkMetaInfo, hasDict, dictFallback bool, encStats EncodingStats, metaEncryptor encryption.Encryptor) error {
if info.DictPageOffset > 0 {
c.chunk.MetaData.DictionaryPageOffset = &info.DictPageOffset
c.chunk.FileOffset = info.DictPageOffset + info.CompressedSize
} else {
c.chunk.FileOffset = info.DataPageOffset + info.CompressedSize
}
c.chunk.MetaData.NumValues = info.NumValues
if info.IndexPageOffset >= 0 {
c.chunk.MetaData.IndexPageOffset = &info.IndexPageOffset
}
c.chunk.MetaData.DataPageOffset = info.DataPageOffset
c.chunk.MetaData.TotalUncompressedSize = info.UncompressedSize
c.chunk.MetaData.TotalCompressedSize = info.CompressedSize
// no matter the configuration, the maximum number of thrift encodings we'll
// populate is going to be 3:
// 1. potential dictionary index encoding
// 2. page encoding
// 3. RLE for repetition and definition levels
// so let's preallocate a capacity of 3 but initialize the slice at 0 len
const maxEncodings = 3
thriftEncodings := make([]format.Encoding, 0, maxEncodings)
if hasDict {
thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryIndexEncoding()))
if c.props.Version() == parquet.V1_0 {
thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
} else {
thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryPageEncoding()))
}
} else { // no dictionary
thriftEncodings = append(thriftEncodings, format.Encoding(c.props.EncodingFor(c.column.Path())))
}
thriftEncodings = append(thriftEncodings, format.Encoding(parquet.Encodings.RLE))
// Only PLAIN encoding is supported for fallback in V1
// TODO(zeroshade): Use user specified encoding for V2
if dictFallback {
thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
}
c.chunk.MetaData.Encodings = thriftEncodings
thriftEncodingStats := make([]*format.PageEncodingStats, 0, len(encStats.DictEncodingStats)+len(encStats.DataEncodingStats))
for k, v := range encStats.DictEncodingStats {
thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
PageType: format.PageType_DICTIONARY_PAGE,
Encoding: format.Encoding(k),
Count: v,
})
}
for k, v := range encStats.DataEncodingStats {
thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
PageType: format.PageType_DATA_PAGE,
Encoding: format.Encoding(k),
Count: v,
})
}
c.chunk.MetaData.EncodingStats = thriftEncodingStats
encryptProps := c.props.ColumnEncryptionProperties(c.column.Path())
if encryptProps != nil && encryptProps.IsEncrypted() {
ccmd := format.NewColumnCryptoMetaData()
if encryptProps.IsEncryptedWithFooterKey() {
ccmd.ENCRYPTION_WITH_FOOTER_KEY = format.NewEncryptionWithFooterKey()
} else {
ccmd.ENCRYPTION_WITH_COLUMN_KEY = &format.EncryptionWithColumnKey{
KeyMetadata: []byte(encryptProps.KeyMetadata()),
PathInSchema: c.column.ColumnPath(),
}
}
c.chunk.CryptoMetadata = ccmd
encryptedFooter := c.props.FileEncryptionProperties().EncryptedFooter()
encryptMetadata := !encryptedFooter || !encryptProps.IsEncryptedWithFooterKey()
if encryptMetadata {
// Serialize and encrypt ColumnMetadata separately
// Thrift-serialize the ColumnMetaData structure,
// encrypt it with the column key, and write to encrypted_column_metadata
serializer := thrift.NewThriftSerializer()
data, err := serializer.Write(context.Background(), c.chunk.MetaData)
if err != nil {
return err
}
var buf bytes.Buffer
metaEncryptor.Encrypt(&buf, data)
c.chunk.EncryptedColumnMetadata = buf.Bytes()
if encryptedFooter {
c.compressedSize = c.chunk.MetaData.GetTotalCompressedSize()
c.chunk.MetaData = nil
} else {
// Keep redacted metadata version for old readers
c.chunk.MetaData.Statistics = nil
c.chunk.MetaData.EncodingStats = nil
}
}
}
return nil
}
// WriteTo will always return 0 as the int64 since the thrift writer library
// does not return the number of bytes written, we only use the signature
// of (int64, error) in order to match the standard WriteTo interfaces.
func (c *ColumnChunkMetaDataBuilder) WriteTo(w io.Writer) (int64, error) {
return 0, thrift.SerializeThriftStream(c.chunk, w)
}
| {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta
} | conditional_block |
column_chunk.go | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import (
"bytes"
"context"
"io"
"reflect"
"github.com/apache/arrow/go/v14/arrow/memory"
"github.com/apache/arrow/go/v14/parquet"
"github.com/apache/arrow/go/v14/parquet/compress"
"github.com/apache/arrow/go/v14/parquet/internal/encryption"
format "github.com/apache/arrow/go/v14/parquet/internal/gen-go/parquet"
"github.com/apache/arrow/go/v14/parquet/internal/thrift"
"github.com/apache/arrow/go/v14/parquet/schema"
"golang.org/x/xerrors"
)
// PageEncodingStats is used for counting the number of pages of specific
// types with the given internal encoding.
type PageEncodingStats struct {
Encoding parquet.Encoding
PageType format.PageType
}
type statvalues struct {
*format.Statistics
}
func (s *statvalues) GetMin() []byte { return s.GetMinValue() }
func (s *statvalues) GetMax() []byte { return s.GetMaxValue() }
func (s *statvalues) IsSetMin() bool { return s.IsSetMinValue() }
func (s *statvalues) IsSetMax() bool { return s.IsSetMaxValue() }
func makeColumnStats(metadata *format.ColumnMetaData, descr *schema.Column, mem memory.Allocator) TypedStatistics {
if descr.ColumnOrder() == parquet.ColumnOrders.TypeDefinedOrder {
return NewStatisticsFromEncoded(descr, mem,
metadata.NumValues-metadata.Statistics.GetNullCount(),
&statvalues{metadata.Statistics})
}
return NewStatisticsFromEncoded(descr, mem,
metadata.NumValues-metadata.Statistics.GetNullCount(),
metadata.Statistics)
}
// ColumnChunkMetaData is a proxy around format.ColumnChunkMetaData
// containing all of the information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
column *format.ColumnChunk
columnMeta *format.ColumnMetaData
decryptedMeta format.ColumnMetaData
descr *schema.Column
writerVersion *AppVersion
encodings []parquet.Encoding
encodingStats []format.PageEncodingStats
possibleStats TypedStatistics
mem memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta
} else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
}
// CryptoMetadata returns the cryptographic metadata for how this column was
// encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
return c.column.GetCryptoMetadata()
}
// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }
// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }
// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }
// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }
// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) | () parquet.ColumnPath {
return c.columnMeta.GetPathInSchema()
}
// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
return compress.Compression(c.columnMeta.Codec)
}
// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats connects the order of encodings based on the list of pages and types
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
ret := make([]PageEncodingStats, len(c.encodingStats))
for idx := range ret {
ret[idx].Encoding = parquet.Encoding(c.encodingStats[idx].Encoding)
ret[idx].PageType = c.encodingStats[idx].PageType
}
return ret
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
return c.columnMeta.IsSetDictionaryPageOffset()
}
// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
return c.columnMeta.GetDictionaryPageOffset()
}
// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }
// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }
// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }
// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
return c.columnMeta.GetTotalCompressedSize()
}
// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
return c.columnMeta.GetTotalUncompressedSize()
}
// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
return false, nil
}
if c.possibleStats == nil {
c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
}
encoded, err := c.possibleStats.Encode()
if err != nil {
return false, err
}
return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics can return nil if there are no stats in this metadata
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
ok, err := c.StatsSet()
if err != nil {
return nil, err
}
if ok {
return c.possibleStats, nil
}
return nil, nil
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
chunk *format.ColumnChunk
props *parquet.WriterProperties
column *schema.Column
compressedSize int64
}
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
return NewColumnChunkMetaDataBuilderWithContents(props, column, format.NewColumnChunk())
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
b := &ColumnChunkMetaDataBuilder{
props: props,
column: column,
chunk: chunk,
}
b.init(chunk)
return b
}
// Contents returns the underlying thrift ColumnChunk object so that it can be used
// for constructing or duplicating column metadata
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk { return c.chunk }
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
c.chunk = chunk
if !c.chunk.IsSetMetaData() {
c.chunk.MetaData = format.NewColumnMetaData()
}
c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
c.chunk.FilePath = &val
}
// Descr returns the associated column descriptor for this column chunk
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column { return c.column }
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
// if this column is encrypted, after Finish is called, the MetaData
// field is set to nil and we store the compressed size so return that
if c.chunk.MetaData == nil {
return c.compressedSize
}
return c.chunk.MetaData.GetTotalCompressedSize()
}
func (c *ColumnChunkMetaDataBuilder) SetStats(val EncodedStatistics) {
c.chunk.MetaData.Statistics = val.ToThrift()
}
// ChunkMetaInfo is a helper struct for passing the offset and size information
// for finishing the building of column chunk metadata
type ChunkMetaInfo struct {
NumValues int64
DictPageOffset int64
IndexPageOffset int64
DataPageOffset int64
CompressedSize int64
UncompressedSize int64
}
// EncodingStats is a helper struct for passing the encoding stat information
// for finishing up metadata for a column chunk.
type EncodingStats struct {
DictEncodingStats map[parquet.Encoding]int32
DataEncodingStats map[parquet.Encoding]int32
}
// Finish finalizes the metadata with the given offsets,
// flushes any compression that needs to be done, and performs
// any encryption if an encryptor is provided.
//
// For encrypted-footer columns Finish nils out c.chunk.MetaData and caches
// the compressed size so TotalCompressedSize keeps working afterwards.
// NOTE(review): metaEncryptor is dereferenced whenever column metadata must
// be encrypted separately — confirm callers always pass a non-nil encryptor
// for encrypted columns.
func (c *ColumnChunkMetaDataBuilder) Finish(info ChunkMetaInfo, hasDict, dictFallback bool, encStats EncodingStats, metaEncryptor encryption.Encryptor) error {
	// NOTE(review): FileOffset is written as (first page offset + compressed
	// size), i.e. the chunk's end, not its start. The thrift spec describes
	// file_offset as the offset of the column metadata — this matches what
	// some other writers historically emit, but confirm before changing.
	if info.DictPageOffset > 0 {
		c.chunk.MetaData.DictionaryPageOffset = &info.DictPageOffset
		c.chunk.FileOffset = info.DictPageOffset + info.CompressedSize
	} else {
		c.chunk.FileOffset = info.DataPageOffset + info.CompressedSize
	}
	c.chunk.MetaData.NumValues = info.NumValues
	// a negative IndexPageOffset means "no index page"
	if info.IndexPageOffset >= 0 {
		c.chunk.MetaData.IndexPageOffset = &info.IndexPageOffset
	}
	c.chunk.MetaData.DataPageOffset = info.DataPageOffset
	c.chunk.MetaData.TotalUncompressedSize = info.UncompressedSize
	c.chunk.MetaData.TotalCompressedSize = info.CompressedSize
	// no matter the configuration, the maximum number of thrift encodings we'll
	// populate is going to be 3:
	// 1. potential dictionary index encoding
	// 2. page encoding
	// 3. RLE for repetition and definition levels
	// so let's preallocate a capacity of 3 but initialize the slice at 0 len
	const maxEncodings = 3
	thriftEncodings := make([]format.Encoding, 0, maxEncodings)
	if hasDict {
		thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryIndexEncoding()))
		if c.props.Version() == parquet.V1_0 {
			// V1 files always record PLAIN for the dictionary page encoding
			thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
		} else {
			thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryPageEncoding()))
		}
	} else { // no dictionary
		thriftEncodings = append(thriftEncodings, format.Encoding(c.props.EncodingFor(c.column.Path())))
	}
	// repetition/definition levels are recorded as RLE encoded
	thriftEncodings = append(thriftEncodings, format.Encoding(parquet.Encodings.RLE))
	// Only PLAIN encoding is supported for fallback in V1
	// TODO(zeroshade): Use user specified encoding for V2
	if dictFallback {
		thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
	}
	c.chunk.MetaData.Encodings = thriftEncodings
	// flatten the dict/data page maps into thrift PageEncodingStats entries
	thriftEncodingStats := make([]*format.PageEncodingStats, 0, len(encStats.DictEncodingStats)+len(encStats.DataEncodingStats))
	for k, v := range encStats.DictEncodingStats {
		thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
			PageType: format.PageType_DICTIONARY_PAGE,
			Encoding: format.Encoding(k),
			Count:    v,
		})
	}
	for k, v := range encStats.DataEncodingStats {
		thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
			PageType: format.PageType_DATA_PAGE,
			Encoding: format.Encoding(k),
			Count:    v,
		})
	}
	c.chunk.MetaData.EncodingStats = thriftEncodingStats
	encryptProps := c.props.ColumnEncryptionProperties(c.column.Path())
	if encryptProps != nil && encryptProps.IsEncrypted() {
		ccmd := format.NewColumnCryptoMetaData()
		if encryptProps.IsEncryptedWithFooterKey() {
			ccmd.ENCRYPTION_WITH_FOOTER_KEY = format.NewEncryptionWithFooterKey()
		} else {
			// the column has its own key: record the key metadata and path
			ccmd.ENCRYPTION_WITH_COLUMN_KEY = &format.EncryptionWithColumnKey{
				KeyMetadata:  []byte(encryptProps.KeyMetadata()),
				PathInSchema: c.column.ColumnPath(),
			}
		}
		c.chunk.CryptoMetadata = ccmd
		encryptedFooter := c.props.FileEncryptionProperties().EncryptedFooter()
		// metadata is encrypted separately unless the footer itself is
		// encrypted AND this column uses the footer key (footer encryption
		// then already covers it)
		encryptMetadata := !encryptedFooter || !encryptProps.IsEncryptedWithFooterKey()
		if encryptMetadata {
			// Serialize and encrypt ColumnMetadata separately
			// Thrift-serialize the ColumnMetaData structure,
			// encrypt it with the column key, and write to encrypted_column_metadata
			serializer := thrift.NewThriftSerializer()
			data, err := serializer.Write(context.Background(), c.chunk.MetaData)
			if err != nil {
				return err
			}
			var buf bytes.Buffer
			metaEncryptor.Encrypt(&buf, data)
			c.chunk.EncryptedColumnMetadata = buf.Bytes()
			if encryptedFooter {
				// plaintext metadata must not appear in an encrypted footer;
				// cache the compressed size before dropping MetaData (see
				// TotalCompressedSize)
				c.compressedSize = c.chunk.MetaData.GetTotalCompressedSize()
				c.chunk.MetaData = nil
			} else {
				// Keep redacted metadata version for old readers
				c.chunk.MetaData.Statistics = nil
				c.chunk.MetaData.EncodingStats = nil
			}
		}
	}
	return nil
}
// WriteTo thrift-serializes the underlying ColumnChunk to w. The int64 is
// always 0 because the thrift writer library does not report the number of
// bytes written; the (int64, error) signature exists only to mirror the
// standard WriteTo-style interfaces.
func (c *ColumnChunkMetaDataBuilder) WriteTo(w io.Writer) (int64, error) {
	err := thrift.SerializeThriftStream(c.chunk, w)
	return 0, err
}
| PathInSchema | identifier_name |
column_chunk.go | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import (
"bytes"
"context"
"io"
"reflect"
"github.com/apache/arrow/go/v14/arrow/memory"
"github.com/apache/arrow/go/v14/parquet"
"github.com/apache/arrow/go/v14/parquet/compress"
"github.com/apache/arrow/go/v14/parquet/internal/encryption"
format "github.com/apache/arrow/go/v14/parquet/internal/gen-go/parquet"
"github.com/apache/arrow/go/v14/parquet/internal/thrift"
"github.com/apache/arrow/go/v14/parquet/schema"
"golang.org/x/xerrors"
)
// PageEncodingStats is used for counting the number of pages of specific
// types with the given internal encoding.
type PageEncodingStats struct {
	// Encoding is the parquet encoding used by the counted pages.
	Encoding parquet.Encoding
	// PageType is the thrift page type (data, dictionary, ...) counted.
	PageType format.PageType
}
// statvalues adapts a thrift Statistics struct so the min_value/max_value
// fields are surfaced through the legacy GetMin/GetMax/IsSetMin/IsSetMax
// accessor names.
type statvalues struct {
	*format.Statistics
}

func (v *statvalues) GetMin() []byte { return v.GetMinValue() }
func (v *statvalues) GetMax() []byte { return v.GetMaxValue() }
func (v *statvalues) IsSetMin() bool { return v.IsSetMinValue() }
func (v *statvalues) IsSetMax() bool { return v.IsSetMaxValue() }
// makeColumnStats builds a TypedStatistics object from the encoded stats in
// the given thrift column metadata. Columns with a type-defined ordering go
// through the statvalues adapter so min_value/max_value are used; otherwise
// the raw thrift Statistics (legacy min/max) are passed straight through.
func makeColumnStats(metadata *format.ColumnMetaData, descr *schema.Column, mem memory.Allocator) TypedStatistics {
	nonNull := metadata.NumValues - metadata.Statistics.GetNullCount()
	if descr.ColumnOrder() == parquet.ColumnOrders.TypeDefinedOrder {
		return NewStatisticsFromEncoded(descr, mem, nonNull, &statvalues{metadata.Statistics})
	}
	return NewStatisticsFromEncoded(descr, mem, nonNull, metadata.Statistics)
}
// ColumnChunkMetaData is a proxy around format.ColumnChunkMetaData
// containing all of the information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
	column *format.ColumnChunk
	// columnMeta points at either column.MetaData or, when the metadata was
	// encrypted with a column key, at decryptedMeta (see NewColumnChunkMetaData).
	columnMeta *format.ColumnMetaData
	// decryptedMeta holds the deserialized plaintext metadata after
	// column-key decryption.
	decryptedMeta format.ColumnMetaData
	descr         *schema.Column
	// writerVersion is consulted by StatsSet to reject stats produced by
	// writer versions known to compute them incorrectly.
	writerVersion *AppVersion
	encodings     []parquet.Encoding
	encodingStats []format.PageEncodingStats
	// possibleStats lazily caches the TypedStatistics built by StatsSet.
	possibleStats TypedStatistics
	mem           memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta
} else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
} | // encrypted and how to decrypt it.
// CryptoMetadata returns the cryptographic metadata for how this column was
// encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
	return c.column.GetCryptoMetadata()
}

// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }

// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }

// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }

// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }

// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) PathInSchema() parquet.ColumnPath {
	return c.columnMeta.GetPathInSchema()
}

// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
	return compress.Compression(c.columnMeta.Codec)
}

// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats returns the per-page-type encoding statistics for this
// chunk, converted from their thrift representation, preserving the order
// in which they appear in the file metadata.
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
	stats := make([]PageEncodingStats, 0, len(c.encodingStats))
	for _, s := range c.encodingStats {
		stats = append(stats, PageEncodingStats{
			Encoding: parquet.Encoding(s.Encoding),
			PageType: s.PageType,
		})
	}
	return stats
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
	return c.columnMeta.IsSetDictionaryPageOffset()
}

// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
	return c.columnMeta.GetDictionaryPageOffset()
}

// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }

// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }

// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }

// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
	return c.columnMeta.GetTotalCompressedSize()
}

// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
	return c.columnMeta.GetTotalUncompressedSize()
}

// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
// NOTE(review): returns the thrift getter's default value when no bloom
// filter offset is present — callers should presumably gate on an IsSet
// check; confirm at the call sites.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
	return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
	if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
		return false, nil
	}
	// lazily construct and cache the typed statistics; Statistics() hands out
	// this same cached object when StatsSet reports true
	if c.possibleStats == nil {
		c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
	}
	encoded, err := c.possibleStats.Encode()
	if err != nil {
		return false, err
	}
	return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
// Equals reports whether the two chunks carry identical column metadata,
// compared by deep equality of the underlying thrift structs.
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
	return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics returns the typed statistics for this chunk, or nil (with a nil
// error) when the metadata has no usable stats (see StatsSet).
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
	ok, err := c.StatsSet()
	switch {
	case err != nil:
		return nil, err
	case ok:
		return c.possibleStats, nil
	default:
		return nil, nil
	}
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
	chunk  *format.ColumnChunk
	props  *parquet.WriterProperties
	column *schema.Column
	// compressedSize caches MetaData.TotalCompressedSize just before Finish
	// nils out MetaData for encrypted-footer columns (see TotalCompressedSize).
	compressedSize int64
}
// NewColumnChunkMetaDataBuilder creates a builder backed by a brand-new,
// empty thrift ColumnChunk for the given writer properties and column.
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
	chunk := format.NewColumnChunk()
	return NewColumnChunkMetaDataBuilderWithContents(props, column, chunk)
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
	bldr := &ColumnChunkMetaDataBuilder{
		props:  props,
		column: column,
		chunk:  chunk,
	}
	bldr.init(chunk)
	return bldr
}
// Contents returns the underlying thrift ColumnChunk object so that it can
// be used for constructing or duplicating column metadata.
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk {
	return c.chunk
}
// init seeds the builder's thrift chunk: it ensures a ColumnMetaData struct
// exists and fills in the fields known up front — physical type, the path in
// the schema, and the compression codec chosen by the writer properties.
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
	c.chunk = chunk
	if !c.chunk.IsSetMetaData() {
		c.chunk.MetaData = format.NewColumnMetaData()
	}
	c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
	c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
	c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
// SetFilePath records the name of the parquet file this chunk lives in.
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
	path := val
	c.chunk.FilePath = &path
}
// Descr exposes the schema column descriptor this builder is producing
// metadata for.
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column {
	return c.column
}
// TotalCompressedSize reports the chunk's total compressed byte size. After
// Finish has run for an encrypted-footer column, the MetaData field is nil,
// so the value cached at that point is returned instead.
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
	if md := c.chunk.MetaData; md != nil {
		return md.GetTotalCompressedSize()
	}
	return c.compressedSize
}
// SetStats stores the thrift form of the given encoded statistics on the
// chunk's column metadata.
//
// NOTE(review): this dereferences c.chunk.MetaData without a nil check, so it
// must be called before Finish nils out MetaData for encrypted-footer
// columns — confirm call ordering at the call sites.
func (c *ColumnChunkMetaDataBuilder) SetStats(val EncodedStatistics) {
	c.chunk.MetaData.Statistics = val.ToThrift()
}
// ChunkMetaInfo is a helper struct for passing the offset and size information
// for finishing the building of column chunk metadata
type ChunkMetaInfo struct {
	// NumValues is the total number of values (including nulls) in the chunk.
	NumValues int64
	// DictPageOffset is the file offset of the dictionary page; a value <= 0
	// means the chunk has no dictionary page (see Finish).
	DictPageOffset int64
	// IndexPageOffset is the file offset of the index page; a negative value
	// means "not set" (see Finish).
	IndexPageOffset int64
	// DataPageOffset is the file offset where the data pages begin.
	DataPageOffset int64
	// CompressedSize is the total compressed byte size of the chunk data.
	CompressedSize int64
	// UncompressedSize is the total uncompressed byte size of the chunk data.
	UncompressedSize int64
}
// EncodingStats is a helper struct for passing the encoding stat information
// for finishing up metadata for a column chunk.
type EncodingStats struct {
	// DictEncodingStats maps each encoding used for dictionary pages to the
	// count recorded for it (written as PageEncodingStats.Count by Finish).
	DictEncodingStats map[parquet.Encoding]int32
	// DataEncodingStats maps each encoding used for data pages to the count
	// recorded for it.
	DataEncodingStats map[parquet.Encoding]int32
}
// Finish finalizes the metadata with the given offsets,
// flushes any compression that needs to be done, and performs
// any encryption if an encryptor is provided.
//
// For encrypted-footer columns Finish nils out c.chunk.MetaData and caches
// the compressed size so TotalCompressedSize keeps working afterwards.
// NOTE(review): metaEncryptor is dereferenced whenever column metadata must
// be encrypted separately — confirm callers always pass a non-nil encryptor
// for encrypted columns.
func (c *ColumnChunkMetaDataBuilder) Finish(info ChunkMetaInfo, hasDict, dictFallback bool, encStats EncodingStats, metaEncryptor encryption.Encryptor) error {
	// NOTE(review): FileOffset is written as (first page offset + compressed
	// size), i.e. the chunk's end, not its start. The thrift spec describes
	// file_offset as the offset of the column metadata — this matches what
	// some other writers historically emit, but confirm before changing.
	if info.DictPageOffset > 0 {
		c.chunk.MetaData.DictionaryPageOffset = &info.DictPageOffset
		c.chunk.FileOffset = info.DictPageOffset + info.CompressedSize
	} else {
		c.chunk.FileOffset = info.DataPageOffset + info.CompressedSize
	}
	c.chunk.MetaData.NumValues = info.NumValues
	// a negative IndexPageOffset means "no index page"
	if info.IndexPageOffset >= 0 {
		c.chunk.MetaData.IndexPageOffset = &info.IndexPageOffset
	}
	c.chunk.MetaData.DataPageOffset = info.DataPageOffset
	c.chunk.MetaData.TotalUncompressedSize = info.UncompressedSize
	c.chunk.MetaData.TotalCompressedSize = info.CompressedSize
	// no matter the configuration, the maximum number of thrift encodings we'll
	// populate is going to be 3:
	// 1. potential dictionary index encoding
	// 2. page encoding
	// 3. RLE for repetition and definition levels
	// so let's preallocate a capacity of 3 but initialize the slice at 0 len
	const maxEncodings = 3
	thriftEncodings := make([]format.Encoding, 0, maxEncodings)
	if hasDict {
		thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryIndexEncoding()))
		if c.props.Version() == parquet.V1_0 {
			// V1 files always record PLAIN for the dictionary page encoding
			thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
		} else {
			thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryPageEncoding()))
		}
	} else { // no dictionary
		thriftEncodings = append(thriftEncodings, format.Encoding(c.props.EncodingFor(c.column.Path())))
	}
	// repetition/definition levels are recorded as RLE encoded
	thriftEncodings = append(thriftEncodings, format.Encoding(parquet.Encodings.RLE))
	// Only PLAIN encoding is supported for fallback in V1
	// TODO(zeroshade): Use user specified encoding for V2
	if dictFallback {
		thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
	}
	c.chunk.MetaData.Encodings = thriftEncodings
	// flatten the dict/data page maps into thrift PageEncodingStats entries
	thriftEncodingStats := make([]*format.PageEncodingStats, 0, len(encStats.DictEncodingStats)+len(encStats.DataEncodingStats))
	for k, v := range encStats.DictEncodingStats {
		thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
			PageType: format.PageType_DICTIONARY_PAGE,
			Encoding: format.Encoding(k),
			Count:    v,
		})
	}
	for k, v := range encStats.DataEncodingStats {
		thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
			PageType: format.PageType_DATA_PAGE,
			Encoding: format.Encoding(k),
			Count:    v,
		})
	}
	c.chunk.MetaData.EncodingStats = thriftEncodingStats
	encryptProps := c.props.ColumnEncryptionProperties(c.column.Path())
	if encryptProps != nil && encryptProps.IsEncrypted() {
		ccmd := format.NewColumnCryptoMetaData()
		if encryptProps.IsEncryptedWithFooterKey() {
			ccmd.ENCRYPTION_WITH_FOOTER_KEY = format.NewEncryptionWithFooterKey()
		} else {
			// the column has its own key: record the key metadata and path
			ccmd.ENCRYPTION_WITH_COLUMN_KEY = &format.EncryptionWithColumnKey{
				KeyMetadata:  []byte(encryptProps.KeyMetadata()),
				PathInSchema: c.column.ColumnPath(),
			}
		}
		c.chunk.CryptoMetadata = ccmd
		encryptedFooter := c.props.FileEncryptionProperties().EncryptedFooter()
		// metadata is encrypted separately unless the footer itself is
		// encrypted AND this column uses the footer key (footer encryption
		// then already covers it)
		encryptMetadata := !encryptedFooter || !encryptProps.IsEncryptedWithFooterKey()
		if encryptMetadata {
			// Serialize and encrypt ColumnMetadata separately
			// Thrift-serialize the ColumnMetaData structure,
			// encrypt it with the column key, and write to encrypted_column_metadata
			serializer := thrift.NewThriftSerializer()
			data, err := serializer.Write(context.Background(), c.chunk.MetaData)
			if err != nil {
				return err
			}
			var buf bytes.Buffer
			metaEncryptor.Encrypt(&buf, data)
			c.chunk.EncryptedColumnMetadata = buf.Bytes()
			if encryptedFooter {
				// plaintext metadata must not appear in an encrypted footer;
				// cache the compressed size before dropping MetaData (see
				// TotalCompressedSize)
				c.compressedSize = c.chunk.MetaData.GetTotalCompressedSize()
				c.chunk.MetaData = nil
			} else {
				// Keep redacted metadata version for old readers
				c.chunk.MetaData.Statistics = nil
				c.chunk.MetaData.EncodingStats = nil
			}
		}
	}
	return nil
}
// WriteTo will always return 0 as the int64 since the thrift writer library
// does not return the number of bytes written, we only use the signature
// of (int64, error) in order to match the standard WriteTo interfaces.
func (c *ColumnChunkMetaDataBuilder) WriteTo(w io.Writer) (int64, error) {
return 0, thrift.SerializeThriftStream(c.chunk, w)
} |
// CryptoMetadata returns the cryptographic metadata for how this column was | random_line_split |
column_chunk.go | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import (
"bytes"
"context"
"io"
"reflect"
"github.com/apache/arrow/go/v14/arrow/memory"
"github.com/apache/arrow/go/v14/parquet"
"github.com/apache/arrow/go/v14/parquet/compress"
"github.com/apache/arrow/go/v14/parquet/internal/encryption"
format "github.com/apache/arrow/go/v14/parquet/internal/gen-go/parquet"
"github.com/apache/arrow/go/v14/parquet/internal/thrift"
"github.com/apache/arrow/go/v14/parquet/schema"
"golang.org/x/xerrors"
)
// PageEncodingStats is used for counting the number of pages of specific
// types with the given internal encoding.
type PageEncodingStats struct {
	// Encoding is the parquet encoding used by the counted pages.
	Encoding parquet.Encoding
	// PageType is the thrift page type (data, dictionary, ...) counted.
	PageType format.PageType
}
// statvalues adapts a thrift Statistics struct so the min_value/max_value
// fields are surfaced through the legacy GetMin/GetMax/IsSetMin/IsSetMax
// accessor names.
type statvalues struct {
	*format.Statistics
}

func (v *statvalues) GetMin() []byte { return v.GetMinValue() }
func (v *statvalues) GetMax() []byte { return v.GetMaxValue() }
func (v *statvalues) IsSetMin() bool { return v.IsSetMinValue() }
func (v *statvalues) IsSetMax() bool { return v.IsSetMaxValue() }
// makeColumnStats builds a TypedStatistics object from the encoded stats in
// the given thrift column metadata. Columns with a type-defined ordering go
// through the statvalues adapter so min_value/max_value are used; otherwise
// the raw thrift Statistics (legacy min/max) are passed straight through.
func makeColumnStats(metadata *format.ColumnMetaData, descr *schema.Column, mem memory.Allocator) TypedStatistics {
	nonNull := metadata.NumValues - metadata.Statistics.GetNullCount()
	if descr.ColumnOrder() == parquet.ColumnOrders.TypeDefinedOrder {
		return NewStatisticsFromEncoded(descr, mem, nonNull, &statvalues{metadata.Statistics})
	}
	return NewStatisticsFromEncoded(descr, mem, nonNull, metadata.Statistics)
}
// ColumnChunkMetaData is a proxy around format.ColumnChunkMetaData
// containing all of the information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
	column *format.ColumnChunk
	// columnMeta points at either column.MetaData or, when the metadata was
	// encrypted with a column key, at decryptedMeta (see NewColumnChunkMetaData).
	columnMeta *format.ColumnMetaData
	// decryptedMeta holds the deserialized plaintext metadata after
	// column-key decryption.
	decryptedMeta format.ColumnMetaData
	descr         *schema.Column
	// writerVersion is consulted by StatsSet to reject stats produced by
	// writer versions known to compute them incorrectly.
	writerVersion *AppVersion
	encodings     []parquet.Encoding
	encodingStats []format.PageEncodingStats
	// possibleStats lazily caches the TypedStatistics built by StatsSet.
	possibleStats TypedStatistics
	mem           memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) |
// CryptoMetadata returns the cryptographic metadata for how this column was
// encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
	return c.column.GetCryptoMetadata()
}

// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }

// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }

// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }

// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }

// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) PathInSchema() parquet.ColumnPath {
	return c.columnMeta.GetPathInSchema()
}

// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
	return compress.Compression(c.columnMeta.Codec)
}

// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats returns the per-page-type encoding statistics for this
// chunk, converted from their thrift representation, preserving the order
// in which they appear in the file metadata.
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
	stats := make([]PageEncodingStats, 0, len(c.encodingStats))
	for _, s := range c.encodingStats {
		stats = append(stats, PageEncodingStats{
			Encoding: parquet.Encoding(s.Encoding),
			PageType: s.PageType,
		})
	}
	return stats
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
	return c.columnMeta.IsSetDictionaryPageOffset()
}

// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
	return c.columnMeta.GetDictionaryPageOffset()
}

// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }

// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }

// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }

// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
	return c.columnMeta.GetTotalCompressedSize()
}

// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
	return c.columnMeta.GetTotalUncompressedSize()
}

// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
// NOTE(review): returns the thrift getter's default value when no bloom
// filter offset is present — callers should presumably gate on an IsSet
// check; confirm at the call sites.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
	return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
	if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
		return false, nil
	}
	// lazily construct and cache the typed statistics; Statistics() hands out
	// this same cached object when StatsSet reports true
	if c.possibleStats == nil {
		c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
	}
	encoded, err := c.possibleStats.Encode()
	if err != nil {
		return false, err
	}
	return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
// Equals reports whether the two chunks carry identical column metadata,
// compared by deep equality of the underlying thrift structs.
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
	return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics returns the typed statistics for this chunk, or nil (with a nil
// error) when the metadata has no usable stats (see StatsSet).
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
	ok, err := c.StatsSet()
	switch {
	case err != nil:
		return nil, err
	case ok:
		return c.possibleStats, nil
	default:
		return nil, nil
	}
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
	chunk  *format.ColumnChunk
	props  *parquet.WriterProperties
	column *schema.Column
	// compressedSize caches MetaData.TotalCompressedSize just before Finish
	// nils out MetaData for encrypted-footer columns (see TotalCompressedSize).
	compressedSize int64
}
// NewColumnChunkMetaDataBuilder creates a builder backed by a brand-new,
// empty thrift ColumnChunk for the given writer properties and column.
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
	chunk := format.NewColumnChunk()
	return NewColumnChunkMetaDataBuilderWithContents(props, column, chunk)
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
	bldr := &ColumnChunkMetaDataBuilder{
		props:  props,
		column: column,
		chunk:  chunk,
	}
	bldr.init(chunk)
	return bldr
}
// Contents returns the underlying thrift ColumnChunk object so that it can
// be used for constructing or duplicating column metadata.
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk {
	return c.chunk
}
// init seeds the builder's thrift chunk: it ensures a ColumnMetaData struct
// exists and fills in the fields known up front — physical type, the path in
// the schema, and the compression codec chosen by the writer properties.
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
	c.chunk = chunk
	if !c.chunk.IsSetMetaData() {
		c.chunk.MetaData = format.NewColumnMetaData()
	}
	c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
	c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
	c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
// SetFilePath records the name of the parquet file this chunk lives in.
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
	path := val
	c.chunk.FilePath = &path
}
// Descr exposes the schema column descriptor this builder is producing
// metadata for.
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column {
	return c.column
}
// TotalCompressedSize reports the chunk's total compressed byte size. After
// Finish has run for an encrypted-footer column, the MetaData field is nil,
// so the value cached at that point is returned instead.
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
	if md := c.chunk.MetaData; md != nil {
		return md.GetTotalCompressedSize()
	}
	return c.compressedSize
}
// SetStats stores the thrift form of the given encoded statistics on the
// chunk's column metadata.
//
// NOTE(review): this dereferences c.chunk.MetaData without a nil check, so it
// must be called before Finish nils out MetaData for encrypted-footer
// columns — confirm call ordering at the call sites.
func (c *ColumnChunkMetaDataBuilder) SetStats(val EncodedStatistics) {
	c.chunk.MetaData.Statistics = val.ToThrift()
}
// ChunkMetaInfo is a helper struct for passing the offset and size information
// for finishing the building of column chunk metadata
type ChunkMetaInfo struct {
	// NumValues is the total number of values (including nulls) in the chunk.
	NumValues int64
	// DictPageOffset is the file offset of the dictionary page; a value <= 0
	// means the chunk has no dictionary page (see Finish).
	DictPageOffset int64
	// IndexPageOffset is the file offset of the index page; a negative value
	// means "not set" (see Finish).
	IndexPageOffset int64
	// DataPageOffset is the file offset where the data pages begin.
	DataPageOffset int64
	// CompressedSize is the total compressed byte size of the chunk data.
	CompressedSize int64
	// UncompressedSize is the total uncompressed byte size of the chunk data.
	UncompressedSize int64
}
// EncodingStats is a helper struct for passing the encoding stat information
// for finishing up metadata for a column chunk.
type EncodingStats struct {
	// DictEncodingStats maps each encoding used for dictionary pages to the
	// count recorded for it (written as PageEncodingStats.Count by Finish).
	DictEncodingStats map[parquet.Encoding]int32
	// DataEncodingStats maps each encoding used for data pages to the count
	// recorded for it.
	DataEncodingStats map[parquet.Encoding]int32
}
// Finish finalizes the metadata with the given offsets,
// flushes any compression that needs to be done, and performs
// any encryption if an encryptor is provided.
//
// info carries the page offsets and sizes recorded while writing the chunk;
// hasDict reports whether a dictionary page was written, and dictFallback
// whether the writer fell back from dictionary encoding mid-chunk.
// NOTE(review): metaEncryptor is dereferenced only on the path where column
// metadata must be encrypted separately — confirm callers always supply a
// non-nil encryptor for encrypted columns.
func (c *ColumnChunkMetaDataBuilder) Finish(info ChunkMetaInfo, hasDict, dictFallback bool, encStats EncodingStats, metaEncryptor encryption.Encryptor) error {
// FileOffset is computed from the first page's offset (dictionary page
// when one exists, otherwise the first data page) plus CompressedSize.
if info.DictPageOffset > 0 {
c.chunk.MetaData.DictionaryPageOffset = &info.DictPageOffset
c.chunk.FileOffset = info.DictPageOffset + info.CompressedSize
} else {
c.chunk.FileOffset = info.DataPageOffset + info.CompressedSize
}
c.chunk.MetaData.NumValues = info.NumValues
// A negative IndexPageOffset indicates no index page was written.
if info.IndexPageOffset >= 0 {
c.chunk.MetaData.IndexPageOffset = &info.IndexPageOffset
}
c.chunk.MetaData.DataPageOffset = info.DataPageOffset
c.chunk.MetaData.TotalUncompressedSize = info.UncompressedSize
c.chunk.MetaData.TotalCompressedSize = info.CompressedSize
// no matter the configuration, the maximum number of thrift encodings we'll
// populate is going to be 3:
// 1. potential dictionary index encoding
// 2. page encoding
// 3. RLE for repetition and definition levels
// so let's preallocate a capacity of 3 but initialize the slice at 0 len
const maxEncodings = 3
thriftEncodings := make([]format.Encoding, 0, maxEncodings)
if hasDict {
thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryIndexEncoding()))
// V1 files always record PLAIN here; V2 records the configured
// dictionary page encoding.
if c.props.Version() == parquet.V1_0 {
thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
} else {
thriftEncodings = append(thriftEncodings, format.Encoding(c.props.DictionaryPageEncoding()))
}
} else { // no dictionary
thriftEncodings = append(thriftEncodings, format.Encoding(c.props.EncodingFor(c.column.Path())))
}
thriftEncodings = append(thriftEncodings, format.Encoding(parquet.Encodings.RLE))
// Only PLAIN encoding is supported for fallback in V1
// TODO(zeroshade): Use user specified encoding for V2
if dictFallback {
thriftEncodings = append(thriftEncodings, format.Encoding_PLAIN)
}
c.chunk.MetaData.Encodings = thriftEncodings
// Convert the per-encoding page counts into thrift PageEncodingStats
// entries, tagging each with the page type it was counted for.
thriftEncodingStats := make([]*format.PageEncodingStats, 0, len(encStats.DictEncodingStats)+len(encStats.DataEncodingStats))
for k, v := range encStats.DictEncodingStats {
thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
PageType: format.PageType_DICTIONARY_PAGE,
Encoding: format.Encoding(k),
Count: v,
})
}
for k, v := range encStats.DataEncodingStats {
thriftEncodingStats = append(thriftEncodingStats, &format.PageEncodingStats{
PageType: format.PageType_DATA_PAGE,
Encoding: format.Encoding(k),
Count: v,
})
}
c.chunk.MetaData.EncodingStats = thriftEncodingStats
// If the column is encrypted, record how (footer key vs. column key) and,
// when required, replace the plaintext metadata with an encrypted copy.
encryptProps := c.props.ColumnEncryptionProperties(c.column.Path())
if encryptProps != nil && encryptProps.IsEncrypted() {
ccmd := format.NewColumnCryptoMetaData()
if encryptProps.IsEncryptedWithFooterKey() {
ccmd.ENCRYPTION_WITH_FOOTER_KEY = format.NewEncryptionWithFooterKey()
} else {
ccmd.ENCRYPTION_WITH_COLUMN_KEY = &format.EncryptionWithColumnKey{
KeyMetadata: []byte(encryptProps.KeyMetadata()),
PathInSchema: c.column.ColumnPath(),
}
}
c.chunk.CryptoMetadata = ccmd
// Metadata is encrypted separately unless the footer itself is encrypted
// AND this column uses the footer key.
encryptedFooter := c.props.FileEncryptionProperties().EncryptedFooter()
encryptMetadata := !encryptedFooter || !encryptProps.IsEncryptedWithFooterKey()
if encryptMetadata {
// Serialize and encrypt ColumnMetadata separately
// Thrift-serialize the ColumnMetaData structure,
// encrypt it with the column key, and write to encrypted_column_metadata
serializer := thrift.NewThriftSerializer()
data, err := serializer.Write(context.Background(), c.chunk.MetaData)
if err != nil {
return err
}
var buf bytes.Buffer
metaEncryptor.Encrypt(&buf, data)
c.chunk.EncryptedColumnMetadata = buf.Bytes()
if encryptedFooter {
// Footer is encrypted too: retain the compressed size, then drop
// the plaintext metadata entirely.
c.compressedSize = c.chunk.MetaData.GetTotalCompressedSize()
c.chunk.MetaData = nil
} else {
// Keep redacted metadata version for old readers
c.chunk.MetaData.Statistics = nil
c.chunk.MetaData.EncodingStats = nil
}
}
}
return nil
}
// WriteTo will always return 0 as the int64 since the thrift writer library
// does not return the number of bytes written, we only use the signature
// of (int64, error) in order to match the standard WriteTo interfaces.
func (c *ColumnChunkMetaDataBuilder) WriteTo(w io.Writer) (int64, error) {
// Thrift-serialize the accumulated column chunk metadata directly into w.
return 0, thrift.SerializeThriftStream(c.chunk, w)
}
| {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta
} else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
} | identifier_body |
store.go | // Copyright 2019 Matt Layher
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wgipamtest
import (
"fmt"
"net"
"sort"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/mdlayher/wgipam"
)
// Populated fixtures for use in tests.
var (
// okLease is a valid dual-stack lease starting at t=1s and lasting 10s.
okLease = &wgipam.Lease{
IPs: []*net.IPNet{mustCIDR("192.0.2.0/32"), mustCIDR("2001:db8::/128")},
Start: time.Unix(1, 0),
Length: 10 * time.Second,
}
// okSubnet4/okIP4 and okSubnet6/okIP6 pair a subnet with an address that
// resides within it, for IPv4 and IPv6 respectively.
okSubnet4 = mustCIDR("192.0.2.0/24")
okIP4 = mustCIDR("192.0.2.1/32")
okSubnet6 = mustCIDR("2001:db8::/64")
okIP6 = mustCIDR("2001:db8::1/128")
)
// MakeStore is a function which produces a new wgipam.Store on each
// invocation. The Store should be completely empty when created.
type MakeStore func(t *testing.T) wgipam.Store
// TestStore tests a wgipam.Store type for compliance with the
// interface. The MakeStore function is invoked to retrieve a new and empty
// Store for each subtest.
func TestStore(t *testing.T, ms MakeStore) {
t.Helper()
// Table of subtests: each entry exercises one Store behavior against an
// independent, freshly created store.
tests := []struct {
name string
fn func(t *testing.T, s wgipam.Store)
}{
{
name: "leases empty",
fn: testLeasesEmpty,
},
{
name: "leases OK",
fn: testLeasesOK,
},
{
name: "lease not exist",
fn: testLeaseNotExist,
},
{
name: "save lease OK",
fn: testSaveLeaseOK,
},
{
name: "delete lease OK",
fn: testDeleteLeaseOK,
},
{
name: "purge OK",
fn: testPurgeOK,
},
{
name: "subnets empty",
fn: testSubnetsEmpty,
},
{
name: "subnets OK",
fn: testSubnetsOK,
},
{
name: "allocated IPs no subnet",
fn: testAllocatedIPsNoSubnet,
},
{
name: "allocate IP mismatched subnet",
fn: testAllocateIPMismatchedSubnet,
},
{
name: "allocate IP no subnet",
fn: testAllocateIPNoSubnet,
},
{
name: "allocate IP already allocated",
fn: testAllocateIPAlreadyAllocated,
},
{
name: "free IP mismatched subnet",
fn: testFreeIPMismatchedSubnet,
},
{
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Build a fresh store per subtest and always close it afterwards.
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
// testLeasesEmpty verifies that a freshly created Store reports no leases.
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
	t.Helper()

	got, err := s.Leases()
	if err != nil {
		t.Fatalf("failed to get leases: %v", err)
	}

	count := len(got)
	if diff := cmp.Diff(0, count); diff != "" {
		t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
	}
}
// testLeasesOK verifies that every saved lease is returned by Leases.
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
// All three keys stored the same fixture, so three identical leases are
// expected back.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
// testLeaseNotExist verifies that looking up an unknown key reports no
// error, no lease found, and a nil lease value.
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
	t.Helper()

	lease, found, err := s.Lease(1)
	switch {
	case err != nil:
		t.Fatalf("failed to get lease: %v", err)
	case found:
		t.Fatal("found a lease when none was expected")
	case lease != nil:
		t.Fatal("returned non-nil lease when not found")
	}
}
// testSaveLeaseOK verifies that a lease saved under a key round-trips
// unchanged through Lease.
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
// The fetched lease must compare equal to the fixture that was stored.
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
// testDeleteLeaseOK verifies that deleting a lease removes it and that
// repeated deletions of the same key succeed without error.
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
// The lease must be gone after deletion.
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
// testPurgeOK verifies that Purge removes exactly the leases expiring at or
// before the given time, frees their IP allocations, and is idempotent.
func testPurgeOK(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
)
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
// NOTE(review): if stats.FreedIPs is empty this loop body never runs,
// so wantFreed is only enforced for subnets present in the map —
// confirm Purge reports both subnets on the first pass.
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
// The surviving lease's addresses must be the only remaining allocations
// in their respective subnets.
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil {
t.Fatalf("failed to get allocated IPv4s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
}
// testSubnetsEmpty verifies that a freshly created Store reports no subnets.
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
	t.Helper()

	got, err := s.Subnets()
	if err != nil {
		t.Fatalf("failed to get subnets: %v", err)
	}

	if diff := cmp.Diff(0, len(got)); diff != "" {
		t.Fatalf("unexpected number of subnets (-want +got):\n%s", diff)
	}
}
// testSubnetsOK verifies that every saved subnet is returned by Subnets.
func testSubnetsOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic subnets to be fetched again later.
want := []*net.IPNet{okSubnet4, okSubnet6}
for _, sub := range want {
if err := s.SaveSubnet(sub); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
}
got, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
sort.SliceStable(want, func(i, j int) bool {
return want[i].String() < want[j].String()
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].String() < got[j].String()
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Subnets (-want +got):\n%s", diff)
}
}
// testAllocatedIPsNoSubnet verifies that querying allocations for a subnet
// that was never saved returns an error.
func testAllocatedIPsNoSubnet(t *testing.T, s wgipam.Store) {
	t.Helper()

	_, err := s.AllocatedIPs(okSubnet4)
	if err == nil {
		t.Fatal("expected no such subnet error, but none occurred")
	}
}
func | (t *testing.T, s wgipam.Store) {
t.Helper()
// An IPv6 address cannot possibly reside in an IPv4 subnet.
if _, err := s.AllocateIP(okSubnet4, okIP6); err == nil {
t.Fatal("expected mismatched subnet error, but none occurred")
}
}
// testAllocateIPNoSubnet verifies that allocating an IP within a subnet
// that was never saved returns an error.
func testAllocateIPNoSubnet(t *testing.T, s wgipam.Store) {
	t.Helper()

	_, err := s.AllocateIP(okSubnet4, okIP4)
	if err == nil {
		t.Fatal("expected no such subnet error, but none occurred")
	}
}
// testAllocateIPAlreadyAllocated verifies that an IP can be allocated only
// once: the first AllocateIP reports success, the second reports failure.
func testAllocateIPAlreadyAllocated(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
// First call succeeds, second cannot.
ok, err := s.AllocateIP(okSubnet4, okIP4)
if err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
ok, err = s.AllocateIP(okSubnet4, okIP4)
if err != nil || ok {
t.Fatalf("expected IP already allocated: ok: %v, err: %v", ok, err)
}
}
// testFreeIPMismatchedSubnet verifies that freeing an address which cannot
// belong to the given subnet returns an error.
func testFreeIPMismatchedSubnet(t *testing.T, s wgipam.Store) {
	t.Helper()

	// An IPv6 address cannot possibly reside in an IPv4 subnet.
	err := s.FreeIP(okSubnet4, okIP6)
	if err == nil {
		t.Fatal("expected mismatched subnet error, but none occurred")
	}
}
// testFreeIPNoSubnet verifies that freeing an IP in a subnet that was never
// saved returns an error.
func testFreeIPNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.FreeIP(okSubnet4, okIP4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
// testFreeIPOK verifies allocate-then-free round trips for both IPv4 and
// IPv6 subnets, and that freeing an already-free IP is not an error.
func testFreeIPOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Allocate IP addresses in multiple subnets and ensure they can also
// be freed idempotently.
// Each pair is (subnet, address-within-that-subnet).
pairs := [][2]*net.IPNet{
{okSubnet4, okIP4},
{okSubnet6, okIP6},
}
for _, p := range pairs {
if err := s.SaveSubnet(p[0]); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
if ok, err := s.AllocateIP(p[0], p[1]); err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
// Repeated frees should be idempotent.
for i := 0; i < 3; i++ {
if err := s.FreeIP(p[0], p[1]); err != nil {
t.Fatalf("failed to free IP: %v", err)
}
}
}
}
// mustCIDR parses s as a CIDR-notation network and returns it, panicking if
// s is not valid. It exists to build package-level test fixtures concisely.
func mustCIDR(s string) *net.IPNet {
	_, network, err := net.ParseCIDR(s)
	if err != nil {
		panicf("failed to parse CIDR: %v", err)
	}
	return network
}
// panicf panics with a fmt.Sprintf-formatted message; used by fixture
// helpers such as mustCIDR where failure indicates a programming error.
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}
| testAllocateIPMismatchedSubnet | identifier_name |
store.go | // Copyright 2019 Matt Layher
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wgipamtest
import (
"fmt"
"net"
"sort"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/mdlayher/wgipam"
)
// Populated fixtures for use in tests.
var (
okLease = &wgipam.Lease{
IPs: []*net.IPNet{mustCIDR("192.0.2.0/32"), mustCIDR("2001:db8::/128")},
Start: time.Unix(1, 0),
Length: 10 * time.Second,
}
okSubnet4 = mustCIDR("192.0.2.0/24")
okIP4 = mustCIDR("192.0.2.1/32")
okSubnet6 = mustCIDR("2001:db8::/64")
okIP6 = mustCIDR("2001:db8::1/128")
)
// MakeStore is a function which produces a new wgipam.Store on each
// invocation. The Store should be completely empty when created.
type MakeStore func(t *testing.T) wgipam.Store
// TestStore tests a wgipam.Store type for compliance with the
// interface. The MakeStore function is invoked to retrieve a new and empty
// Store for each subtest.
func TestStore(t *testing.T, ms MakeStore) {
t.Helper()
tests := []struct {
name string
fn func(t *testing.T, s wgipam.Store)
}{
{
name: "leases empty",
fn: testLeasesEmpty,
},
{
name: "leases OK",
fn: testLeasesOK,
},
{
name: "lease not exist",
fn: testLeaseNotExist,
},
{
name: "save lease OK",
fn: testSaveLeaseOK,
},
{
name: "delete lease OK",
fn: testDeleteLeaseOK,
},
{
name: "purge OK",
fn: testPurgeOK,
},
{
name: "subnets empty",
fn: testSubnetsEmpty,
},
{
name: "subnets OK",
fn: testSubnetsOK,
},
{
name: "allocated IPs no subnet",
fn: testAllocatedIPsNoSubnet,
},
{
name: "allocate IP mismatched subnet",
fn: testAllocateIPMismatchedSubnet,
},
{
name: "allocate IP no subnet",
fn: testAllocateIPNoSubnet,
},
{
name: "allocate IP already allocated",
fn: testAllocateIPAlreadyAllocated,
},
{
name: "free IP mismatched subnet",
fn: testFreeIPMismatchedSubnet,
},
{
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
leases, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
if diff := cmp.Diff(0, len(leases)); diff != "" {
t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
}
}
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
t.Helper()
l, ok, err := s.Lease(1)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("found a lease when none was expected")
}
if l != nil {
t.Fatal("returned non-nil lease when not found")
}
}
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
func testPurgeOK(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
)
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil |
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
}
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
subnets, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
if diff := cmp.Diff(0, len(subnets)); diff != "" {
t.Fatalf("unexpected number of subnets (-want +got):\n%s", diff)
}
}
func testSubnetsOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic subnets to be fetched again later.
want := []*net.IPNet{okSubnet4, okSubnet6}
for _, sub := range want {
if err := s.SaveSubnet(sub); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
}
got, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
sort.SliceStable(want, func(i, j int) bool {
return want[i].String() < want[j].String()
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].String() < got[j].String()
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Subnets (-want +got):\n%s", diff)
}
}
func testAllocatedIPsNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if _, err := s.AllocatedIPs(okSubnet4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testAllocateIPMismatchedSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
// An IPv6 address cannot possibly reside in an IPv4 subnet.
if _, err := s.AllocateIP(okSubnet4, okIP6); err == nil {
t.Fatal("expected mismatched subnet error, but none occurred")
}
}
func testAllocateIPNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if _, err := s.AllocateIP(okSubnet4, okIP4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testAllocateIPAlreadyAllocated(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
// First call succeeds, second cannot.
ok, err := s.AllocateIP(okSubnet4, okIP4)
if err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
ok, err = s.AllocateIP(okSubnet4, okIP4)
if err != nil || ok {
t.Fatalf("expected IP already allocated: ok: %v, err: %v", ok, err)
}
}
func testFreeIPMismatchedSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
// An IPv6 address cannot possibly reside in an IPv4 subnet.
if err := s.FreeIP(okSubnet4, okIP6); err == nil {
t.Fatal("expected mismatched subnet error, but none occurred")
}
}
func testFreeIPNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.FreeIP(okSubnet4, okIP4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testFreeIPOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Allocate IP addresses in multiple subnets and ensure they can also
// be freed idempotently.
pairs := [][2]*net.IPNet{
{okSubnet4, okIP4},
{okSubnet6, okIP6},
}
for _, p := range pairs {
if err := s.SaveSubnet(p[0]); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
if ok, err := s.AllocateIP(p[0], p[1]); err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
// Repeated frees should be idempotent.
for i := 0; i < 3; i++ {
if err := s.FreeIP(p[0], p[1]); err != nil {
t.Fatalf("failed to free IP: %v", err)
}
}
}
}
// mustCIDR parses s as a CIDR-notation network and returns it, panicking on
// invalid input. Intended only for initializing test fixtures.
func mustCIDR(s string) *net.IPNet {
_, ipn, err := net.ParseCIDR(s)
if err != nil {
panicf("failed to parse CIDR: %v", err)
}
return ipn
}
// panicf panics with a fmt.Sprintf-formatted message; used by fixture
// helpers such as mustCIDR where failure indicates a programming error.
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}
| {
t.Fatalf("failed to get allocated IPv4s: %v", err)
} | conditional_block |
store.go | // Copyright 2019 Matt Layher
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wgipamtest
import (
"fmt"
"net"
"sort"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/mdlayher/wgipam"
)
// Populated fixtures for use in tests.
var (
okLease = &wgipam.Lease{
IPs: []*net.IPNet{mustCIDR("192.0.2.0/32"), mustCIDR("2001:db8::/128")},
Start: time.Unix(1, 0),
Length: 10 * time.Second,
}
okSubnet4 = mustCIDR("192.0.2.0/24")
okIP4 = mustCIDR("192.0.2.1/32")
okSubnet6 = mustCIDR("2001:db8::/64")
okIP6 = mustCIDR("2001:db8::1/128")
)
// MakeStore is a function which produces a new wgipam.Store on each
// invocation. The Store should be completely empty when created.
type MakeStore func(t *testing.T) wgipam.Store
// TestStore tests a wgipam.Store type for compliance with the
// interface. The MakeStore function is invoked to retrieve a new and empty
// Store for each subtest.
func TestStore(t *testing.T, ms MakeStore) {
t.Helper()
tests := []struct {
name string
fn func(t *testing.T, s wgipam.Store)
}{
{
name: "leases empty",
fn: testLeasesEmpty,
},
{
name: "leases OK",
fn: testLeasesOK,
},
{
name: "lease not exist",
fn: testLeaseNotExist,
},
{
name: "save lease OK",
fn: testSaveLeaseOK,
},
{
name: "delete lease OK",
fn: testDeleteLeaseOK,
},
{
name: "purge OK",
fn: testPurgeOK,
},
{
name: "subnets empty",
fn: testSubnetsEmpty,
},
{
name: "subnets OK",
fn: testSubnetsOK,
},
{
name: "allocated IPs no subnet",
fn: testAllocatedIPsNoSubnet,
},
{
name: "allocate IP mismatched subnet",
fn: testAllocateIPMismatchedSubnet,
},
{
name: "allocate IP no subnet",
fn: testAllocateIPNoSubnet,
},
{
name: "allocate IP already allocated",
fn: testAllocateIPAlreadyAllocated,
},
{
name: "free IP mismatched subnet",
fn: testFreeIPMismatchedSubnet,
},
{
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
leases, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
if diff := cmp.Diff(0, len(leases)); diff != "" {
t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
}
}
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
t.Helper()
l, ok, err := s.Lease(1)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("found a lease when none was expected")
}
if l != nil {
t.Fatal("returned non-nil lease when not found")
}
}
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
func testPurgeOK(t *testing.T, s wgipam.Store) |
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
subnets, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
if diff := cmp.Diff(0, len(subnets)); diff != "" {
t.Fatalf("unexpected number of subnets (-want +got):\n%s", diff)
}
}
func testSubnetsOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic subnets to be fetched again later.
want := []*net.IPNet{okSubnet4, okSubnet6}
for _, sub := range want {
if err := s.SaveSubnet(sub); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
}
got, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
sort.SliceStable(want, func(i, j int) bool {
return want[i].String() < want[j].String()
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].String() < got[j].String()
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Subnets (-want +got):\n%s", diff)
}
}
func testAllocatedIPsNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if _, err := s.AllocatedIPs(okSubnet4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testAllocateIPMismatchedSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
// An IPv6 address cannot possibly reside in an IPv4 subnet.
if _, err := s.AllocateIP(okSubnet4, okIP6); err == nil {
t.Fatal("expected mismatched subnet error, but none occurred")
}
}
func testAllocateIPNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if _, err := s.AllocateIP(okSubnet4, okIP4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testAllocateIPAlreadyAllocated(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
// First call succeeds, second cannot.
ok, err := s.AllocateIP(okSubnet4, okIP4)
if err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
ok, err = s.AllocateIP(okSubnet4, okIP4)
if err != nil || ok {
t.Fatalf("expected IP already allocated: ok: %v, err: %v", ok, err)
}
}
func testFreeIPMismatchedSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
// An IPv6 address cannot possibly reside in an IPv4 subnet.
if err := s.FreeIP(okSubnet4, okIP6); err == nil {
t.Fatal("expected mismatched subnet error, but none occurred")
}
}
func testFreeIPNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.FreeIP(okSubnet4, okIP4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testFreeIPOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Allocate IP addresses in multiple subnets and ensure they can also
// be freed idempotently.
pairs := [][2]*net.IPNet{
{okSubnet4, okIP4},
{okSubnet6, okIP6},
}
for _, p := range pairs {
if err := s.SaveSubnet(p[0]); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
if ok, err := s.AllocateIP(p[0], p[1]); err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
// Repeated frees should be idempotent.
for i := 0; i < 3; i++ {
if err := s.FreeIP(p[0], p[1]); err != nil {
t.Fatalf("failed to free IP: %v", err)
}
}
}
}
func mustCIDR(s string) *net.IPNet {
_, ipn, err := net.ParseCIDR(s)
if err != nil {
panicf("failed to parse CIDR: %v", err)
}
return ipn
}
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}
| {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
)
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil {
t.Fatalf("failed to get allocated IPv4s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
} | identifier_body |
store.go | // Copyright 2019 Matt Layher
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wgipamtest
import (
"fmt"
"net"
"sort"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/mdlayher/wgipam"
)
// Populated fixtures for use in tests.
var (
okLease = &wgipam.Lease{
IPs: []*net.IPNet{mustCIDR("192.0.2.0/32"), mustCIDR("2001:db8::/128")},
Start: time.Unix(1, 0),
Length: 10 * time.Second,
}
okSubnet4 = mustCIDR("192.0.2.0/24")
okIP4 = mustCIDR("192.0.2.1/32")
okSubnet6 = mustCIDR("2001:db8::/64")
okIP6 = mustCIDR("2001:db8::1/128")
)
// MakeStore is a function which produces a new wgipam.Store on each
// invocation. The Store should be completely empty when created.
type MakeStore func(t *testing.T) wgipam.Store
// TestStore tests a wgipam.Store type for compliance with the
// interface. The MakeStore function is invoked to retrieve a new and empty
// Store for each subtest.
func TestStore(t *testing.T, ms MakeStore) {
t.Helper()
tests := []struct {
name string
fn func(t *testing.T, s wgipam.Store)
}{
{
name: "leases empty",
fn: testLeasesEmpty,
},
{
name: "leases OK",
fn: testLeasesOK,
},
{
name: "lease not exist",
fn: testLeaseNotExist,
},
{
name: "save lease OK",
fn: testSaveLeaseOK,
},
{
name: "delete lease OK",
fn: testDeleteLeaseOK,
},
{
name: "purge OK", | },
{
name: "subnets OK",
fn: testSubnetsOK,
},
{
name: "allocated IPs no subnet",
fn: testAllocatedIPsNoSubnet,
},
{
name: "allocate IP mismatched subnet",
fn: testAllocateIPMismatchedSubnet,
},
{
name: "allocate IP no subnet",
fn: testAllocateIPNoSubnet,
},
{
name: "allocate IP already allocated",
fn: testAllocateIPAlreadyAllocated,
},
{
name: "free IP mismatched subnet",
fn: testFreeIPMismatchedSubnet,
},
{
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
leases, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
if diff := cmp.Diff(0, len(leases)); diff != "" {
t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
}
}
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
t.Helper()
l, ok, err := s.Lease(1)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("found a lease when none was expected")
}
if l != nil {
t.Fatal("returned non-nil lease when not found")
}
}
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
func testPurgeOK(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
)
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil {
t.Fatalf("failed to get allocated IPv4s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
}
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
subnets, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
if diff := cmp.Diff(0, len(subnets)); diff != "" {
t.Fatalf("unexpected number of subnets (-want +got):\n%s", diff)
}
}
func testSubnetsOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic subnets to be fetched again later.
want := []*net.IPNet{okSubnet4, okSubnet6}
for _, sub := range want {
if err := s.SaveSubnet(sub); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
}
got, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
sort.SliceStable(want, func(i, j int) bool {
return want[i].String() < want[j].String()
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].String() < got[j].String()
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Subnets (-want +got):\n%s", diff)
}
}
func testAllocatedIPsNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if _, err := s.AllocatedIPs(okSubnet4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testAllocateIPMismatchedSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
// An IPv6 address cannot possibly reside in an IPv4 subnet.
if _, err := s.AllocateIP(okSubnet4, okIP6); err == nil {
t.Fatal("expected mismatched subnet error, but none occurred")
}
}
func testAllocateIPNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if _, err := s.AllocateIP(okSubnet4, okIP4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testAllocateIPAlreadyAllocated(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
// First call succeeds, second cannot.
ok, err := s.AllocateIP(okSubnet4, okIP4)
if err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
ok, err = s.AllocateIP(okSubnet4, okIP4)
if err != nil || ok {
t.Fatalf("expected IP already allocated: ok: %v, err: %v", ok, err)
}
}
func testFreeIPMismatchedSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
// An IPv6 address cannot possibly reside in an IPv4 subnet.
if err := s.FreeIP(okSubnet4, okIP6); err == nil {
t.Fatal("expected mismatched subnet error, but none occurred")
}
}
func testFreeIPNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.FreeIP(okSubnet4, okIP4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func testFreeIPOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Allocate IP addresses in multiple subnets and ensure they can also
// be freed idempotently.
pairs := [][2]*net.IPNet{
{okSubnet4, okIP4},
{okSubnet6, okIP6},
}
for _, p := range pairs {
if err := s.SaveSubnet(p[0]); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
if ok, err := s.AllocateIP(p[0], p[1]); err != nil || !ok {
t.Fatalf("failed to allocate IP: ok: %v, err: %v", ok, err)
}
// Repeated frees should be idempotent.
for i := 0; i < 3; i++ {
if err := s.FreeIP(p[0], p[1]); err != nil {
t.Fatalf("failed to free IP: %v", err)
}
}
}
}
func mustCIDR(s string) *net.IPNet {
_, ipn, err := net.ParseCIDR(s)
if err != nil {
panicf("failed to parse CIDR: %v", err)
}
return ipn
}
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
} | fn: testPurgeOK,
},
{
name: "subnets empty",
fn: testSubnetsEmpty, | random_line_split |
chat_template.js | 'use strict';
const util = require('util');
const EventEmitter = require('events').EventEmitter;
const WebSocket = require('ws');
const ircRegex = /^(?:@([^ ]+) )?(?:[:](\S+) )?(\S+)(?: (?!:)(.+?))?(?: [:](.+))?$/;
const tagsRegex = /([^=;]+)=([^;]*)/g;
const badgesRegex = /([^,\/]+)\/([^,]*)/g;
const emotesRegex = /([^\/]+):([^\/]*)/g;
const emoteIndexRegex = /([^,]+)-([^,]*)/g;
const actionRegex = /^\u0001ACTION (.*)\u0001$/g;
// can't do a username.tmi.twitch.tv since the latter part of the host could change at any point
// course this is just a relately standard IRC parser anyway.
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
_reconnect() {
this.ws = null;
this.connect();
}
_onError() {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
}
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) |
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The server you are connected to is restarted
// you should restart the bot and reconnect
// close the socket and let the close handler grab it
this.ws.close();
break;
default:
console.log('No Process', payload.command, payload);
}
}
}
login = function(username, user_token, rooms) {
this.ws.send(`PASS oauth:${user_token}`);
this.ws.send(`NICK ${username}`);
if (typeof rooms == 'undefined') {
rooms = [];
} else if (typeof rooms == 'string') {
rooms = [rooms];
}
// could also concat joins....
for (let x=0;x<rooms.length;x++) {
if (!rooms[x].startsWith('#')) {
rooms[x] = `#${rooms[x]}`;
}
}
this.join(rooms);
}
join = function(rooms) {
this.ws.send(`JOIN ${rooms.join(',')}`);
}
send = function(room, message) {
if (!room.startsWith('#')) {
room = '#'+room;
}
console.log('>' + `PRIVMSG ${room} :${message}`);
this.ws.send(`PRIVMSG ${room} :${message}`);
}
reply = function(room, id, message) {
console.log(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
this.ws.send(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
}
close = function() {
try {
this.ws.close();
} catch (err) {
console.log(err);
}
}
}
module.exports = ChatBot;
| {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
} | conditional_block |
chat_template.js | 'use strict';
const util = require('util');
const EventEmitter = require('events').EventEmitter;
const WebSocket = require('ws');
const ircRegex = /^(?:@([^ ]+) )?(?:[:](\S+) )?(\S+)(?: (?!:)(.+?))?(?: [:](.+))?$/;
const tagsRegex = /([^=;]+)=([^;]*)/g;
const badgesRegex = /([^,\/]+)\/([^,]*)/g;
const emotesRegex = /([^\/]+):([^\/]*)/g;
const emoteIndexRegex = /([^,]+)-([^,]*)/g;
const actionRegex = /^\u0001ACTION (.*)\u0001$/g;
// can't do a username.tmi.twitch.tv since the latter part of the host could change at any point
// course this is just a relately standard IRC parser anyway.
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
_reconnect() {
this.ws = null;
this.connect();
}
_onError() {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
}
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
}
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The server you are connected to is restarted
// you should restart the bot and reconnect
|
default:
console.log('No Process', payload.command, payload);
}
}
}
login = function(username, user_token, rooms) {
this.ws.send(`PASS oauth:${user_token}`);
this.ws.send(`NICK ${username}`);
if (typeof rooms == 'undefined') {
rooms = [];
} else if (typeof rooms == 'string') {
rooms = [rooms];
}
// could also concat joins....
for (let x=0;x<rooms.length;x++) {
if (!rooms[x].startsWith('#')) {
rooms[x] = `#${rooms[x]}`;
}
}
this.join(rooms);
}
join = function(rooms) {
this.ws.send(`JOIN ${rooms.join(',')}`);
}
send = function(room, message) {
if (!room.startsWith('#')) {
room = '#'+room;
}
console.log('>' + `PRIVMSG ${room} :${message}`);
this.ws.send(`PRIVMSG ${room} :${message}`);
}
reply = function(room, id, message) {
console.log(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
this.ws.send(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
}
close = function() {
try {
this.ws.close();
} catch (err) {
console.log(err);
}
}
}
module.exports = ChatBot; | // close the socket and let the close handler grab it
this.ws.close();
break; | random_line_split |
chat_template.js | 'use strict';
const util = require('util');
const EventEmitter = require('events').EventEmitter;
const WebSocket = require('ws');
const ircRegex = /^(?:@([^ ]+) )?(?:[:](\S+) )?(\S+)(?: (?!:)(.+?))?(?: [:](.+))?$/;
const tagsRegex = /([^=;]+)=([^;]*)/g;
const badgesRegex = /([^,\/]+)\/([^,]*)/g;
const emotesRegex = /([^\/]+):([^\/]*)/g;
const emoteIndexRegex = /([^,]+)-([^,]*)/g;
const actionRegex = /^\u0001ACTION (.*)\u0001$/g;
// can't do a username.tmi.twitch.tv since the latter part of the host could change at any point
// course this is just a relately standard IRC parser anyway.
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
_reconnect() {
this.ws = null;
this.connect();
}
| () {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
}
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
}
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The server you are connected to is restarted
// you should restart the bot and reconnect
// close the socket and let the close handler grab it
this.ws.close();
break;
default:
console.log('No Process', payload.command, payload);
}
}
}
login = function(username, user_token, rooms) {
this.ws.send(`PASS oauth:${user_token}`);
this.ws.send(`NICK ${username}`);
if (typeof rooms == 'undefined') {
rooms = [];
} else if (typeof rooms == 'string') {
rooms = [rooms];
}
// could also concat joins....
for (let x=0;x<rooms.length;x++) {
if (!rooms[x].startsWith('#')) {
rooms[x] = `#${rooms[x]}`;
}
}
this.join(rooms);
}
join = function(rooms) {
this.ws.send(`JOIN ${rooms.join(',')}`);
}
send = function(room, message) {
if (!room.startsWith('#')) {
room = '#'+room;
}
console.log('>' + `PRIVMSG ${room} :${message}`);
this.ws.send(`PRIVMSG ${room} :${message}`);
}
reply = function(room, id, message) {
console.log(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
this.ws.send(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
}
close = function() {
try {
this.ws.close();
} catch (err) {
console.log(err);
}
}
}
module.exports = ChatBot;
| _onError | identifier_name |
chat_template.js | 'use strict';
const util = require('util');
const EventEmitter = require('events').EventEmitter;
const WebSocket = require('ws');
const ircRegex = /^(?:@([^ ]+) )?(?:[:](\S+) )?(\S+)(?: (?!:)(.+?))?(?: [:](.+))?$/;
const tagsRegex = /([^=;]+)=([^;]*)/g;
const badgesRegex = /([^,\/]+)\/([^,]*)/g;
const emotesRegex = /([^\/]+):([^\/]*)/g;
const emoteIndexRegex = /([^,]+)-([^,]*)/g;
const actionRegex = /^\u0001ACTION (.*)\u0001$/g;
// can't do a username.tmi.twitch.tv since the latter part of the host could change at any point
// course this is just a relately standard IRC parser anyway.
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
_reconnect() {
this.ws = null;
this.connect();
}
_onError() {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() |
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
}
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The server you are connected to is restarted
// you should restart the bot and reconnect
// close the socket and let the close handler grab it
this.ws.close();
break;
default:
console.log('No Process', payload.command, payload);
}
}
}
login = function(username, user_token, rooms) {
this.ws.send(`PASS oauth:${user_token}`);
this.ws.send(`NICK ${username}`);
if (typeof rooms == 'undefined') {
rooms = [];
} else if (typeof rooms == 'string') {
rooms = [rooms];
}
// could also concat joins....
for (let x=0;x<rooms.length;x++) {
if (!rooms[x].startsWith('#')) {
rooms[x] = `#${rooms[x]}`;
}
}
this.join(rooms);
}
join = function(rooms) {
this.ws.send(`JOIN ${rooms.join(',')}`);
}
send = function(room, message) {
if (!room.startsWith('#')) {
room = '#'+room;
}
console.log('>' + `PRIVMSG ${room} :${message}`);
this.ws.send(`PRIVMSG ${room} :${message}`);
}
reply = function(room, id, message) {
console.log(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
this.ws.send(`@reply-parent-msg-id=${id} PRIVMSG ${room} :${message}`);
}
close = function() {
try {
this.ws.close();
} catch (err) {
console.log(err);
}
}
}
module.exports = ChatBot;
| {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
} | identifier_body |
download-ganglia-metrics.py | #!/usr/bin/env python
import os
import re
import json
import collections
from datetime import datetime
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import requests
import pandas as pd
import numpy as np
WORKFLOW_STEPS = [
'metaextract', 'metaconfig', 'imextract',
'corilla', 'illuminati', 'jterator'
]
RAW_METRICS = {
'bytes_out': {
'uri': 'm=bytes_out&vl=bytes%2Fsec&ti=Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
},
'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
},
'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def _get_db_coordinator_nodes(cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
|
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster}_jobs.csv`` in the
specified directory that contains the status of each workflow step
in CSV format.
'''
)
parser.add_argument(
'-H', '--host', required=True,
help='IP address or DNS name of the Ganglia server'
)
parser.add_argument(
'-p', '--provider', required=True,
help='name of the cloud provider'
)
parser.add_argument(
'-c', '--cluster', required=True,
help='name of the cluster'
)
parser.add_argument(
'-d', '--directory', required=True,
help='path to a directory on disk where data should be stored'
)
args = parser.parse_args()
workflow_stats_filename = os.path.join(
args.data_dir, args.provider, '{}_jobs.csv'.format(args.cluster)
)
workflow_statistics = load_workflow_statistics(workflow_stats_filename)
raw_data = download_raw_metrics(args.host, args.cluster, workflow_statistics)
output_dir = os.path.join(args.data_dir, args.provider, args.cluster)
if not os.path.exists(output_dir):
print('Create output directory: {}'.format(output_dir))
os.makedirs(output_dir)
save_raw_metrics(raw_data, output_dir)
formatted_data = format_raw_metrics(raw_data, workflow_statistics)
save_formatted_metrics(formatted_data, output_dir)
| formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates) | conditional_block |
download-ganglia-metrics.py | #!/usr/bin/env python
import os
import re
import json
import collections
from datetime import datetime
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import requests
import pandas as pd
import numpy as np
WORKFLOW_STEPS = [
'metaextract', 'metaconfig', 'imextract',
'corilla', 'illuminati', 'jterator'
]
RAW_METRICS = {
'bytes_out': {
'uri': 'm=bytes_out&vl=bytes%2Fsec&ti=Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
},
'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
},
'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
|
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def _get_db_coordinator_nodes(cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates)
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster}_jobs.csv`` in the
specified directory that contains the status of each workflow step
in CSV format.
'''
)
parser.add_argument(
'-H', '--host', required=True,
help='IP address or DNS name of the Ganglia server'
)
parser.add_argument(
'-p', '--provider', required=True,
help='name of the cloud provider'
)
parser.add_argument(
'-c', '--cluster', required=True,
help='name of the cluster'
)
parser.add_argument(
'-d', '--directory', required=True,
help='path to a directory on disk where data should be stored'
)
args = parser.parse_args()
workflow_stats_filename = os.path.join(
args.data_dir, args.provider, '{}_jobs.csv'.format(args.cluster)
)
workflow_statistics = load_workflow_statistics(workflow_stats_filename)
raw_data = download_raw_metrics(args.host, args.cluster, workflow_statistics)
output_dir = os.path.join(args.data_dir, args.provider, args.cluster)
if not os.path.exists(output_dir):
print('Create output directory: {}'.format(output_dir))
os.makedirs(output_dir)
save_raw_metrics(raw_data, output_dir)
formatted_data = format_raw_metrics(raw_data, workflow_statistics)
save_formatted_metrics(formatted_data, output_dir)
| match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4 | identifier_body |
download-ganglia-metrics.py | #!/usr/bin/env python
import os
import re
import json
import collections
from datetime import datetime
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import requests
import pandas as pd
import numpy as np
WORKFLOW_STEPS = [
'metaextract', 'metaconfig', 'imextract',
'corilla', 'illuminati', 'jterator'
]
RAW_METRICS = {
'bytes_out': {
'uri': 'm=bytes_out&vl=bytes%2Fsec&ti=Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
}, | 'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def _get_db_coordinator_nodes(cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates)
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster}_jobs.csv`` in the
specified directory that contains the status of each workflow step
in CSV format.
'''
)
parser.add_argument(
'-H', '--host', required=True,
help='IP address or DNS name of the Ganglia server'
)
parser.add_argument(
'-p', '--provider', required=True,
help='name of the cloud provider'
)
parser.add_argument(
'-c', '--cluster', required=True,
help='name of the cluster'
)
parser.add_argument(
'-d', '--directory', required=True,
help='path to a directory on disk where data should be stored'
)
args = parser.parse_args()
workflow_stats_filename = os.path.join(
args.data_dir, args.provider, '{}_jobs.csv'.format(args.cluster)
)
workflow_statistics = load_workflow_statistics(workflow_stats_filename)
raw_data = download_raw_metrics(args.host, args.cluster, workflow_statistics)
output_dir = os.path.join(args.data_dir, args.provider, args.cluster)
if not os.path.exists(output_dir):
print('Create output directory: {}'.format(output_dir))
os.makedirs(output_dir)
save_raw_metrics(raw_data, output_dir)
formatted_data = format_raw_metrics(raw_data, workflow_statistics)
save_formatted_metrics(formatted_data, output_dir) | 'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
}, | random_line_split |
download-ganglia-metrics.py | #!/usr/bin/env python
import os
import re
import json
import collections
from datetime import datetime
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import requests
import pandas as pd
import numpy as np
WORKFLOW_STEPS = [
'metaextract', 'metaconfig', 'imextract',
'corilla', 'illuminati', 'jterator'
]
RAW_METRICS = {
'bytes_out': {
'uri': 'm=bytes_out&vl=bytes%2Fsec&ti=Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
},
'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
},
'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def | (cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates)
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster}_jobs.csv`` in the
specified directory that contains the status of each workflow step
in CSV format.
'''
)
parser.add_argument(
'-H', '--host', required=True,
help='IP address or DNS name of the Ganglia server'
)
parser.add_argument(
'-p', '--provider', required=True,
help='name of the cloud provider'
)
parser.add_argument(
'-c', '--cluster', required=True,
help='name of the cluster'
)
parser.add_argument(
'-d', '--directory', required=True,
help='path to a directory on disk where data should be stored'
)
args = parser.parse_args()
workflow_stats_filename = os.path.join(
args.data_dir, args.provider, '{}_jobs.csv'.format(args.cluster)
)
workflow_statistics = load_workflow_statistics(workflow_stats_filename)
raw_data = download_raw_metrics(args.host, args.cluster, workflow_statistics)
output_dir = os.path.join(args.data_dir, args.provider, args.cluster)
if not os.path.exists(output_dir):
print('Create output directory: {}'.format(output_dir))
os.makedirs(output_dir)
save_raw_metrics(raw_data, output_dir)
formatted_data = format_raw_metrics(raw_data, workflow_statistics)
save_formatted_metrics(formatted_data, output_dir)
| _get_db_coordinator_nodes | identifier_name |
mfg_event_converter.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a TestRecord into a mfg_event proto for upload to mfg inspector.
Also includes utilities to handle multi-dim conversion into an attachment
and the reverse.
A decision had to be made on how to handle phases, measurements and attachments
with non-unique names. Approach taken is to append a _X to the names.
"""
import collections
import dataclasses
import datetime
import itertools
import json
import logging
import numbers
import os
import sys
from typing import Mapping, Optional, Tuple
from openhtf.core import measurements
from openhtf.core import test_record as htf_test_record
from openhtf.output.proto import mfg_event_pb2
from openhtf.output.proto import test_runs_converter
from openhtf.output.proto import test_runs_pb2
from openhtf.util import data as htf_data
from openhtf.util import units
from openhtf.util import validators
TEST_RECORD_ATTACHMENT_NAME = 'OpenHTF_record.json'
# To be lazy loaded by _LazyLoadUnitsByCode when needed.
UNITS_BY_CODE = {}
# Map test run Status (proto) name to measurement Outcome (python) enum's and
# the reverse. Note: there is data lost in converting an UNSET/PARTIALLY_SET to
# an ERROR so we can't completely reverse the transformation.
MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME = {
measurements.Outcome.PASS: 'PASS',
measurements.Outcome.FAIL: 'FAIL',
measurements.Outcome.UNSET: 'ERROR',
measurements.Outcome.PARTIALLY_SET: 'ERROR',
}
TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME = {
'PASS': measurements.Outcome.PASS,
'MARGINAL_PASS': measurements.Outcome.PASS,
'FAIL': measurements.Outcome.FAIL,
'ERROR': measurements.Outcome.UNSET
}
_GIBI_BYTE_TO_BASE = 1 << 30
MAX_TOTAL_ATTACHMENT_BYTES = int(1.9 * _GIBI_BYTE_TO_BASE)
_LOGGER = logging.getLogger(__name__)
@dataclasses.dataclass(eq=True, frozen=True) # Ensures __hash__ is generated.
class AttachmentCacheKey:
name: str
size: int
AttachmentCacheT = Mapping[AttachmentCacheKey, mfg_event_pb2.EventAttachment]
def _measurement_outcome_to_test_run_status_name(outcome: measurements.Outcome,
marginal: bool) -> str:
"""Returns the test run status name given the outcome and marginal args."""
return ('MARGINAL_PASS'
if marginal else MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[outcome])
def _test_run_status_name_to_measurement_outcome_and_marginal(
name: str) -> Tuple[measurements.Outcome, bool]:
"""Returns the outcome and marginal args given the test run status name."""
return TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME[name], 'MARGINAL' in name
def _lazy_load_units_by_code():
"""Populate dict of units by code iff UNITS_BY_CODE is empty."""
if UNITS_BY_CODE:
# already populated
return
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit
def mfg_event_from_test_record(
record: htf_test_record.TestRecord,
attachment_cache: Optional[AttachmentCacheT] = None,
) -> mfg_event_pb2.MfgEvent:
"""Convert an OpenHTF TestRecord to an MfgEvent proto.
Most fields are copied over directly and some are pulled out of metadata
(listed below). Multi-dimensional measurements are stored only in the JSON
dump of the record.
Important Note: This function mutates the test_record so any output callbacks
called after this callback will operate on the mutated record.
Metadata fields:
test_name: The name field from the test's TestOptions.
config: The OpenHTF config, as a dictionary.
assembly_events: List of AssemblyEvent protos.
(see proto/assembly_event.proto).
operator_name: Name of the test operator.
Args:
record: An OpenHTF TestRecord.
attachment_cache: Provides a lookup to get EventAttachment protos for
already uploaded (or converted) attachments.
Returns:
An MfgEvent proto representing the given test record.
"""
mfg_event = mfg_event_pb2.MfgEvent()
_populate_basic_data(mfg_event, record)
_attach_record_as_json(mfg_event, record)
_attach_argv(mfg_event)
_attach_config(mfg_event, record)
# Only include assembly events if the test passed.
if ('assembly_events' in record.metadata and
mfg_event.test_status == test_runs_pb2.PASS):
for assembly_event in record.metadata['assembly_events']:
mfg_event.assembly_events.add().CopyFrom(assembly_event)
convert_multidim_measurements(record.phases)
phase_copier = PhaseCopier(phase_uniquizer(record.phases), attachment_cache)
phase_copier.copy_measurements(mfg_event)
if not phase_copier.copy_attachments(mfg_event):
mfg_event.test_run_type = mfg_event_pb2.TEST_RUN_PARTIAL
return mfg_event
def _populate_basic_data(mfg_event: mfg_event_pb2.MfgEvent,
record: htf_test_record.TestRecord) -> None:
"""Copies data from the OpenHTF TestRecord to the MfgEvent proto."""
# TODO(openhtf-team):
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
mfg_event.test_status = (
test_runs_pb2.MARGINAL_PASS
if record.marginal else test_runs_converter.OUTCOME_MAP[record.outcome])
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8
def _convert_object_to_json(obj): # pylint: disable=missing-function-docstring
# Since there will be parts of this that may have unicode, either as
# measurement or in the logs, we have to be careful and convert everything
# to unicode, merge, then encode to UTF-8 to put it into the proto.
def unsupported_type_handler(o):
# For bytes, JSONEncoder will fallback to this function to convert to str.
if isinstance(o, bytes):
return o.decode(encoding='utf-8', errors='replace')
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
else:
raise TypeError(repr(o) + ' is not JSON serializable')
json_encoder = json.JSONEncoder(
sort_keys=True,
indent=2,
ensure_ascii=False,
default=unsupported_type_handler)
return json_encoder.encode(obj).encode('utf-8', errors='replace')
def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
attachment = mfg_event.attachment.add()
attachment.name = 'argv'
argv = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
attachment.value_binary = _convert_object_to_json(argv)
attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
"""Makes unique names for phases, attachments, etc with duplicate names."""
def __init__(self, all_names):
self._counts = collections.Counter(all_names)
self._seen = collections.Counter()
def make_unique(self, name): # pylint: disable=missing-function-docstring
count = self._counts[name]
assert count >= 1, 'Seeing a new name that was not given to the constructor'
if count == 1:
# It's unique, so let's skip extra calculations.
return name
# Count the number of times we've seen this and return this one's index.
self._seen[name] += 1
main, ext = os.path.splitext(name)
return '%s_%d%s' % (main, self._seen[name] - 1, ext)
def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
else:
suffix = d.suffix
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (
sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON) # pytype: disable=wrong-arg-types # gen-stub-imports
return attachment
def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases
class PhaseCopier(object):
"""Copies measurements and attachments to an MfgEvent."""
def | (self,
all_phases,
attachment_cache: Optional[AttachmentCacheT] = None):
self._phases = all_phases
self._using_partial_uploads = attachment_cache is not None
self._attachment_cache = (
attachment_cache if self._using_partial_uploads else {})
def copy_measurements(self, mfg_event):
for phase in self._phases:
for name, measurement in sorted(phase.measurements.items()):
# Multi-dim measurements should already have been removed.
assert measurement.dimensions is None
self._copy_unidimensional_measurement(phase, name, measurement,
mfg_event)
def _copy_unidimensional_measurement(self, phase, name, measurement,
mfg_event):
"""Copy uni-dimensional measurements to the MfgEvent."""
mfg_measurement = mfg_event.measurement.add()
# Copy basic measurement fields.
mfg_measurement.name = name
if measurement.docstring:
mfg_measurement.description = measurement.docstring
mfg_measurement.parameter_tag.append(phase.name)
if (measurement.units and
measurement.units.code in test_runs_converter.UOM_CODE_MAP):
mfg_measurement.unit_code = (
test_runs_converter.UOM_CODE_MAP[measurement.units.code])
# Copy failed measurements as failure_codes. This happens early to include
# unset measurements.
if (measurement.outcome != measurements.Outcome.PASS and
phase.outcome != htf_test_record.PhaseOutcome.SKIP):
failure_code = mfg_event.failure_codes.add()
failure_code.code = name
failure_code.details = '\n'.join(str(v) for v in measurement.validators)
# Copy measurement value.
measured_value = measurement.measured_value
status_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
mfg_measurement.status = test_runs_pb2.Status.Value(status_str)
if not measured_value.is_value_set:
return
value = measured_value.value
if isinstance(value, numbers.Number):
mfg_measurement.numeric_value = float(value)
elif isinstance(value, bytes):
mfg_measurement.text_value = value.decode(errors='replace')
else:
# Coercing to string.
mfg_measurement.text_value = str(value)
# Copy measurement validators.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
mfg_measurement.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
mfg_measurement.numeric_maximum = float(validator.maximum)
if validator.marginal_minimum is not None:
mfg_measurement.numeric_marginal_minimum = float(
validator.marginal_minimum)
if validator.marginal_maximum is not None:
mfg_measurement.numeric_marginal_maximum = float(
validator.marginal_maximum)
elif isinstance(validator, validators.RegexMatcher):
mfg_measurement.expected_text = validator.regex
else:
mfg_measurement.description += '\nValidator: ' + str(validator)
def copy_attachments(self, mfg_event: mfg_event_pb2.MfgEvent) -> bool:
"""Copies attachments into the MfgEvent from the configured phases.
If partial uploads are in use (indicated by configuring this class instance
with an Attachments cache), this function will exit early if the total
attachment data size exceeds a reasonable threshold to avoid the 2 GB
serialized proto limit.
Args:
mfg_event: The MfgEvent to copy into.
Returns:
True if all attachments are copied and False if only some attachments
were copied (only possible when partial uploads are being used).
"""
value_copied_attachment_sizes = []
skipped_attachment_names = []
for phase in self._phases:
for name, attachment in sorted(phase.attachments.items()):
size = attachment.size
attachment_cache_key = AttachmentCacheKey(name, size)
if attachment_cache_key in self._attachment_cache:
mfg_event.attachment.append(
self._attachment_cache[attachment_cache_key])
else:
at_least_one_attachment_for_partial_uploads = (
self._using_partial_uploads and value_copied_attachment_sizes)
if at_least_one_attachment_for_partial_uploads and (
sum(value_copied_attachment_sizes) + size >
MAX_TOTAL_ATTACHMENT_BYTES):
skipped_attachment_names.append(name)
else:
value_copied_attachment_sizes.append(size)
self._copy_attachment(name, attachment.data, attachment.mimetype,
mfg_event)
if skipped_attachment_names:
_LOGGER.info(
'Skipping upload of %r attachments for this cycle. '
'To avoid max proto size issues.', skipped_attachment_names)
return False
return True
def _copy_attachment(self, name, data, mimetype, mfg_event):
"""Copies an attachment to mfg_event."""
attachment = mfg_event.attachment.add()
attachment.name = name
attachment.value_binary = data
if mimetype in test_runs_converter.MIMETYPE_MAP:
attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]
elif mimetype == test_runs_pb2.MULTIDIM_JSON:
attachment.type = mimetype
else:
attachment.type = test_runs_pb2.BINARY
def test_record_from_mfg_event(mfg_event):
"""Extract the original test_record saved as an attachment on a mfg_event."""
for attachment in mfg_event.attachment:
if attachment.name == TEST_RECORD_ATTACHMENT_NAME:
return json.loads(attachment.value_binary)
raise ValueError('Could not find test record JSON in the given MfgEvent.')
def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
# attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
# attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
# Fpr backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
if attachment_outcome_str:
outcome, marginal = (
_test_run_status_name_to_measurement_outcome_and_marginal(
attachment_outcome_str))
else:
outcome = None
marginal = False
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
# created dimensioned_measured_value and populate with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name, num_dimensions=len(dimensions))
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome,
marginal=marginal)
return measurement
| __init__ | identifier_name |
mfg_event_converter.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a TestRecord into a mfg_event proto for upload to mfg inspector.
Also includes utilities to handle multi-dim conversion into an attachment
and the reverse.
A decision had to be made on how to handle phases, measurements and attachments
with non-unique names. Approach taken is to append a _X to the names.
"""
import collections
import dataclasses
import datetime
import itertools
import json
import logging
import numbers
import os
import sys
from typing import Mapping, Optional, Tuple
from openhtf.core import measurements
from openhtf.core import test_record as htf_test_record
from openhtf.output.proto import mfg_event_pb2
from openhtf.output.proto import test_runs_converter
from openhtf.output.proto import test_runs_pb2
from openhtf.util import data as htf_data
from openhtf.util import units
from openhtf.util import validators
TEST_RECORD_ATTACHMENT_NAME = 'OpenHTF_record.json'
# To be lazy loaded by _LazyLoadUnitsByCode when needed.
UNITS_BY_CODE = {}
# Map test run Status (proto) name to measurement Outcome (python) enum's and
# the reverse. Note: there is data lost in converting an UNSET/PARTIALLY_SET to
# an ERROR so we can't completely reverse the transformation.
MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME = {
measurements.Outcome.PASS: 'PASS',
measurements.Outcome.FAIL: 'FAIL',
measurements.Outcome.UNSET: 'ERROR',
measurements.Outcome.PARTIALLY_SET: 'ERROR',
}
TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME = {
'PASS': measurements.Outcome.PASS,
'MARGINAL_PASS': measurements.Outcome.PASS,
'FAIL': measurements.Outcome.FAIL,
'ERROR': measurements.Outcome.UNSET
}
_GIBI_BYTE_TO_BASE = 1 << 30
MAX_TOTAL_ATTACHMENT_BYTES = int(1.9 * _GIBI_BYTE_TO_BASE)
_LOGGER = logging.getLogger(__name__)
@dataclasses.dataclass(eq=True, frozen=True) # Ensures __hash__ is generated.
class AttachmentCacheKey:
name: str
size: int
AttachmentCacheT = Mapping[AttachmentCacheKey, mfg_event_pb2.EventAttachment]
def _measurement_outcome_to_test_run_status_name(outcome: measurements.Outcome,
marginal: bool) -> str:
"""Returns the test run status name given the outcome and marginal args."""
return ('MARGINAL_PASS'
if marginal else MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[outcome])
def _test_run_status_name_to_measurement_outcome_and_marginal(
name: str) -> Tuple[measurements.Outcome, bool]:
"""Returns the outcome and marginal args given the test run status name."""
return TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME[name], 'MARGINAL' in name
def _lazy_load_units_by_code():
"""Populate dict of units by code iff UNITS_BY_CODE is empty."""
if UNITS_BY_CODE:
# already populated
return
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit
def mfg_event_from_test_record(
record: htf_test_record.TestRecord,
attachment_cache: Optional[AttachmentCacheT] = None,
) -> mfg_event_pb2.MfgEvent:
"""Convert an OpenHTF TestRecord to an MfgEvent proto.
Most fields are copied over directly and some are pulled out of metadata
(listed below). Multi-dimensional measurements are stored only in the JSON
dump of the record.
Important Note: This function mutates the test_record so any output callbacks
called after this callback will operate on the mutated record.
Metadata fields:
test_name: The name field from the test's TestOptions.
config: The OpenHTF config, as a dictionary.
assembly_events: List of AssemblyEvent protos.
(see proto/assembly_event.proto).
operator_name: Name of the test operator.
Args:
record: An OpenHTF TestRecord.
attachment_cache: Provides a lookup to get EventAttachment protos for
already uploaded (or converted) attachments.
Returns:
An MfgEvent proto representing the given test record.
"""
mfg_event = mfg_event_pb2.MfgEvent()
_populate_basic_data(mfg_event, record)
_attach_record_as_json(mfg_event, record)
_attach_argv(mfg_event)
_attach_config(mfg_event, record)
# Only include assembly events if the test passed.
if ('assembly_events' in record.metadata and
mfg_event.test_status == test_runs_pb2.PASS):
for assembly_event in record.metadata['assembly_events']:
mfg_event.assembly_events.add().CopyFrom(assembly_event)
convert_multidim_measurements(record.phases)
phase_copier = PhaseCopier(phase_uniquizer(record.phases), attachment_cache)
phase_copier.copy_measurements(mfg_event)
if not phase_copier.copy_attachments(mfg_event):
mfg_event.test_run_type = mfg_event_pb2.TEST_RUN_PARTIAL
return mfg_event
def _populate_basic_data(mfg_event: mfg_event_pb2.MfgEvent,
record: htf_test_record.TestRecord) -> None:
"""Copies data from the OpenHTF TestRecord to the MfgEvent proto."""
# TODO(openhtf-team):
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
mfg_event.test_status = (
test_runs_pb2.MARGINAL_PASS
if record.marginal else test_runs_converter.OUTCOME_MAP[record.outcome])
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8
def _convert_object_to_json(obj): # pylint: disable=missing-function-docstring
# Since there will be parts of this that may have unicode, either as
# measurement or in the logs, we have to be careful and convert everything
# to unicode, merge, then encode to UTF-8 to put it into the proto.
def unsupported_type_handler(o):
# For bytes, JSONEncoder will fallback to this function to convert to str.
if isinstance(o, bytes):
return o.decode(encoding='utf-8', errors='replace')
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
else:
raise TypeError(repr(o) + ' is not JSON serializable')
json_encoder = json.JSONEncoder(
sort_keys=True,
indent=2,
ensure_ascii=False,
default=unsupported_type_handler)
return json_encoder.encode(obj).encode('utf-8', errors='replace')
def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
attachment = mfg_event.attachment.add()
attachment.name = 'argv'
argv = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
attachment.value_binary = _convert_object_to_json(argv)
attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
"""Makes unique names for phases, attachments, etc with duplicate names."""
def __init__(self, all_names):
self._counts = collections.Counter(all_names)
self._seen = collections.Counter()
def make_unique(self, name): # pylint: disable=missing-function-docstring
count = self._counts[name]
assert count >= 1, 'Seeing a new name that was not given to the constructor'
if count == 1:
# It's unique, so let's skip extra calculations.
return name
# Count the number of times we've seen this and return this one's index.
self._seen[name] += 1
main, ext = os.path.splitext(name)
return '%s_%d%s' % (main, self._seen[name] - 1, ext)
def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
else:
suffix = d.suffix
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (
sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON) # pytype: disable=wrong-arg-types # gen-stub-imports
return attachment
def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases
class PhaseCopier(object):
"""Copies measurements and attachments to an MfgEvent."""
def __init__(self,
all_phases,
attachment_cache: Optional[AttachmentCacheT] = None):
self._phases = all_phases
self._using_partial_uploads = attachment_cache is not None
self._attachment_cache = (
attachment_cache if self._using_partial_uploads else {})
def copy_measurements(self, mfg_event):
for phase in self._phases:
for name, measurement in sorted(phase.measurements.items()):
# Multi-dim measurements should already have been removed.
assert measurement.dimensions is None
self._copy_unidimensional_measurement(phase, name, measurement,
mfg_event)
def _copy_unidimensional_measurement(self, phase, name, measurement,
mfg_event):
"""Copy uni-dimensional measurements to the MfgEvent."""
mfg_measurement = mfg_event.measurement.add()
# Copy basic measurement fields.
mfg_measurement.name = name
if measurement.docstring:
mfg_measurement.description = measurement.docstring
mfg_measurement.parameter_tag.append(phase.name)
if (measurement.units and
measurement.units.code in test_runs_converter.UOM_CODE_MAP):
mfg_measurement.unit_code = (
test_runs_converter.UOM_CODE_MAP[measurement.units.code])
# Copy failed measurements as failure_codes. This happens early to include
# unset measurements.
if (measurement.outcome != measurements.Outcome.PASS and
phase.outcome != htf_test_record.PhaseOutcome.SKIP):
failure_code = mfg_event.failure_codes.add()
failure_code.code = name
failure_code.details = '\n'.join(str(v) for v in measurement.validators)
# Copy measurement value.
measured_value = measurement.measured_value
status_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
mfg_measurement.status = test_runs_pb2.Status.Value(status_str)
if not measured_value.is_value_set:
return
value = measured_value.value
if isinstance(value, numbers.Number):
mfg_measurement.numeric_value = float(value)
elif isinstance(value, bytes):
mfg_measurement.text_value = value.decode(errors='replace')
else:
# Coercing to string.
mfg_measurement.text_value = str(value)
# Copy measurement validators.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
mfg_measurement.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
mfg_measurement.numeric_maximum = float(validator.maximum)
if validator.marginal_minimum is not None:
mfg_measurement.numeric_marginal_minimum = float(
validator.marginal_minimum)
if validator.marginal_maximum is not None:
mfg_measurement.numeric_marginal_maximum = float(
validator.marginal_maximum)
elif isinstance(validator, validators.RegexMatcher):
mfg_measurement.expected_text = validator.regex
else:
mfg_measurement.description += '\nValidator: ' + str(validator)
def copy_attachments(self, mfg_event: mfg_event_pb2.MfgEvent) -> bool:
"""Copies attachments into the MfgEvent from the configured phases.
If partial uploads are in use (indicated by configuring this class instance
with an Attachments cache), this function will exit early if the total
attachment data size exceeds a reasonable threshold to avoid the 2 GB
serialized proto limit.
Args:
mfg_event: The MfgEvent to copy into.
Returns:
True if all attachments are copied and False if only some attachments
were copied (only possible when partial uploads are being used).
"""
value_copied_attachment_sizes = []
skipped_attachment_names = []
for phase in self._phases:
for name, attachment in sorted(phase.attachments.items()):
size = attachment.size
attachment_cache_key = AttachmentCacheKey(name, size)
if attachment_cache_key in self._attachment_cache:
mfg_event.attachment.append(
self._attachment_cache[attachment_cache_key])
else:
at_least_one_attachment_for_partial_uploads = (
self._using_partial_uploads and value_copied_attachment_sizes)
if at_least_one_attachment_for_partial_uploads and ( | self._copy_attachment(name, attachment.data, attachment.mimetype,
mfg_event)
if skipped_attachment_names:
_LOGGER.info(
'Skipping upload of %r attachments for this cycle. '
'To avoid max proto size issues.', skipped_attachment_names)
return False
return True
def _copy_attachment(self, name, data, mimetype, mfg_event):
"""Copies an attachment to mfg_event."""
attachment = mfg_event.attachment.add()
attachment.name = name
attachment.value_binary = data
if mimetype in test_runs_converter.MIMETYPE_MAP:
attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]
elif mimetype == test_runs_pb2.MULTIDIM_JSON:
attachment.type = mimetype
else:
attachment.type = test_runs_pb2.BINARY
def test_record_from_mfg_event(mfg_event):
"""Extract the original test_record saved as an attachment on a mfg_event."""
for attachment in mfg_event.attachment:
if attachment.name == TEST_RECORD_ATTACHMENT_NAME:
return json.loads(attachment.value_binary)
raise ValueError('Could not find test record JSON in the given MfgEvent.')
def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
# attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
# attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
# Fpr backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
if attachment_outcome_str:
outcome, marginal = (
_test_run_status_name_to_measurement_outcome_and_marginal(
attachment_outcome_str))
else:
outcome = None
marginal = False
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
# created dimensioned_measured_value and populate with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name, num_dimensions=len(dimensions))
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome,
marginal=marginal)
return measurement | sum(value_copied_attachment_sizes) + size >
MAX_TOTAL_ATTACHMENT_BYTES):
skipped_attachment_names.append(name)
else:
value_copied_attachment_sizes.append(size) | random_line_split |
mfg_event_converter.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a TestRecord into a mfg_event proto for upload to mfg inspector.
Also includes utilities to handle multi-dim conversion into an attachment
and the reverse.
A decision had to be made on how to handle phases, measurements and attachments
with non-unique names. Approach taken is to append a _X to the names.
"""
import collections
import dataclasses
import datetime
import itertools
import json
import logging
import numbers
import os
import sys
from typing import Mapping, Optional, Tuple
from openhtf.core import measurements
from openhtf.core import test_record as htf_test_record
from openhtf.output.proto import mfg_event_pb2
from openhtf.output.proto import test_runs_converter
from openhtf.output.proto import test_runs_pb2
from openhtf.util import data as htf_data
from openhtf.util import units
from openhtf.util import validators
TEST_RECORD_ATTACHMENT_NAME = 'OpenHTF_record.json'
# To be lazy loaded by _LazyLoadUnitsByCode when needed.
UNITS_BY_CODE = {}
# Map test run Status (proto) name to measurement Outcome (python) enum's and
# the reverse. Note: there is data lost in converting an UNSET/PARTIALLY_SET to
# an ERROR so we can't completely reverse the transformation.
MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME = {
measurements.Outcome.PASS: 'PASS',
measurements.Outcome.FAIL: 'FAIL',
measurements.Outcome.UNSET: 'ERROR',
measurements.Outcome.PARTIALLY_SET: 'ERROR',
}
TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME = {
'PASS': measurements.Outcome.PASS,
'MARGINAL_PASS': measurements.Outcome.PASS,
'FAIL': measurements.Outcome.FAIL,
'ERROR': measurements.Outcome.UNSET
}
_GIBI_BYTE_TO_BASE = 1 << 30
MAX_TOTAL_ATTACHMENT_BYTES = int(1.9 * _GIBI_BYTE_TO_BASE)
_LOGGER = logging.getLogger(__name__)
@dataclasses.dataclass(eq=True, frozen=True) # Ensures __hash__ is generated.
class AttachmentCacheKey:
name: str
size: int
AttachmentCacheT = Mapping[AttachmentCacheKey, mfg_event_pb2.EventAttachment]
def _measurement_outcome_to_test_run_status_name(outcome: measurements.Outcome,
marginal: bool) -> str:
"""Returns the test run status name given the outcome and marginal args."""
return ('MARGINAL_PASS'
if marginal else MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[outcome])
def _test_run_status_name_to_measurement_outcome_and_marginal(
name: str) -> Tuple[measurements.Outcome, bool]:
"""Returns the outcome and marginal args given the test run status name."""
return TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME[name], 'MARGINAL' in name
def _lazy_load_units_by_code():
"""Populate dict of units by code iff UNITS_BY_CODE is empty."""
if UNITS_BY_CODE:
# already populated
|
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit
def mfg_event_from_test_record(
record: htf_test_record.TestRecord,
attachment_cache: Optional[AttachmentCacheT] = None,
) -> mfg_event_pb2.MfgEvent:
"""Convert an OpenHTF TestRecord to an MfgEvent proto.
Most fields are copied over directly and some are pulled out of metadata
(listed below). Multi-dimensional measurements are stored only in the JSON
dump of the record.
Important Note: This function mutates the test_record so any output callbacks
called after this callback will operate on the mutated record.
Metadata fields:
test_name: The name field from the test's TestOptions.
config: The OpenHTF config, as a dictionary.
assembly_events: List of AssemblyEvent protos.
(see proto/assembly_event.proto).
operator_name: Name of the test operator.
Args:
record: An OpenHTF TestRecord.
attachment_cache: Provides a lookup to get EventAttachment protos for
already uploaded (or converted) attachments.
Returns:
An MfgEvent proto representing the given test record.
"""
mfg_event = mfg_event_pb2.MfgEvent()
_populate_basic_data(mfg_event, record)
_attach_record_as_json(mfg_event, record)
_attach_argv(mfg_event)
_attach_config(mfg_event, record)
# Only include assembly events if the test passed.
if ('assembly_events' in record.metadata and
mfg_event.test_status == test_runs_pb2.PASS):
for assembly_event in record.metadata['assembly_events']:
mfg_event.assembly_events.add().CopyFrom(assembly_event)
convert_multidim_measurements(record.phases)
phase_copier = PhaseCopier(phase_uniquizer(record.phases), attachment_cache)
phase_copier.copy_measurements(mfg_event)
if not phase_copier.copy_attachments(mfg_event):
mfg_event.test_run_type = mfg_event_pb2.TEST_RUN_PARTIAL
return mfg_event
def _populate_basic_data(mfg_event: mfg_event_pb2.MfgEvent,
record: htf_test_record.TestRecord) -> None:
"""Copies data from the OpenHTF TestRecord to the MfgEvent proto."""
# TODO(openhtf-team):
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
mfg_event.test_status = (
test_runs_pb2.MARGINAL_PASS
if record.marginal else test_runs_converter.OUTCOME_MAP[record.outcome])
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8
def _convert_object_to_json(obj): # pylint: disable=missing-function-docstring
# Since there will be parts of this that may have unicode, either as
# measurement or in the logs, we have to be careful and convert everything
# to unicode, merge, then encode to UTF-8 to put it into the proto.
def unsupported_type_handler(o):
# For bytes, JSONEncoder will fallback to this function to convert to str.
if isinstance(o, bytes):
return o.decode(encoding='utf-8', errors='replace')
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
else:
raise TypeError(repr(o) + ' is not JSON serializable')
json_encoder = json.JSONEncoder(
sort_keys=True,
indent=2,
ensure_ascii=False,
default=unsupported_type_handler)
return json_encoder.encode(obj).encode('utf-8', errors='replace')
def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
attachment = mfg_event.attachment.add()
attachment.name = 'argv'
argv = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
attachment.value_binary = _convert_object_to_json(argv)
attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
"""Makes unique names for phases, attachments, etc with duplicate names."""
def __init__(self, all_names):
self._counts = collections.Counter(all_names)
self._seen = collections.Counter()
def make_unique(self, name): # pylint: disable=missing-function-docstring
count = self._counts[name]
assert count >= 1, 'Seeing a new name that was not given to the constructor'
if count == 1:
# It's unique, so let's skip extra calculations.
return name
# Count the number of times we've seen this and return this one's index.
self._seen[name] += 1
main, ext = os.path.splitext(name)
return '%s_%d%s' % (main, self._seen[name] - 1, ext)
def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
else:
suffix = d.suffix
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (
sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON) # pytype: disable=wrong-arg-types # gen-stub-imports
return attachment
def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases
class PhaseCopier(object):
"""Copies measurements and attachments to an MfgEvent."""
def __init__(self,
all_phases,
attachment_cache: Optional[AttachmentCacheT] = None):
self._phases = all_phases
self._using_partial_uploads = attachment_cache is not None
self._attachment_cache = (
attachment_cache if self._using_partial_uploads else {})
def copy_measurements(self, mfg_event):
for phase in self._phases:
for name, measurement in sorted(phase.measurements.items()):
# Multi-dim measurements should already have been removed.
assert measurement.dimensions is None
self._copy_unidimensional_measurement(phase, name, measurement,
mfg_event)
def _copy_unidimensional_measurement(self, phase, name, measurement,
mfg_event):
"""Copy uni-dimensional measurements to the MfgEvent."""
mfg_measurement = mfg_event.measurement.add()
# Copy basic measurement fields.
mfg_measurement.name = name
if measurement.docstring:
mfg_measurement.description = measurement.docstring
mfg_measurement.parameter_tag.append(phase.name)
if (measurement.units and
measurement.units.code in test_runs_converter.UOM_CODE_MAP):
mfg_measurement.unit_code = (
test_runs_converter.UOM_CODE_MAP[measurement.units.code])
# Copy failed measurements as failure_codes. This happens early to include
# unset measurements.
if (measurement.outcome != measurements.Outcome.PASS and
phase.outcome != htf_test_record.PhaseOutcome.SKIP):
failure_code = mfg_event.failure_codes.add()
failure_code.code = name
failure_code.details = '\n'.join(str(v) for v in measurement.validators)
# Copy measurement value.
measured_value = measurement.measured_value
status_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
mfg_measurement.status = test_runs_pb2.Status.Value(status_str)
if not measured_value.is_value_set:
return
value = measured_value.value
if isinstance(value, numbers.Number):
mfg_measurement.numeric_value = float(value)
elif isinstance(value, bytes):
mfg_measurement.text_value = value.decode(errors='replace')
else:
# Coercing to string.
mfg_measurement.text_value = str(value)
# Copy measurement validators.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
mfg_measurement.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
mfg_measurement.numeric_maximum = float(validator.maximum)
if validator.marginal_minimum is not None:
mfg_measurement.numeric_marginal_minimum = float(
validator.marginal_minimum)
if validator.marginal_maximum is not None:
mfg_measurement.numeric_marginal_maximum = float(
validator.marginal_maximum)
elif isinstance(validator, validators.RegexMatcher):
mfg_measurement.expected_text = validator.regex
else:
mfg_measurement.description += '\nValidator: ' + str(validator)
def copy_attachments(self, mfg_event: mfg_event_pb2.MfgEvent) -> bool:
"""Copies attachments into the MfgEvent from the configured phases.
If partial uploads are in use (indicated by configuring this class instance
with an Attachments cache), this function will exit early if the total
attachment data size exceeds a reasonable threshold to avoid the 2 GB
serialized proto limit.
Args:
mfg_event: The MfgEvent to copy into.
Returns:
True if all attachments are copied and False if only some attachments
were copied (only possible when partial uploads are being used).
"""
value_copied_attachment_sizes = []
skipped_attachment_names = []
for phase in self._phases:
for name, attachment in sorted(phase.attachments.items()):
size = attachment.size
attachment_cache_key = AttachmentCacheKey(name, size)
if attachment_cache_key in self._attachment_cache:
mfg_event.attachment.append(
self._attachment_cache[attachment_cache_key])
else:
at_least_one_attachment_for_partial_uploads = (
self._using_partial_uploads and value_copied_attachment_sizes)
if at_least_one_attachment_for_partial_uploads and (
sum(value_copied_attachment_sizes) + size >
MAX_TOTAL_ATTACHMENT_BYTES):
skipped_attachment_names.append(name)
else:
value_copied_attachment_sizes.append(size)
self._copy_attachment(name, attachment.data, attachment.mimetype,
mfg_event)
if skipped_attachment_names:
_LOGGER.info(
'Skipping upload of %r attachments for this cycle. '
'To avoid max proto size issues.', skipped_attachment_names)
return False
return True
def _copy_attachment(self, name, data, mimetype, mfg_event):
"""Copies an attachment to mfg_event."""
attachment = mfg_event.attachment.add()
attachment.name = name
attachment.value_binary = data
if mimetype in test_runs_converter.MIMETYPE_MAP:
attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]
elif mimetype == test_runs_pb2.MULTIDIM_JSON:
attachment.type = mimetype
else:
attachment.type = test_runs_pb2.BINARY
def test_record_from_mfg_event(mfg_event):
"""Extract the original test_record saved as an attachment on a mfg_event."""
for attachment in mfg_event.attachment:
if attachment.name == TEST_RECORD_ATTACHMENT_NAME:
return json.loads(attachment.value_binary)
raise ValueError('Could not find test record JSON in the given MfgEvent.')
def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
# attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
# attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
# Fpr backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
if attachment_outcome_str:
outcome, marginal = (
_test_run_status_name_to_measurement_outcome_and_marginal(
attachment_outcome_str))
else:
outcome = None
marginal = False
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
# created dimensioned_measured_value and populate with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name, num_dimensions=len(dimensions))
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome,
marginal=marginal)
return measurement
| return | conditional_block |
mfg_event_converter.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a TestRecord into a mfg_event proto for upload to mfg inspector.
Also includes utilities to handle multi-dim conversion into an attachment
and the reverse.
A decision had to be made on how to handle phases, measurements and attachments
with non-unique names. Approach taken is to append a _X to the names.
"""
import collections
import dataclasses
import datetime
import itertools
import json
import logging
import numbers
import os
import sys
from typing import Mapping, Optional, Tuple
from openhtf.core import measurements
from openhtf.core import test_record as htf_test_record
from openhtf.output.proto import mfg_event_pb2
from openhtf.output.proto import test_runs_converter
from openhtf.output.proto import test_runs_pb2
from openhtf.util import data as htf_data
from openhtf.util import units
from openhtf.util import validators
TEST_RECORD_ATTACHMENT_NAME = 'OpenHTF_record.json'
# To be lazy loaded by _LazyLoadUnitsByCode when needed.
UNITS_BY_CODE = {}
# Map test run Status (proto) name to measurement Outcome (python) enum's and
# the reverse. Note: there is data lost in converting an UNSET/PARTIALLY_SET to
# an ERROR so we can't completely reverse the transformation.
MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME = {
measurements.Outcome.PASS: 'PASS',
measurements.Outcome.FAIL: 'FAIL',
measurements.Outcome.UNSET: 'ERROR',
measurements.Outcome.PARTIALLY_SET: 'ERROR',
}
TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME = {
'PASS': measurements.Outcome.PASS,
'MARGINAL_PASS': measurements.Outcome.PASS,
'FAIL': measurements.Outcome.FAIL,
'ERROR': measurements.Outcome.UNSET
}
_GIBI_BYTE_TO_BASE = 1 << 30
MAX_TOTAL_ATTACHMENT_BYTES = int(1.9 * _GIBI_BYTE_TO_BASE)
_LOGGER = logging.getLogger(__name__)
@dataclasses.dataclass(eq=True, frozen=True) # Ensures __hash__ is generated.
class AttachmentCacheKey:
name: str
size: int
AttachmentCacheT = Mapping[AttachmentCacheKey, mfg_event_pb2.EventAttachment]
def _measurement_outcome_to_test_run_status_name(outcome: measurements.Outcome,
marginal: bool) -> str:
"""Returns the test run status name given the outcome and marginal args."""
return ('MARGINAL_PASS'
if marginal else MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[outcome])
def _test_run_status_name_to_measurement_outcome_and_marginal(
name: str) -> Tuple[measurements.Outcome, bool]:
"""Returns the outcome and marginal args given the test run status name."""
return TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME[name], 'MARGINAL' in name
def _lazy_load_units_by_code():
"""Populate dict of units by code iff UNITS_BY_CODE is empty."""
if UNITS_BY_CODE:
# already populated
return
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit
def mfg_event_from_test_record(
record: htf_test_record.TestRecord,
attachment_cache: Optional[AttachmentCacheT] = None,
) -> mfg_event_pb2.MfgEvent:
"""Convert an OpenHTF TestRecord to an MfgEvent proto.
Most fields are copied over directly and some are pulled out of metadata
(listed below). Multi-dimensional measurements are stored only in the JSON
dump of the record.
Important Note: This function mutates the test_record so any output callbacks
called after this callback will operate on the mutated record.
Metadata fields:
test_name: The name field from the test's TestOptions.
config: The OpenHTF config, as a dictionary.
assembly_events: List of AssemblyEvent protos.
(see proto/assembly_event.proto).
operator_name: Name of the test operator.
Args:
record: An OpenHTF TestRecord.
attachment_cache: Provides a lookup to get EventAttachment protos for
already uploaded (or converted) attachments.
Returns:
An MfgEvent proto representing the given test record.
"""
mfg_event = mfg_event_pb2.MfgEvent()
_populate_basic_data(mfg_event, record)
_attach_record_as_json(mfg_event, record)
_attach_argv(mfg_event)
_attach_config(mfg_event, record)
# Only include assembly events if the test passed.
if ('assembly_events' in record.metadata and
mfg_event.test_status == test_runs_pb2.PASS):
for assembly_event in record.metadata['assembly_events']:
mfg_event.assembly_events.add().CopyFrom(assembly_event)
convert_multidim_measurements(record.phases)
phase_copier = PhaseCopier(phase_uniquizer(record.phases), attachment_cache)
phase_copier.copy_measurements(mfg_event)
if not phase_copier.copy_attachments(mfg_event):
mfg_event.test_run_type = mfg_event_pb2.TEST_RUN_PARTIAL
return mfg_event
def _populate_basic_data(mfg_event: mfg_event_pb2.MfgEvent,
record: htf_test_record.TestRecord) -> None:
"""Copies data from the OpenHTF TestRecord to the MfgEvent proto."""
# TODO(openhtf-team):
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
mfg_event.test_status = (
test_runs_pb2.MARGINAL_PASS
if record.marginal else test_runs_converter.OUTCOME_MAP[record.outcome])
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8
def _convert_object_to_json(obj): # pylint: disable=missing-function-docstring
# Since there will be parts of this that may have unicode, either as
# measurement or in the logs, we have to be careful and convert everything
# to unicode, merge, then encode to UTF-8 to put it into the proto.
def unsupported_type_handler(o):
# For bytes, JSONEncoder will fallback to this function to convert to str.
|
json_encoder = json.JSONEncoder(
sort_keys=True,
indent=2,
ensure_ascii=False,
default=unsupported_type_handler)
return json_encoder.encode(obj).encode('utf-8', errors='replace')
def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
attachment = mfg_event.attachment.add()
attachment.name = 'argv'
argv = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
attachment.value_binary = _convert_object_to_json(argv)
attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
"""Makes unique names for phases, attachments, etc with duplicate names."""
def __init__(self, all_names):
self._counts = collections.Counter(all_names)
self._seen = collections.Counter()
def make_unique(self, name): # pylint: disable=missing-function-docstring
count = self._counts[name]
assert count >= 1, 'Seeing a new name that was not given to the constructor'
if count == 1:
# It's unique, so let's skip extra calculations.
return name
# Count the number of times we've seen this and return this one's index.
self._seen[name] += 1
main, ext = os.path.splitext(name)
return '%s_%d%s' % (main, self._seen[name] - 1, ext)
def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
else:
suffix = d.suffix
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (
sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON) # pytype: disable=wrong-arg-types # gen-stub-imports
return attachment
def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases
class PhaseCopier(object):
"""Copies measurements and attachments to an MfgEvent."""
def __init__(self,
all_phases,
attachment_cache: Optional[AttachmentCacheT] = None):
self._phases = all_phases
self._using_partial_uploads = attachment_cache is not None
self._attachment_cache = (
attachment_cache if self._using_partial_uploads else {})
def copy_measurements(self, mfg_event):
for phase in self._phases:
for name, measurement in sorted(phase.measurements.items()):
# Multi-dim measurements should already have been removed.
assert measurement.dimensions is None
self._copy_unidimensional_measurement(phase, name, measurement,
mfg_event)
def _copy_unidimensional_measurement(self, phase, name, measurement,
mfg_event):
"""Copy uni-dimensional measurements to the MfgEvent."""
mfg_measurement = mfg_event.measurement.add()
# Copy basic measurement fields.
mfg_measurement.name = name
if measurement.docstring:
mfg_measurement.description = measurement.docstring
mfg_measurement.parameter_tag.append(phase.name)
if (measurement.units and
measurement.units.code in test_runs_converter.UOM_CODE_MAP):
mfg_measurement.unit_code = (
test_runs_converter.UOM_CODE_MAP[measurement.units.code])
# Copy failed measurements as failure_codes. This happens early to include
# unset measurements.
if (measurement.outcome != measurements.Outcome.PASS and
phase.outcome != htf_test_record.PhaseOutcome.SKIP):
failure_code = mfg_event.failure_codes.add()
failure_code.code = name
failure_code.details = '\n'.join(str(v) for v in measurement.validators)
# Copy measurement value.
measured_value = measurement.measured_value
status_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
mfg_measurement.status = test_runs_pb2.Status.Value(status_str)
if not measured_value.is_value_set:
return
value = measured_value.value
if isinstance(value, numbers.Number):
mfg_measurement.numeric_value = float(value)
elif isinstance(value, bytes):
mfg_measurement.text_value = value.decode(errors='replace')
else:
# Coercing to string.
mfg_measurement.text_value = str(value)
# Copy measurement validators.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
mfg_measurement.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
mfg_measurement.numeric_maximum = float(validator.maximum)
if validator.marginal_minimum is not None:
mfg_measurement.numeric_marginal_minimum = float(
validator.marginal_minimum)
if validator.marginal_maximum is not None:
mfg_measurement.numeric_marginal_maximum = float(
validator.marginal_maximum)
elif isinstance(validator, validators.RegexMatcher):
mfg_measurement.expected_text = validator.regex
else:
mfg_measurement.description += '\nValidator: ' + str(validator)
def copy_attachments(self, mfg_event: mfg_event_pb2.MfgEvent) -> bool:
"""Copies attachments into the MfgEvent from the configured phases.
If partial uploads are in use (indicated by configuring this class instance
with an Attachments cache), this function will exit early if the total
attachment data size exceeds a reasonable threshold to avoid the 2 GB
serialized proto limit.
Args:
mfg_event: The MfgEvent to copy into.
Returns:
True if all attachments are copied and False if only some attachments
were copied (only possible when partial uploads are being used).
"""
value_copied_attachment_sizes = []
skipped_attachment_names = []
for phase in self._phases:
for name, attachment in sorted(phase.attachments.items()):
size = attachment.size
attachment_cache_key = AttachmentCacheKey(name, size)
if attachment_cache_key in self._attachment_cache:
mfg_event.attachment.append(
self._attachment_cache[attachment_cache_key])
else:
at_least_one_attachment_for_partial_uploads = (
self._using_partial_uploads and value_copied_attachment_sizes)
if at_least_one_attachment_for_partial_uploads and (
sum(value_copied_attachment_sizes) + size >
MAX_TOTAL_ATTACHMENT_BYTES):
skipped_attachment_names.append(name)
else:
value_copied_attachment_sizes.append(size)
self._copy_attachment(name, attachment.data, attachment.mimetype,
mfg_event)
if skipped_attachment_names:
_LOGGER.info(
'Skipping upload of %r attachments for this cycle. '
'To avoid max proto size issues.', skipped_attachment_names)
return False
return True
def _copy_attachment(self, name, data, mimetype, mfg_event):
"""Copies an attachment to mfg_event."""
attachment = mfg_event.attachment.add()
attachment.name = name
attachment.value_binary = data
if mimetype in test_runs_converter.MIMETYPE_MAP:
attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]
elif mimetype == test_runs_pb2.MULTIDIM_JSON:
attachment.type = mimetype
else:
attachment.type = test_runs_pb2.BINARY
def test_record_from_mfg_event(mfg_event):
"""Extract the original test_record saved as an attachment on a mfg_event."""
for attachment in mfg_event.attachment:
if attachment.name == TEST_RECORD_ATTACHMENT_NAME:
return json.loads(attachment.value_binary)
raise ValueError('Could not find test record JSON in the given MfgEvent.')
def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
# attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
# attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
# Fpr backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
if attachment_outcome_str:
outcome, marginal = (
_test_run_status_name_to_measurement_outcome_and_marginal(
attachment_outcome_str))
else:
outcome = None
marginal = False
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
# created dimensioned_measured_value and populate with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name, num_dimensions=len(dimensions))
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome,
marginal=marginal)
return measurement
| if isinstance(o, bytes):
return o.decode(encoding='utf-8', errors='replace')
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
else:
raise TypeError(repr(o) + ' is not JSON serializable') | identifier_body |
fit_nixing.py | #coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from easydict import EasyDict as edict
from matplotlib.pyplot import MultipleLocator
from bfs_group import bfs_clustering
import cv2
import glob
from random import random as rand
from PIL import Image, ImageDraw, ImageFont
import json
import os
config = edict()
config.minimum_points = 50
config.max_group = 3
config.max_neighbor_distance = 10
config.resize_factor = 0.5
color_map = {'White':'白色', 'Silver_gray': '银灰色', 'Black': '黑色', 'Red': '红色', 'Brown': '棕色', 'Blue': '蓝色',
'Yellow': '黄色', 'Purple': '紫色', 'Green': '绿色', 'Pink': '粉色', 'Ching': '青色', 'Golden': '金色', 'other': '其他'}
letter = [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'J', u'K', u'L', u'M',
u'N', u'P', u'Q', u'R', u'S', u'T', u'U', u'V', u'W', u'X', u'Y', u'Z']
province = [u'京', u'津', u'沪', u'渝', u'黑', u'吉', u'辽', u'冀', u'晋', u'鲁', u'豫', u'陕', u'甘', u'青', u'苏', u'浙',
u'皖', u'鄂', u'湘', u'闽', u'赣', u'川', u'贵', u'云', u'粤', u'琼', u'蒙', u'宁', u'新', u'桂', u'藏']
type_map = {'BigTruck': '货车', 'Bus': '公交车', 'Lorry': '货车', 'MPV': '轿车', 'MiniVan': '轿车', 'MiniBus': '公交车',
'SUV': '轿车', 'Scooter': '轿车', 'Sedan_Car': '轿车', 'Special_vehicle': '其他', 'Three_Wheeled_Truck':'其他', 'other': '其他', 'Minibus': '公交车'}
def draw_box_v2(img, box, alphaReserve=0.8, color=None):
color = (rand() * 255, rand() * 255, rand() * 255) if color is None else color
h,w,_ = img.shape
x1 = max(0, int(float(box[0])))
y1 = max(0, int(float(box[1])))
x2 = min(w-1, int(float(box[2])))
y2 = min(h-1, int(float(box[3])))
B, G, R = color
img[y1:y2, x1:x2, 0] = img[y1:y2, x1:x2, 0] * alphaReserve + B * (1 - alphaReserve)
img[y1:y2, x1:x2, 1] = img[y1:y2, x1:x2, 1] * alphaReserve + G * (1 - alphaReserve)
img[y1:y2, x1:x2, 2] = img[y1:y2, x1:x2, 2] * alphaReserve + R * (1 - alphaReserve)
cv2.line(img, (x1, y1), (x1+7, y1), (255,255,255), thickness=1)
cv2.line(img, (x1, y1), (x1, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2-7, y1), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1+7, y2), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1, y2-7), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2-7, y2), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2, y2-7), (255,255,255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(font_pa | = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
for i, plate in enumerate(history):
ph, pw = plate.shape[:2]
if 70+50*i+ph >= blend_img.shape[0]:
continue
blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
text = '违章记录:第%d帧' %history_record[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
if history_platenum[i] != ' ':
text = '车牌识别:'+ history_platenum[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
iw = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
if iw > 0:
ih = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
if ih > 0:
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
all_area = float(box1_area + box2_area - iw * ih)
return iw * ih / all_area
return 0
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
if x1 == x2:
if (xc-x1) * (xr-x1) < 0:
return True
else:
return False
return ((y2-y1)/(x2-x1)*(xc-x1)+y1-yc) * \
((y2-y1)/(x2-x1)*(xr-x1)+y1-yr) < 0
def filter_area(boxes, area=50):
if len(boxes) > 0:
return np.where((boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0]) > area**2)[0]
else:
return np.array([], dtype=np.int)
def indicator(x):
x_square_sum, x_sum = np.sum(x**2), np.sum(x)
det = len(x) * x_square_sum - x_sum**2
return x_square_sum, x_sum, det
def solve_k_b(x, y):
x_square_sum, x_sum, det = indicator(x)
while det == 0:
x = x[:-1]
y = y[:-1]
x_square_sum, x_sum, det = indicator(x)
N_ = len(x)
k_ = np.sum(y * (N_*x-x_sum)) / det
b_ = np.sum(y * (x_square_sum-x*x_sum)) / det
return N_, k_, b_
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,225,0]
lane_img[np.where(img == 2)] = [0,225,255]
blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
# parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
plate_data = boxes_results[cnt]['plate_box']
if plate_data != []:
plate_boxes = [_['data'] for _ in plate_data]
plate_nums = [_['attrs']['plate_num']]
for i in range(len(plate_nums)):
if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
plate_nums.append(plate_nums[i])
else:
plate_nums[i] = ' '
print(plate_nums[-1])
else:
plate_boxes, plate_nums = [], []
head_box, tail_box = [], []
for item in boxes_results[cnt]['common_box']:
if item['attrs']['head'] == 'tail':
tail_box.append(item['data'])
elif item['attrs']['head'] == 'head':
head_box.append(item['data'])
else:
raise ValueError('unsupported attr!')
# draw the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
for box, attrs in zip(vehicle_boxes, vehicle_attrs):
draw_box_v2(blend_img, box, color=(255,0,0), alphaReserve=0.9)
text = color_map[attrs['color']]
text += type_map[attrs['type']]
cv2.rectangle(blend_img, (int(box[0]), int(box[1])-20), (int(box[0])+70, int(box[1])), (128, 128, 128), thickness=-1)
blend_img = cv2ImgAddText(blend_img, text, int(box[0]), int(box[1]-20), textColor=(255, 255, 255),\
textSize=15, font_path="./LiHeiPro.ttf")
for box in plate_boxes:
draw_box_v2(blend_img, box, color=(0,0,255), alphaReserve=0.7)
for box in head_box:
draw_box_v2(blend_img, box, color=(0,0,128), alphaReserve=0.7)
for box in tail_box:
draw_box_v2(blend_img, box, color=(0,0,128))
# cluster the lane points
neighbor = list(range(1, config.max_neighbor_distance+1))
neighbor.extend([-i for i in neighbor])
neighbor.append(0)
dsize = (int(img.shape[1]*config.resize_factor), int(img.shape[0]*config.resize_factor))
resized_img = cv2.resize(img, dsize, fx=config.resize_factor,fy=config.resize_factor)
group_res = bfs_clustering(resized_img, neighbor, ig_cls=0, show=False)
h, w = img.shape[:2]
resized_h, resized_w = resized_img.shape[:2]
# title = '基于X2的"去中心化"违章记录仪'
# blend_img = cv2ImgAddText(blend_img, title, 20,20, textColor=(0, 0, 0),\
# textSize=45, font_path="./LiHeiPro.ttf")
title = '逆行车辆:'
blend_img = cv2ImgAddText(blend_img, title, w-200,20, textColor=(255, 0, 0),\
textSize=25, font_path="./LiHeiPro.ttf")
lanes = []
b = []
for cls in group_res:
print('----cls %d----' %cls)
for g in group_res[cls]:
if len(g) < config.minimum_points:
continue
print('group length: %d' %(len(g)))
x, y = [], []
for i, j in g:
x.append(j)
y.append(resized_h-1-i)
x = np.array(x, dtype='float32') / config.resize_factor
y = np.array(y, dtype='float32') / config.resize_factor
N_, k_, b_ = solve_k_b(x, y)
print(N_, k_, b_)
x1, x2 = np.min(x), np.max(x)
y1, y2 = k_ * x1 + b_, k_ * x2 + b_
y1, y2 = h-1-y1, h-1-y2
if cls == 1:
color = (0,225,0)
else:
color = (0,225,225)
if k_ > 0.1:
lanes.append([x1,y1,x2,y2])
b.append(b_)
# cv2.line(blend_img,(int(x1),int(y1)),(int(x2),int(y2)), color, thickness=3)
# find the central yellow solid line
lane = lanes[np.argmax(-1 * np.array(b))]
# judge whether cross solid lane
for box in head_box:
if (box[2] - box[0] + 1) * (box[3] - box[1] + 1) < 50*50:
continue
ref_line = [0,0,(box[0]+box[2])/2,(box[1]+box[3])/2] # (x1,y2,x2,y2)
input1 = ref_line + lane
if is_cross(*input1):
text = '逆行危险!'
print(text)
blend_img = cv2ImgAddText(blend_img, text, int((box[0]+box[2])/2-20),int(box[1]), textColor=(255, 0, 0),\
textSize=15, font_path="./LiHeiPro.ttf")
ious = np.array([cal_iou(_, box) for _ in plate_boxes])
if ious.size > 0:
max_idx = np.argmax(ious)
pbox = plate_boxes[max_idx]
pnum = plate_nums[max_idx]
pbox[0] -= 10
pbox[2] += 10
pbox[1] -= 10
pbox[3] += 10
ratio = (pbox[3]-pbox[1]) / (pbox[2]-pbox[0])
ph = 50
pw = int(ph / ratio)
pbox = [int(_) for _ in pbox]
plate = raw_img[pbox[1]:pbox[3],pbox[0]:pbox[2],:]
plate = cv2.resize(plate, (pw,ph))
history.insert(0, plate)
history_cnt.insert(0, 1)
history_record.insert(0, cnt)
history_platenum.insert(0, pnum)
blend_img, history, history_cnt, history_record, history_platenum = \
draw_history(blend_img, history, history_cnt, history_record, history_platenum)
cv2.imwrite('./%s/tmp%d.jpg' %(save_dir,cnt), blend_img) | th, textSize, encoding="utf-8")
draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
history_record | identifier_body |
fit_nixing.py | #coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from easydict import EasyDict as edict
from matplotlib.pyplot import MultipleLocator
from bfs_group import bfs_clustering
import cv2
import glob
from random import random as rand
from PIL import Image, ImageDraw, ImageFont
import json
import os
config = edict()
config.minimum_points = 50
config.max_group = 3
config.max_neighbor_distance = 10
config.resize_factor = 0.5
color_map = {'White':'白色', 'Silver_gray': '银灰色', 'Black': '黑色', 'Red': '红色', 'Brown': '棕色', 'Blue': '蓝色',
'Yellow': '黄色', 'Purple': '紫色', 'Green': '绿色', 'Pink': '粉色', 'Ching': '青色', 'Golden': '金色', 'other': '其他'}
letter = [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'J', u'K', u'L', u'M',
u'N', u'P', u'Q', u'R', u'S', u'T', u'U', u'V', u'W', u'X', u'Y', u'Z']
province = [u'京', u'津', u'沪', u'渝', u'黑', u'吉', u'辽', u'冀', u'晋', u'鲁', u'豫', u'陕', u'甘', u'青', u'苏', u'浙',
u'皖', u'鄂', u'湘', u'闽', u'赣', u'川', u'贵', u'云', u'粤', u'琼', u'蒙', u'宁', u'新', u'桂', u'藏']
|
def draw_box_v2(img, box, alphaReserve=0.8, color=None):
color = (rand() * 255, rand() * 255, rand() * 255) if color is None else color
h,w,_ = img.shape
x1 = max(0, int(float(box[0])))
y1 = max(0, int(float(box[1])))
x2 = min(w-1, int(float(box[2])))
y2 = min(h-1, int(float(box[3])))
B, G, R = color
img[y1:y2, x1:x2, 0] = img[y1:y2, x1:x2, 0] * alphaReserve + B * (1 - alphaReserve)
img[y1:y2, x1:x2, 1] = img[y1:y2, x1:x2, 1] * alphaReserve + G * (1 - alphaReserve)
img[y1:y2, x1:x2, 2] = img[y1:y2, x1:x2, 2] * alphaReserve + R * (1 - alphaReserve)
cv2.line(img, (x1, y1), (x1+7, y1), (255,255,255), thickness=1)
cv2.line(img, (x1, y1), (x1, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2-7, y1), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1+7, y2), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1, y2-7), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2-7, y2), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2, y2-7), (255,255,255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(font_path, textSize, encoding="utf-8")
draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
history_record = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
for i, plate in enumerate(history):
ph, pw = plate.shape[:2]
if 70+50*i+ph >= blend_img.shape[0]:
continue
blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
text = '违章记录:第%d帧' %history_record[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
if history_platenum[i] != ' ':
text = '车牌识别:'+ history_platenum[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
iw = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
if iw > 0:
ih = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
if ih > 0:
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
all_area = float(box1_area + box2_area - iw * ih)
return iw * ih / all_area
return 0
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
if x1 == x2:
if (xc-x1) * (xr-x1) < 0:
return True
else:
return False
return ((y2-y1)/(x2-x1)*(xc-x1)+y1-yc) * \
((y2-y1)/(x2-x1)*(xr-x1)+y1-yr) < 0
def filter_area(boxes, area=50):
if len(boxes) > 0:
return np.where((boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0]) > area**2)[0]
else:
return np.array([], dtype=np.int)
def indicator(x):
x_square_sum, x_sum = np.sum(x**2), np.sum(x)
det = len(x) * x_square_sum - x_sum**2
return x_square_sum, x_sum, det
def solve_k_b(x, y):
x_square_sum, x_sum, det = indicator(x)
while det == 0:
x = x[:-1]
y = y[:-1]
x_square_sum, x_sum, det = indicator(x)
N_ = len(x)
k_ = np.sum(y * (N_*x-x_sum)) / det
b_ = np.sum(y * (x_square_sum-x*x_sum)) / det
return N_, k_, b_
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,225,0]
lane_img[np.where(img == 2)] = [0,225,255]
blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
# parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
plate_data = boxes_results[cnt]['plate_box']
if plate_data != []:
plate_boxes = [_['data'] for _ in plate_data]
plate_nums = [_['attrs']['plate_num']]
for i in range(len(plate_nums)):
if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
plate_nums.append(plate_nums[i])
else:
plate_nums[i] = ' '
print(plate_nums[-1])
else:
plate_boxes, plate_nums = [], []
head_box, tail_box = [], []
for item in boxes_results[cnt]['common_box']:
if item['attrs']['head'] == 'tail':
tail_box.append(item['data'])
elif item['attrs']['head'] == 'head':
head_box.append(item['data'])
else:
raise ValueError('unsupported attr!')
# draw the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
for box, attrs in zip(vehicle_boxes, vehicle_attrs):
draw_box_v2(blend_img, box, color=(255,0,0), alphaReserve=0.9)
text = color_map[attrs['color']]
text += type_map[attrs['type']]
cv2.rectangle(blend_img, (int(box[0]), int(box[1])-20), (int(box[0])+70, int(box[1])), (128, 128, 128), thickness=-1)
blend_img = cv2ImgAddText(blend_img, text, int(box[0]), int(box[1]-20), textColor=(255, 255, 255),\
textSize=15, font_path="./LiHeiPro.ttf")
for box in plate_boxes:
draw_box_v2(blend_img, box, color=(0,0,255), alphaReserve=0.7)
for box in head_box:
draw_box_v2(blend_img, box, color=(0,0,128), alphaReserve=0.7)
for box in tail_box:
draw_box_v2(blend_img, box, color=(0,0,128))
# cluster the lane points
neighbor = list(range(1, config.max_neighbor_distance+1))
neighbor.extend([-i for i in neighbor])
neighbor.append(0)
dsize = (int(img.shape[1]*config.resize_factor), int(img.shape[0]*config.resize_factor))
resized_img = cv2.resize(img, dsize, fx=config.resize_factor,fy=config.resize_factor)
group_res = bfs_clustering(resized_img, neighbor, ig_cls=0, show=False)
h, w = img.shape[:2]
resized_h, resized_w = resized_img.shape[:2]
# title = '基于X2的"去中心化"违章记录仪'
# blend_img = cv2ImgAddText(blend_img, title, 20,20, textColor=(0, 0, 0),\
# textSize=45, font_path="./LiHeiPro.ttf")
title = '逆行车辆:'
blend_img = cv2ImgAddText(blend_img, title, w-200,20, textColor=(255, 0, 0),\
textSize=25, font_path="./LiHeiPro.ttf")
lanes = []
b = []
for cls in group_res:
print('----cls %d----' %cls)
for g in group_res[cls]:
if len(g) < config.minimum_points:
continue
print('group length: %d' %(len(g)))
x, y = [], []
for i, j in g:
x.append(j)
y.append(resized_h-1-i)
x = np.array(x, dtype='float32') / config.resize_factor
y = np.array(y, dtype='float32') / config.resize_factor
N_, k_, b_ = solve_k_b(x, y)
print(N_, k_, b_)
x1, x2 = np.min(x), np.max(x)
y1, y2 = k_ * x1 + b_, k_ * x2 + b_
y1, y2 = h-1-y1, h-1-y2
if cls == 1:
color = (0,225,0)
else:
color = (0,225,225)
if k_ > 0.1:
lanes.append([x1,y1,x2,y2])
b.append(b_)
# cv2.line(blend_img,(int(x1),int(y1)),(int(x2),int(y2)), color, thickness=3)
# find the central yellow solid line
lane = lanes[np.argmax(-1 * np.array(b))]
# judge whether cross solid lane
for box in head_box:
if (box[2] - box[0] + 1) * (box[3] - box[1] + 1) < 50*50:
continue
ref_line = [0,0,(box[0]+box[2])/2,(box[1]+box[3])/2] # (x1,y2,x2,y2)
input1 = ref_line + lane
if is_cross(*input1):
text = '逆行危险!'
print(text)
blend_img = cv2ImgAddText(blend_img, text, int((box[0]+box[2])/2-20),int(box[1]), textColor=(255, 0, 0),\
textSize=15, font_path="./LiHeiPro.ttf")
ious = np.array([cal_iou(_, box) for _ in plate_boxes])
if ious.size > 0:
max_idx = np.argmax(ious)
pbox = plate_boxes[max_idx]
pnum = plate_nums[max_idx]
pbox[0] -= 10
pbox[2] += 10
pbox[1] -= 10
pbox[3] += 10
ratio = (pbox[3]-pbox[1]) / (pbox[2]-pbox[0])
ph = 50
pw = int(ph / ratio)
pbox = [int(_) for _ in pbox]
plate = raw_img[pbox[1]:pbox[3],pbox[0]:pbox[2],:]
plate = cv2.resize(plate, (pw,ph))
history.insert(0, plate)
history_cnt.insert(0, 1)
history_record.insert(0, cnt)
history_platenum.insert(0, pnum)
blend_img, history, history_cnt, history_record, history_platenum = \
draw_history(blend_img, history, history_cnt, history_record, history_platenum)
cv2.imwrite('./%s/tmp%d.jpg' %(save_dir,cnt), blend_img) | type_map = {'BigTruck': '货车', 'Bus': '公交车', 'Lorry': '货车', 'MPV': '轿车', 'MiniVan': '轿车', 'MiniBus': '公交车',
'SUV': '轿车', 'Scooter': '轿车', 'Sedan_Car': '轿车', 'Special_vehicle': '其他', 'Three_Wheeled_Truck':'其他', 'other': '其他', 'Minibus': '公交车'}
| random_line_split |
fit_nixing.py | #coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from easydict import EasyDict as edict
from matplotlib.pyplot import MultipleLocator
from bfs_group import bfs_clustering
import cv2
import glob
from random import random as rand
from PIL import Image, ImageDraw, ImageFont
import json
import os
config = edict()
config.minimum_points = 50
config.max_group = 3
config.max_neighbor_distance = 10
config.resize_factor = 0.5
color_map = {'White':'白色', 'Silver_gray': '银灰色', 'Black': '黑色', 'Red': '红色', 'Brown': '棕色', 'Blue': '蓝色',
'Yellow': '黄色', 'Purple': '紫色', 'Green': '绿色', 'Pink': '粉色', 'Ching': '青色', 'Golden': '金色', 'other': '其他'}
letter = [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'J', u'K', u'L', u'M',
u'N', u'P', u'Q', u'R', u'S', u'T', u'U', u'V', u'W', u'X', u'Y', u'Z']
province = [u'京', u'津', u'沪', u'渝', u'黑', u'吉', u'辽', u'冀', u'晋', u'鲁', u'豫', u'陕', u'甘', u'青', u'苏', u'浙',
u'皖', u'鄂', u'湘', u'闽', u'赣', u'川', u'贵', u'云', u'粤', u'琼', u'蒙', u'宁', u'新', u'桂', u'藏']
type_map = {'BigTruck': '货车', 'Bus': '公交车', 'Lorry': '货车', 'MPV': '轿车', 'MiniVan': '轿车', 'MiniBus': '公交车',
'SUV': '轿车', 'Scooter': '轿车', 'Sedan_Car': '轿车', 'Special_vehicle': '其他', 'Three_Wheeled_Truck':'其他', 'other': '其他', 'Minibus': '公交车'}
def draw_box_v2(img, box, alphaReserve=0.8, color=None):
color = (rand() * 255, rand() * 255, rand() * 255) if color is None else color
h,w,_ = img.shape
x1 = max(0, int(float(box[0])))
y1 = max(0, int(float(box[1])))
x2 = min(w-1, int(float(box[2])))
y2 = min(h-1, int(float(box[3])))
B, G, R = color
img[y1:y2, x1:x2, 0] = img[y1:y2, x1:x2, 0] * alphaReserve + B * (1 - alphaReserve)
img[y1:y2, x1:x2, 1] = img[y1:y2, x1:x2, 1] * alphaReserve + G * (1 - alphaReserve)
img[y1:y2, x1:x2, 2] = img[y1:y2, x1:x2, 2] * alphaReserve + R * (1 - alphaReserve)
cv2.line(img, (x1, y1), (x1+7, y1), (255,255,255), thickness=1)
cv2.line(img, (x1, y1), (x1, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2-7, y1), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1+7, y2), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1, y2-7), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2-7, y2), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2, y2-7), (255,255,255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(font_path, textSize, encoding="utf-8")
draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
history_record = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
for i, plate in enumerate(history):
ph, pw = plate.shape[:2]
if 70+50*i+ph >= blend_img.shape[0]:
continue
blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
text = '违章记录:第%d帧' %history_record[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
if history_platenum[i] != ' ':
text = '车牌识别:'+ history_platenum[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
iw = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
if iw > 0:
ih = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
if ih > 0:
box1_ | (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
all_area = float(box1_area + box2_area - iw * ih)
return iw * ih / all_area
return 0
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
if x1 == x2:
if (xc-x1) * (xr-x1) < 0:
return True
else:
return False
return ((y2-y1)/(x2-x1)*(xc-x1)+y1-yc) * \
((y2-y1)/(x2-x1)*(xr-x1)+y1-yr) < 0
def filter_area(boxes, area=50):
if len(boxes) > 0:
return np.where((boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0]) > area**2)[0]
else:
return np.array([], dtype=np.int)
def indicator(x):
x_square_sum, x_sum = np.sum(x**2), np.sum(x)
det = len(x) * x_square_sum - x_sum**2
return x_square_sum, x_sum, det
def solve_k_b(x, y):
x_square_sum, x_sum, det = indicator(x)
while det == 0:
x = x[:-1]
y = y[:-1]
x_square_sum, x_sum, det = indicator(x)
N_ = len(x)
k_ = np.sum(y * (N_*x-x_sum)) / det
b_ = np.sum(y * (x_square_sum-x*x_sum)) / det
return N_, k_, b_
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,225,0]
lane_img[np.where(img == 2)] = [0,225,255]
blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
# parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
plate_data = boxes_results[cnt]['plate_box']
if plate_data != []:
plate_boxes = [_['data'] for _ in plate_data]
plate_nums = [_['attrs']['plate_num']]
for i in range(len(plate_nums)):
if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
plate_nums.append(plate_nums[i])
else:
plate_nums[i] = ' '
print(plate_nums[-1])
else:
plate_boxes, plate_nums = [], []
head_box, tail_box = [], []
for item in boxes_results[cnt]['common_box']:
if item['attrs']['head'] == 'tail':
tail_box.append(item['data'])
elif item['attrs']['head'] == 'head':
head_box.append(item['data'])
else:
raise ValueError('unsupported attr!')
# draw the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
for box, attrs in zip(vehicle_boxes, vehicle_attrs):
draw_box_v2(blend_img, box, color=(255,0,0), alphaReserve=0.9)
text = color_map[attrs['color']]
text += type_map[attrs['type']]
cv2.rectangle(blend_img, (int(box[0]), int(box[1])-20), (int(box[0])+70, int(box[1])), (128, 128, 128), thickness=-1)
blend_img = cv2ImgAddText(blend_img, text, int(box[0]), int(box[1]-20), textColor=(255, 255, 255),\
textSize=15, font_path="./LiHeiPro.ttf")
for box in plate_boxes:
draw_box_v2(blend_img, box, color=(0,0,255), alphaReserve=0.7)
for box in head_box:
draw_box_v2(blend_img, box, color=(0,0,128), alphaReserve=0.7)
for box in tail_box:
draw_box_v2(blend_img, box, color=(0,0,128))
# cluster the lane points
neighbor = list(range(1, config.max_neighbor_distance+1))
neighbor.extend([-i for i in neighbor])
neighbor.append(0)
dsize = (int(img.shape[1]*config.resize_factor), int(img.shape[0]*config.resize_factor))
resized_img = cv2.resize(img, dsize, fx=config.resize_factor,fy=config.resize_factor)
group_res = bfs_clustering(resized_img, neighbor, ig_cls=0, show=False)
h, w = img.shape[:2]
resized_h, resized_w = resized_img.shape[:2]
# title = '基于X2的"去中心化"违章记录仪'
# blend_img = cv2ImgAddText(blend_img, title, 20,20, textColor=(0, 0, 0),\
# textSize=45, font_path="./LiHeiPro.ttf")
title = '逆行车辆:'
blend_img = cv2ImgAddText(blend_img, title, w-200,20, textColor=(255, 0, 0),\
textSize=25, font_path="./LiHeiPro.ttf")
lanes = []
b = []
for cls in group_res:
print('----cls %d----' %cls)
for g in group_res[cls]:
if len(g) < config.minimum_points:
continue
print('group length: %d' %(len(g)))
x, y = [], []
for i, j in g:
x.append(j)
y.append(resized_h-1-i)
x = np.array(x, dtype='float32') / config.resize_factor
y = np.array(y, dtype='float32') / config.resize_factor
N_, k_, b_ = solve_k_b(x, y)
print(N_, k_, b_)
x1, x2 = np.min(x), np.max(x)
y1, y2 = k_ * x1 + b_, k_ * x2 + b_
y1, y2 = h-1-y1, h-1-y2
if cls == 1:
color = (0,225,0)
else:
color = (0,225,225)
if k_ > 0.1:
lanes.append([x1,y1,x2,y2])
b.append(b_)
# cv2.line(blend_img,(int(x1),int(y1)),(int(x2),int(y2)), color, thickness=3)
# find the central yellow solid line
lane = lanes[np.argmax(-1 * np.array(b))]
# judge whether cross solid lane
for box in head_box:
if (box[2] - box[0] + 1) * (box[3] - box[1] + 1) < 50*50:
continue
ref_line = [0,0,(box[0]+box[2])/2,(box[1]+box[3])/2] # (x1,y2,x2,y2)
input1 = ref_line + lane
if is_cross(*input1):
text = '逆行危险!'
print(text)
blend_img = cv2ImgAddText(blend_img, text, int((box[0]+box[2])/2-20),int(box[1]), textColor=(255, 0, 0),\
textSize=15, font_path="./LiHeiPro.ttf")
ious = np.array([cal_iou(_, box) for _ in plate_boxes])
if ious.size > 0:
max_idx = np.argmax(ious)
pbox = plate_boxes[max_idx]
pnum = plate_nums[max_idx]
pbox[0] -= 10
pbox[2] += 10
pbox[1] -= 10
pbox[3] += 10
ratio = (pbox[3]-pbox[1]) / (pbox[2]-pbox[0])
ph = 50
pw = int(ph / ratio)
pbox = [int(_) for _ in pbox]
plate = raw_img[pbox[1]:pbox[3],pbox[0]:pbox[2],:]
plate = cv2.resize(plate, (pw,ph))
history.insert(0, plate)
history_cnt.insert(0, 1)
history_record.insert(0, cnt)
history_platenum.insert(0, pnum)
blend_img, history, history_cnt, history_record, history_platenum = \
draw_history(blend_img, history, history_cnt, history_record, history_platenum)
cv2.imwrite('./%s/tmp%d.jpg' %(save_dir,cnt), blend_img) | area = | identifier_name |
fit_nixing.py | #coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from easydict import EasyDict as edict
from matplotlib.pyplot import MultipleLocator
from bfs_group import bfs_clustering
import cv2
import glob
from random import random as rand
from PIL import Image, ImageDraw, ImageFont
import json
import os
config = edict()
config.minimum_points = 50
config.max_group = 3
config.max_neighbor_distance = 10
config.resize_factor = 0.5
color_map = {'White':'白色', 'Silver_gray': '银灰色', 'Black': '黑色', 'Red': '红色', 'Brown': '棕色', 'Blue': '蓝色',
'Yellow': '黄色', 'Purple': '紫色', 'Green': '绿色', 'Pink': '粉色', 'Ching': '青色', 'Golden': '金色', 'other': '其他'}
letter = [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'J', u'K', u'L', u'M',
u'N', u'P', u'Q', u'R', u'S', u'T', u'U', u'V', u'W', u'X', u'Y', u'Z']
province = [u'京', u'津', u'沪', u'渝', u'黑', u'吉', u'辽', u'冀', u'晋', u'鲁', u'豫', u'陕', u'甘', u'青', u'苏', u'浙',
u'皖', u'鄂', u'湘', u'闽', u'赣', u'川', u'贵', u'云', u'粤', u'琼', u'蒙', u'宁', u'新', u'桂', u'藏']
type_map = {'BigTruck': '货车', 'Bus': '公交车', 'Lorry': '货车', 'MPV': '轿车', 'MiniVan': '轿车', 'MiniBus': '公交车',
'SUV': '轿车', 'Scooter': '轿车', 'Sedan_Car': '轿车', 'Special_vehicle': '其他', 'Three_Wheeled_Truck':'其他', 'other': '其他', 'Minibus': '公交车'}
def draw_box_v2(img, box, alphaReserve=0.8, color=None):
color = (rand() * 255, rand() * 255, rand() * 255) if color is None else color
h,w,_ = img.shape
x1 = max(0, int(float(box[0])))
y1 = max(0, int(float(box[1])))
x2 = min(w-1, int(float(box[2])))
y2 = min(h-1, int(float(box[3])))
B, G, R = color
img[y1:y2, x1:x2, 0] = img[y1:y2, x1:x2, 0] * alphaReserve + B * (1 - alphaReserve)
img[y1:y2, x1:x2, 1] = img[y1:y2, x1:x2, 1] * alphaReserve + G * (1 - alphaReserve)
img[y1:y2, x1:x2, 2] = img[y1:y2, x1:x2, 2] * alphaReserve + R * (1 - alphaReserve)
cv2.line(img, (x1, y1), (x1+7, y1), (255,255,255), thickness=1)
cv2.line(img, (x1, y1), (x1, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2-7, y1), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1+7, y2), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1, y2-7), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2-7, y2), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2, y2-7), (255,255,255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(font_path, textSize, encoding="utf-8")
draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
history_record = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
for i, plate in enumerate(history):
ph, pw = plate.shape[:2]
if 70+50*i+ph >= blend_img.shape[0]:
continue
blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
text = '违章记录:第%d帧' %history_record[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
if history_platenum[i] != ' ':
text = '车牌识别:'+ history_platenum[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
iw = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
if iw > 0:
ih = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
if ih > 0:
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
all_area = float(box1_area + box2_area - iw * ih)
return iw * ih / all_area
return 0
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
if x1 == x2:
if (xc-x1) * (xr-x1) < 0:
return True
else:
return False
return ((y2-y1)/(x2-x1)*(xc-x1)+y1-yc) * \
((y2-y1)/(x2-x1)*(xr-x1)+y1-yr) < 0
def filter_area(boxes, area=50):
    """Return indices of boxes larger than ``area**2`` pixels.

    Args:
        boxes: array of shape (N, 4+) holding [x1, y1, x2, y2, ...] rows.
        area: side length threshold; boxes with (h * w) > area**2 survive.

    Returns:
        1-D integer index array into ``boxes`` (empty when no boxes given).
    """
    if len(boxes) > 0:
        return np.where((boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]) > area**2)[0]
    else:
        # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the documented equivalent dtype spelling.
        return np.array([], dtype=int)
def indicator(x):
    """Helper terms for the least-squares line fit in solve_k_b.

    Returns (sum of x_i**2, sum of x_i, det) where
    det = n * sum(x**2) - (sum x)**2 is the determinant of the 2x2
    normal-equation matrix; det == 0 means the fit is degenerate
    (fewer than two distinct x values).
    """
    sq_sum = np.sum(np.square(x))
    total = np.sum(x)
    det = len(x) * sq_sum - total**2
    return sq_sum, total, det
def solve_k_b(x, y):
    """Least-squares fit of y = k*x + b; returns (n_points_used, k, b).

    Mirrors the original behaviour of trimming points from the end while the
    normal-equation determinant is zero (all remaining x values identical).

    Raises:
        ValueError: if no usable prefix exists (fewer than two distinct x
        values). Previously this case looped forever, since trimming an
        all-equal (or empty) x array never changes det from 0.
    """
    def _normal_terms(vals):
        # sum of squares, sum, and determinant of the 2x2 normal matrix
        sq = np.sum(np.square(vals))
        s = np.sum(vals)
        return sq, s, len(vals) * sq - s**2

    x_square_sum, x_sum, det = _normal_terms(x)
    while det == 0:
        if len(x) < 2:
            raise ValueError("solve_k_b needs at least two distinct x values")
        x = x[:-1]
        y = y[:-1]
        x_square_sum, x_sum, det = _normal_terms(x)
    N_ = len(x)
    # Closed-form 2x2 solve via Cramer's rule, vectorized over the samples.
    k_ = np.sum(y * (N_*x - x_sum)) / det
    b_ = np.sum(y * (x_square_sum - x*x_sum)) / det
    return N_, k_, b_
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,225,0]
lane_img[np.where(img == 2)] = [0,225,255]
blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
# parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
plate_data = boxes_results[cnt]['plate_box']
if plate_data != []:
plate_boxes = [_['data'] for _ in plate_data]
plate_nums = [_['attrs']['plate_num']]
for i in range(len(plate_nums)):
if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
plate_nums.append(plate_nums[i])
else:
plate_nums[i] = ' '
print(plate_nums[-1])
else:
plate_boxes, plate_nums = [], []
head_box, tail_box = [], []
for item in boxes_results[cnt]['common_box']:
if item['attrs']['head'] == 'tail':
tail_box.append(item['data'])
elif item['attrs']['head'] == 'head':
head_box.append(item['data'])
else:
raise ValueError('unsupported attr!')
# draw the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
for box, attrs in zip(vehicle_boxes, vehicle_attrs):
draw_box_v2(blend_img, box, color=(255,0,0), alphaReserve=0.9)
text = color_map[attrs['color']]
text += type_map[attrs['type']]
cv2.rectangle(blend_img, (int(box[0]), int(box[1])-20), (int(box[0])+70, int(box[1])), (128, 128, 128), thickness=-1)
blend_img = cv2ImgAddText(blend_img, text, int(box[0]), int(box[1]-20), textColor=(255, 255, 255),\
textSize=15, font_path="./LiHeiPro.ttf")
for box in plate_boxes:
draw_box_v2(blend_img, box, color=(0,0,255), alphaReserve=0.7)
for box in head_box:
draw_box_v2(blend_img, box, color=(0,0,128), alphaReserve=0.7)
for box in tail_box:
draw_box_v2(blend_img, box, color=(0,0,128))
# cluster the lane points
neighbor = list(range(1, config.max_neighbor_distance+1))
neighbor.extend([-i for i in neighbor])
neighbor.append(0)
dsize = (int(img.shape[1]*config.resize_factor), int(img.shape[0]*config.resize_factor))
resized_img = cv2.resize(img, dsize, fx=config.resize_factor,fy=config.resize_factor)
group_res = bfs_clustering(resized_img, neighbor, ig_cls=0, show=False)
h, w = img.shape[:2]
resized_h, resized_w = resized_img.shape[:2]
# title = '基于X2的"去中心化"违章记录仪'
# blend_img = cv2ImgAddText(blend_img, title, 20,20, textColor=(0, 0, 0),\
# textSize=45, font_path="./LiHeiPro.ttf")
title = '逆行车辆:'
blend_img = cv2ImgAddText(blend_img, title, w-200,20, textColor=(255, 0, 0),\
textSize=25, font_path="./LiHeiPro.ttf")
lanes = []
b = []
for cls in group_res:
print('----cls %d----' %cls)
for g in group_res[cls]:
if len(g) < config.minimum_points:
continue
print('group length: %d' %(len(g)))
x, y = [], []
for i, j in g:
x.append(j)
y.append(resized_h-1-i)
x = np.array(x, dtype='float32') / config.resize_factor
y = np.array(y, dtype='float32') / config.resize_factor
N_, k_, b_ = solve_k_b(x, y)
print(N_, k_, b_)
x1, x2 = np.min(x), np.max(x)
y1, y2 = k_ * x1 + b_, k_ * x2 + b_
y1, y2 = h-1-y1, h-1-y2
if cls == 1:
color = (0,225,0)
else:
color = (0,225,225)
if k_ > 0.1:
lanes.append([x1,y1,x2,y2])
b.append(b_)
| img,(int(x1),int(y1)),(int(x2),int(y2)), color, thickness=3)
# find the central yellow solid line
lane = lanes[np.argmax(-1 * np.array(b))]
# judge whether cross solid lane
for box in head_box:
if (box[2] - box[0] + 1) * (box[3] - box[1] + 1) < 50*50:
continue
ref_line = [0,0,(box[0]+box[2])/2,(box[1]+box[3])/2] # (x1,y2,x2,y2)
input1 = ref_line + lane
if is_cross(*input1):
text = '逆行危险!'
print(text)
blend_img = cv2ImgAddText(blend_img, text, int((box[0]+box[2])/2-20),int(box[1]), textColor=(255, 0, 0),\
textSize=15, font_path="./LiHeiPro.ttf")
ious = np.array([cal_iou(_, box) for _ in plate_boxes])
if ious.size > 0:
max_idx = np.argmax(ious)
pbox = plate_boxes[max_idx]
pnum = plate_nums[max_idx]
pbox[0] -= 10
pbox[2] += 10
pbox[1] -= 10
pbox[3] += 10
ratio = (pbox[3]-pbox[1]) / (pbox[2]-pbox[0])
ph = 50
pw = int(ph / ratio)
pbox = [int(_) for _ in pbox]
plate = raw_img[pbox[1]:pbox[3],pbox[0]:pbox[2],:]
plate = cv2.resize(plate, (pw,ph))
history.insert(0, plate)
history_cnt.insert(0, 1)
history_record.insert(0, cnt)
history_platenum.insert(0, pnum)
blend_img, history, history_cnt, history_record, history_platenum = \
draw_history(blend_img, history, history_cnt, history_record, history_platenum)
cv2.imwrite('./%s/tmp%d.jpg' %(save_dir,cnt), blend_img) | # cv2.line(blend_ | conditional_block |
main.rs | // This implementation is inspired by https://github.com/dlundquist/sniproxy, but I wrote it from
// scratch based on a careful reading of the TLS 1.3 specification.
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::Duration;
use tokio::io::{self, AsyncReadExt, AsyncWriteExt, Error, ErrorKind};
use tokio::net;
use tokio::signal::unix::{signal, SignalKind};
use tokio::task;
use tokio::time::{timeout, Elapsed};
// Unless otherwise specified, all quotes are from RFC 8446 (TLS 1.3).
// legacy_record_version: "MUST be set to 0x0303 for all records generated by a TLS
// 1.3 implementation"
const TLS_LEGACY_RECORD_VERSION: [u8; 2] = [0x03, 0x03];
const TLS_HANDSHAKE_CONTENT_TYPE: u8 = 0x16;
const TLS_HANDSHAKE_TYPE_CLIENT_HELLO: u8 = 0x01;
const TLS_EXTENSION_SNI: usize = 0x0000;
const TLS_SNI_HOST_NAME_TYPE: u8 = 0;
const TLS_ALERT_CONTENT_TYPE: u8 = 21;
const TLS_ALERT_LENGTH: [u8; 2] = [0x00, 0x02];
const TLS_ALERT_LEVEL_FATAL: u8 = 2;
/// TLS alert descriptions (RFC 8446 section 6). Each variant's discriminant
/// is the on-the-wire AlertDescription byte, so a `TlsError` doubles as the
/// internal error value and the alert sent back to the client (`e as u8` in
/// `handle_connection`).
enum TlsError {
    UnexpectedMessage = 10,
    RecordOverflow = 22,
    DecodeError = 50,
    InternalError = 80,
    UserCanceled = 90,
    UnrecognizedName = 112,
}
/// Any I/O failure during handshake parsing is reported to the client as a
/// generic `internal_error` alert (no detail leaked).
impl From<Error> for TlsError {
    fn from(_error: Error) -> Self {
        TlsError::InternalError
    }
}
/// A handshake-read timeout (see the 10 s `timeout` in `connect_backend`) is
/// surfaced as a `user_canceled` alert.
impl From<Elapsed> for TlsError {
    fn from(_error: Elapsed) -> Self {
        TlsError::UserCanceled
    }
}
type TlsResult<O> = Result<O, TlsError>;
/// Reads TLS handshake bytes from `source` while retaining everything in
/// `buffer`, so the consumed prefix can later be replayed to the backend via
/// `into_source`. `offset` is the read cursor inside `buffer`; `limit` marks
/// the end of the current record's payload (record headers are consumed by
/// `read`, never surfaced to callers).
struct TlsHandshakeReader<R> {
    source: R,
    buffer: Vec<u8>,
    offset: usize,
    limit: usize,
}
/// Charge `length` bytes against the running `limit` of the enclosing TLS
/// structure, failing with a `decode_error` alert when the remaining budget
/// is too small.
fn check_length(length: usize, limit: &mut usize) -> TlsResult<()> {
    match limit.checked_sub(length) {
        Some(remaining) => {
            *limit = remaining;
            Ok(())
        }
        None => Err(TlsError::DecodeError),
    }
}
impl<R: AsyncReadExt> TlsHandshakeReader<R> {
    /// Wrap a raw byte source. The 4 KiB initial capacity covers a typical
    /// ClientHello without reallocating.
    fn new(source: R) -> Self {
        TlsHandshakeReader {
            source: source,
            buffer: Vec::with_capacity(4096),
            offset: 0,
            limit: 0,
        }
    }
    /// Skip `offset` bytes of buffered handshake data, charging them against
    /// `limit` (the remaining length of the enclosing TLS structure).
    fn seek(&mut self, offset: usize, limit: &mut usize) -> TlsResult<()> {
        self.offset += offset;
        check_length(offset, limit)
    }
    /// Read from the source until at least `target` bytes are buffered.
    /// EOF before that point means a truncated handshake, reported as a
    /// `decode_error`.
    async fn fill_to(&mut self, target: usize) -> TlsResult<()> {
        while self.buffer.len() < target {
            if self.source.read_buf(&mut self.buffer).await? == 0 {
                return Err(TlsError::DecodeError);
            }
        }
        Ok(())
    }
async fn read(&mut self) -> TlsResult<u8> |
    /// Read a big-endian unsigned integer of 1..=4 bytes, the encoding TLS
    /// uses for all of its length prefixes.
    async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
        debug_assert!(length > 0 && length <= 4);
        let mut result = 0;
        for _ in 0..length {
            result <<= 8;
            result |= self.read().await? as usize;
        }
        Ok(result)
    }
    /// Replay every handshake byte consumed so far to `dest` (the backend),
    /// then hand back the underlying source so the proxy can keep streaming
    /// the rest of the connection untouched.
    async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
        dest.write_all(&self.buffer[..]).await?;
        Ok(self.source)
    }
}
/// Parse a ClientHello just far enough to extract and validate the SNI
/// host_name (RFC 6066). On success the returned name is lowercase,
/// dot-separated ASCII with no empty labels, no leading `-`/`.` per label,
/// and no trailing dot — safe to use as a single path component downstream.
/// Every failure maps to the TLS alert that will be sent to the client.
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
    // section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
    // ClientHello as its first TLS message."
    if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
        return Err(TlsError::UnexpectedMessage);
    }
    let mut hello_length = source.read_length(3).await?;
    // skip legacy_version (2) and random (32)
    source.seek(34, &mut hello_length)?;
    // skip legacy_session_id
    check_length(1, &mut hello_length)?;
    let length = source.read_length(1).await?;
    source.seek(length, &mut hello_length)?;
    // skip cipher_suites
    check_length(2, &mut hello_length)?;
    let length = source.read_length(2).await?;
    source.seek(length, &mut hello_length)?;
    // skip legacy_compression_methods
    check_length(1, &mut hello_length)?;
    let length = source.read_length(1).await?;
    source.seek(length, &mut hello_length)?;
    // section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
    // field from prior versions of TLS. The presence of extensions can be detected by determining
    // whether there are bytes following the compression_methods field at the end of the
    // ClientHello. Note that this method of detecting optional data differs from the normal TLS
    // method of having a variable-length field, but it is used for compatibility with TLS before
    // extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
    // check that the message either contains no data after legacy_compression_methods or that it
    // contains a valid extensions block with no data following. If not, then it MUST abort the
    // handshake with a "decode_error" alert."
    //
    // If there is no extensions block, treat it like a server name extension was present but with
    // an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
    if hello_length == 0 {
        return Err(TlsError::UnrecognizedName);
    }
    // ClientHello ends immediately after the extensions
    check_length(2, &mut hello_length)?;
    if hello_length != source.read_length(2).await? {
        return Err(TlsError::DecodeError);
    }
    while hello_length > 0 {
        check_length(4, &mut hello_length)?;
        let extension = source.read_length(2).await?;
        let mut length = source.read_length(2).await?;
        if extension != TLS_EXTENSION_SNI {
            source.seek(length, &mut hello_length)?;
            continue;
        }
        check_length(length, &mut hello_length)?;
        // This extension ends immediately after server_name_list
        check_length(2, &mut length)?;
        if length != source.read_length(2).await? {
            return Err(TlsError::DecodeError);
        }
        while length > 0 {
            check_length(3, &mut length)?;
            let name_type = source.read().await?;
            let name_length = source.read_length(2).await?;
            if name_type != TLS_SNI_HOST_NAME_TYPE {
                source.seek(name_length, &mut length)?;
                continue;
            }
            check_length(name_length, &mut length)?;
            // RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
            // same name_type." So we can just extract the first one we find.
            // Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
            // trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
            // attacker can't make us heap-allocate 64kB for a hostname we'll never match.
            if name_length > 254 {
                return Err(TlsError::UnrecognizedName);
            }
            // The following validation rules ensure that we won't return a hostname which could
            // lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
            // equivalent hostnames are only returned in a canonical form. This does not validate
            // anything else about the hostname, such as length limits on individual labels.
            let mut name = Vec::with_capacity(name_length);
            let mut start_of_label = true;
            for _ in 0..name_length {
                let b = source.read().await?.to_ascii_lowercase();
                if start_of_label && (b == b'-' || b == b'.') {
                    // a hostname label can't start with dot or dash
                    return Err(TlsError::UnrecognizedName);
                }
                // the next byte is the start of a label iff this one was a dot
                start_of_label = b'.' == b;
                match b {
                    b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
                    _ => return Err(TlsError::UnrecognizedName),
                }
            }
            // If we're expecting a new label after reading the whole hostname, then either the
            // name was empty or it ended with a dot; neither is allowed.
            if start_of_label {
                return Err(TlsError::UnrecognizedName);
            }
            // safety: every byte was already checked for being a valid subset of UTF-8
            let name = unsafe { String::from_utf8_unchecked(name) };
            return Ok(name);
        }
        // None of the names were of the right type, and section 4.2 says "There MUST NOT be more
        // than one extension of the same type in a given extension block", so there definitely
        // isn't a server name in this ClientHello.
        break;
    }
    // Like when the extensions block is absent, pretend as if a server name was present but not
    // recognized.
    Err(TlsError::UnrecognizedName)
}
/// Map a validated hostname to the backend directory path. With the "hashed"
/// feature enabled, the hostname is first replaced by the URL-safe base64
/// Blake2s digest of its bytes, so directory names never contain the raw
/// client-supplied string.
fn hash_hostname(hostname: String) -> PathBuf {
    #[cfg(feature = "hashed")]
    let hostname = {
        use blake2::{Blake2s, Digest};
        let hash = Blake2s::digest(hostname.as_bytes());
        base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
    };
    hostname.into()
}
/// Extract the SNI from the client's handshake (10 s deadline), connect to
/// the per-hostname Unix socket `<name>/tls-socket`, optionally emit a PROXY
/// protocol v1 header, replay the buffered handshake bytes to the backend,
/// and return the client reader plus the backend stream.
async fn connect_backend<R: AsyncReadExt>(
    source: R,
    local: SocketAddr,
    remote: SocketAddr,
) -> TlsResult<(R, net::UnixStream)> {
    let mut source = TlsHandshakeReader::new(source);
    // timeout can return a "Elapsed" error, or else return the result from get_server_name, which
    // might be a TlsError. So there are two "?" here to unwrap both.
    let name = timeout(Duration::from_secs(10), get_server_name(&mut source)).await??;
    let path = hash_hostname(name);
    // The client sent a name and it's been validated to be safe to use as a path. Consider it a
    // valid server name if connecting to the path doesn't return any of these errors:
    // - is a directory (NotFound after joining a relative path)
    // - which contains an entry named "tls-socket" (NotFound)
    // - which is accessible to this proxy (PermissionDenied)
    // - and is a listening socket (ConnectionRefused)
    // If it isn't a valid server name, then that's the error to report. Anything else is not the
    // client's fault.
    let mut backend = net::UnixStream::connect(path.join("tls-socket"))
        .await
        .map_err(|e| match e.kind() {
            ErrorKind::NotFound | ErrorKind::PermissionDenied | ErrorKind::ConnectionRefused => {
                TlsError::UnrecognizedName
            }
            _ => TlsError::InternalError,
        })?;
    // After this point, all I/O errors are internal errors.
    // If this file exists, turn on the PROXY protocol.
    // NOTE: This is a blocking syscall, but stat should be fast enough that it's not worth
    // spawning off a thread.
    if std::fs::metadata(path.join("send-proxy-v1")).is_ok() {
        let header = format!(
            "PROXY {} {} {} {} {}\r\n",
            match remote {
                SocketAddr::V4(_) => "TCP4",
                SocketAddr::V6(_) => "TCP6",
            },
            remote.ip(),
            local.ip(),
            remote.port(),
            local.port(),
        );
        backend.write_all(header.as_bytes()).await?;
    }
    let source = source.into_source(&mut backend).await?;
    Ok((source, backend))
}
/// Serve one accepted TCP connection: route it by SNI via `connect_backend`,
/// or send a single fatal TLS alert carrying the failure's AlertDescription,
/// then shuttle bytes in both directions until each half closes.
async fn handle_connection(mut client: net::TcpStream, local: SocketAddr, remote: SocketAddr) {
    let (client_in, mut client_out) = client.split();
    let (client_in, mut backend) = match connect_backend(client_in, local, remote).await {
        Ok(r) => r,
        Err(e) => {
            // Try to send an alert before closing the connection, but if that fails, don't worry
            // about it... they'll figure it out eventually.
            let _ = client_out
                .write_all(&[
                    TLS_ALERT_CONTENT_TYPE,
                    TLS_LEGACY_RECORD_VERSION[0],
                    TLS_LEGACY_RECORD_VERSION[1],
                    TLS_ALERT_LENGTH[0],
                    TLS_ALERT_LENGTH[1],
                    TLS_ALERT_LEVEL_FATAL,
                    // AlertDescription comes from the returned error; see TlsError above
                    e as u8,
                ])
                .await;
            return;
        }
    };
    let (backend_in, backend_out) = backend.split();
    // Ignore errors in either direction; just half-close the destination when the source stops
    // being readable. And if that fails, ignore that too.
    async fn copy_all<R, W>(mut from: R, mut to: W)
    where
        R: AsyncReadExt + Unpin,
        W: AsyncWriteExt + Unpin,
    {
        let _ = io::copy(&mut from, &mut to).await;
        let _ = to.shutdown().await;
    }
    tokio::join!(
        copy_all(client_in, backend_out),
        copy_all(backend_in, client_out),
    );
}
/// Accept loop: adopts the pre-bound listening TCP socket inherited on
/// stdin (fd 0), spawns a local task per connection, and exits cleanly when
/// SIGHUP is received.
async fn main_loop() -> io::Result<()> {
    // safety: the rest of the program must not use stdin
    let listener = unsafe { std::os::unix::io::FromRawFd::from_raw_fd(0) };
    // Assume stdin is an already bound and listening TCP socket.
    let mut listener = net::TcpListener::from_std(listener)?;
    // Asking for the listening socket's local address has the side effect of checking that it is
    // actually a TCP socket.
    let local = listener.local_addr()?;
    println!("listening on {}", local);
    let mut graceful_shutdown = signal(SignalKind::hangup())?;
    loop {
        tokio::select!(
            result = listener.accept() => result.map(|(socket, remote)| {
                // Prefer the per-connection local address (distinguishes
                // addresses on a wildcard bind); fall back to the listener's.
                let local = socket.local_addr().unwrap_or(local);
                task::spawn_local(handle_connection(socket, local, remote));
            })?,
            Some(_) = graceful_shutdown.recv() => break,
        );
    }
    println!("got SIGHUP, shutting down");
    Ok(())
}
/// Run the accept loop on a LocalSet (connections are spawned with
/// `spawn_local`), then give in-flight connections up to ten seconds to
/// drain before exiting with a TimedOut error.
#[tokio::main]
async fn main() -> io::Result<()> {
    let local = task::LocalSet::new();
    local.run_until(main_loop()).await?;
    timeout(Duration::from_secs(10), local)
        .await
        .map_err(|_| ErrorKind::TimedOut.into())
}
| {
while self.offset >= self.limit {
self.fill_to(self.limit + 5).await?;
// section 5.1: "Handshake messages MUST NOT be interleaved with other record types.
// That is, if a handshake message is split over two or more records, there MUST NOT be
// any other records between them."
if self.buffer[self.limit] != TLS_HANDSHAKE_CONTENT_TYPE {
return Err(TlsError::UnexpectedMessage);
}
let length = (self.buffer[self.limit + 3] as usize) << 8
| (self.buffer[self.limit + 4] as usize);
// section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
// types, even if those fragments contain padding."
if length == 0 {
return Err(TlsError::DecodeError);
}
// section 5.1: "The record layer fragments information blocks into TLSPlaintext
// records carrying data in chunks of 2^14 bytes or less."
if length > (1 << 14) {
return Err(TlsError::RecordOverflow);
}
self.offset += 5;
self.limit += 5 + length;
}
self.fill_to(self.offset + 1).await?;
let v = self.buffer[self.offset];
self.offset += 1;
Ok(v)
} | identifier_body |
main.rs | // This implementation is inspired by https://github.com/dlundquist/sniproxy, but I wrote it from
// scratch based on a careful reading of the TLS 1.3 specification.
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::Duration;
use tokio::io::{self, AsyncReadExt, AsyncWriteExt, Error, ErrorKind};
use tokio::net;
use tokio::signal::unix::{signal, SignalKind};
use tokio::task;
use tokio::time::{timeout, Elapsed};
// Unless otherwise specified, all quotes are from RFC 8446 (TLS 1.3).
// legacy_record_version: "MUST be set to 0x0303 for all records generated by a TLS
// 1.3 implementation"
const TLS_LEGACY_RECORD_VERSION: [u8; 2] = [0x03, 0x03];
const TLS_HANDSHAKE_CONTENT_TYPE: u8 = 0x16;
const TLS_HANDSHAKE_TYPE_CLIENT_HELLO: u8 = 0x01;
const TLS_EXTENSION_SNI: usize = 0x0000;
const TLS_SNI_HOST_NAME_TYPE: u8 = 0;
const TLS_ALERT_CONTENT_TYPE: u8 = 21;
const TLS_ALERT_LENGTH: [u8; 2] = [0x00, 0x02];
const TLS_ALERT_LEVEL_FATAL: u8 = 2;
enum TlsError {
UnexpectedMessage = 10,
RecordOverflow = 22,
DecodeError = 50,
InternalError = 80,
UserCanceled = 90,
UnrecognizedName = 112,
}
impl From<Error> for TlsError {
fn from(_error: Error) -> Self {
TlsError::InternalError
}
}
impl From<Elapsed> for TlsError {
fn from(_error: Elapsed) -> Self {
TlsError::UserCanceled
}
}
type TlsResult<O> = Result<O, TlsError>;
struct TlsHandshakeReader<R> {
source: R,
buffer: Vec<u8>,
offset: usize,
limit: usize,
}
fn check_length(length: usize, limit: &mut usize) -> TlsResult<()> {
*limit = limit.checked_sub(length).ok_or(TlsError::DecodeError)?;
Ok(())
}
impl<R: AsyncReadExt> TlsHandshakeReader<R> {
fn new(source: R) -> Self {
TlsHandshakeReader {
source: source,
buffer: Vec::with_capacity(4096),
offset: 0,
limit: 0,
}
}
fn seek(&mut self, offset: usize, limit: &mut usize) -> TlsResult<()> {
self.offset += offset;
check_length(offset, limit)
}
async fn fill_to(&mut self, target: usize) -> TlsResult<()> {
while self.buffer.len() < target {
if self.source.read_buf(&mut self.buffer).await? == 0 {
return Err(TlsError::DecodeError);
}
}
Ok(())
}
async fn read(&mut self) -> TlsResult<u8> {
while self.offset >= self.limit {
self.fill_to(self.limit + 5).await?;
// section 5.1: "Handshake messages MUST NOT be interleaved with other record types.
// That is, if a handshake message is split over two or more records, there MUST NOT be
// any other records between them."
if self.buffer[self.limit] != TLS_HANDSHAKE_CONTENT_TYPE {
return Err(TlsError::UnexpectedMessage);
}
let length = (self.buffer[self.limit + 3] as usize) << 8
| (self.buffer[self.limit + 4] as usize);
// section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
// types, even if those fragments contain padding."
if length == 0 {
return Err(TlsError::DecodeError);
}
// section 5.1: "The record layer fragments information blocks into TLSPlaintext
// records carrying data in chunks of 2^14 bytes or less."
if length > (1 << 14) {
return Err(TlsError::RecordOverflow);
}
self.offset += 5;
self.limit += 5 + length;
}
self.fill_to(self.offset + 1).await?;
let v = self.buffer[self.offset];
self.offset += 1;
Ok(v)
}
async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
debug_assert!(length > 0 && length <= 4);
let mut result = 0;
for _ in 0..length {
result <<= 8;
result |= self.read().await? as usize;
}
Ok(result)
}
async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
dest.write_all(&self.buffer[..]).await?;
Ok(self.source)
}
}
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
// section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
// ClientHello as its first TLS message."
if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
return Err(TlsError::UnexpectedMessage);
}
let mut hello_length = source.read_length(3).await?;
// skip legacy_version (2) and random (32)
source.seek(34, &mut hello_length)?;
// skip legacy_session_id
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// skip cipher_suites
check_length(2, &mut hello_length)?;
let length = source.read_length(2).await?;
source.seek(length, &mut hello_length)?;
// skip legacy_compression_methods
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
// field from prior versions of TLS. The presence of extensions can be detected by determining
// whether there are bytes following the compression_methods field at the end of the
// ClientHello. Note that this method of detecting optional data differs from the normal TLS
// method of having a variable-length field, but it is used for compatibility with TLS before
// extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
// check that the message either contains no data after legacy_compression_methods or that it
// contains a valid extensions block with no data following. If not, then it MUST abort the
// handshake with a "decode_error" alert."
//
// If there is no extensions block, treat it like a server name extension was present but with
// an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
if hello_length == 0 {
return Err(TlsError::UnrecognizedName);
}
// ClientHello ends immediately after the extensions
check_length(2, &mut hello_length)?;
if hello_length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while hello_length > 0 {
check_length(4, &mut hello_length)?;
let extension = source.read_length(2).await?;
let mut length = source.read_length(2).await?;
if extension != TLS_EXTENSION_SNI {
source.seek(length, &mut hello_length)?;
continue;
}
check_length(length, &mut hello_length)?;
// This extension ends immediately after server_name_list
check_length(2, &mut length)?;
if length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while length > 0 {
check_length(3, &mut length)?;
let name_type = source.read().await?;
let name_length = source.read_length(2).await?;
if name_type != TLS_SNI_HOST_NAME_TYPE {
source.seek(name_length, &mut length)?;
continue;
}
check_length(name_length, &mut length)?;
// RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
// same name_type." So we can just extract the first one we find.
// Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
// trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
// attacker can't make us heap-allocate 64kB for a hostname we'll never match.
if name_length > 254 {
return Err(TlsError::UnrecognizedName);
}
// The following validation rules ensure that we won't return a hostname which could
// lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
// equivalent hostnames are only returned in a canonical form. This does not validate
// anything else about the hostname, such as length limits on individual labels.
let mut name = Vec::with_capacity(name_length);
let mut start_of_label = true;
for _ in 0..name_length {
let b = source.read().await?.to_ascii_lowercase();
if start_of_label && (b == b'-' || b == b'.') {
// a hostname label can't start with dot or dash
return Err(TlsError::UnrecognizedName);
}
// the next byte is the start of a label iff this one was a dot
start_of_label = b'.' == b;
match b {
b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
_ => return Err(TlsError::UnrecognizedName),
}
}
// If we're expecting a new label after reading the whole hostname, then either the
// name was empty or it ended with a dot; neither is allowed.
if start_of_label {
return Err(TlsError::UnrecognizedName);
}
// safety: every byte was already checked for being a valid subset of UTF-8
let name = unsafe { String::from_utf8_unchecked(name) };
return Ok(name);
}
// None of the names were of the right type, and section 4.2 says "There MUST NOT be more
// than one extension of the same type in a given extension block", so there definitely
// isn't a server name in this ClientHello.
break;
}
// Like when the extensions block is absent, pretend as if a server name was present but not
// recognized.
Err(TlsError::UnrecognizedName)
}
fn hash_hostname(hostname: String) -> PathBuf {
#[cfg(feature = "hashed")]
let hostname = {
use blake2::{Blake2s, Digest};
let hash = Blake2s::digest(hostname.as_bytes());
base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
};
hostname.into()
}
async fn connect_backend<R: AsyncReadExt>(
source: R,
local: SocketAddr,
remote: SocketAddr,
) -> TlsResult<(R, net::UnixStream)> {
let mut source = TlsHandshakeReader::new(source);
// timeout can return a "Elapsed" error, or else return the result from get_server_name, which
// might be a TlsError. So there are two "?" here to unwrap both.
let name = timeout(Duration::from_secs(10), get_server_name(&mut source)).await??;
let path = hash_hostname(name);
// The client sent a name and it's been validated to be safe to use as a path. Consider it a
// valid server name if connecting to the path doesn't return any of these errors:
// - is a directory (NotFound after joining a relative path)
// - which contains an entry named "tls-socket" (NotFound)
// - which is accessible to this proxy (PermissionDenied)
// - and is a listening socket (ConnectionRefused)
// If it isn't a valid server name, then that's the error to report. Anything else is not the
// client's fault.
let mut backend = net::UnixStream::connect(path.join("tls-socket"))
.await
.map_err(|e| match e.kind() {
ErrorKind::NotFound | ErrorKind::PermissionDenied | ErrorKind::ConnectionRefused => |
_ => TlsError::InternalError,
})?;
// After this point, all I/O errors are internal errors.
// If this file exists, turn on the PROXY protocol.
// NOTE: This is a blocking syscall, but stat should be fast enough that it's not worth
// spawning off a thread.
if std::fs::metadata(path.join("send-proxy-v1")).is_ok() {
let header = format!(
"PROXY {} {} {} {} {}\r\n",
match remote {
SocketAddr::V4(_) => "TCP4",
SocketAddr::V6(_) => "TCP6",
},
remote.ip(),
local.ip(),
remote.port(),
local.port(),
);
backend.write_all(header.as_bytes()).await?;
}
let source = source.into_source(&mut backend).await?;
Ok((source, backend))
}
async fn handle_connection(mut client: net::TcpStream, local: SocketAddr, remote: SocketAddr) {
let (client_in, mut client_out) = client.split();
let (client_in, mut backend) = match connect_backend(client_in, local, remote).await {
Ok(r) => r,
Err(e) => {
// Try to send an alert before closing the connection, but if that fails, don't worry
// about it... they'll figure it out eventually.
let _ = client_out
.write_all(&[
TLS_ALERT_CONTENT_TYPE,
TLS_LEGACY_RECORD_VERSION[0],
TLS_LEGACY_RECORD_VERSION[1],
TLS_ALERT_LENGTH[0],
TLS_ALERT_LENGTH[1],
TLS_ALERT_LEVEL_FATAL,
// AlertDescription comes from the returned error; see TlsError above
e as u8,
])
.await;
return;
}
};
let (backend_in, backend_out) = backend.split();
// Ignore errors in either direction; just half-close the destination when the source stops
// being readable. And if that fails, ignore that too.
async fn copy_all<R, W>(mut from: R, mut to: W)
where
R: AsyncReadExt + Unpin,
W: AsyncWriteExt + Unpin,
{
let _ = io::copy(&mut from, &mut to).await;
let _ = to.shutdown().await;
}
tokio::join!(
copy_all(client_in, backend_out),
copy_all(backend_in, client_out),
);
}
async fn main_loop() -> io::Result<()> {
// safety: the rest of the program must not use stdin
let listener = unsafe { std::os::unix::io::FromRawFd::from_raw_fd(0) };
// Assume stdin is an already bound and listening TCP socket.
let mut listener = net::TcpListener::from_std(listener)?;
// Asking for the listening socket's local address has the side effect of checking that it is
// actually a TCP socket.
let local = listener.local_addr()?;
println!("listening on {}", local);
let mut graceful_shutdown = signal(SignalKind::hangup())?;
loop {
tokio::select!(
result = listener.accept() => result.map(|(socket, remote)| {
let local = socket.local_addr().unwrap_or(local);
task::spawn_local(handle_connection(socket, local, remote));
})?,
Some(_) = graceful_shutdown.recv() => break,
);
}
println!("got SIGHUP, shutting down");
Ok(())
}
#[tokio::main]
async fn main() -> io::Result<()> {
let local = task::LocalSet::new();
local.run_until(main_loop()).await?;
timeout(Duration::from_secs(10), local)
.await
.map_err(|_| ErrorKind::TimedOut.into())
}
| {
TlsError::UnrecognizedName
} | conditional_block |
main.rs | // This implementation is inspired by https://github.com/dlundquist/sniproxy, but I wrote it from
// scratch based on a careful reading of the TLS 1.3 specification.
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::Duration;
use tokio::io::{self, AsyncReadExt, AsyncWriteExt, Error, ErrorKind};
use tokio::net;
use tokio::signal::unix::{signal, SignalKind};
use tokio::task;
use tokio::time::{timeout, Elapsed};
// Unless otherwise specified, all quotes are from RFC 8446 (TLS 1.3).
// legacy_record_version: "MUST be set to 0x0303 for all records generated by a TLS
// 1.3 implementation"
const TLS_LEGACY_RECORD_VERSION: [u8; 2] = [0x03, 0x03];
const TLS_HANDSHAKE_CONTENT_TYPE: u8 = 0x16;
const TLS_HANDSHAKE_TYPE_CLIENT_HELLO: u8 = 0x01;
const TLS_EXTENSION_SNI: usize = 0x0000;
const TLS_SNI_HOST_NAME_TYPE: u8 = 0;
const TLS_ALERT_CONTENT_TYPE: u8 = 21;
const TLS_ALERT_LENGTH: [u8; 2] = [0x00, 0x02];
const TLS_ALERT_LEVEL_FATAL: u8 = 2;
enum TlsError {
UnexpectedMessage = 10,
RecordOverflow = 22,
DecodeError = 50,
InternalError = 80,
UserCanceled = 90,
UnrecognizedName = 112,
}
impl From<Error> for TlsError {
fn from(_error: Error) -> Self {
TlsError::InternalError
}
}
impl From<Elapsed> for TlsError {
fn from(_error: Elapsed) -> Self {
TlsError::UserCanceled
}
}
type TlsResult<O> = Result<O, TlsError>;
struct TlsHandshakeReader<R> {
source: R,
buffer: Vec<u8>,
offset: usize,
limit: usize,
}
fn check_length(length: usize, limit: &mut usize) -> TlsResult<()> {
*limit = limit.checked_sub(length).ok_or(TlsError::DecodeError)?;
Ok(())
}
impl<R: AsyncReadExt> TlsHandshakeReader<R> {
fn new(source: R) -> Self {
TlsHandshakeReader {
source: source,
buffer: Vec::with_capacity(4096),
offset: 0,
limit: 0,
}
}
fn seek(&mut self, offset: usize, limit: &mut usize) -> TlsResult<()> {
self.offset += offset;
check_length(offset, limit)
}
async fn fill_to(&mut self, target: usize) -> TlsResult<()> {
while self.buffer.len() < target {
if self.source.read_buf(&mut self.buffer).await? == 0 {
return Err(TlsError::DecodeError);
}
}
Ok(())
}
async fn read(&mut self) -> TlsResult<u8> {
while self.offset >= self.limit {
self.fill_to(self.limit + 5).await?;
// section 5.1: "Handshake messages MUST NOT be interleaved with other record types.
// That is, if a handshake message is split over two or more records, there MUST NOT be
// any other records between them."
if self.buffer[self.limit] != TLS_HANDSHAKE_CONTENT_TYPE {
return Err(TlsError::UnexpectedMessage);
}
let length = (self.buffer[self.limit + 3] as usize) << 8
| (self.buffer[self.limit + 4] as usize);
// section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
// types, even if those fragments contain padding."
if length == 0 {
return Err(TlsError::DecodeError);
}
// section 5.1: "The record layer fragments information blocks into TLSPlaintext
// records carrying data in chunks of 2^14 bytes or less."
if length > (1 << 14) {
return Err(TlsError::RecordOverflow);
}
self.offset += 5;
self.limit += 5 + length; | Ok(v)
}
async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
debug_assert!(length > 0 && length <= 4);
let mut result = 0;
for _ in 0..length {
result <<= 8;
result |= self.read().await? as usize;
}
Ok(result)
}
async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
dest.write_all(&self.buffer[..]).await?;
Ok(self.source)
}
}
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
// section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
// ClientHello as its first TLS message."
if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
return Err(TlsError::UnexpectedMessage);
}
let mut hello_length = source.read_length(3).await?;
// skip legacy_version (2) and random (32)
source.seek(34, &mut hello_length)?;
// skip legacy_session_id
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// skip cipher_suites
check_length(2, &mut hello_length)?;
let length = source.read_length(2).await?;
source.seek(length, &mut hello_length)?;
// skip legacy_compression_methods
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
// field from prior versions of TLS. The presence of extensions can be detected by determining
// whether there are bytes following the compression_methods field at the end of the
// ClientHello. Note that this method of detecting optional data differs from the normal TLS
// method of having a variable-length field, but it is used for compatibility with TLS before
// extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
// check that the message either contains no data after legacy_compression_methods or that it
// contains a valid extensions block with no data following. If not, then it MUST abort the
// handshake with a "decode_error" alert."
//
// If there is no extensions block, treat it like a server name extension was present but with
// an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
if hello_length == 0 {
return Err(TlsError::UnrecognizedName);
}
// ClientHello ends immediately after the extensions
check_length(2, &mut hello_length)?;
if hello_length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while hello_length > 0 {
check_length(4, &mut hello_length)?;
let extension = source.read_length(2).await?;
let mut length = source.read_length(2).await?;
if extension != TLS_EXTENSION_SNI {
source.seek(length, &mut hello_length)?;
continue;
}
check_length(length, &mut hello_length)?;
// This extension ends immediately after server_name_list
check_length(2, &mut length)?;
if length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while length > 0 {
check_length(3, &mut length)?;
let name_type = source.read().await?;
let name_length = source.read_length(2).await?;
if name_type != TLS_SNI_HOST_NAME_TYPE {
source.seek(name_length, &mut length)?;
continue;
}
check_length(name_length, &mut length)?;
// RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
// same name_type." So we can just extract the first one we find.
// Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
// trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
// attacker can't make us heap-allocate 64kB for a hostname we'll never match.
if name_length > 254 {
return Err(TlsError::UnrecognizedName);
}
// The following validation rules ensure that we won't return a hostname which could
// lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
// equivalent hostnames are only returned in a canonical form. This does not validate
// anything else about the hostname, such as length limits on individual labels.
let mut name = Vec::with_capacity(name_length);
let mut start_of_label = true;
for _ in 0..name_length {
let b = source.read().await?.to_ascii_lowercase();
if start_of_label && (b == b'-' || b == b'.') {
// a hostname label can't start with dot or dash
return Err(TlsError::UnrecognizedName);
}
// the next byte is the start of a label iff this one was a dot
start_of_label = b'.' == b;
match b {
b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
_ => return Err(TlsError::UnrecognizedName),
}
}
// If we're expecting a new label after reading the whole hostname, then either the
// name was empty or it ended with a dot; neither is allowed.
if start_of_label {
return Err(TlsError::UnrecognizedName);
}
// safety: every byte was already checked for being a valid subset of UTF-8
let name = unsafe { String::from_utf8_unchecked(name) };
return Ok(name);
}
// None of the names were of the right type, and section 4.2 says "There MUST NOT be more
// than one extension of the same type in a given extension block", so there definitely
// isn't a server name in this ClientHello.
break;
}
// Like when the extensions block is absent, pretend as if a server name was present but not
// recognized.
Err(TlsError::UnrecognizedName)
}
fn hash_hostname(hostname: String) -> PathBuf {
#[cfg(feature = "hashed")]
let hostname = {
use blake2::{Blake2s, Digest};
let hash = Blake2s::digest(hostname.as_bytes());
base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
};
hostname.into()
}
async fn connect_backend<R: AsyncReadExt>(
source: R,
local: SocketAddr,
remote: SocketAddr,
) -> TlsResult<(R, net::UnixStream)> {
let mut source = TlsHandshakeReader::new(source);
// timeout can return a "Elapsed" error, or else return the result from get_server_name, which
// might be a TlsError. So there are two "?" here to unwrap both.
let name = timeout(Duration::from_secs(10), get_server_name(&mut source)).await??;
let path = hash_hostname(name);
// The client sent a name and it's been validated to be safe to use as a path. Consider it a
// valid server name if connecting to the path doesn't return any of these errors:
// - is a directory (NotFound after joining a relative path)
// - which contains an entry named "tls-socket" (NotFound)
// - which is accessible to this proxy (PermissionDenied)
// - and is a listening socket (ConnectionRefused)
// If it isn't a valid server name, then that's the error to report. Anything else is not the
// client's fault.
let mut backend = net::UnixStream::connect(path.join("tls-socket"))
.await
.map_err(|e| match e.kind() {
ErrorKind::NotFound | ErrorKind::PermissionDenied | ErrorKind::ConnectionRefused => {
TlsError::UnrecognizedName
}
_ => TlsError::InternalError,
})?;
// After this point, all I/O errors are internal errors.
// If this file exists, turn on the PROXY protocol.
// NOTE: This is a blocking syscall, but stat should be fast enough that it's not worth
// spawning off a thread.
if std::fs::metadata(path.join("send-proxy-v1")).is_ok() {
let header = format!(
"PROXY {} {} {} {} {}\r\n",
match remote {
SocketAddr::V4(_) => "TCP4",
SocketAddr::V6(_) => "TCP6",
},
remote.ip(),
local.ip(),
remote.port(),
local.port(),
);
backend.write_all(header.as_bytes()).await?;
}
let source = source.into_source(&mut backend).await?;
Ok((source, backend))
}
async fn handle_connection(mut client: net::TcpStream, local: SocketAddr, remote: SocketAddr) {
let (client_in, mut client_out) = client.split();
let (client_in, mut backend) = match connect_backend(client_in, local, remote).await {
Ok(r) => r,
Err(e) => {
// Try to send an alert before closing the connection, but if that fails, don't worry
// about it... they'll figure it out eventually.
let _ = client_out
.write_all(&[
TLS_ALERT_CONTENT_TYPE,
TLS_LEGACY_RECORD_VERSION[0],
TLS_LEGACY_RECORD_VERSION[1],
TLS_ALERT_LENGTH[0],
TLS_ALERT_LENGTH[1],
TLS_ALERT_LEVEL_FATAL,
// AlertDescription comes from the returned error; see TlsError above
e as u8,
])
.await;
return;
}
};
let (backend_in, backend_out) = backend.split();
// Ignore errors in either direction; just half-close the destination when the source stops
// being readable. And if that fails, ignore that too.
async fn copy_all<R, W>(mut from: R, mut to: W)
where
R: AsyncReadExt + Unpin,
W: AsyncWriteExt + Unpin,
{
let _ = io::copy(&mut from, &mut to).await;
let _ = to.shutdown().await;
}
tokio::join!(
copy_all(client_in, backend_out),
copy_all(backend_in, client_out),
);
}
async fn main_loop() -> io::Result<()> {
// safety: the rest of the program must not use stdin
let listener = unsafe { std::os::unix::io::FromRawFd::from_raw_fd(0) };
// Assume stdin is an already bound and listening TCP socket.
let mut listener = net::TcpListener::from_std(listener)?;
// Asking for the listening socket's local address has the side effect of checking that it is
// actually a TCP socket.
let local = listener.local_addr()?;
println!("listening on {}", local);
let mut graceful_shutdown = signal(SignalKind::hangup())?;
loop {
tokio::select!(
result = listener.accept() => result.map(|(socket, remote)| {
let local = socket.local_addr().unwrap_or(local);
task::spawn_local(handle_connection(socket, local, remote));
})?,
Some(_) = graceful_shutdown.recv() => break,
);
}
println!("got SIGHUP, shutting down");
Ok(())
}
#[tokio::main]
async fn main() -> io::Result<()> {
let local = task::LocalSet::new();
local.run_until(main_loop()).await?;
timeout(Duration::from_secs(10), local)
.await
.map_err(|_| ErrorKind::TimedOut.into())
} | }
self.fill_to(self.offset + 1).await?;
let v = self.buffer[self.offset];
self.offset += 1; | random_line_split |
main.rs | // This implementation is inspired by https://github.com/dlundquist/sniproxy, but I wrote it from
// scratch based on a careful reading of the TLS 1.3 specification.
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::Duration;
use tokio::io::{self, AsyncReadExt, AsyncWriteExt, Error, ErrorKind};
use tokio::net;
use tokio::signal::unix::{signal, SignalKind};
use tokio::task;
use tokio::time::{timeout, Elapsed};
// Unless otherwise specified, all quotes are from RFC 8446 (TLS 1.3).
// legacy_record_version: "MUST be set to 0x0303 for all records generated by a TLS
// 1.3 implementation"
const TLS_LEGACY_RECORD_VERSION: [u8; 2] = [0x03, 0x03];
const TLS_HANDSHAKE_CONTENT_TYPE: u8 = 0x16;
const TLS_HANDSHAKE_TYPE_CLIENT_HELLO: u8 = 0x01;
const TLS_EXTENSION_SNI: usize = 0x0000;
const TLS_SNI_HOST_NAME_TYPE: u8 = 0;
const TLS_ALERT_CONTENT_TYPE: u8 = 21;
const TLS_ALERT_LENGTH: [u8; 2] = [0x00, 0x02];
const TLS_ALERT_LEVEL_FATAL: u8 = 2;
/// TLS alert descriptions (RFC 8446 section 6). The explicit discriminants
/// double as the AlertDescription byte sent to the client on failure (see the
/// `e as u8` in `handle_connection`), so these values must match the RFC.
enum TlsError {
    UnexpectedMessage = 10,
    RecordOverflow = 22,
    DecodeError = 50,
    InternalError = 80,
    UserCanceled = 90,
    UnrecognizedName = 112,
}
impl From<Error> for TlsError {
fn from(_error: Error) -> Self {
TlsError::InternalError
}
}
impl From<Elapsed> for TlsError {
fn from(_error: Elapsed) -> Self {
TlsError::UserCanceled
}
}
type TlsResult<O> = Result<O, TlsError>;
struct TlsHandshakeReader<R> {
source: R,
buffer: Vec<u8>,
offset: usize,
limit: usize,
}
fn | (length: usize, limit: &mut usize) -> TlsResult<()> {
*limit = limit.checked_sub(length).ok_or(TlsError::DecodeError)?;
Ok(())
}
impl<R: AsyncReadExt> TlsHandshakeReader<R> {
    /// Wrap `source`, buffering every byte read so that the full ClientHello
    /// can later be replayed to the backend (see `into_source`).
    fn new(source: R) -> Self {
        TlsHandshakeReader {
            source: source,
            buffer: Vec::with_capacity(4096),
            offset: 0,
            limit: 0,
        }
    }
    /// Skip `offset` bytes of handshake payload, charging them against the
    /// enclosing structure's remaining byte budget `limit`.
    fn seek(&mut self, offset: usize, limit: &mut usize) -> TlsResult<()> {
        self.offset += offset;
        check_length(offset, limit)
    }
    /// Read from the source until at least `target` bytes are buffered.
    /// EOF before reaching `target` is a decode error.
    async fn fill_to(&mut self, target: usize) -> TlsResult<()> {
        while self.buffer.len() < target {
            if self.source.read_buf(&mut self.buffer).await? == 0 {
                return Err(TlsError::DecodeError);
            }
        }
        Ok(())
    }
    /// Return the next handshake payload byte. `offset`..`limit` brackets the
    /// current record's payload; when it is exhausted, the next 5-byte record
    /// header is read and validated before continuing.
    async fn read(&mut self) -> TlsResult<u8> {
        while self.offset >= self.limit {
            // Buffer the next record header: type (1), version (2), length (2).
            self.fill_to(self.limit + 5).await?;
            // section 5.1: "Handshake messages MUST NOT be interleaved with other record types.
            // That is, if a handshake message is split over two or more records, there MUST NOT be
            // any other records between them."
            if self.buffer[self.limit] != TLS_HANDSHAKE_CONTENT_TYPE {
                return Err(TlsError::UnexpectedMessage);
            }
            // Big-endian u16 payload length from header bytes 3..5.
            let length = (self.buffer[self.limit + 3] as usize) << 8
                | (self.buffer[self.limit + 4] as usize);
            // section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
            // types, even if those fragments contain padding."
            if length == 0 {
                return Err(TlsError::DecodeError);
            }
            // section 5.1: "The record layer fragments information blocks into TLSPlaintext
            // records carrying data in chunks of 2^14 bytes or less."
            if length > (1 << 14) {
                return Err(TlsError::RecordOverflow);
            }
            // Step over the header; this record's payload ends at the new limit.
            self.offset += 5;
            self.limit += 5 + length;
        }
        self.fill_to(self.offset + 1).await?;
        let v = self.buffer[self.offset];
        self.offset += 1;
        Ok(v)
    }
    /// Read a big-endian unsigned integer of `length` bytes (1..=4), the
    /// encoding TLS uses for all of its length prefixes.
    async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
        debug_assert!(length > 0 && length <= 4);
        let mut result = 0;
        for _ in 0..length {
            result <<= 8;
            result |= self.read().await? as usize;
        }
        Ok(result)
    }
    /// Replay every buffered byte (all records consumed so far) to `dest`,
    /// then hand back the raw source so the caller can proxy it directly.
    async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
        dest.write_all(&self.buffer[..]).await?;
        Ok(self.source)
    }
}
/// Parse just enough of a TLS ClientHello to extract the SNI host name.
///
/// Returns the lowercased, syntactically validated host name, or a `TlsError`
/// whose discriminant is the alert to send back. Absence of an SNI extension
/// (or of the extensions block entirely) is reported as `UnrecognizedName`.
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
    // section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
    // ClientHello as its first TLS message."
    if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
        return Err(TlsError::UnexpectedMessage);
    }
    // hello_length is the running byte budget for the rest of the ClientHello.
    let mut hello_length = source.read_length(3).await?;
    // skip legacy_version (2) and random (32)
    source.seek(34, &mut hello_length)?;
    // skip legacy_session_id
    check_length(1, &mut hello_length)?;
    let length = source.read_length(1).await?;
    source.seek(length, &mut hello_length)?;
    // skip cipher_suites
    check_length(2, &mut hello_length)?;
    let length = source.read_length(2).await?;
    source.seek(length, &mut hello_length)?;
    // skip legacy_compression_methods
    check_length(1, &mut hello_length)?;
    let length = source.read_length(1).await?;
    source.seek(length, &mut hello_length)?;
    // section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
    // field from prior versions of TLS. The presence of extensions can be detected by determining
    // whether there are bytes following the compression_methods field at the end of the
    // ClientHello. Note that this method of detecting optional data differs from the normal TLS
    // method of having a variable-length field, but it is used for compatibility with TLS before
    // extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
    // check that the message either contains no data after legacy_compression_methods or that it
    // contains a valid extensions block with no data following. If not, then it MUST abort the
    // handshake with a "decode_error" alert."
    //
    // If there is no extensions block, treat it like a server name extension was present but with
    // an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
    if hello_length == 0 {
        return Err(TlsError::UnrecognizedName);
    }
    // ClientHello ends immediately after the extensions
    check_length(2, &mut hello_length)?;
    if hello_length != source.read_length(2).await? {
        return Err(TlsError::DecodeError);
    }
    while hello_length > 0 {
        // Each extension: type (2) + length (2), then that many payload bytes.
        check_length(4, &mut hello_length)?;
        let extension = source.read_length(2).await?;
        let mut length = source.read_length(2).await?;
        if extension != TLS_EXTENSION_SNI {
            source.seek(length, &mut hello_length)?;
            continue;
        }
        check_length(length, &mut hello_length)?;
        // This extension ends immediately after server_name_list
        check_length(2, &mut length)?;
        if length != source.read_length(2).await? {
            return Err(TlsError::DecodeError);
        }
        while length > 0 {
            check_length(3, &mut length)?;
            let name_type = source.read().await?;
            let name_length = source.read_length(2).await?;
            if name_type != TLS_SNI_HOST_NAME_TYPE {
                source.seek(name_length, &mut length)?;
                continue;
            }
            check_length(name_length, &mut length)?;
            // RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
            // same name_type." So we can just extract the first one we find.
            // Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
            // trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
            // attacker can't make us heap-allocate 64kB for a hostname we'll never match.
            if name_length > 254 {
                return Err(TlsError::UnrecognizedName);
            }
            // The following validation rules ensure that we won't return a hostname which could
            // lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
            // equivalent hostnames are only returned in a canonical form. This does not validate
            // anything else about the hostname, such as length limits on individual labels.
            let mut name = Vec::with_capacity(name_length);
            let mut start_of_label = true;
            for _ in 0..name_length {
                let b = source.read().await?.to_ascii_lowercase();
                if start_of_label && (b == b'-' || b == b'.') {
                    // a hostname label can't start with dot or dash
                    return Err(TlsError::UnrecognizedName);
                }
                // the next byte is the start of a label iff this one was a dot
                start_of_label = b'.' == b;
                match b {
                    b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
                    _ => return Err(TlsError::UnrecognizedName),
                }
            }
            // If we're expecting a new label after reading the whole hostname, then either the
            // name was empty or it ended with a dot; neither is allowed.
            if start_of_label {
                return Err(TlsError::UnrecognizedName);
            }
            // safety: every byte was already checked for being a valid subset of UTF-8
            let name = unsafe { String::from_utf8_unchecked(name) };
            return Ok(name);
        }
        // None of the names were of the right type, and section 4.2 says "There MUST NOT be more
        // than one extension of the same type in a given extension block", so there definitely
        // isn't a server name in this ClientHello.
        break;
    }
    // Like when the extensions block is absent, pretend as if a server name was present but not
    // recognized.
    Err(TlsError::UnrecognizedName)
}
/// Map a validated hostname to the backend directory path.
///
/// With the "hashed" feature enabled, the hostname is replaced by a URL-safe,
/// unpadded base64 BLAKE2s digest before conversion, so the on-disk directory
/// names do not directly reveal the hostnames being served.
fn hash_hostname(hostname: String) -> PathBuf {
    #[cfg(feature = "hashed")]
    let hostname = {
        use blake2::{Blake2s, Digest};
        let hash = Blake2s::digest(hostname.as_bytes());
        base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
    };
    hostname.into()
}
/// Read the SNI name from `source` (with a 10-second deadline), connect to the
/// matching backend Unix socket, optionally emit a PROXY protocol v1 header,
/// replay the buffered ClientHello, and return both halves for proxying.
async fn connect_backend<R: AsyncReadExt>(
    source: R,
    local: SocketAddr,
    remote: SocketAddr,
) -> TlsResult<(R, net::UnixStream)> {
    let mut source = TlsHandshakeReader::new(source);
    // timeout can return a "Elapsed" error, or else return the result from get_server_name, which
    // might be a TlsError. So there are two "?" here to unwrap both.
    let name = timeout(Duration::from_secs(10), get_server_name(&mut source)).await??;
    let path = hash_hostname(name);
    // The client sent a name and it's been validated to be safe to use as a path. Consider it a
    // valid server name if connecting to the path doesn't return any of these errors:
    // - is a directory (NotFound after joining a relative path)
    // - which contains an entry named "tls-socket" (NotFound)
    // - which is accessible to this proxy (PermissionDenied)
    // - and is a listening socket (ConnectionRefused)
    // If it isn't a valid server name, then that's the error to report. Anything else is not the
    // client's fault.
    let mut backend = net::UnixStream::connect(path.join("tls-socket"))
        .await
        .map_err(|e| match e.kind() {
            ErrorKind::NotFound | ErrorKind::PermissionDenied | ErrorKind::ConnectionRefused => {
                TlsError::UnrecognizedName
            }
            _ => TlsError::InternalError,
        })?;
    // After this point, all I/O errors are internal errors.
    // If this file exists, turn on the PROXY protocol.
    // NOTE: This is a blocking syscall, but stat should be fast enough that it's not worth
    // spawning off a thread.
    if std::fs::metadata(path.join("send-proxy-v1")).is_ok() {
        let header = format!(
            "PROXY {} {} {} {} {}\r\n",
            match remote {
                SocketAddr::V4(_) => "TCP4",
                SocketAddr::V6(_) => "TCP6",
            },
            remote.ip(),
            local.ip(),
            remote.port(),
            local.port(),
        );
        backend.write_all(header.as_bytes()).await?;
    }
    // Forward the ClientHello bytes we consumed, then recover the raw stream.
    let source = source.into_source(&mut backend).await?;
    Ok((source, backend))
}
/// Serve one accepted client: pick a backend from the SNI name, then shuttle
/// bytes in both directions until both sides close. On setup failure, send a
/// single fatal TLS alert whose description byte is the `TlsError` value.
async fn handle_connection(mut client: net::TcpStream, local: SocketAddr, remote: SocketAddr) {
    let (client_in, mut client_out) = client.split();
    let (client_in, mut backend) = match connect_backend(client_in, local, remote).await {
        Ok(r) => r,
        Err(e) => {
            // Try to send an alert before closing the connection, but if that fails, don't worry
            // about it... they'll figure it out eventually.
            let _ = client_out
                .write_all(&[
                    TLS_ALERT_CONTENT_TYPE,
                    TLS_LEGACY_RECORD_VERSION[0],
                    TLS_LEGACY_RECORD_VERSION[1],
                    TLS_ALERT_LENGTH[0],
                    TLS_ALERT_LENGTH[1],
                    TLS_ALERT_LEVEL_FATAL,
                    // AlertDescription comes from the returned error; see TlsError above
                    e as u8,
                ])
                .await;
            return;
        }
    };
    let (backend_in, backend_out) = backend.split();
    // Ignore errors in either direction; just half-close the destination when the source stops
    // being readable. And if that fails, ignore that too.
    async fn copy_all<R, W>(mut from: R, mut to: W)
    where
        R: AsyncReadExt + Unpin,
        W: AsyncWriteExt + Unpin,
    {
        let _ = io::copy(&mut from, &mut to).await;
        let _ = to.shutdown().await;
    }
    // Run both directions concurrently on this task; join returns when both
    // copies have finished (i.e. both sides have been half-closed).
    tokio::join!(
        copy_all(client_in, backend_out),
        copy_all(backend_in, client_out),
    );
}
/// Accept loop: serve connections from the listening TCP socket inherited as
/// stdin (fd 0) until a SIGHUP requests graceful shutdown.
async fn main_loop() -> io::Result<()> {
    // safety: the rest of the program must not use stdin
    let listener = unsafe { std::os::unix::io::FromRawFd::from_raw_fd(0) };
    // Assume stdin is an already bound and listening TCP socket.
    let mut listener = net::TcpListener::from_std(listener)?;
    // Asking for the listening socket's local address has the side effect of checking that it is
    // actually a TCP socket.
    let local = listener.local_addr()?;
    println!("listening on {}", local);
    let mut graceful_shutdown = signal(SignalKind::hangup())?;
    loop {
        tokio::select!(
            // Accept errors abort the loop via `?`; each connection runs as
            // its own task on the current-thread LocalSet (see main).
            result = listener.accept() => result.map(|(socket, remote)| {
                // Prefer the per-connection local address (relevant when
                // listening on a wildcard address), falling back to the
                // listener's address.
                let local = socket.local_addr().unwrap_or(local);
                task::spawn_local(handle_connection(socket, local, remote));
            })?,
            Some(_) = graceful_shutdown.recv() => break,
        );
    }
    println!("got SIGHUP, shutting down");
    Ok(())
}
#[tokio::main]
async fn main() -> io::Result<()> {
    // Connections are spawned with spawn_local, so everything runs on this
    // LocalSet.
    let local = task::LocalSet::new();
    local.run_until(main_loop()).await?;
    // After SIGHUP, give in-flight connections up to 10 seconds to drain
    // before reporting a timeout.
    timeout(Duration::from_secs(10), local)
        .await
        .map_err(|_| ErrorKind::TimedOut.into())
}
| check_length | identifier_name |
app.rs | use crate::{
auth::Resource,
default_resource, diffable,
error::ServiceError,
generation,
models::{
fix_null_default,
sql::{slice_iter, SelectBuilder},
Lock, TypedAlias,
},
update_aliases, Client,
};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use core::pin::Pin;
use drogue_client::{meta, registry};
use drogue_cloud_service_api::{auth::user::UserInformation, labels::LabelSelector};
use futures::{future, Stream, TryStreamExt};
use indexmap::map::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use std::collections::{hash_map::RandomState, HashMap, HashSet};
use tokio_postgres::{
types::{Json, ToSql, Type},
Row,
};
use uuid::Uuid;
/// An application entity record.
pub struct Application {
    // unique id of the record
    pub uid: Uuid,
    // primary key / human-readable name
    pub name: String,
    pub labels: HashMap<String, String>,
    pub annotations: HashMap<String, String>,
    pub creation_timestamp: DateTime<Utc>,
    // regenerated on every update (see `update_data`)
    pub resource_version: Uuid,
    pub generation: u64,
    // set once deletion has been requested; finalizers may still be pending
    pub deletion_timestamp: Option<DateTime<Utc>>,
    pub finalizers: Vec<String>,
    /// ownership information
    pub owner: Option<String>,
    /// transfer to new owner
    pub transfer_owner: Option<String>,
    /// members list
    pub members: IndexMap<String, MemberEntry>,
    /// arbitrary payload (JSON; holds the "spec" and "status" sections)
    pub data: Value,
}
diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
/// Expose ownership and membership information to the auth layer.
impl Resource for Application {
    fn owner(&self) -> Option<&str> {
        self.owner.as_deref()
    }
    fn members(&self) -> &IndexMap<String, MemberEntry> {
        &self.members
    }
}
/// Access level of an application member; serialized in camelCase.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
    /// Allow everything, including changing members
    Admin,
    /// Allow reading and writing, but not changing members.
    Manager,
    /// Allow reading only.
    Reader,
}
/// Value type of the application members map: the role granted to one member.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
    pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
/// Extract a section from the application data. Prevents cloning the whole struct.
///
/// Moves the JSON object stored under `key` out of `app.data` (leaving `Null`
/// in its place) and returns the application together with the section, or
/// `None` when the key is absent or not an object.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
    let taken = app.data.get_mut(key).map(Value::take);
    let sect = match taken {
        Some(Value::Object(map)) => Some(map),
        _ => None,
    };
    (app, sect)
}
/// Convert the database record into the public registry representation,
/// splitting the "spec" and "status" sections out of the JSON payload.
impl From<Application> for registry::v1::Application {
    fn from(app: Application) -> Self {
        let (app, spec) = extract_sect(app, "spec");
        let (app, status) = extract_sect(app, "status");
        registry::v1::Application {
            metadata: meta::v1::NonScopedMetadata {
                uid: app.uid.to_string(),
                name: app.name,
                labels: app.labels,
                annotations: app.annotations,
                creation_timestamp: app.creation_timestamp,
                generation: app.generation,
                resource_version: app.resource_version.to_string(),
                deletion_timestamp: app.deletion_timestamp,
                finalizers: app.finalizers,
            },
            // missing or non-object sections become empty maps
            spec: spec.unwrap_or_default(),
            status: status.unwrap_or_default(),
        }
    }
}
/// Storage abstraction for application records.
#[async_trait]
pub trait ApplicationAccessor {
    /// Lookup an application by one of its aliases.
    async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
    /// Delete an application
    async fn delete(&self, app: &str) -> Result<(), ServiceError>;
    /// Get an application by name.
    ///
    /// Default implementation: a `list` restricted to the exact name with a
    /// limit of one, returning the first (and only) hit if any.
    async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
        Ok(self
            .list(
                Some(app),
                LabelSelector::default(),
                Some(1),
                None,
                None,
                lock,
                &[],
            )
            .await?
            .try_next()
            .await?)
    }
    /// Get a stream of applications, optionally filtered by name, labels and
    /// user, with paging (`limit`/`offset`), row locking and sort order.
    async fn list(
        &self,
        name: Option<&str>,
        labels: LabelSelector,
        limit: Option<usize>,
        offset: Option<usize>,
        id: Option<&UserInformation>,
        lock: Lock,
        sort: &[&str],
    ) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
    /// Create a new application together with its initial alias set.
    async fn create(
        &self,
        application: Application,
        aliases: HashSet<TypedAlias>,
    ) -> Result<(), ServiceError>;
    /// Update an existing application's data; `aliases` of `None` leaves the
    /// alias set untouched. Returns the number of updated rows.
    async fn update_data(
        &self,
        application: Application,
        aliases: Option<HashSet<TypedAlias>>,
    ) -> Result<u64, ServiceError>;
    /// Update an existing application's owner information
    async fn update_transfer(
        &self,
        app: String,
        owner: Option<String>,
        transfer_owner: Option<String>,
    ) -> Result<u64, ServiceError>;
    /// Replace the member list. Returns the number of updated rows.
    async fn set_members(
        &self,
        app: &str,
        members: IndexMap<String, MemberEntry>,
    ) -> Result<u64, ServiceError>;
}
/// `ApplicationAccessor` implementation backed by a Postgres client
/// (connection or transaction, abstracted by `Client`).
pub struct PostgresApplicationAccessor<'c, C: Client> {
    client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
    /// Borrow a client for the lifetime of this accessor.
    pub fn new(client: &'c C) -> Self {
        Self { client }
    }
    /// Map one result row onto an `Application`.
    ///
    /// GENERATION is stored as i64 and cast back to u64; a NULL MEMBERS
    /// column is normalized to an empty map via `fix_null_default`.
    pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
        log::debug!("Row: {:?}", row);
        Ok(Application {
            uid: row.try_get("UID")?,
            name: row.try_get("NAME")?,
            creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
            generation: row.try_get::<_, i64>("GENERATION")? as u64,
            resource_version: row.try_get("RESOURCE_VERSION")?,
            labels: super::row_to_map(&row, "LABELS")?,
            annotations: super::row_to_map(&row, "ANNOTATIONS")?,
            deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
            finalizers: super::row_to_vec(&row, "FINALIZERS")?,
            owner: row.try_get("OWNER")?,
            transfer_owner: row.try_get("TRANSFER_OWNER")?,
            members: row
                .try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
                .map(|json| json.0)
                .or_else(fix_null_default)?,
            data: row.try_get::<_, Json<_>>("DATA")?.0,
        })
    }
    /// Insert the alias rows for application `id`. No-op for an empty set;
    /// the statement is prepared once and executed per alias.
    async fn insert_aliases(
        &self,
        id: &str,
        aliases: &HashSet<TypedAlias>,
    ) -> Result<(), tokio_postgres::Error> {
        if aliases.is_empty() {
            return Ok(());
        }
        let stmt = self
            .client
            .prepare_typed(
                "INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
                &[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
            )
            .await?;
        for alias in aliases {
            self.client
                .execute(&stmt, &[&id, &alias.0, &alias.1])
                .await?;
        }
        Ok(())
    }
}
// Marker trait bundling the bounds needed for SQL parameters.
// NOTE(review): not referenced anywhere in this chunk — confirm it is used
// elsewhere before removing.
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
/// Resolve an alias to its application by joining the alias table against
/// the applications table; returns `None` when the alias is unknown.
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
    let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
    let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
    let row = self.client.query_opt(&stmt, &[&alias]).await?;
    // map the optional row to an Application, propagating conversion errors
    Ok(row.map(Self::from_row).transpose()?)
}
/// Delete an application by name; `ServiceError::NotFound` when no row
/// was affected.
async fn delete(&self, id: &str) -> Result<(), ServiceError> {
    let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
    let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
    let count = self.client.execute(&stmt, &[&id]).await?;
    if count > 0 {
        Ok(())
    } else {
        Err(ServiceError::NotFound)
    }
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>
|
/// Insert a new application row, then its alias rows.
///
/// The creation timestamp and resource version are generated here; the
/// caller-provided values in `application` are ignored for those columns.
async fn create(
    &self,
    application: Application,
    aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
    let name = application.name;
    let data = application.data;
    let labels = application.labels;
    let annotations = application.annotations;
    self.client
        .execute(
            r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
FINALIZERS,
OWNER,
DATA
) VALUES (
$1,
$2,
$3,
$4,
$5,
$6,
$7,
$8,
$9,
$10
)"#,
            &[
                &name,
                &application.uid,
                &Json(labels),
                &Json(annotations),
                &Utc::now(),
                // GENERATION column is i64; u64 value is cast for storage
                &(application.generation as i64),
                &Uuid::new_v4(),
                &application.finalizers,
                &application.owner,
                &Json(data),
            ],
        )
        .await?;
    self.insert_aliases(&name, &aliases).await?;
    Ok(())
}
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError> {
let name = application.name;
let labels = application.labels;
let data = application.data;
let annotations = application.annotations;
// update device
let count = self
.client
.execute(
r#"
UPDATE APPLICATIONS
SET
LABELS = $2,
ANNOTATIONS = $3,
GENERATION = $4,
RESOURCE_VERSION = $5,
DELETION_TIMESTAMP = $6,
FINALIZERS = $7,
DATA = $8
WHERE
NAME = $1
"#,
&[
&name,
&Json(labels),
&Json(annotations),
&(application.generation as i64),
&Uuid::new_v4(),
&application.deletion_timestamp,
&application.finalizers,
&Json(data),
],
)
.await?;
update_aliases!(count, aliases, |aliases| {
// clear existing aliases
let sql = "DELETE FROM APPLICATION_ALIASES WHERE APP=$1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
self.client.execute(&stmt, &[&name]).await?;
// insert new alias set
self.insert_aliases(&name, &aliases).await?;
Ok(count)
})
}
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
OWNER = $2,
TRANSFER_OWNER = $3
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR])
.await?;
let count = self
.client
.execute(&stmt, &[&app, &owner, &transfer_owner])
.await?;
Ok(count)
}
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
MEMBERS = $2
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::JSONB])
.await?;
let count = self.client.execute(&stmt, &[&app, &Json(members)]).await?;
Ok(count)
}
}
| {
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
} | identifier_body |
app.rs | use crate::{
auth::Resource,
default_resource, diffable,
error::ServiceError,
generation,
models::{
fix_null_default,
sql::{slice_iter, SelectBuilder},
Lock, TypedAlias,
},
update_aliases, Client,
};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use core::pin::Pin;
use drogue_client::{meta, registry};
use drogue_cloud_service_api::{auth::user::UserInformation, labels::LabelSelector};
use futures::{future, Stream, TryStreamExt};
use indexmap::map::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use std::collections::{hash_map::RandomState, HashMap, HashSet};
use tokio_postgres::{
types::{Json, ToSql, Type},
Row,
};
use uuid::Uuid;
/// An application entity record.
pub struct Application {
pub uid: Uuid,
pub name: String,
pub labels: HashMap<String, String>,
pub annotations: HashMap<String, String>,
pub creation_timestamp: DateTime<Utc>,
pub resource_version: Uuid,
pub generation: u64,
pub deletion_timestamp: Option<DateTime<Utc>>,
pub finalizers: Vec<String>,
/// ownership information
pub owner: Option<String>,
/// transfer to new owner
pub transfer_owner: Option<String>,
/// members list
pub members: IndexMap<String, MemberEntry>,
/// arbitrary payload
pub data: Value,
}
diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
impl Resource for Application {
fn owner(&self) -> Option<&str> {
self.owner.as_deref()
}
fn members(&self) -> &IndexMap<String, MemberEntry> {
&self.members
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
/// Allow everything, including changing members
Admin,
/// Allow reading and writing, but not changing members.
Manager,
/// Allow reading only.
Reader,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
let sect = app
.data
.get_mut(key)
.map(|v| v.take())
.and_then(|v| match v {
Value::Object(v) => Some(v),
_ => None,
});
(app, sect)
}
impl From<Application> for registry::v1::Application {
fn from(app: Application) -> Self {
let (app, spec) = extract_sect(app, "spec");
let (app, status) = extract_sect(app, "status");
registry::v1::Application {
metadata: meta::v1::NonScopedMetadata {
uid: app.uid.to_string(),
name: app.name,
labels: app.labels,
annotations: app.annotations,
creation_timestamp: app.creation_timestamp,
generation: app.generation,
resource_version: app.resource_version.to_string(),
deletion_timestamp: app.deletion_timestamp,
finalizers: app.finalizers,
},
spec: spec.unwrap_or_default(),
status: status.unwrap_or_default(),
}
}
}
#[async_trait]
pub trait ApplicationAccessor {
/// Lookup an application
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
/// Delete an application
async fn delete(&self, app: &str) -> Result<(), ServiceError>;
/// Get an application
async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
Ok(self
.list(
Some(app),
LabelSelector::default(),
Some(1),
None,
None,
lock,
&[],
)
.await?
.try_next()
.await?)
}
/// Get a list of applications
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
/// Create a new application
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError>;
/// Update an existing application's data
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError>;
/// Update an existing application's owner information
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError>;
/// Set the member list
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError>;
}
pub struct PostgresApplicationAccessor<'c, C: Client> {
client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
pub fn new(client: &'c C) -> Self {
Self { client }
}
pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
log::debug!("Row: {:?}", row);
Ok(Application {
uid: row.try_get("UID")?,
name: row.try_get("NAME")?,
creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
generation: row.try_get::<_, i64>("GENERATION")? as u64,
resource_version: row.try_get("RESOURCE_VERSION")?,
labels: super::row_to_map(&row, "LABELS")?,
annotations: super::row_to_map(&row, "ANNOTATIONS")?,
deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
finalizers: super::row_to_vec(&row, "FINALIZERS")?,
owner: row.try_get("OWNER")?,
transfer_owner: row.try_get("TRANSFER_OWNER")?,
members: row
.try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
.map(|json| json.0)
.or_else(fix_null_default)?,
data: row.try_get::<_, Json<_>>("DATA")?.0,
})
}
async fn insert_aliases(
&self,
id: &str,
aliases: &HashSet<TypedAlias>,
) -> Result<(), tokio_postgres::Error> {
if aliases.is_empty() {
return Ok(());
}
let stmt = self
.client
.prepare_typed(
"INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
&[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
)
.await?;
for alias in aliases {
self.client
.execute(&stmt, &[&id, &alias.0, &alias.1])
.await?;
}
Ok(())
}
}
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let row = self.client.query_opt(&stmt, &[&alias]).await?;
Ok(row.map(Self::from_row).transpose()?)
}
async fn delete(&self, id: &str) -> Result<(), ServiceError> {
let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let count = self.client.execute(&stmt, &[&id]).await?;
if count > 0 {
Ok(())
} else {
Err(ServiceError::NotFound)
}
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>, | {
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
}
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
let name = application.name;
let data = application.data;
let labels = application.labels;
let annotations = application.annotations;
self.client
.execute(
r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
FINALIZERS,
OWNER,
DATA
) VALUES (
$1,
$2,
$3,
$4,
$5,
$6,
$7,
$8,
$9,
$10
)"#,
&[
&name,
&application.uid,
&Json(labels),
&Json(annotations),
&Utc::now(),
&(application.generation as i64),
&Uuid::new_v4(),
&application.finalizers,
&application.owner,
&Json(data),
],
)
.await?;
self.insert_aliases(&name, &aliases).await?;
Ok(())
}
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError> {
let name = application.name;
let labels = application.labels;
let data = application.data;
let annotations = application.annotations;
// update device
let count = self
.client
.execute(
r#"
UPDATE APPLICATIONS
SET
LABELS = $2,
ANNOTATIONS = $3,
GENERATION = $4,
RESOURCE_VERSION = $5,
DELETION_TIMESTAMP = $6,
FINALIZERS = $7,
DATA = $8
WHERE
NAME = $1
"#,
&[
&name,
&Json(labels),
&Json(annotations),
&(application.generation as i64),
&Uuid::new_v4(),
&application.deletion_timestamp,
&application.finalizers,
&Json(data),
],
)
.await?;
update_aliases!(count, aliases, |aliases| {
// clear existing aliases
let sql = "DELETE FROM APPLICATION_ALIASES WHERE APP=$1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
self.client.execute(&stmt, &[&name]).await?;
// insert new alias set
self.insert_aliases(&name, &aliases).await?;
Ok(count)
})
}
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
OWNER = $2,
TRANSFER_OWNER = $3
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR])
.await?;
let count = self
.client
.execute(&stmt, &[&app, &owner, &transfer_owner])
.await?;
Ok(count)
}
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
MEMBERS = $2
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::JSONB])
.await?;
let count = self.client.execute(&stmt, &[&app, &Json(members)]).await?;
Ok(count)
}
} | offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError> | random_line_split |
app.rs | use crate::{
auth::Resource,
default_resource, diffable,
error::ServiceError,
generation,
models::{
fix_null_default,
sql::{slice_iter, SelectBuilder},
Lock, TypedAlias,
},
update_aliases, Client,
};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use core::pin::Pin;
use drogue_client::{meta, registry};
use drogue_cloud_service_api::{auth::user::UserInformation, labels::LabelSelector};
use futures::{future, Stream, TryStreamExt};
use indexmap::map::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use std::collections::{hash_map::RandomState, HashMap, HashSet};
use tokio_postgres::{
types::{Json, ToSql, Type},
Row,
};
use uuid::Uuid;
/// An application entity record.
pub struct Application {
pub uid: Uuid,
pub name: String,
pub labels: HashMap<String, String>,
pub annotations: HashMap<String, String>,
pub creation_timestamp: DateTime<Utc>,
pub resource_version: Uuid,
pub generation: u64,
pub deletion_timestamp: Option<DateTime<Utc>>,
pub finalizers: Vec<String>,
/// ownership information
pub owner: Option<String>,
/// transfer to new owner
pub transfer_owner: Option<String>,
/// members list
pub members: IndexMap<String, MemberEntry>,
/// arbitrary payload
pub data: Value,
}
diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
impl Resource for Application {
fn owner(&self) -> Option<&str> {
self.owner.as_deref()
}
fn members(&self) -> &IndexMap<String, MemberEntry> {
&self.members
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
/// Allow everything, including changing members
Admin,
/// Allow reading and writing, but not changing members.
Manager,
/// Allow reading only.
Reader,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
let sect = app
.data
.get_mut(key)
.map(|v| v.take())
.and_then(|v| match v {
Value::Object(v) => Some(v),
_ => None,
});
(app, sect)
}
impl From<Application> for registry::v1::Application {
fn from(app: Application) -> Self {
let (app, spec) = extract_sect(app, "spec");
let (app, status) = extract_sect(app, "status");
registry::v1::Application {
metadata: meta::v1::NonScopedMetadata {
uid: app.uid.to_string(),
name: app.name,
labels: app.labels,
annotations: app.annotations,
creation_timestamp: app.creation_timestamp,
generation: app.generation,
resource_version: app.resource_version.to_string(),
deletion_timestamp: app.deletion_timestamp,
finalizers: app.finalizers,
},
spec: spec.unwrap_or_default(),
status: status.unwrap_or_default(),
}
}
}
#[async_trait]
pub trait ApplicationAccessor {
/// Lookup an application
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
/// Delete an application
async fn delete(&self, app: &str) -> Result<(), ServiceError>;
/// Get an application
async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
Ok(self
.list(
Some(app),
LabelSelector::default(),
Some(1),
None,
None,
lock,
&[],
)
.await?
.try_next()
.await?)
}
/// Get a list of applications
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
/// Create a new application
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError>;
/// Update an existing application's data
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError>;
/// Update an existing application's owner information
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError>;
/// Set the member list
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError>;
}
pub struct PostgresApplicationAccessor<'c, C: Client> {
client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
pub fn new(client: &'c C) -> Self {
Self { client }
}
pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
log::debug!("Row: {:?}", row);
Ok(Application {
uid: row.try_get("UID")?,
name: row.try_get("NAME")?,
creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
generation: row.try_get::<_, i64>("GENERATION")? as u64,
resource_version: row.try_get("RESOURCE_VERSION")?,
labels: super::row_to_map(&row, "LABELS")?,
annotations: super::row_to_map(&row, "ANNOTATIONS")?,
deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
finalizers: super::row_to_vec(&row, "FINALIZERS")?,
owner: row.try_get("OWNER")?,
transfer_owner: row.try_get("TRANSFER_OWNER")?,
members: row
.try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
.map(|json| json.0)
.or_else(fix_null_default)?,
data: row.try_get::<_, Json<_>>("DATA")?.0,
})
}
async fn insert_aliases(
&self,
id: &str,
aliases: &HashSet<TypedAlias>,
) -> Result<(), tokio_postgres::Error> {
if aliases.is_empty() {
return Ok(());
}
let stmt = self
.client
.prepare_typed(
"INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
&[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
)
.await?;
for alias in aliases {
self.client
.execute(&stmt, &[&id, &alias.0, &alias.1])
.await?;
}
Ok(())
}
}
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let row = self.client.query_opt(&stmt, &[&alias]).await?;
Ok(row.map(Self::from_row).transpose()?)
}
async fn | (&self, id: &str) -> Result<(), ServiceError> {
let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let count = self.client.execute(&stmt, &[&id]).await?;
if count > 0 {
Ok(())
} else {
Err(ServiceError::NotFound)
}
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>
{
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
}
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
let name = application.name;
let data = application.data;
let labels = application.labels;
let annotations = application.annotations;
self.client
.execute(
r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
FINALIZERS,
OWNER,
DATA
) VALUES (
$1,
$2,
$3,
$4,
$5,
$6,
$7,
$8,
$9,
$10
)"#,
&[
&name,
&application.uid,
&Json(labels),
&Json(annotations),
&Utc::now(),
&(application.generation as i64),
&Uuid::new_v4(),
&application.finalizers,
&application.owner,
&Json(data),
],
)
.await?;
self.insert_aliases(&name, &aliases).await?;
Ok(())
}
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError> {
let name = application.name;
let labels = application.labels;
let data = application.data;
let annotations = application.annotations;
// update device
let count = self
.client
.execute(
r#"
UPDATE APPLICATIONS
SET
LABELS = $2,
ANNOTATIONS = $3,
GENERATION = $4,
RESOURCE_VERSION = $5,
DELETION_TIMESTAMP = $6,
FINALIZERS = $7,
DATA = $8
WHERE
NAME = $1
"#,
&[
&name,
&Json(labels),
&Json(annotations),
&(application.generation as i64),
&Uuid::new_v4(),
&application.deletion_timestamp,
&application.finalizers,
&Json(data),
],
)
.await?;
update_aliases!(count, aliases, |aliases| {
// clear existing aliases
let sql = "DELETE FROM APPLICATION_ALIASES WHERE APP=$1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
self.client.execute(&stmt, &[&name]).await?;
// insert new alias set
self.insert_aliases(&name, &aliases).await?;
Ok(count)
})
}
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
OWNER = $2,
TRANSFER_OWNER = $3
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR])
.await?;
let count = self
.client
.execute(&stmt, &[&app, &owner, &transfer_owner])
.await?;
Ok(count)
}
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
MEMBERS = $2
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::JSONB])
.await?;
let count = self.client.execute(&stmt, &[&app, &Json(members)]).await?;
Ok(count)
}
}
| delete | identifier_name |
app.rs | use crate::{
auth::Resource,
default_resource, diffable,
error::ServiceError,
generation,
models::{
fix_null_default,
sql::{slice_iter, SelectBuilder},
Lock, TypedAlias,
},
update_aliases, Client,
};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use core::pin::Pin;
use drogue_client::{meta, registry};
use drogue_cloud_service_api::{auth::user::UserInformation, labels::LabelSelector};
use futures::{future, Stream, TryStreamExt};
use indexmap::map::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use std::collections::{hash_map::RandomState, HashMap, HashSet};
use tokio_postgres::{
types::{Json, ToSql, Type},
Row,
};
use uuid::Uuid;
/// An application entity record.
pub struct Application {
pub uid: Uuid,
pub name: String,
pub labels: HashMap<String, String>,
pub annotations: HashMap<String, String>,
pub creation_timestamp: DateTime<Utc>,
pub resource_version: Uuid,
pub generation: u64,
pub deletion_timestamp: Option<DateTime<Utc>>,
pub finalizers: Vec<String>,
/// ownership information
pub owner: Option<String>,
/// transfer to new owner
pub transfer_owner: Option<String>,
/// members list
pub members: IndexMap<String, MemberEntry>,
/// arbitrary payload
pub data: Value,
}
diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
impl Resource for Application {
fn owner(&self) -> Option<&str> {
self.owner.as_deref()
}
fn members(&self) -> &IndexMap<String, MemberEntry> {
&self.members
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
/// Allow everything, including changing members
Admin,
/// Allow reading and writing, but not changing members.
Manager,
/// Allow reading only.
Reader,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
let sect = app
.data
.get_mut(key)
.map(|v| v.take())
.and_then(|v| match v {
Value::Object(v) => Some(v),
_ => None,
});
(app, sect)
}
impl From<Application> for registry::v1::Application {
fn from(app: Application) -> Self {
let (app, spec) = extract_sect(app, "spec");
let (app, status) = extract_sect(app, "status");
registry::v1::Application {
metadata: meta::v1::NonScopedMetadata {
uid: app.uid.to_string(),
name: app.name,
labels: app.labels,
annotations: app.annotations,
creation_timestamp: app.creation_timestamp,
generation: app.generation,
resource_version: app.resource_version.to_string(),
deletion_timestamp: app.deletion_timestamp,
finalizers: app.finalizers,
},
spec: spec.unwrap_or_default(),
status: status.unwrap_or_default(),
}
}
}
#[async_trait]
pub trait ApplicationAccessor {
/// Lookup an application
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
/// Delete an application
async fn delete(&self, app: &str) -> Result<(), ServiceError>;
/// Get an application
async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
Ok(self
.list(
Some(app),
LabelSelector::default(),
Some(1),
None,
None,
lock,
&[],
)
.await?
.try_next()
.await?)
}
/// Get a list of applications
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
/// Create a new application
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError>;
/// Update an existing application's data
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError>;
/// Update an existing application's owner information
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError>;
/// Set the member list
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError>;
}
pub struct PostgresApplicationAccessor<'c, C: Client> {
client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
pub fn new(client: &'c C) -> Self {
Self { client }
}
pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
log::debug!("Row: {:?}", row);
Ok(Application {
uid: row.try_get("UID")?,
name: row.try_get("NAME")?,
creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
generation: row.try_get::<_, i64>("GENERATION")? as u64,
resource_version: row.try_get("RESOURCE_VERSION")?,
labels: super::row_to_map(&row, "LABELS")?,
annotations: super::row_to_map(&row, "ANNOTATIONS")?,
deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
finalizers: super::row_to_vec(&row, "FINALIZERS")?,
owner: row.try_get("OWNER")?,
transfer_owner: row.try_get("TRANSFER_OWNER")?,
members: row
.try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
.map(|json| json.0)
.or_else(fix_null_default)?,
data: row.try_get::<_, Json<_>>("DATA")?.0,
})
}
async fn insert_aliases(
&self,
id: &str,
aliases: &HashSet<TypedAlias>,
) -> Result<(), tokio_postgres::Error> {
if aliases.is_empty() |
let stmt = self
.client
.prepare_typed(
"INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
&[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
)
.await?;
for alias in aliases {
self.client
.execute(&stmt, &[&id, &alias.0, &alias.1])
.await?;
}
Ok(())
}
}
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let row = self.client.query_opt(&stmt, &[&alias]).await?;
Ok(row.map(Self::from_row).transpose()?)
}
async fn delete(&self, id: &str) -> Result<(), ServiceError> {
let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let count = self.client.execute(&stmt, &[&id]).await?;
if count > 0 {
Ok(())
} else {
Err(ServiceError::NotFound)
}
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>
{
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
}
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
let name = application.name;
let data = application.data;
let labels = application.labels;
let annotations = application.annotations;
self.client
.execute(
r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
FINALIZERS,
OWNER,
DATA
) VALUES (
$1,
$2,
$3,
$4,
$5,
$6,
$7,
$8,
$9,
$10
)"#,
&[
&name,
&application.uid,
&Json(labels),
&Json(annotations),
&Utc::now(),
&(application.generation as i64),
&Uuid::new_v4(),
&application.finalizers,
&application.owner,
&Json(data),
],
)
.await?;
self.insert_aliases(&name, &aliases).await?;
Ok(())
}
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError> {
let name = application.name;
let labels = application.labels;
let data = application.data;
let annotations = application.annotations;
// update device
let count = self
.client
.execute(
r#"
UPDATE APPLICATIONS
SET
LABELS = $2,
ANNOTATIONS = $3,
GENERATION = $4,
RESOURCE_VERSION = $5,
DELETION_TIMESTAMP = $6,
FINALIZERS = $7,
DATA = $8
WHERE
NAME = $1
"#,
&[
&name,
&Json(labels),
&Json(annotations),
&(application.generation as i64),
&Uuid::new_v4(),
&application.deletion_timestamp,
&application.finalizers,
&Json(data),
],
)
.await?;
update_aliases!(count, aliases, |aliases| {
// clear existing aliases
let sql = "DELETE FROM APPLICATION_ALIASES WHERE APP=$1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
self.client.execute(&stmt, &[&name]).await?;
// insert new alias set
self.insert_aliases(&name, &aliases).await?;
Ok(count)
})
}
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
OWNER = $2,
TRANSFER_OWNER = $3
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR])
.await?;
let count = self
.client
.execute(&stmt, &[&app, &owner, &transfer_owner])
.await?;
Ok(count)
}
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError> {
// update application
let sql = r#"
UPDATE APPLICATIONS
SET
MEMBERS = $2
WHERE
NAME = $1
"#;
let stmt = self
.client
.prepare_typed(sql, &[Type::VARCHAR, Type::JSONB])
.await?;
let count = self.client.execute(&stmt, &[&app, &Json(members)]).await?;
Ok(count)
}
}
| {
return Ok(());
} | conditional_block |
main.rs | #![no_std]
#![feature(
test,
start,
array_map,
const_panic, | isa_attribute,
core_intrinsics,
maybe_uninit_ref,
bindings_after_at,
stmt_expr_attributes,
default_alloc_error_handler,
const_fn_floating_point_arithmetic,
)]
extern crate alloc;
mod gfx;
mod heap;
mod mem;
use core::fmt::Write;
use alloc::{vec::Vec, vec};
use vek::*;
use num_traits::float::Float;
use gba::{
io::{
irq::{set_irq_handler, IrqFlags, IrqEnableSetting, IE, IME, BIOS_IF},
display::{
DisplayControlSetting, DisplayStatusSetting, DisplayMode,
DISPCNT, DISPSTAT, VCOUNT, VBLANK_SCANLINE,
},
background::{BackgroundControlSetting, BG2HOFS},
timers::{TimerControlSetting, TimerTickRate, TM2CNT_H, TM2CNT_L},
keypad::read_key_input,
},
bios,
vram::bitmap::{Mode3, Mode5},
Color,
};
pub use mem::*;
pub type F32 = fixed::types::I16F16;
pub const fn num(x: f32) -> F32 {
use fixed::traits::Fixed;
F32::from_bits((x * (1 << F32::FRAC_NBITS) as f32) as <F32 as Fixed>::Bits)
}
fn normalize_quat_fast(q: Quaternion<F32>) -> Quaternion<F32> {
fn finvsqrt(x: f32) -> f32 {
let y = f32::from_bits(0x5f375a86 - (x.to_bits() >> 1));
y * (1.5 - ( x * 0.5 * y * y ))
}
fn fsqrt(x: f32) -> f32 {
f32::from_bits((x.to_bits() + (127 << 23)) >> 1)
}
let v = q.into_vec4();
(v * F32::from_num(finvsqrt(v.magnitude_squared().to_num::<f32>()))).into()
}
fn cos_fast(mut x: F32) -> F32 {
use core::f32;
x *= num(f32::consts::FRAC_1_PI / 2.0);
x -= num(0.25) + (x + num(0.25)).floor();
x *= num(16.0) * (x.abs() - num(0.5));
x += num(0.225) * x * (x.abs() - num(1.0));
x
}
fn sin_fast(x: F32) -> F32 {
use core::f32;
cos_fast(x - num(f32::consts::PI / 2.0))
}
fn tan_fast(x: F32) -> F32 {
sin_fast(x) / cos_fast(x)
}
fn rotation_3d(angle_radians: F32, axis: Vec3<F32>) -> Quaternion<F32> {
// let axis = axis.normalized();
let Vec3 { x, y, z } = axis * sin_fast(angle_radians * num(0.5));
let w = cos_fast(angle_radians * num(0.5));
Quaternion { x, y, z, w }
}
#[repr(transparent)]
#[derive(Copy, Clone)]
struct NumWrap(F32);
impl core::ops::Mul<NumWrap> for NumWrap {
type Output = NumWrap;
fn mul(self, rhs: Self) -> Self { NumWrap(self.0 * rhs.0) }
}
impl vek::ops::MulAdd<NumWrap, NumWrap> for NumWrap {
type Output = NumWrap;
fn mul_add(self, mul: NumWrap, add: NumWrap) -> NumWrap {
NumWrap(self.0 * mul.0 + add.0)
}
}
fn apply(m: Mat3<F32>, n: Mat3<F32>) -> Mat3<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn apply4(m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01! {{
fb.clear(Color::from_rgb(1, 3, 4).0);
}};
fn perspective_fov_rh_zo(fov_y_radians: F32, width: F32, height: F32, near: F32, far: F32) -> Mat4<F32> {
let rad = fov_y_radians;
let h = cos_fast(rad * num(0.5)) / sin_fast(rad * num(0.5));
let w = h * height / width;
let m00 = w;
let m11 = h;
let m22 = -(far + near) / (far - near);
let m23 = -(num(2.0) * far * near) / (far - near);
let m32 = -num(1.0);
let mut m = Mat4::new(
m00, num(0.0), num(0.0), num(0.0),
num(0.0), m11, num(0.0), num(0.0),
num(0.0), num(0.0), m22, m23,
num(0.0), num(0.0), m32, num(0.0)
);
m
}
let proj = perspective_fov_rh_zo(num(1.0), num(fb.screen_size().x as f32), num(fb.screen_size().y as f32), num(0.5), num(256.0));
let mut frame = scene.begin_frame(gfx::scene::SceneState {
proj,
view: Mat4::identity(),
light_dir: Vec3::new(0.0, -1.0, 0.0).normalized().map(num),
ambiance: num(0.2),
light_col: Rgb::new(1.0, 0.0, 0.5).map(num),
});
let mut ship_model;
time_model = gba::time_this01! {{
ship_model = frame.add_model(apply4(Mat4::translation_3d(pos), Mat4::from(apply(Mat3::from(ori), Mat3::scaling_3d(num(0.2))))));
}};
time_vertices = gba::time_this01! {{
for &v in &ship_verts {
frame.add_vert(ship_model, v);
}
}};
time_faces = gba::time_this01! {{
for &(norm, indices) in &ship_tris {
let color = Rgb::new(1.0, 1.0, 1.0).map(num);
let verts = [
indices[0],
indices[1],
indices[2],
];
frame.add_convex(ship_model, (verts, color), norm);
}
}};
// frame.add_flat_quad(
// 0,
// ([
// Vec3::new(-0.3, -0.5, 0.0).map(num),
// Vec3::new(0.0, 1.0, 0.0).map(num),
// Vec3::new(0.8, 0.8, 0.0).map(num),
// Vec3::new(1.0, 0.0, 0.0).map(num),
// ], Rgb::broadcast(num(1.0))),
// -Vec3::unit_z(),
// );
time_render = gba::time_this01! {{
frame.render(fb);
}};
tick += 1;
}
}
extern "C" fn irq_handler(flags: IrqFlags) {
if flags.vblank() {
vblank_handler();
}
if flags.hblank() {
hblank_handler();
}
if flags.vcounter() {
vcounter_handler();
}
if flags.timer0() {
timer0_handler();
}
if flags.timer1() {
timer1_handler();
}
}
fn vblank_handler() { BIOS_IF.write(BIOS_IF.read().with_vblank(true)); }
fn hblank_handler() { BIOS_IF.write(BIOS_IF.read().with_hblank(true)); }
fn vcounter_handler() { BIOS_IF.write(BIOS_IF.read().with_vcounter(true)); }
fn timer0_handler() { BIOS_IF.write(BIOS_IF.read().with_timer0(true)); }
fn timer1_handler() { BIOS_IF.write(BIOS_IF.read().with_timer1(true)); }
#[no_mangle]
pub unsafe extern fn __truncdfsf2() {}
// #[no_mangle]
// pub unsafe extern "C" fn memcpy(dst: *mut u8, src: *const u8, n: usize) -> *mut u8 {
// mem::copy_fast(
// core::slice::from_raw_parts(src, n),
// core::slice::from_raw_parts_mut(dst, n),
// );
// dst
// } | random_line_split | |
main.rs | #![no_std]
#![feature(
test,
start,
array_map,
const_panic,
isa_attribute,
core_intrinsics,
maybe_uninit_ref,
bindings_after_at,
stmt_expr_attributes,
default_alloc_error_handler,
const_fn_floating_point_arithmetic,
)]
extern crate alloc;
mod gfx;
mod heap;
mod mem;
use core::fmt::Write;
use alloc::{vec::Vec, vec};
use vek::*;
use num_traits::float::Float;
use gba::{
io::{
irq::{set_irq_handler, IrqFlags, IrqEnableSetting, IE, IME, BIOS_IF},
display::{
DisplayControlSetting, DisplayStatusSetting, DisplayMode,
DISPCNT, DISPSTAT, VCOUNT, VBLANK_SCANLINE,
},
background::{BackgroundControlSetting, BG2HOFS},
timers::{TimerControlSetting, TimerTickRate, TM2CNT_H, TM2CNT_L},
keypad::read_key_input,
},
bios,
vram::bitmap::{Mode3, Mode5},
Color,
};
pub use mem::*;
pub type F32 = fixed::types::I16F16;
pub const fn num(x: f32) -> F32 {
use fixed::traits::Fixed;
F32::from_bits((x * (1 << F32::FRAC_NBITS) as f32) as <F32 as Fixed>::Bits)
}
fn normalize_quat_fast(q: Quaternion<F32>) -> Quaternion<F32> {
fn finvsqrt(x: f32) -> f32 {
let y = f32::from_bits(0x5f375a86 - (x.to_bits() >> 1));
y * (1.5 - ( x * 0.5 * y * y ))
}
fn fsqrt(x: f32) -> f32 {
f32::from_bits((x.to_bits() + (127 << 23)) >> 1)
}
let v = q.into_vec4();
(v * F32::from_num(finvsqrt(v.magnitude_squared().to_num::<f32>()))).into()
}
fn cos_fast(mut x: F32) -> F32 {
use core::f32;
x *= num(f32::consts::FRAC_1_PI / 2.0);
x -= num(0.25) + (x + num(0.25)).floor();
x *= num(16.0) * (x.abs() - num(0.5));
x += num(0.225) * x * (x.abs() - num(1.0));
x
}
fn sin_fast(x: F32) -> F32 {
use core::f32;
cos_fast(x - num(f32::consts::PI / 2.0))
}
fn tan_fast(x: F32) -> F32 {
sin_fast(x) / cos_fast(x)
}
fn rotation_3d(angle_radians: F32, axis: Vec3<F32>) -> Quaternion<F32> {
// let axis = axis.normalized();
let Vec3 { x, y, z } = axis * sin_fast(angle_radians * num(0.5));
let w = cos_fast(angle_radians * num(0.5));
Quaternion { x, y, z, w }
}
#[repr(transparent)]
#[derive(Copy, Clone)]
struct NumWrap(F32);
impl core::ops::Mul<NumWrap> for NumWrap {
type Output = NumWrap;
fn mul(self, rhs: Self) -> Self { NumWrap(self.0 * rhs.0) }
}
impl vek::ops::MulAdd<NumWrap, NumWrap> for NumWrap {
type Output = NumWrap;
fn mul_add(self, mul: NumWrap, add: NumWrap) -> NumWrap {
NumWrap(self.0 * mul.0 + add.0)
}
}
fn apply(m: Mat3<F32>, n: Mat3<F32>) -> Mat3<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn apply4(m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01! {{
fb.clear(Color::from_rgb(1, 3, 4).0);
}};
fn perspective_fov_rh_zo(fov_y_radians: F32, width: F32, height: F32, near: F32, far: F32) -> Mat4<F32> {
let rad = fov_y_radians;
let h = cos_fast(rad * num(0.5)) / sin_fast(rad * num(0.5));
let w = h * height / width;
let m00 = w;
let m11 = h;
let m22 = -(far + near) / (far - near);
let m23 = -(num(2.0) * far * near) / (far - near);
let m32 = -num(1.0);
let mut m = Mat4::new(
m00, num(0.0), num(0.0), num(0.0),
num(0.0), m11, num(0.0), num(0.0),
num(0.0), num(0.0), m22, m23,
num(0.0), num(0.0), m32, num(0.0)
);
m
}
let proj = perspective_fov_rh_zo(num(1.0), num(fb.screen_size().x as f32), num(fb.screen_size().y as f32), num(0.5), num(256.0));
let mut frame = scene.begin_frame(gfx::scene::SceneState {
proj,
view: Mat4::identity(),
light_dir: Vec3::new(0.0, -1.0, 0.0).normalized().map(num),
ambiance: num(0.2),
light_col: Rgb::new(1.0, 0.0, 0.5).map(num),
});
let mut ship_model;
time_model = gba::time_this01! {{
ship_model = frame.add_model(apply4(Mat4::translation_3d(pos), Mat4::from(apply(Mat3::from(ori), Mat3::scaling_3d(num(0.2))))));
}};
time_vertices = gba::time_this01! {{
for &v in &ship_verts {
frame.add_vert(ship_model, v);
}
}};
time_faces = gba::time_this01! {{
for &(norm, indices) in &ship_tris {
let color = Rgb::new(1.0, 1.0, 1.0).map(num);
let verts = [
indices[0],
indices[1],
indices[2],
];
frame.add_convex(ship_model, (verts, color), norm);
}
}};
// frame.add_flat_quad(
// 0,
// ([
// Vec3::new(-0.3, -0.5, 0.0).map(num),
// Vec3::new(0.0, 1.0, 0.0).map(num),
// Vec3::new(0.8, 0.8, 0.0).map(num),
// Vec3::new(1.0, 0.0, 0.0).map(num),
// ], Rgb::broadcast(num(1.0))),
// -Vec3::unit_z(),
// );
time_render = gba::time_this01! {{
frame.render(fb);
}};
tick += 1;
}
}
extern "C" fn irq_handler(flags: IrqFlags) {
if flags.vblank() {
vblank_handler();
}
if flags.hblank() {
hblank_handler();
}
if flags.vcounter() {
vcounter_handler();
}
if flags.timer0() |
if flags.timer1() {
timer1_handler();
}
}
fn vblank_handler() { BIOS_IF.write(BIOS_IF.read().with_vblank(true)); }
fn hblank_handler() { BIOS_IF.write(BIOS_IF.read().with_hblank(true)); }
fn vcounter_handler() { BIOS_IF.write(BIOS_IF.read().with_vcounter(true)); }
fn timer0_handler() { BIOS_IF.write(BIOS_IF.read().with_timer0(true)); }
fn timer1_handler() { BIOS_IF.write(BIOS_IF.read().with_timer1(true)); }
#[no_mangle]
pub unsafe extern fn __truncdfsf2() {}
// #[no_mangle]
// pub unsafe extern "C" fn memcpy(dst: *mut u8, src: *const u8, n: usize) -> *mut u8 {
// mem::copy_fast(
// core::slice::from_raw_parts(src, n),
// core::slice::from_raw_parts_mut(dst, n),
// );
// dst
// }
| {
timer0_handler();
} | conditional_block |
main.rs | #![no_std]
#![feature(
test,
start,
array_map,
const_panic,
isa_attribute,
core_intrinsics,
maybe_uninit_ref,
bindings_after_at,
stmt_expr_attributes,
default_alloc_error_handler,
const_fn_floating_point_arithmetic,
)]
extern crate alloc;
mod gfx;
mod heap;
mod mem;
use core::fmt::Write;
use alloc::{vec::Vec, vec};
use vek::*;
use num_traits::float::Float;
use gba::{
io::{
irq::{set_irq_handler, IrqFlags, IrqEnableSetting, IE, IME, BIOS_IF},
display::{
DisplayControlSetting, DisplayStatusSetting, DisplayMode,
DISPCNT, DISPSTAT, VCOUNT, VBLANK_SCANLINE,
},
background::{BackgroundControlSetting, BG2HOFS},
timers::{TimerControlSetting, TimerTickRate, TM2CNT_H, TM2CNT_L},
keypad::read_key_input,
},
bios,
vram::bitmap::{Mode3, Mode5},
Color,
};
pub use mem::*;
pub type F32 = fixed::types::I16F16;
pub const fn num(x: f32) -> F32 {
use fixed::traits::Fixed;
F32::from_bits((x * (1 << F32::FRAC_NBITS) as f32) as <F32 as Fixed>::Bits)
}
fn normalize_quat_fast(q: Quaternion<F32>) -> Quaternion<F32> {
fn finvsqrt(x: f32) -> f32 {
let y = f32::from_bits(0x5f375a86 - (x.to_bits() >> 1));
y * (1.5 - ( x * 0.5 * y * y ))
}
fn fsqrt(x: f32) -> f32 {
f32::from_bits((x.to_bits() + (127 << 23)) >> 1)
}
let v = q.into_vec4();
(v * F32::from_num(finvsqrt(v.magnitude_squared().to_num::<f32>()))).into()
}
fn cos_fast(mut x: F32) -> F32 {
use core::f32;
x *= num(f32::consts::FRAC_1_PI / 2.0);
x -= num(0.25) + (x + num(0.25)).floor();
x *= num(16.0) * (x.abs() - num(0.5));
x += num(0.225) * x * (x.abs() - num(1.0));
x
}
fn sin_fast(x: F32) -> F32 {
use core::f32;
cos_fast(x - num(f32::consts::PI / 2.0))
}
fn tan_fast(x: F32) -> F32 {
sin_fast(x) / cos_fast(x)
}
fn rotation_3d(angle_radians: F32, axis: Vec3<F32>) -> Quaternion<F32> {
// let axis = axis.normalized();
let Vec3 { x, y, z } = axis * sin_fast(angle_radians * num(0.5));
let w = cos_fast(angle_radians * num(0.5));
Quaternion { x, y, z, w }
}
#[repr(transparent)]
#[derive(Copy, Clone)]
struct NumWrap(F32);
impl core::ops::Mul<NumWrap> for NumWrap {
type Output = NumWrap;
fn mul(self, rhs: Self) -> Self { NumWrap(self.0 * rhs.0) }
}
impl vek::ops::MulAdd<NumWrap, NumWrap> for NumWrap {
type Output = NumWrap;
fn mul_add(self, mul: NumWrap, add: NumWrap) -> NumWrap {
NumWrap(self.0 * mul.0 + add.0)
}
}
fn apply(m: Mat3<F32>, n: Mat3<F32>) -> Mat3<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn | (m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01! {{
fb.clear(Color::from_rgb(1, 3, 4).0);
}};
fn perspective_fov_rh_zo(fov_y_radians: F32, width: F32, height: F32, near: F32, far: F32) -> Mat4<F32> {
let rad = fov_y_radians;
let h = cos_fast(rad * num(0.5)) / sin_fast(rad * num(0.5));
let w = h * height / width;
let m00 = w;
let m11 = h;
let m22 = -(far + near) / (far - near);
let m23 = -(num(2.0) * far * near) / (far - near);
let m32 = -num(1.0);
let mut m = Mat4::new(
m00, num(0.0), num(0.0), num(0.0),
num(0.0), m11, num(0.0), num(0.0),
num(0.0), num(0.0), m22, m23,
num(0.0), num(0.0), m32, num(0.0)
);
m
}
let proj = perspective_fov_rh_zo(num(1.0), num(fb.screen_size().x as f32), num(fb.screen_size().y as f32), num(0.5), num(256.0));
let mut frame = scene.begin_frame(gfx::scene::SceneState {
proj,
view: Mat4::identity(),
light_dir: Vec3::new(0.0, -1.0, 0.0).normalized().map(num),
ambiance: num(0.2),
light_col: Rgb::new(1.0, 0.0, 0.5).map(num),
});
let mut ship_model;
time_model = gba::time_this01! {{
ship_model = frame.add_model(apply4(Mat4::translation_3d(pos), Mat4::from(apply(Mat3::from(ori), Mat3::scaling_3d(num(0.2))))));
}};
time_vertices = gba::time_this01! {{
for &v in &ship_verts {
frame.add_vert(ship_model, v);
}
}};
time_faces = gba::time_this01! {{
for &(norm, indices) in &ship_tris {
let color = Rgb::new(1.0, 1.0, 1.0).map(num);
let verts = [
indices[0],
indices[1],
indices[2],
];
frame.add_convex(ship_model, (verts, color), norm);
}
}};
// frame.add_flat_quad(
// 0,
// ([
// Vec3::new(-0.3, -0.5, 0.0).map(num),
// Vec3::new(0.0, 1.0, 0.0).map(num),
// Vec3::new(0.8, 0.8, 0.0).map(num),
// Vec3::new(1.0, 0.0, 0.0).map(num),
// ], Rgb::broadcast(num(1.0))),
// -Vec3::unit_z(),
// );
time_render = gba::time_this01! {{
frame.render(fb);
}};
tick += 1;
}
}
extern "C" fn irq_handler(flags: IrqFlags) {
if flags.vblank() {
vblank_handler();
}
if flags.hblank() {
hblank_handler();
}
if flags.vcounter() {
vcounter_handler();
}
if flags.timer0() {
timer0_handler();
}
if flags.timer1() {
timer1_handler();
}
}
fn vblank_handler() { BIOS_IF.write(BIOS_IF.read().with_vblank(true)); }
fn hblank_handler() { BIOS_IF.write(BIOS_IF.read().with_hblank(true)); }
fn vcounter_handler() { BIOS_IF.write(BIOS_IF.read().with_vcounter(true)); }
fn timer0_handler() { BIOS_IF.write(BIOS_IF.read().with_timer0(true)); }
fn timer1_handler() { BIOS_IF.write(BIOS_IF.read().with_timer1(true)); }
#[no_mangle]
pub unsafe extern fn __truncdfsf2() {}
// #[no_mangle]
// pub unsafe extern "C" fn memcpy(dst: *mut u8, src: *const u8, n: usize) -> *mut u8 {
// mem::copy_fast(
// core::slice::from_raw_parts(src, n),
// core::slice::from_raw_parts_mut(dst, n),
// );
// dst
// }
| apply4 | identifier_name |
main.rs | #![no_std]
#![feature(
test,
start,
array_map,
const_panic,
isa_attribute,
core_intrinsics,
maybe_uninit_ref,
bindings_after_at,
stmt_expr_attributes,
default_alloc_error_handler,
const_fn_floating_point_arithmetic,
)]
extern crate alloc;
mod gfx;
mod heap;
mod mem;
use core::fmt::Write;
use alloc::{vec::Vec, vec};
use vek::*;
use num_traits::float::Float;
use gba::{
io::{
irq::{set_irq_handler, IrqFlags, IrqEnableSetting, IE, IME, BIOS_IF},
display::{
DisplayControlSetting, DisplayStatusSetting, DisplayMode,
DISPCNT, DISPSTAT, VCOUNT, VBLANK_SCANLINE,
},
background::{BackgroundControlSetting, BG2HOFS},
timers::{TimerControlSetting, TimerTickRate, TM2CNT_H, TM2CNT_L},
keypad::read_key_input,
},
bios,
vram::bitmap::{Mode3, Mode5},
Color,
};
pub use mem::*;
pub type F32 = fixed::types::I16F16;
pub const fn num(x: f32) -> F32 {
use fixed::traits::Fixed;
F32::from_bits((x * (1 << F32::FRAC_NBITS) as f32) as <F32 as Fixed>::Bits)
}
fn normalize_quat_fast(q: Quaternion<F32>) -> Quaternion<F32> {
fn finvsqrt(x: f32) -> f32 {
let y = f32::from_bits(0x5f375a86 - (x.to_bits() >> 1));
y * (1.5 - ( x * 0.5 * y * y ))
}
fn fsqrt(x: f32) -> f32 {
f32::from_bits((x.to_bits() + (127 << 23)) >> 1)
}
let v = q.into_vec4();
(v * F32::from_num(finvsqrt(v.magnitude_squared().to_num::<f32>()))).into()
}
fn cos_fast(mut x: F32) -> F32 {
use core::f32;
x *= num(f32::consts::FRAC_1_PI / 2.0);
x -= num(0.25) + (x + num(0.25)).floor();
x *= num(16.0) * (x.abs() - num(0.5));
x += num(0.225) * x * (x.abs() - num(1.0));
x
}
fn sin_fast(x: F32) -> F32 {
use core::f32;
cos_fast(x - num(f32::consts::PI / 2.0))
}
fn tan_fast(x: F32) -> F32 {
sin_fast(x) / cos_fast(x)
}
fn rotation_3d(angle_radians: F32, axis: Vec3<F32>) -> Quaternion<F32> {
// let axis = axis.normalized();
let Vec3 { x, y, z } = axis * sin_fast(angle_radians * num(0.5));
let w = cos_fast(angle_radians * num(0.5));
Quaternion { x, y, z, w }
}
#[repr(transparent)]
#[derive(Copy, Clone)]
struct NumWrap(F32);
impl core::ops::Mul<NumWrap> for NumWrap {
type Output = NumWrap;
fn mul(self, rhs: Self) -> Self |
}
impl vek::ops::MulAdd<NumWrap, NumWrap> for NumWrap {
type Output = NumWrap;
fn mul_add(self, mul: NumWrap, add: NumWrap) -> NumWrap {
NumWrap(self.0 * mul.0 + add.0)
}
}
fn apply(m: Mat3<F32>, n: Mat3<F32>) -> Mat3<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn apply4(m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01! {{
fb.clear(Color::from_rgb(1, 3, 4).0);
}};
fn perspective_fov_rh_zo(fov_y_radians: F32, width: F32, height: F32, near: F32, far: F32) -> Mat4<F32> {
let rad = fov_y_radians;
let h = cos_fast(rad * num(0.5)) / sin_fast(rad * num(0.5));
let w = h * height / width;
let m00 = w;
let m11 = h;
let m22 = -(far + near) / (far - near);
let m23 = -(num(2.0) * far * near) / (far - near);
let m32 = -num(1.0);
let mut m = Mat4::new(
m00, num(0.0), num(0.0), num(0.0),
num(0.0), m11, num(0.0), num(0.0),
num(0.0), num(0.0), m22, m23,
num(0.0), num(0.0), m32, num(0.0)
);
m
}
let proj = perspective_fov_rh_zo(num(1.0), num(fb.screen_size().x as f32), num(fb.screen_size().y as f32), num(0.5), num(256.0));
let mut frame = scene.begin_frame(gfx::scene::SceneState {
proj,
view: Mat4::identity(),
light_dir: Vec3::new(0.0, -1.0, 0.0).normalized().map(num),
ambiance: num(0.2),
light_col: Rgb::new(1.0, 0.0, 0.5).map(num),
});
let mut ship_model;
time_model = gba::time_this01! {{
ship_model = frame.add_model(apply4(Mat4::translation_3d(pos), Mat4::from(apply(Mat3::from(ori), Mat3::scaling_3d(num(0.2))))));
}};
time_vertices = gba::time_this01! {{
for &v in &ship_verts {
frame.add_vert(ship_model, v);
}
}};
time_faces = gba::time_this01! {{
for &(norm, indices) in &ship_tris {
let color = Rgb::new(1.0, 1.0, 1.0).map(num);
let verts = [
indices[0],
indices[1],
indices[2],
];
frame.add_convex(ship_model, (verts, color), norm);
}
}};
// frame.add_flat_quad(
// 0,
// ([
// Vec3::new(-0.3, -0.5, 0.0).map(num),
// Vec3::new(0.0, 1.0, 0.0).map(num),
// Vec3::new(0.8, 0.8, 0.0).map(num),
// Vec3::new(1.0, 0.0, 0.0).map(num),
// ], Rgb::broadcast(num(1.0))),
// -Vec3::unit_z(),
// );
time_render = gba::time_this01! {{
frame.render(fb);
}};
tick += 1;
}
}
/// Top-level GBA interrupt dispatcher: checks each pending-IRQ flag and
/// forwards it to the matching handler. The checks are independent `if`s
/// (not `else if`), so multiple handlers can run for one interrupt entry.
extern "C" fn irq_handler(flags: IrqFlags) {
    if flags.vblank() {
        vblank_handler();
    }
    if flags.hblank() {
        hblank_handler();
    }
    if flags.vcounter() {
        vcounter_handler();
    }
    if flags.timer0() {
        timer0_handler();
    }
    if flags.timer1() {
        timer1_handler();
    }
}
// Per-source acknowledgement stubs: each sets its bit in BIOS_IF via a
// read-modify-write so the BIOS sees the interrupt as serviced.
// NOTE(review): no other per-IRQ work happens here — presumably the main
// loop does everything after the BIOS wait returns; confirm.
fn vblank_handler() { BIOS_IF.write(BIOS_IF.read().with_vblank(true)); }
fn hblank_handler() { BIOS_IF.write(BIOS_IF.read().with_hblank(true)); }
fn vcounter_handler() { BIOS_IF.write(BIOS_IF.read().with_vcounter(true)); }
fn timer0_handler() { BIOS_IF.write(BIOS_IF.read().with_timer0(true)); }
fn timer1_handler() { BIOS_IF.write(BIOS_IF.read().with_timer1(true)); }
// Empty stand-in for the compiler-rt intrinsic that truncates f64 -> f32.
// NOTE(review): the linker apparently demands this symbol even though no
// code path should reach it; an actual call would silently do nothing, so
// confirm no f64 -> f32 conversion survives in the final binary.
#[no_mangle]
pub unsafe extern fn __truncdfsf2() {}
// #[no_mangle]
// pub unsafe extern "C" fn memcpy(dst: *mut u8, src: *const u8, n: usize) -> *mut u8 {
// mem::copy_fast(
// core::slice::from_raw_parts(src, n),
// core::slice::from_raw_parts_mut(dst, n),
// );
// dst
// }
| { NumWrap(self.0 * rhs.0) } | identifier_body |
Assign_LLID_Toxics_DO_unverified_LASAR_Stations.py | # -*- coding: utf-8 -*-
# Assign LLID's to sampling stations
# I'm including code in here beyond simply the station location. It'll add huc, stream names, and other important info.
# Import necessary modules
custom_script_location = r'E:\GitHub\ToxicsRedo\Python_Scripts'
if custom_script_location not in sys.path:
sys.path.append(custom_script_location)
from IR2012_Functions import *
import arcpy
import numpy
import os.path
arcpy.env.overwriteOutput = True
temp_location = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/"
workspace = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/assign_llid_temp.gdb"
temp_gdb = "assign_llid_temp.gdb"
final_gdb = "Toxics_do_post_toxicsRedo_Stations.gdb"
original_sampling_stations = "E:/GitHub/ToxicsRedo/StationsToLocate/Post_ToxicsRedo_Stations/toxics_do_unverified_all.shp"
sampling_stations = "stations_copy"
stream_network = "F:/Base_Data/DEQ_Data/WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQ_Streams_25APR2013"
station_river_name_field = "LOCATION_D"
streams_river_name_field = "NAME"
rid = "LLID"
search_radius = 12000
output_table = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/assign_llid_temp.gdb/out1"
output_success = "out_success"
output_fail = "out_fail"
qc_lyr = "qc_lyr"
qc_success = "qc_success"
qc_review = "qc_needs_review"
outside_threshold = "outside_threshold"
properties = "RID POINT MEAS"
# # Subset the 57 new lasar stations from master lasar station shapefile.
# # Four of these were not in the shapefile, and had to be converted manually.
# ls_df = pd.read_csv(r'E:\GitHub\ToxicsRedo\StationsToLocate\Post_ToxicsRedo_Stations\toxics_do_unverified.csv', header=0)
# ls_keys = ls_df['STATION'].values
#
# in_feature = "//Deqlead03/gis_wa/Project_Working_Folders/LASAR_Stations/LASAR_Stations/LASAR_Stations_26sept13.shp"
# out_feature = "E:/GitHub/ToxicsRedo/StationsToLocate/Post_ToxicsRedo_Stations/toxics_do_unverified.shp"
# lstations = "lstations"
# query = """ "STATION_KE" in """ + "(" + ', '.join([str(i) for i in ls_keys]) +")"
#
# arcpy.MakeFeatureLayer_management(in_feature, lstations)
# arcpy.SelectLayerByAttribute_management(lstations, "NEW_SELECTION", query)
# arcpy.GetCount_management(lstations).getOutput(0)
# arcpy.CopyFeatures_management(lstations, out_feature)
# Check to see if a temp geodatabase exists. If not, create it.
if os.path.exists(temp_location + temp_gdb):
print "It exist!"
else:
arcpy.CreateFileGDB_management(temp_location, temp_gdb)
if os.path.exists((temp_location + final_gdb)):
print "It exist!"
else:
arcpy.CreateFileGDB_management(temp_location, final_gdb)
arcpy.env.workspace = workspace
arcpy.CopyFeatures_management(original_sampling_stations, sampling_stations)
arcpy.AddField_management(sampling_stations, "Unique_ID", "DOUBLE")
arcpy.CalculateField_management(sampling_stations, "Unique_ID", "!OBJECTID!", "PYTHON")
nrow = arcpy.GetCount_management(sampling_stations)
# Execute LocateFeaturesAlongRoutes
arcpy.LocateFeaturesAlongRoutes_lr(sampling_stations, stream_network, rid, search_radius, output_table,
properties)
successful_features = arcpy.da.TableToNumPyArray(output_table, 'Unique_ID')['Unique_ID']
#Add QC fields to table
arcpy.AddField_management(output_table, "QAQC1", "STRING")
arcpy.AddField_management(output_table, "QAQC2", "STRING")
#Now, begin primary qc by using character matching to verify that successful rows have matching stream names.
stream_names_from_deq_streams = arcpy.da.TableToNumPyArray(stream_network, ['LLID', streams_river_name_field])[['LLID', streams_river_name_field]]
with arcpy.da.UpdateCursor(output_table, [station_river_name_field,'RID','QAQC1', 'QAQC2']) as cursor:
for row in cursor:
deq_streams = stream_names_from_deq_streams[streams_river_name_field][numpy.nonzero(stream_names_from_deq_streams['LLID'] == row[1])][0]
if row[0].replace(" ", "").lower() == deq_streams.replace(" ", "").lower():
row[2] = 'Reviewed'
row[3] = 'Not Required'
else:
row[2] = 'Needs Secondary Review'
cursor.updateRow(row)
#Create a 'success' fc and a 'fail' fc
#First, copy the original station fc to new fcs. One for success, one for failure.
arcpy.CopyFeatures_management(sampling_stations, output_success)
arcpy.CopyFeatures_management(sampling_stations, output_fail)
#Then, use cursors to remove failed rows from the success fc
with arcpy.da.UpdateCursor(output_success, "Unique_ID") as cursor:
for row in cursor:
if row not in successful_features:
cursor.deleteRow()
#And remove successful rows from the fail fc
with arcpy.da.UpdateCursor(output_fail, "Unique_ID") as cursor:
for row in cursor:
if row in successful_features:
cursor.deleteRow()
#Note: With a large enough search radius the fail fc will be empty.
#Remove all fields from the success fc except the Unique_ID so it can be merged with the output table
#Note: I'm not sure what would happen here if the success fc is empty. I suspect it would throw an exception.
# If this happens, increase the search radius.
fieldList = arcpy.ListFields(output_success)
fields_to_drop = []
for field in fieldList:
|
arcpy.DeleteField_management(output_success, fields_to_drop)
#Merge with output table
arcpy.JoinField_management(output_success, 'Unique_ID', output_table, 'Unique_ID')
#Now split success fc into one fc with successful qc and one with stations needing review
arcpy.MakeFeatureLayer_management(output_success, qc_lyr)
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Reviewed' """)
if int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == len(successful_features):
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == 0:
arcpy.CopyFeatures_management(output_success, (temp_location + final_gdb + "/" + qc_review))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) < len(successful_features) and int(arcpy.GetCount_management(qc_lyr).getOutput(0)) > 0:
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Needs Secondary Review' """)
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_review))
arcpy.CopyFeatures_management(output_fail, (temp_location + final_gdb + '/' + outside_threshold))
arcpy.SelectLayerByAttribute_management(qc_lyr, "CLEAR_SELECTION")
#Once this process is complete, add attribute information.
fc_original = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review'
fc_copy = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
arcpy.CopyFeatures_management(fc_original, fc_copy)
arcpy.AddField_management(fc_copy, 'RIVER_MILE', 'DOUBLE')
arcpy.CalculateField_management(fc_copy, 'RIVER_MILE', '!MEAS!/5280', "PYTHON_9.3")
#Spatially join HUC 3 and 4 field
huc3 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU6'
huc4 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU8'
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
arcpy.SpatialJoin_analysis(in_file, huc3, out_file)
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc4'
arcpy.SpatialJoin_analysis(in_file, huc4, out_file)
#Copy fc and remove Unwanted fields so fc is ready to merge with 2010 stations
stations2010_formatting = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_final'
arcpy.CopyFeatures_management(out_file, stations2010_formatting)
#Join the following fields using LLID: GIS_STREAMNAME, LAKE_NAME, GIS_Source_LAKE, GIS_Source
renameField(stations2010_formatting, 'RID', 'LLID')
arcpy.AddField_management(stations2010_formatting, 'LAKE_LLID', 'TEXT')
in_file = stations2010_formatting
stream_names = "F:/Base_Data/DEQ_Data/WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQ_Streams_25APR2013"
lake_names = "F:/Base_Data/DEQ_Data/WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQLakes_14JUN2013"
arcpy.JoinField_management(in_file, 'LLID', stream_names, 'LLID', ['NAME', 'SOURCE'])
arcpy.JoinField_management(in_file, 'LAKE_LLID', lake_names, 'WATERBODYI', ['NAME', 'SOURCE'])
#Change these new field names to meaningful ones.
renameField(in_file, "NAME", "GIS_STREAMNAME")
renameField(in_file, "NAME_1", "LAKE_NAME")
renameField(in_file, "Source", "GIS_Source")
renameField(in_file, "SOURCE_1", "GIS_Source_LAKE")
renameField(in_file, "STATION_KE", "STATION")
renameField(in_file, "LOCATION_D", "DESCRIPTION")
renameField(in_file, "Latitude", "DEC_LAT")
renameField(in_file, "Longitude", "DEC_LONG")
fieldList = arcpy.ListFields(stations2010_formatting)
fields_to_drop = []
for field in fieldList:
if field.name not in ['Shape','OBJECTID', 'LLID', 'LAKE_LLID', 'RIVER_MILE', 'AGENCY', 'AGENCY_ID', 'STATION', 'DEC_LAT', 'DEC_LONG',
'DESCRIPTION', 'QAQC1', 'QAQC2', 'Comments', 'HUC_6', 'HU_6_Name', 'HUC_8', 'HU_8_Name', 'GIS_STREAMNAME', 'LAKE_NAME',
'GIS_Source', 'GIS_Source_LAKE']:
fields_to_drop.append(field.name)
arcpy.DeleteField_management(stations2010_formatting, fields_to_drop)
out_fc = r'E:\GitHub\ToxicsRedo\StationsToLocate\Post_ToxicsRedo_Stations\post_toxicsRedo_stations_final.shp'
arcpy.CopyFeatures_management(stations2010_formatting, out_fc)
| if field.name not in ['Unique_ID', 'Shape','OBJECTID']:
fields_to_drop.append(field.name) | conditional_block |
Assign_LLID_Toxics_DO_unverified_LASAR_Stations.py | # -*- coding: utf-8 -*-
# Assign LLID's to sampling stations
# I'm including code in here beyond simply the station location. It'll add huc, stream names, and other important info.
# Import necessary modules
custom_script_location = r'E:\GitHub\ToxicsRedo\Python_Scripts'
if custom_script_location not in sys.path:
sys.path.append(custom_script_location)
from IR2012_Functions import *
import arcpy
import numpy
import os.path
arcpy.env.overwriteOutput = True
temp_location = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/"
workspace = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/assign_llid_temp.gdb"
temp_gdb = "assign_llid_temp.gdb"
final_gdb = "Toxics_do_post_toxicsRedo_Stations.gdb"
original_sampling_stations = "E:/GitHub/ToxicsRedo/StationsToLocate/Post_ToxicsRedo_Stations/toxics_do_unverified_all.shp"
sampling_stations = "stations_copy"
stream_network = "F:/Base_Data/DEQ_Data/WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQ_Streams_25APR2013"
station_river_name_field = "LOCATION_D"
streams_river_name_field = "NAME"
rid = "LLID"
search_radius = 12000
output_table = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/assign_llid_temp.gdb/out1"
output_success = "out_success"
output_fail = "out_fail"
qc_lyr = "qc_lyr"
qc_success = "qc_success"
qc_review = "qc_needs_review"
outside_threshold = "outside_threshold"
properties = "RID POINT MEAS"
# # Subset the 57 new lasar stations from master lasar station shapefile.
# # Four of these were not in the shapefile, and had to be converted manually.
# ls_df = pd.read_csv(r'E:\GitHub\ToxicsRedo\StationsToLocate\Post_ToxicsRedo_Stations\toxics_do_unverified.csv', header=0)
# ls_keys = ls_df['STATION'].values
#
# in_feature = "//Deqlead03/gis_wa/Project_Working_Folders/LASAR_Stations/LASAR_Stations/LASAR_Stations_26sept13.shp"
# out_feature = "E:/GitHub/ToxicsRedo/StationsToLocate/Post_ToxicsRedo_Stations/toxics_do_unverified.shp"
# lstations = "lstations"
# query = """ "STATION_KE" in """ + "(" + ', '.join([str(i) for i in ls_keys]) +")"
#
# arcpy.MakeFeatureLayer_management(in_feature, lstations)
# arcpy.SelectLayerByAttribute_management(lstations, "NEW_SELECTION", query)
# arcpy.GetCount_management(lstations).getOutput(0)
# arcpy.CopyFeatures_management(lstations, out_feature)
# Check to see if a temp geodatabase exists. If not, create it.
if os.path.exists(temp_location + temp_gdb):
print "It exist!"
else:
arcpy.CreateFileGDB_management(temp_location, temp_gdb)
if os.path.exists((temp_location + final_gdb)): | else:
arcpy.CreateFileGDB_management(temp_location, final_gdb)
arcpy.env.workspace = workspace
arcpy.CopyFeatures_management(original_sampling_stations, sampling_stations)
arcpy.AddField_management(sampling_stations, "Unique_ID", "DOUBLE")
arcpy.CalculateField_management(sampling_stations, "Unique_ID", "!OBJECTID!", "PYTHON")
nrow = arcpy.GetCount_management(sampling_stations)
# Execute LocateFeaturesAlongRoutes
arcpy.LocateFeaturesAlongRoutes_lr(sampling_stations, stream_network, rid, search_radius, output_table,
properties)
successful_features = arcpy.da.TableToNumPyArray(output_table, 'Unique_ID')['Unique_ID']
#Add QC fields to table
arcpy.AddField_management(output_table, "QAQC1", "STRING")
arcpy.AddField_management(output_table, "QAQC2", "STRING")
#Now, begin primary qc by using character matching to verify that successful rows have matching stream names.
stream_names_from_deq_streams = arcpy.da.TableToNumPyArray(stream_network, ['LLID', streams_river_name_field])[['LLID', streams_river_name_field]]
with arcpy.da.UpdateCursor(output_table, [station_river_name_field,'RID','QAQC1', 'QAQC2']) as cursor:
for row in cursor:
deq_streams = stream_names_from_deq_streams[streams_river_name_field][numpy.nonzero(stream_names_from_deq_streams['LLID'] == row[1])][0]
if row[0].replace(" ", "").lower() == deq_streams.replace(" ", "").lower():
row[2] = 'Reviewed'
row[3] = 'Not Required'
else:
row[2] = 'Needs Secondary Review'
cursor.updateRow(row)
#Create a 'success' fc and a 'fail' fc
#First, copy the original station fc to new fcs. One for success, one for failure.
arcpy.CopyFeatures_management(sampling_stations, output_success)
arcpy.CopyFeatures_management(sampling_stations, output_fail)
#Then, use cursors to remove failed rows from the success fc
with arcpy.da.UpdateCursor(output_success, "Unique_ID") as cursor:
for row in cursor:
if row not in successful_features:
cursor.deleteRow()
#And remove successful rows from the fail fc
with arcpy.da.UpdateCursor(output_fail, "Unique_ID") as cursor:
for row in cursor:
if row in successful_features:
cursor.deleteRow()
#Note: With a large enough search radius the fail fc will be empty.
#Remove all fields from the success fc except the Unique_ID so it can be merged with the output table
#Note: I'm not sure what would happen here if the success fc is empty. I suspect it would throw an exception.
# If this happens, increase the search radius.
fieldList = arcpy.ListFields(output_success)
fields_to_drop = []
for field in fieldList:
if field.name not in ['Unique_ID', 'Shape','OBJECTID']:
fields_to_drop.append(field.name)
arcpy.DeleteField_management(output_success, fields_to_drop)
#Merge with output table
arcpy.JoinField_management(output_success, 'Unique_ID', output_table, 'Unique_ID')
#Now split success fc into one fc with successful qc and one with stations needing review
arcpy.MakeFeatureLayer_management(output_success, qc_lyr)
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Reviewed' """)
if int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == len(successful_features):
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == 0:
arcpy.CopyFeatures_management(output_success, (temp_location + final_gdb + "/" + qc_review))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) < len(successful_features) and int(arcpy.GetCount_management(qc_lyr).getOutput(0)) > 0:
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Needs Secondary Review' """)
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_review))
arcpy.CopyFeatures_management(output_fail, (temp_location + final_gdb + '/' + outside_threshold))
arcpy.SelectLayerByAttribute_management(qc_lyr, "CLEAR_SELECTION")
#Once this process is complete, add attribute information.
fc_original = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review'
fc_copy = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
arcpy.CopyFeatures_management(fc_original, fc_copy)
arcpy.AddField_management(fc_copy, 'RIVER_MILE', 'DOUBLE')
arcpy.CalculateField_management(fc_copy, 'RIVER_MILE', '!MEAS!/5280', "PYTHON_9.3")
#Spatially join HUC 3 and 4 field
huc3 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU6'
huc4 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU8'
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
arcpy.SpatialJoin_analysis(in_file, huc3, out_file)
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc4'
arcpy.SpatialJoin_analysis(in_file, huc4, out_file)
#Copy fc and remove Unwanted fields so fc is ready to merge with 2010 stations
stations2010_formatting = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_final'
arcpy.CopyFeatures_management(out_file, stations2010_formatting)
#Join the following fields using LLID: GIS_STREAMNAME, LAKE_NAME, GIS_Source_LAKE, GIS_Source
renameField(stations2010_formatting, 'RID', 'LLID')
arcpy.AddField_management(stations2010_formatting, 'LAKE_LLID', 'TEXT')
in_file = stations2010_formatting
stream_names = "F:/Base_Data/DEQ_Data/WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQ_Streams_25APR2013"
lake_names = "F:/Base_Data/DEQ_Data/WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQLakes_14JUN2013"
arcpy.JoinField_management(in_file, 'LLID', stream_names, 'LLID', ['NAME', 'SOURCE'])
arcpy.JoinField_management(in_file, 'LAKE_LLID', lake_names, 'WATERBODYI', ['NAME', 'SOURCE'])
#Change these new field names to meaningful ones.
renameField(in_file, "NAME", "GIS_STREAMNAME")
renameField(in_file, "NAME_1", "LAKE_NAME")
renameField(in_file, "Source", "GIS_Source")
renameField(in_file, "SOURCE_1", "GIS_Source_LAKE")
renameField(in_file, "STATION_KE", "STATION")
renameField(in_file, "LOCATION_D", "DESCRIPTION")
renameField(in_file, "Latitude", "DEC_LAT")
renameField(in_file, "Longitude", "DEC_LONG")
fieldList = arcpy.ListFields(stations2010_formatting)
fields_to_drop = []
for field in fieldList:
if field.name not in ['Shape','OBJECTID', 'LLID', 'LAKE_LLID', 'RIVER_MILE', 'AGENCY', 'AGENCY_ID', 'STATION', 'DEC_LAT', 'DEC_LONG',
'DESCRIPTION', 'QAQC1', 'QAQC2', 'Comments', 'HUC_6', 'HU_6_Name', 'HUC_8', 'HU_8_Name', 'GIS_STREAMNAME', 'LAKE_NAME',
'GIS_Source', 'GIS_Source_LAKE']:
fields_to_drop.append(field.name)
arcpy.DeleteField_management(stations2010_formatting, fields_to_drop)
out_fc = r'E:\GitHub\ToxicsRedo\StationsToLocate\Post_ToxicsRedo_Stations\post_toxicsRedo_stations_final.shp'
arcpy.CopyFeatures_management(stations2010_formatting, out_fc) | print "It exist!" | random_line_split |
main.rs | use cargo::core::dependency::Kind;
use cargo::core::manifest::ManifestMetadata;
use cargo::core::package::PackageSet;
use cargo::core::registry::PackageRegistry;
use cargo::core::resolver::Method;
use cargo::core::shell::Shell;
use cargo::core::{Package, PackageId, Resolve, Workspace};
use cargo::ops;
use cargo::util::{self, important_paths, CargoResult, Cfg, Rustc};
use cargo::{CliResult, Config};
use failure::bail;
use petgraph::graph::NodeIndex;
use petgraph::visit::EdgeRef;
use petgraph::EdgeDirection;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::str::{self, FromStr};
use structopt::clap::AppSettings;
use structopt::StructOpt;
use crate::format::Pattern;
mod format;
// CLI entry modeled as a cargo subcommand: `cargo tree` makes structopt
// consume the `tree` literal and parse the rest into `Args`.
#[derive(StructOpt)]
#[structopt(bin_name = "cargo")]
enum Opts {
    #[structopt(
        name = "tree",
        // `raw` forwards clap settings structopt has no dedicated sugar for.
        raw(
            setting = "AppSettings::UnifiedHelpMessage",
            setting = "AppSettings::DeriveDisplayOrder",
            setting = "AppSettings::DontCollapseArgsInUsage"
        )
    )]
    /// Display a tree visualization of a dependency graph
    Tree(Args),
}
// All flags of the `cargo tree` subcommand. The `///` doc comment on each
// field doubles as that flag's `--help` text via structopt.
#[derive(StructOpt)]
struct Args {
    #[structopt(long = "package", short = "p", value_name = "SPEC")]
    /// Package to be used as the root of the tree
    package: Option<String>,
    #[structopt(long = "features", value_name = "FEATURES")]
    /// Space-separated list of features to activate
    features: Option<String>,
    #[structopt(long = "all-features")]
    /// Activate all available features
    all_features: bool,
    #[structopt(long = "no-default-features")]
    /// Do not activate the `default` feature
    no_default_features: bool,
    #[structopt(long = "target", value_name = "TARGET")]
    /// Set the target triple
    target: Option<String>,
    /// Directory for all generated artifacts
    #[structopt(long = "target-dir", value_name = "DIRECTORY", parse(from_os_str))]
    target_dir: Option<PathBuf>,
    #[structopt(long = "all-targets")]
    /// Return dependencies for all targets. By default only the host target is matched.
    all_targets: bool,
    #[structopt(long = "no-dev-dependencies")]
    /// Skip dev dependencies.
    no_dev_dependencies: bool,
    #[structopt(long = "manifest-path", value_name = "PATH", parse(from_os_str))]
    /// Path to Cargo.toml
    manifest_path: Option<PathBuf>,
    #[structopt(long = "invert", short = "i")]
    /// Invert the tree direction
    invert: bool,
    #[structopt(long = "no-indent")]
    /// Display the dependencies as a list (rather than a tree)
    no_indent: bool,
    #[structopt(long = "prefix-depth")]
    /// Display the dependencies as a list (rather than a tree), but prefixed with the depth
    prefix_depth: bool,
    #[structopt(long = "all", short = "a")]
    /// Don't truncate dependencies that have already been displayed
    all: bool,
    #[structopt(long = "duplicate", short = "d")]
    /// Show only dependencies which come in multiple versions (implies -i)
    duplicates: bool,
    #[structopt(long = "charset", value_name = "CHARSET", default_value = "utf8")]
    /// Character set to use in output: utf8, ascii
    charset: Charset,
    #[structopt(
        long = "format",
        short = "f",
        value_name = "FORMAT",
        default_value = "{p}"
    )]
    /// Format string used for printing dependencies
    format: String,
    #[structopt(long = "verbose", short = "v", parse(from_occurrences))]
    /// Use verbose output (-vv very verbose/build.rs output)
    verbose: u32,
    #[structopt(long = "quiet", short = "q")]
    /// No output printed to stdout other than the tree
    quiet: Option<bool>,
    #[structopt(long = "color", value_name = "WHEN")]
    /// Coloring: auto, always, never
    color: Option<String>,
    #[structopt(long = "frozen")]
    /// Require Cargo.lock and cache are up to date
    frozen: bool,
    #[structopt(long = "locked")]
    /// Require Cargo.lock is up to date
    locked: bool,
    #[structopt(short = "Z", value_name = "FLAG")]
    /// Unstable (nightly-only) flags to Cargo
    unstable_flags: Vec<String>,
}
/// Character set used for the tree-drawing glyphs (see `--charset`).
enum Charset {
    Utf8,
    Ascii,
}
/// How each printed line is introduced: tree indentation, the numeric
/// depth (`--prefix-depth`), or nothing at all (`--no-indent`).
#[derive(Clone, Copy)]
enum Prefix {
    None,
    Indent,
    Depth,
}
impl FromStr for Charset {
    type Err = &'static str;

    /// Parses the `--charset` flag value; anything other than the two
    /// recognized names is rejected with a static error message.
    fn from_str(s: &str) -> Result<Charset, &'static str> {
        if s == "utf8" {
            Ok(Charset::Utf8)
        } else if s == "ascii" {
            Ok(Charset::Ascii)
        } else {
            Err("invalid charset")
        }
    }
}
/// The four glyphs needed to draw tree branches.
struct Symbols {
    down: &'static str,
    tee: &'static str,
    ell: &'static str,
    right: &'static str,
}
// Box-drawing characters for terminals that render UTF-8.
static UTF8_SYMBOLS: Symbols = Symbols {
    down: "│",
    tee: "├",
    ell: "└",
    right: "─",
};
// Plain-ASCII fallback with the same visual roles.
static ASCII_SYMBOLS: Symbols = Symbols {
    down: "|",
    tee: "|",
    ell: "`",
    right: "-",
};
/// Process entry point: builds cargo's `Config`, parses the CLI, and
/// routes any failure through cargo's standard error reporting/exit path.
fn main() {
    env_logger::init();
    // Config loading itself can fail (e.g. unreadable cargo home); at that
    // point there is no Config to borrow a Shell from, so make a fresh one.
    let mut config = match Config::default() {
        Ok(cfg) => cfg,
        Err(e) => {
            let mut shell = Shell::new();
            cargo::exit_with_error(e.into(), &mut shell)
        }
    };
    // Irrefutable single-variant match: the only subcommand is `tree`.
    let Opts::Tree(args) = Opts::from_args();
    if let Err(e) = real_main(args, &mut config) {
        let mut shell = Shell::new();
        cargo::exit_with_error(e.into(), &mut shell)
    }
}
/// Drives the whole command: resolve the workspace's dependency graph,
/// translate the CLI flags into display options, and print the tree(s).
fn real_main(args: Args, config: &mut Config) -> CliResult {
    config.configure(
        args.verbose,
        args.quiet,
        &args.color,
        args.frozen,
        args.locked,
        &args.target_dir,
        &args.unstable_flags,
    )?;
    // Workspace discovery + full feature-aware resolution.
    let workspace = workspace(config, args.manifest_path)?;
    let package = workspace.current()?;
    let mut registry = registry(config, &package)?;
    let (packages, resolve) = resolve(
        &mut registry,
        &workspace,
        args.features,
        args.all_features,
        args.no_default_features,
        args.no_dev_dependencies,
    )?;
    // Download/fetch handles for every resolved package id.
    let ids = packages.package_ids().collect::<Vec<_>>();
    let packages = registry.get(&ids)?;
    // Root of the printed tree: `-p SPEC` if given, else the cwd package.
    let root = match args.package {
        Some(ref pkg) => resolve.query(pkg)?,
        None => package.package_id(),
    };
    let rustc = config.rustc(Some(&workspace))?;
    // `--all-targets` disables platform filtering entirely (target = None);
    // otherwise filter for the requested triple, defaulting to the host.
    let target = if args.all_targets {
        None
    } else {
        Some(args.target.as_ref().unwrap_or(&rustc.host).as_str())
    };
    let format = Pattern::new(&args.format).map_err(|e| failure::err_msg(e.to_string()))?;
    let cfgs = get_cfgs(&rustc, &args.target)?;
    let graph = build_graph(
        &resolve,
        &packages,
        package.package_id(),
        target,
        cfgs.as_ref().map(|r| &**r),
    )?;
    // --duplicates implies an inverted (who-depends-on-me) walk.
    let direction = if args.invert || args.duplicates {
        EdgeDirection::Incoming
    } else {
        EdgeDirection::Outgoing
    };
    let symbols = match args.charset {
        Charset::Ascii => &ASCII_SYMBOLS,
        Charset::Utf8 => &UTF8_SYMBOLS,
    };
    // --prefix-depth wins over --no-indent; default is the drawn tree.
    let prefix = if args.prefix_depth {
        Prefix::Depth
    } else if args.no_indent {
        Prefix::None
    } else {
        Prefix::Indent
    };
    if args.duplicates {
        // One inverted tree per duplicated package, blank-line separated.
        let dups = find_duplicates(&graph);
        for dup in &dups {
            print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
            println!();
        }
    } else {
        print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
    }
    Ok(())
}
/// Returns the sorted ids of every package whose *name* appears more than
/// once in the graph — i.e. packages present in several versions/sources.
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
    // Tally occurrences by name alone; version and source are deliberately
    // ignored, since differing versions of one name are what we're after.
    let mut name_counts = HashMap::new();
    for id in graph.nodes.keys() {
        *name_counts.entry(id.name()).or_insert(0) += 1;
    }
    // Keep every id whose name was seen at least twice. A second pass over
    // the keys is fine here: duplicate lists are short in practice.
    let mut duplicates: Vec<PackageId> = graph
        .nodes
        .keys()
        .filter(|id| name_counts[&id.name()] > 1)
        .cloned()
        .collect();
    duplicates.sort();
    duplicates
}
/// Asks `rustc --print=cfg` which cfg values are active for the given
/// target triple (host cfgs when `target` is `None`) and parses them.
///
/// # Errors
/// Propagates any failure from spawning rustc or parsing a cfg line.
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
    let mut process = util::process(&rustc.path);
    // RUST_LOG is stripped so rustc's own logging can't pollute stdout.
    process.arg("--print=cfg").env_remove("RUST_LOG");
    if let Some(s) = target {
        process.arg("--target").arg(s);
    }
    // `?` replaces the old match-and-return-Err, which was a manual
    // spelling of exactly this propagation.
    let output = process.exec_with_output()?;
    let stdout = str::from_utf8(&output.stdout)
        .expect("rustc --print=cfg emitted non-UTF-8 output");
    // Each stdout line is one cfg; collect short-circuits on the first
    // line that fails to parse.
    Ok(Some(
        stdout.lines().map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?,
    ))
}
/// Opens the workspace rooted at the given manifest, or — when no
/// `--manifest-path` was supplied — at the Cargo.toml found by walking up
/// from the current directory.
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
    let manifest = manifest_path
        .map_or_else(|| important_paths::find_root_manifest_for_wd(config.cwd()), Ok)?;
    Workspace::new(&manifest, config)
}
/// Builds a package registry primed with the root package's own source,
/// so later lookups can resolve against it.
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
    let source = package.package_id().source_id().clone();
    let mut reg = PackageRegistry::new(config)?;
    reg.add_sources(Some(source))?;
    Ok(reg)
}
/// Resolves the workspace twice: once plainly (to load the lockfile state)
/// and once with the requested feature/dev-dep settings layered on top of
/// that previous resolve, so versions stay pinned to the lockfile.
fn resolve<'a, 'cfg>(
    registry: &mut PackageRegistry<'cfg>,
    workspace: &'a Workspace<'cfg>,
    features: Option<String>,
    all_features: bool,
    no_default_features: bool,
    no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
    // Split the space/comma-separated `--features` string into cargo's form.
    let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
    // First pass: lockfile-faithful resolution of the whole workspace.
    let (packages, resolve) = ops::resolve_ws(workspace)?;
    // CLI flags are all spelled positively here, hence the negations.
    let method = Method::Required {
        dev_deps: !no_dev_dependencies,
        features: &features,
        all_features,
        uses_default_features: !no_default_features,
    };
    // Second pass: re-resolve with the chosen features, seeded by the first
    // resolve so package versions don't drift from the lockfile.
    let resolve = ops::resolve_with_previous(
        registry,
        workspace,
        method,
        Some(&resolve),
        None,
        &[],
        true,
        true,
    )?;
    Ok((packages, resolve))
}
/// One resolved package in the display graph, paired with a borrow of its
/// manifest metadata so `--format` placeholders can be filled in later.
struct Node<'a> {
    id: PackageId,
    metadata: &'a ManifestMetadata,
}
struct Graph<'a | graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
/// Builds the petgraph dependency graph rooted at `root` via a worklist
/// walk of the resolve. Edges are labeled with the dependency kind;
/// dependencies whose platform doesn't match `target`/`cfgs` are dropped
/// (unless `target` is `None`, which disables platform filtering).
fn build_graph<'a>(
    resolve: &'a Resolve,
    packages: &'a PackageSet<'_>,
    root: PackageId,
    target: Option<&str>,
    cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
    let mut graph = Graph {
        graph: petgraph::Graph::new(),
        nodes: HashMap::new(),
    };
    let node = Node {
        id: root.clone(),
        metadata: packages.get_one(root)?.manifest().metadata(),
    };
    graph.nodes.insert(root.clone(), graph.graph.add_node(node));
    // Worklist of package ids whose outgoing deps still need expanding.
    let mut pending = vec![root];
    while let Some(pkg_id) = pending.pop() {
        let idx = graph.nodes[&pkg_id];
        let pkg = packages.get_one(pkg_id)?;
        for raw_dep_id in resolve.deps_not_replaced(pkg_id) {
            // Manifest declarations matching this resolved dep: a single
            // resolved id can correspond to several declarations (e.g. the
            // same crate as both a normal and a dev dependency), and each
            // surviving declaration below yields its own edge.
            let it = pkg
                .dependencies()
                .iter()
                .filter(|d| d.matches_ignoring_source(raw_dep_id))
                .filter(|d| {
                    // Keep platform-specific deps only when their platform
                    // matches; deps without a platform always pass.
                    d.platform()
                        .and_then(|p| target.map(|t| p.matches(t, cfgs)))
                        .unwrap_or(true)
                });
            // Honor `[replace]`/`[patch]`: edges point at the replacement.
            let dep_id = match resolve.replacement(raw_dep_id) {
                Some(id) => id,
                None => raw_dep_id,
            };
            for dep in it {
                // First sighting of a package queues it for expansion.
                let dep_idx = match graph.nodes.entry(dep_id) {
                    Entry::Occupied(e) => *e.get(),
                    Entry::Vacant(e) => {
                        pending.push(dep_id);
                        let node = Node {
                            id: dep_id,
                            metadata: packages.get_one(dep_id)?.manifest().metadata(),
                        };
                        *e.insert(graph.graph.add_node(node))
                    }
                };
                graph.graph.add_edge(idx, dep_idx, dep.kind());
            }
        }
    }
    Ok(graph)
}
/// Prints one full tree rooted at `package`, failing if that id is not in
/// the graph. The `visited` set and level stack are created fresh per tree.
fn print_tree<'a>(
    package: &'a PackageId,
    graph: &Graph<'a>,
    format: &Pattern,
    direction: EdgeDirection,
    symbols: &Symbols,
    prefix: Prefix,
    all: bool,
) -> CargoResult<()> {
    let mut visited_deps = HashSet::new();
    let mut levels_continue = vec![];
    let root_idx = match graph.nodes.get(package) {
        Some(&idx) => idx,
        None => bail!("package {} not found", package),
    };
    print_dependency(
        &graph.graph[root_idx],
        &graph,
        format,
        direction,
        symbols,
        &mut visited_deps,
        &mut levels_continue,
        prefix,
        all,
    );
    Ok(())
}
/// Prints one node (with the chosen prefix style) and then recurses into
/// its dependencies, grouped and labeled by kind.
///
/// `levels_continue` carries, per ancestor level, whether more siblings
/// follow at that level — it drives which branch glyphs are drawn.
fn print_dependency<'a>(
    package: &Node<'a>,
    graph: &Graph<'a>,
    format: &Pattern,
    direction: EdgeDirection,
    symbols: &Symbols,
    visited_deps: &mut HashSet<PackageId>,
    levels_continue: &mut Vec<bool>,
    prefix: Prefix,
    all: bool,
) {
    // Without --all, a package already printed elsewhere is shown once with
    // a "(*)" marker and its subtree is not repeated.
    let new = all || visited_deps.insert(package.id);
    let star = if new { "" } else { " (*)" };
    match prefix {
        Prefix::Depth => print!("{} ", levels_continue.len()),
        Prefix::Indent => {
            if let Some((&last_continues, rest)) = levels_continue.split_last() {
                // Ancestor columns: a vertical bar where siblings remain.
                for &continues in rest {
                    let c = if continues { symbols.down } else { " " };
                    print!("{} ", c);
                }
                // This node's own connector: tee if more siblings follow,
                // ell if it is the last one.
                let c = if last_continues {
                    symbols.tee
                } else {
                    symbols.ell
                };
                print!("{0}{1}{1} ", c, symbols.right);
            }
        }
        Prefix::None => (),
    }
    println!("{}{}", format.display(&package.id, package.metadata), star);
    if !new {
        return;
    }
    // Bucket this node's edges by dependency kind so each group can be
    // printed under its own section header.
    let mut normal = vec![];
    let mut build = vec![];
    let mut development = vec![];
    for edge in graph
        .graph
        .edges_directed(graph.nodes[&package.id], direction)
    {
        // For inverted trees the "dependency" is the edge's source.
        let dep = match direction {
            EdgeDirection::Incoming => &graph.graph[edge.source()],
            EdgeDirection::Outgoing => &graph.graph[edge.target()],
        };
        match *edge.weight() {
            Kind::Normal => normal.push(dep),
            Kind::Build => build.push(dep),
            Kind::Development => development.push(dep),
        }
    }
    // One call per kind in the fixed normal/build/dev display order; the
    // three previously duplicated call sites are folded into this loop.
    for (kind, deps) in vec![
        (Kind::Normal, normal),
        (Kind::Build, build),
        (Kind::Development, development),
    ] {
        print_dependency_kind(
            kind,
            deps,
            graph,
            format,
            direction,
            symbols,
            visited_deps,
            levels_continue,
            prefix,
            all,
        );
    }
}
/// Print one kind-group (`normal`, `[build-dependencies]` or
/// `[dev-dependencies]`) of `deps`, then recurse into each entry.
///
/// Does nothing for an empty group. In indent mode the non-normal groups
/// get a bracketed section header aligned with the current tree depth.
fn print_dependency_kind<'a>(
kind: Kind,
mut deps: Vec<&Node<'a>>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
if deps.is_empty() {
return;
}
// Resolve uses Hash data types internally but we want consistent output ordering
deps.sort_by_key(|n| n.id);
// Normal dependencies print with no section header.
let name = match kind {
Kind::Normal => None,
Kind::Build => Some("[build-dependencies]"),
Kind::Development => Some("[dev-dependencies]"),
};
if let Prefix::Indent = prefix {
if let Some(name) = name {
// Align the header under the current tree indentation.
for &continues in &**levels_continue {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
println!("{}", name);
}
}
// Peek ahead so each child knows whether siblings follow it (this
// controls tee vs. ell in the drawn tree).
let mut it = deps.iter().peekable();
while let Some(dependency) = it.next() {
levels_continue.push(it.peek().is_some());
print_dependency(
dependency,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
levels_continue.pop();
}
}
| > {
| identifier_name |
main.rs | use cargo::core::dependency::Kind;
use cargo::core::manifest::ManifestMetadata;
use cargo::core::package::PackageSet;
use cargo::core::registry::PackageRegistry;
use cargo::core::resolver::Method;
use cargo::core::shell::Shell;
use cargo::core::{Package, PackageId, Resolve, Workspace};
use cargo::ops;
use cargo::util::{self, important_paths, CargoResult, Cfg, Rustc};
use cargo::{CliResult, Config};
use failure::bail;
use petgraph::graph::NodeIndex;
use petgraph::visit::EdgeRef;
use petgraph::EdgeDirection;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::str::{self, FromStr};
use structopt::clap::AppSettings;
use structopt::StructOpt;
use crate::format::Pattern;
mod format;
/// Top-level CLI wrapper so clap presents the binary as the
/// `cargo tree` subcommand (invoked via `cargo tree ...`).
#[derive(StructOpt)]
#[structopt(bin_name = "cargo")]
enum Opts {
#[structopt(
name = "tree",
raw(
setting = "AppSettings::UnifiedHelpMessage",
setting = "AppSettings::DeriveDisplayOrder",
setting = "AppSettings::DontCollapseArgsInUsage"
)
)]
/// Display a tree visualization of a dependency graph
Tree(Args),
}
/// Command-line options for the `tree` subcommand; each field maps to
/// one flag/option via the structopt attributes.
#[derive(StructOpt)]
struct Args {
#[structopt(long = "package", short = "p", value_name = "SPEC")]
/// Package to be used as the root of the tree
package: Option<String>,
#[structopt(long = "features", value_name = "FEATURES")]
/// Space-separated list of features to activate
features: Option<String>,
#[structopt(long = "all-features")]
/// Activate all available features
all_features: bool,
#[structopt(long = "no-default-features")]
/// Do not activate the `default` feature
no_default_features: bool,
#[structopt(long = "target", value_name = "TARGET")]
/// Set the target triple
target: Option<String>,
/// Directory for all generated artifacts
#[structopt(long = "target-dir", value_name = "DIRECTORY", parse(from_os_str))]
target_dir: Option<PathBuf>,
#[structopt(long = "all-targets")]
/// Return dependencies for all targets. By default only the host target is matched.
all_targets: bool,
#[structopt(long = "no-dev-dependencies")]
/// Skip dev dependencies.
no_dev_dependencies: bool,
#[structopt(long = "manifest-path", value_name = "PATH", parse(from_os_str))]
/// Path to Cargo.toml
manifest_path: Option<PathBuf>,
#[structopt(long = "invert", short = "i")]
/// Invert the tree direction
invert: bool,
#[structopt(long = "no-indent")]
/// Display the dependencies as a list (rather than a tree)
no_indent: bool,
#[structopt(long = "prefix-depth")]
/// Display the dependencies as a list (rather than a tree), but prefixed with the depth
prefix_depth: bool,
#[structopt(long = "all", short = "a")]
/// Don't truncate dependencies that have already been displayed
all: bool,
#[structopt(long = "duplicate", short = "d")]
/// Show only dependencies which come in multiple versions (implies -i)
duplicates: bool,
#[structopt(long = "charset", value_name = "CHARSET", default_value = "utf8")]
/// Character set to use in output: utf8, ascii
charset: Charset,
#[structopt(
long = "format",
short = "f",
value_name = "FORMAT",
default_value = "{p}"
)]
/// Format string used for printing dependencies
format: String,
#[structopt(long = "verbose", short = "v", parse(from_occurrences))]
/// Use verbose output (-vv very verbose/build.rs output)
verbose: u32,
#[structopt(long = "quiet", short = "q")]
/// No output printed to stdout other than the tree
quiet: Option<bool>,
#[structopt(long = "color", value_name = "WHEN")]
/// Coloring: auto, always, never
color: Option<String>,
#[structopt(long = "frozen")]
/// Require Cargo.lock and cache are up to date
frozen: bool,
#[structopt(long = "locked")]
/// Require Cargo.lock is up to date
locked: bool,
#[structopt(short = "Z", value_name = "FLAG")]
/// Unstable (nightly-only) flags to Cargo
unstable_flags: Vec<String>,
}
/// Character set used for the tree-drawing glyphs (`--charset`).
enum Charset {
Utf8,
Ascii,
}
/// How each printed line is prefixed: nothing (`--no-indent`), tree
/// indentation (default), or the numeric depth (`--prefix-depth`).
#[derive(Clone, Copy)]
enum Prefix {
None,
Indent,
Depth,
}
impl FromStr for Charset {
    type Err = &'static str;

    /// Parse a `--charset` value; only "utf8" and "ascii" are accepted.
    fn from_str(s: &str) -> Result<Charset, &'static str> {
        if s == "utf8" {
            Ok(Charset::Utf8)
        } else if s == "ascii" {
            Ok(Charset::Ascii)
        } else {
            Err("invalid charset")
        }
    }
}
/// The four glyphs used to draw the tree structure (vertical bar,
/// tee junction, last-child ell, and horizontal connector).
struct Symbols {
down: &'static str,
tee: &'static str,
ell: &'static str,
right: &'static str,
}
/// Box-drawing glyphs for `--charset utf8` (the default).
static UTF8_SYMBOLS: Symbols = Symbols {
down: "│",
tee: "├",
ell: "└",
right: "─",
};
/// Plain-ASCII fallback glyphs for `--charset ascii`.
static ASCII_SYMBOLS: Symbols = Symbols {
down: "|",
tee: "|",
ell: "`",
right: "-",
};
fn main() {
en | l_main(args: Args, config: &mut Config) -> CliResult {
config.configure(
args.verbose,
args.quiet,
&args.color,
args.frozen,
args.locked,
&args.target_dir,
&args.unstable_flags,
)?;
let workspace = workspace(config, args.manifest_path)?;
let package = workspace.current()?;
let mut registry = registry(config, &package)?;
let (packages, resolve) = resolve(
&mut registry,
&workspace,
args.features,
args.all_features,
args.no_default_features,
args.no_dev_dependencies,
)?;
let ids = packages.package_ids().collect::<Vec<_>>();
let packages = registry.get(&ids)?;
let root = match args.package {
Some(ref pkg) => resolve.query(pkg)?,
None => package.package_id(),
};
let rustc = config.rustc(Some(&workspace))?;
let target = if args.all_targets {
None
} else {
Some(args.target.as_ref().unwrap_or(&rustc.host).as_str())
};
let format = Pattern::new(&args.format).map_err(|e| failure::err_msg(e.to_string()))?;
let cfgs = get_cfgs(&rustc, &args.target)?;
let graph = build_graph(
&resolve,
&packages,
package.package_id(),
target,
cfgs.as_ref().map(|r| &**r),
)?;
let direction = if args.invert || args.duplicates {
EdgeDirection::Incoming
} else {
EdgeDirection::Outgoing
};
let symbols = match args.charset {
Charset::Ascii => &ASCII_SYMBOLS,
Charset::Utf8 => &UTF8_SYMBOLS,
};
let prefix = if args.prefix_depth {
Prefix::Depth
} else if args.no_indent {
Prefix::None
} else {
Prefix::Indent
};
if args.duplicates {
let dups = find_duplicates(&graph);
for dup in &dups {
print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
println!();
}
} else {
print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
}
Ok(())
}
/// Collect the ids of all packages that appear in the graph under more
/// than one version (same name, several `PackageId`s), sorted.
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
    // Tally how many distinct ids share each package name. Source and
    // version are irrelevant here — only the name matters.
    let mut counts = HashMap::new();
    for id in graph.nodes.keys() {
        *counts.entry(id.name()).or_insert(0) += 1;
    }
    // Gather every id whose name occurred more than once. This scans the
    // key set once per se, which is plenty fast for a dependency graph.
    let mut dups: Vec<PackageId> = graph
        .nodes
        .keys()
        .filter(|id| counts[&id.name()] > 1)
        .cloned()
        .collect();
    dups.sort();
    dups
}
/// Run `rustc --print=cfg` (optionally for an explicit `--target`) and
/// parse the reported cfg values.
///
/// # Errors
/// Fails if rustc cannot be executed, exits unsuccessfully, emits
/// non-UTF-8 output, or prints a line that does not parse as a cfg.
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
    let mut process = util::process(&rustc.path);
    // RUST_LOG would make rustc emit log lines that corrupt the output.
    process.arg("--print=cfg").env_remove("RUST_LOG");
    if let Some(ref s) = *target {
        process.arg("--target").arg(s);
    }
    // Propagate spawn/exit failures directly instead of re-wrapping them
    // through an explicit match.
    let output = process.exec_with_output()?;
    // Was `.unwrap()`: don't panic on non-UTF-8 rustc output, report it.
    let stdout = str::from_utf8(&output.stdout)?;
    Ok(Some(
        stdout
            .lines()
            .map(Cfg::from_str)
            .collect::<CargoResult<Vec<_>>>()?,
    ))
}
/// Open the workspace for an explicit manifest path, or for the root
/// manifest found by walking up from the current working directory.
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
    let root = if let Some(path) = manifest_path {
        path
    } else {
        important_paths::find_root_manifest_for_wd(config.cwd())?
    };
    Workspace::new(&root, config)
}
/// Build a `PackageRegistry` that is aware of `package`'s own source.
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
    let mut reg = PackageRegistry::new(config)?;
    let source = package.package_id().source_id().clone();
    reg.add_sources(Some(source))?;
    Ok(reg)
}
/// Resolve the workspace's dependency graph with the requested feature
/// selection, reusing the lockfile resolve as the "previous" state so
/// versions stay pinned.
fn resolve<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
workspace: &'a Workspace<'cfg>,
features: Option<String>,
all_features: bool,
no_default_features: bool,
no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
// Split the raw `--features` string into individual feature names.
let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
// First resolve against the lockfile to obtain the package set...
let (packages, resolve) = ops::resolve_ws(workspace)?;
let method = Method::Required {
dev_deps: !no_dev_dependencies,
features: &features,
all_features,
uses_default_features: !no_default_features,
};
// ...then re-resolve with the requested feature/dev-dep settings,
// seeded with the previous resolve.
let resolve = ops::resolve_with_previous(
registry,
workspace,
method,
Some(&resolve),
None,
&[],
true,
true,
)?;
Ok((packages, resolve))
}
/// One package in the rendered graph: its id plus the manifest metadata
/// consumed by the `--format` pattern.
struct Node<'a> {
id: PackageId,
metadata: &'a ManifestMetadata,
}
/// The dependency graph: petgraph storage (edges labelled by dependency
/// kind) plus a package-id → node-index lookup table.
struct Graph<'a> {
graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
/// Walk `resolve` starting from `root` and build a petgraph of the
/// dependency graph, with edges labelled by dependency kind.
///
/// Platform-specific dependencies are filtered against `target`/`cfgs`
/// (kept when no target is requested), and replaced packages are
/// represented by their replacement id.
fn build_graph<'a>(
    resolve: &'a Resolve,
    packages: &'a PackageSet<'_>,
    root: PackageId,
    target: Option<&str>,
    cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
    let mut graph = Graph {
        graph: petgraph::Graph::new(),
        nodes: HashMap::new(),
    };
    let node = Node {
        // `PackageId` is `Copy` (it is repeatedly passed by value below),
        // so the original `.clone()` calls were redundant.
        id: root,
        metadata: packages.get_one(root)?.manifest().metadata(),
    };
    graph.nodes.insert(root, graph.graph.add_node(node));
    // Worklist of packages whose dependencies still need expanding.
    let mut pending = vec![root];
    while let Some(pkg_id) = pending.pop() {
        let idx = graph.nodes[&pkg_id];
        let pkg = packages.get_one(pkg_id)?;
        for raw_dep_id in resolve.deps_not_replaced(pkg_id) {
            // Declared dependencies matching this resolved id, restricted
            // to those enabled on the requested target platform (if any).
            let it = pkg
                .dependencies()
                .iter()
                .filter(|d| d.matches_ignoring_source(raw_dep_id))
                .filter(|d| {
                    d.platform()
                        .and_then(|p| target.map(|t| p.matches(t, cfgs)))
                        .unwrap_or(true)
                });
            // Show the replacement id when the dependency was replaced.
            let dep_id = match resolve.replacement(raw_dep_id) {
                Some(id) => id,
                None => raw_dep_id,
            };
            for dep in it {
                let dep_idx = match graph.nodes.entry(dep_id) {
                    Entry::Occupied(e) => *e.get(),
                    Entry::Vacant(e) => {
                        // First sighting: queue the package for expansion
                        // and create its node.
                        pending.push(dep_id);
                        let node = Node {
                            id: dep_id,
                            metadata: packages.get_one(dep_id)?.manifest().metadata(),
                        };
                        *e.insert(graph.graph.add_node(node))
                    }
                };
                graph.graph.add_edge(idx, dep_idx, dep.kind());
            }
        }
    }
    Ok(graph)
}
/// Render the dependency tree rooted at `package` to stdout.
///
/// Fails with an error if `package` does not occur in `graph`.
fn print_tree<'a>(
    package: &'a PackageId,
    graph: &Graph<'a>,
    format: &Pattern,
    direction: EdgeDirection,
    symbols: &Symbols,
    prefix: Prefix,
    all: bool,
) -> CargoResult<()> {
    // Look the root up first so we can fail with a clear message.
    let idx = match graph.nodes.get(package) {
        Some(idx) => *idx,
        None => bail!("package {} not found", package),
    };
    let root_node = &graph.graph[idx];
    let mut seen = HashSet::new();
    let mut continue_flags = Vec::new();
    print_dependency(
        root_node,
        graph,
        format,
        direction,
        symbols,
        &mut seen,
        &mut continue_flags,
        prefix,
        all,
    );
    Ok(())
}
/// Print `package` at the current tree position, then recursively print
/// its dependencies (or dependents, when `direction` is `Incoming`),
/// grouped into normal / build / dev sections.
///
/// Unless `all` is set, a package already printed elsewhere is shown with
/// a trailing " (*)" and its subtree is not repeated. `levels_continue`
/// records, for each ancestor level, whether further siblings follow;
/// that drives the choice of tree-drawing symbols.
fn print_dependency<'a>(
package: &Node<'a>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
// `insert` returns false when the id was already present, i.e. the
// package has been printed before.
let new = all || visited_deps.insert(package.id);
let star = if new { "" } else { " (*)" };
match prefix {
// List mode: prefix each line with its depth.
Prefix::Depth => print!("{} ", levels_continue.len()),
// Tree mode: draw the box-drawing prefix for this depth.
Prefix::Indent => {
if let Some((&last_continues, rest)) = levels_continue.split_last() {
// One column per ancestor: a vertical bar while that
// ancestor still has siblings to print, blank otherwise.
for &continues in rest {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
// Tee if more siblings follow at this level, ell for the last.
let c = if last_continues {
symbols.tee
} else {
symbols.ell
};
print!("{0}{1}{1} ", c, symbols.right);
}
}
Prefix::None => (),
}
println!("{}{}", format.display(&package.id, package.metadata), star);
// A repeated package's subtree was already shown: stop here.
if !new {
return;
}
// Bucket adjacent edges by dependency kind so each group prints under
// its own section header.
let mut normal = vec![];
let mut build = vec![];
let mut development = vec![];
for edge in graph
.graph
.edges_directed(graph.nodes[&package.id], direction)
{
// When the tree is inverted, walk to the edge source (the
// dependent) instead of the target (the dependency).
let dep = match direction {
EdgeDirection::Incoming => &graph.graph[edge.source()],
EdgeDirection::Outgoing => &graph.graph[edge.target()],
};
match *edge.weight() {
Kind::Normal => normal.push(dep),
Kind::Build => build.push(dep),
Kind::Development => development.push(dep),
}
}
print_dependency_kind(
Kind::Normal,
normal,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Build,
build,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Development,
development,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
}
/// Print one kind-group (`normal`, `[build-dependencies]` or
/// `[dev-dependencies]`) of `deps`, then recurse into each entry.
///
/// Does nothing for an empty group. In indent mode the non-normal groups
/// get a bracketed section header aligned with the current tree depth.
fn print_dependency_kind<'a>(
kind: Kind,
mut deps: Vec<&Node<'a>>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
if deps.is_empty() {
return;
}
// Resolve uses Hash data types internally but we want consistent output ordering
deps.sort_by_key(|n| n.id);
// Normal dependencies print with no section header.
let name = match kind {
Kind::Normal => None,
Kind::Build => Some("[build-dependencies]"),
Kind::Development => Some("[dev-dependencies]"),
};
if let Prefix::Indent = prefix {
if let Some(name) = name {
// Align the header under the current tree indentation.
for &continues in &**levels_continue {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
println!("{}", name);
}
}
// Peek ahead so each child knows whether siblings follow it (this
// controls tee vs. ell in the drawn tree).
let mut it = deps.iter().peekable();
while let Some(dependency) = it.next() {
levels_continue.push(it.peek().is_some());
print_dependency(
dependency,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
levels_continue.pop();
}
}
| v_logger::init();
let mut config = match Config::default() {
Ok(cfg) => cfg,
Err(e) => {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
};
let Opts::Tree(args) = Opts::from_args();
if let Err(e) = real_main(args, &mut config) {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
}
fn rea | identifier_body |
main.rs | use cargo::core::dependency::Kind;
use cargo::core::manifest::ManifestMetadata;
use cargo::core::package::PackageSet;
use cargo::core::registry::PackageRegistry;
use cargo::core::resolver::Method;
use cargo::core::shell::Shell;
use cargo::core::{Package, PackageId, Resolve, Workspace};
use cargo::ops;
use cargo::util::{self, important_paths, CargoResult, Cfg, Rustc};
use cargo::{CliResult, Config};
use failure::bail;
use petgraph::graph::NodeIndex;
use petgraph::visit::EdgeRef;
use petgraph::EdgeDirection;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::str::{self, FromStr};
use structopt::clap::AppSettings;
use structopt::StructOpt;
use crate::format::Pattern;
mod format;
#[derive(StructOpt)]
#[structopt(bin_name = "cargo")]
enum Opts {
#[structopt(
name = "tree",
raw(
setting = "AppSettings::UnifiedHelpMessage",
setting = "AppSettings::DeriveDisplayOrder",
setting = "AppSettings::DontCollapseArgsInUsage"
)
)]
/// Display a tree visualization of a dependency graph
Tree(Args),
}
#[derive(StructOpt)]
struct Args {
#[structopt(long = "package", short = "p", value_name = "SPEC")]
/// Package to be used as the root of the tree
package: Option<String>,
#[structopt(long = "features", value_name = "FEATURES")]
/// Space-separated list of features to activate
features: Option<String>,
#[structopt(long = "all-features")]
/// Activate all available features
all_features: bool,
#[structopt(long = "no-default-features")]
/// Do not activate the `default` feature
no_default_features: bool,
#[structopt(long = "target", value_name = "TARGET")]
/// Set the target triple
target: Option<String>,
/// Directory for all generated artifacts
#[structopt(long = "target-dir", value_name = "DIRECTORY", parse(from_os_str))]
target_dir: Option<PathBuf>,
#[structopt(long = "all-targets")]
/// Return dependencies for all targets. By default only the host target is matched.
all_targets: bool,
#[structopt(long = "no-dev-dependencies")]
/// Skip dev dependencies.
no_dev_dependencies: bool,
#[structopt(long = "manifest-path", value_name = "PATH", parse(from_os_str))]
/// Path to Cargo.toml
manifest_path: Option<PathBuf>,
#[structopt(long = "invert", short = "i")]
/// Invert the tree direction
invert: bool,
#[structopt(long = "no-indent")]
/// Display the dependencies as a list (rather than a tree)
no_indent: bool,
#[structopt(long = "prefix-depth")]
/// Display the dependencies as a list (rather than a tree), but prefixed with the depth
prefix_depth: bool,
#[structopt(long = "all", short = "a")]
/// Don't truncate dependencies that have already been displayed
all: bool,
#[structopt(long = "duplicate", short = "d")]
/// Show only dependencies which come in multiple versions (implies -i)
duplicates: bool,
#[structopt(long = "charset", value_name = "CHARSET", default_value = "utf8")]
/// Character set to use in output: utf8, ascii
charset: Charset,
#[structopt(
long = "format",
short = "f",
value_name = "FORMAT",
default_value = "{p}"
)]
/// Format string used for printing dependencies
format: String,
#[structopt(long = "verbose", short = "v", parse(from_occurrences))]
/// Use verbose output (-vv very verbose/build.rs output)
verbose: u32,
#[structopt(long = "quiet", short = "q")]
/// No output printed to stdout other than the tree
quiet: Option<bool>,
#[structopt(long = "color", value_name = "WHEN")]
/// Coloring: auto, always, never
color: Option<String>,
#[structopt(long = "frozen")]
/// Require Cargo.lock and cache are up to date
frozen: bool,
#[structopt(long = "locked")]
/// Require Cargo.lock is up to date
locked: bool,
#[structopt(short = "Z", value_name = "FLAG")]
/// Unstable (nightly-only) flags to Cargo
unstable_flags: Vec<String>,
}
enum Charset {
Utf8,
Ascii,
}
#[derive(Clone, Copy)]
enum Prefix {
None,
Indent,
Depth,
}
impl FromStr for Charset {
type Err = &'static str;
fn from_str(s: &str) -> Result<Charset, &'static str> {
match s {
"utf8" => Ok(Charset::Utf8),
"ascii" => Ok(Charset::Ascii),
_ => Err("invalid charset"),
}
}
}
struct Symbols {
down: &'static str,
tee: &'static str,
ell: &'static str,
right: &'static str,
}
static UTF8_SYMBOLS: Symbols = Symbols {
down: "│",
tee: "├",
ell: "└",
right: "─",
};
static ASCII_SYMBOLS: Symbols = Symbols {
down: "|",
tee: "|",
ell: "`",
right: "-",
};
/// Binary entry point: parse CLI args and hand off to `real_main`,
/// exiting through cargo's error reporting on any failure.
fn main() {
    env_logger::init();
    // `exit_with_error` never returns, so the closure coerces to Config.
    let mut config = Config::default().unwrap_or_else(|e| {
        let mut shell = Shell::new();
        cargo::exit_with_error(e.into(), &mut shell)
    });
    let Opts::Tree(args) = Opts::from_args();
    if let Err(e) = real_main(args, &mut config) {
        let mut shell = Shell::new();
        cargo::exit_with_error(e.into(), &mut shell)
    }
}
/// Post-parse entry point: configure cargo, resolve the workspace's
/// dependency graph, and print it according to `args`.
fn real_main(args: Args, config: &mut Config) -> CliResult {
config.configure(
args.verbose,
args.quiet,
&args.color,
args.frozen,
args.locked,
&args.target_dir,
&args.unstable_flags,
)?;
let workspace = workspace(config, args.manifest_path)?;
let package = workspace.current()?;
let mut registry = registry(config, &package)?;
let (packages, resolve) = resolve(
&mut registry,
&workspace,
args.features,
args.all_features,
args.no_default_features,
args.no_dev_dependencies,
)?;
// Load the actual package data for every resolved id.
let ids = packages.package_ids().collect::<Vec<_>>();
let packages = registry.get(&ids)?;
// Tree root: an explicit `--package` spec, or the current package.
let root = match args.package {
Some(ref pkg) => resolve.query(pkg)?,
None => package.package_id(),
};
let rustc = config.rustc(Some(&workspace))?;
// `None` means "all targets"; otherwise use `--target` or the host.
let target = if args.all_targets {
None
} else {
Some(args.target.as_ref().unwrap_or(&rustc.host).as_str())
};
let format = Pattern::new(&args.format).map_err(|e| failure::err_msg(e.to_string()))?;
let cfgs = get_cfgs(&rustc, &args.target)?;
let graph = build_graph(
&resolve,
&packages,
package.package_id(),
target,
cfgs.as_ref().map(|r| &**r),
)?;
// `--duplicate` implies an inverted view (who pulls in the dupes).
let direction = if args.invert || args.duplicates {
EdgeDirection::Incoming
} else {
EdgeDirection::Outgoing
};
let symbols = match args.charset {
Charset::Ascii => &ASCII_SYMBOLS,
Charset::Utf8 => &UTF8_SYMBOLS,
};
let prefix = if args.prefix_depth {
Prefix::Depth
} else if args.no_indent {
Prefix::None
} else {
Prefix::Indent
};
if args.duplicates {
// One tree per duplicated package id, separated by blank lines.
let dups = find_duplicates(&graph);
for dup in &dups {
print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
println!();
}
} else {
print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
}
Ok(())
}
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
let mut counts = HashMap::new();
// Count by name only. Source and version are irrelevant here.
for package in graph.nodes.keys() {
*counts.entry(package.name()).or_insert(0) += 1;
}
// Theoretically inefficient, but in practice we're only listing duplicates and
// there won't be enough dependencies for it to matter.
let mut dup_ids = Vec::new();
for name in counts.drain().filter(|&(_, v)| v > 1).map(|(k, _)| k) {
dup_ids.extend(graph.nodes.keys().filter(|p| p.name() == name));
}
dup_ids.sort();
dup_ids
}
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
let mut process = util::process(&rustc.path);
process.arg("--print=cfg").env_remove("RUST_LOG");
if let Some(ref s) = *target {
process.arg("--target").arg(s);
}
let output = match process.exec_with_output() {
Ok(output) => output,
Err(e) => return Err(e),
};
let output = str::from_utf8(&output.stdout).unwrap();
let lines = output.lines();
Ok(Some(
lines.map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?,
))
}
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
let root = match manifest_path {
Some(path) => path,
None => important_paths::find_root_manifest_for_wd(config.cwd())?,
};
Workspace::new(&root, config)
}
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
let mut registry = PackageRegistry::new(config)?;
registry.add_sources(Some(package.package_id().source_id().clone()))?;
Ok(registry)
}
fn resolve<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
workspace: &'a Workspace<'cfg>,
features: Option<String>,
all_features: bool,
no_default_features: bool,
no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
let (packages, resolve) = ops::resolve_ws(workspace)?;
let method = Method::Required {
dev_deps: !no_dev_dependencies,
features: &features,
all_features,
uses_default_features: !no_default_features,
};
let resolve = ops::resolve_with_previous(
registry,
workspace,
method,
Some(&resolve),
None,
&[],
true,
true,
)?;
Ok((packages, resolve))
}
struct Node<'a> {
id: PackageId,
metadata: &'a ManifestMetadata,
}
struct Graph<'a> {
graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
fn build_graph<'a>(
resolve: &'a Resolve,
packages: &'a PackageSet<'_>,
root: PackageId,
target: Option<&str>,
cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
let mut graph = Graph {
graph: petgraph::Graph::new(),
nodes: HashMap::new(),
};
let node = Node {
id: root.clone(),
metadata: packages.get_one(root)?.manifest().metadata(),
};
graph.nodes.insert(root.clone(), graph.graph.add_node(node));
let mut pending = vec![root];
while let Some(pkg_id) = pending.pop() {
let idx = graph.nodes[&pkg_id];
let pkg = packages.get_one(pkg_id)?;
for raw_dep_id in resolve.deps_not_replaced(pkg_id) {
let it = pkg
.dependencies()
.iter()
.filter(|d| d.matches_ignoring_source(raw_dep_id))
.filter(|d| {
d.platform()
.and_then(|p| target.map(|t| p.matches(t, cfgs)))
.unwrap_or(true)
});
let dep_id = match resolve.replacement(raw_dep_id) {
Some(id) => id,
None => raw_dep_id,
}; | for dep in it {
let dep_idx = match graph.nodes.entry(dep_id) {
Entry::Occupied(e) => *e.get(),
Entry::Vacant(e) => {
pending.push(dep_id);
let node = Node {
id: dep_id,
metadata: packages.get_one(dep_id)?.manifest().metadata(),
};
*e.insert(graph.graph.add_node(node))
}
};
graph.graph.add_edge(idx, dep_idx, dep.kind());
}
}
}
Ok(graph)
}
fn print_tree<'a>(
package: &'a PackageId,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
prefix: Prefix,
all: bool,
) -> CargoResult<()> {
let mut visited_deps = HashSet::new();
let mut levels_continue = vec![];
let package = match graph.nodes.get(package) {
Some(package) => package,
None => bail!("package {} not found", package),
};
let node = &graph.graph[*package];
print_dependency(
node,
&graph,
format,
direction,
symbols,
&mut visited_deps,
&mut levels_continue,
prefix,
all,
);
Ok(())
}
fn print_dependency<'a>(
package: &Node<'a>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
let new = all || visited_deps.insert(package.id);
let star = if new { "" } else { " (*)" };
match prefix {
Prefix::Depth => print!("{} ", levels_continue.len()),
Prefix::Indent => {
if let Some((&last_continues, rest)) = levels_continue.split_last() {
for &continues in rest {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
let c = if last_continues {
symbols.tee
} else {
symbols.ell
};
print!("{0}{1}{1} ", c, symbols.right);
}
}
Prefix::None => (),
}
println!("{}{}", format.display(&package.id, package.metadata), star);
if !new {
return;
}
let mut normal = vec![];
let mut build = vec![];
let mut development = vec![];
for edge in graph
.graph
.edges_directed(graph.nodes[&package.id], direction)
{
let dep = match direction {
EdgeDirection::Incoming => &graph.graph[edge.source()],
EdgeDirection::Outgoing => &graph.graph[edge.target()],
};
match *edge.weight() {
Kind::Normal => normal.push(dep),
Kind::Build => build.push(dep),
Kind::Development => development.push(dep),
}
}
print_dependency_kind(
Kind::Normal,
normal,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Build,
build,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Development,
development,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
}
fn print_dependency_kind<'a>(
kind: Kind,
mut deps: Vec<&Node<'a>>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
if deps.is_empty() {
return;
}
// Resolve uses Hash data types internally but we want consistent output ordering
deps.sort_by_key(|n| n.id);
let name = match kind {
Kind::Normal => None,
Kind::Build => Some("[build-dependencies]"),
Kind::Development => Some("[dev-dependencies]"),
};
if let Prefix::Indent = prefix {
if let Some(name) = name {
for &continues in &**levels_continue {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
println!("{}", name);
}
}
let mut it = deps.iter().peekable();
while let Some(dependency) = it.next() {
levels_continue.push(it.peek().is_some());
print_dependency(
dependency,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
levels_continue.pop();
}
} | random_line_split | |
main.rs | use cargo::core::dependency::Kind;
use cargo::core::manifest::ManifestMetadata;
use cargo::core::package::PackageSet;
use cargo::core::registry::PackageRegistry;
use cargo::core::resolver::Method;
use cargo::core::shell::Shell;
use cargo::core::{Package, PackageId, Resolve, Workspace};
use cargo::ops;
use cargo::util::{self, important_paths, CargoResult, Cfg, Rustc};
use cargo::{CliResult, Config};
use failure::bail;
use petgraph::graph::NodeIndex;
use petgraph::visit::EdgeRef;
use petgraph::EdgeDirection;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::str::{self, FromStr};
use structopt::clap::AppSettings;
use structopt::StructOpt;
use crate::format::Pattern;
mod format;
#[derive(StructOpt)]
#[structopt(bin_name = "cargo")]
enum Opts {
#[structopt(
name = "tree",
raw(
setting = "AppSettings::UnifiedHelpMessage",
setting = "AppSettings::DeriveDisplayOrder",
setting = "AppSettings::DontCollapseArgsInUsage"
)
)]
/// Display a tree visualization of a dependency graph
Tree(Args),
}
#[derive(StructOpt)]
struct Args {
#[structopt(long = "package", short = "p", value_name = "SPEC")]
/// Package to be used as the root of the tree
package: Option<String>,
#[structopt(long = "features", value_name = "FEATURES")]
/// Space-separated list of features to activate
features: Option<String>,
#[structopt(long = "all-features")]
/// Activate all available features
all_features: bool,
#[structopt(long = "no-default-features")]
/// Do not activate the `default` feature
no_default_features: bool,
#[structopt(long = "target", value_name = "TARGET")]
/// Set the target triple
target: Option<String>,
/// Directory for all generated artifacts
#[structopt(long = "target-dir", value_name = "DIRECTORY", parse(from_os_str))]
target_dir: Option<PathBuf>,
#[structopt(long = "all-targets")]
/// Return dependencies for all targets. By default only the host target is matched.
all_targets: bool,
#[structopt(long = "no-dev-dependencies")]
/// Skip dev dependencies.
no_dev_dependencies: bool,
#[structopt(long = "manifest-path", value_name = "PATH", parse(from_os_str))]
/// Path to Cargo.toml
manifest_path: Option<PathBuf>,
#[structopt(long = "invert", short = "i")]
/// Invert the tree direction
invert: bool,
#[structopt(long = "no-indent")]
/// Display the dependencies as a list (rather than a tree)
no_indent: bool,
#[structopt(long = "prefix-depth")]
/// Display the dependencies as a list (rather than a tree), but prefixed with the depth
prefix_depth: bool,
#[structopt(long = "all", short = "a")]
/// Don't truncate dependencies that have already been displayed
all: bool,
#[structopt(long = "duplicate", short = "d")]
/// Show only dependencies which come in multiple versions (implies -i)
duplicates: bool,
#[structopt(long = "charset", value_name = "CHARSET", default_value = "utf8")]
/// Character set to use in output: utf8, ascii
charset: Charset,
#[structopt(
long = "format",
short = "f",
value_name = "FORMAT",
default_value = "{p}"
)]
/// Format string used for printing dependencies
format: String,
#[structopt(long = "verbose", short = "v", parse(from_occurrences))]
/// Use verbose output (-vv very verbose/build.rs output)
verbose: u32,
#[structopt(long = "quiet", short = "q")]
/// No output printed to stdout other than the tree
quiet: Option<bool>,
#[structopt(long = "color", value_name = "WHEN")]
/// Coloring: auto, always, never
color: Option<String>,
#[structopt(long = "frozen")]
/// Require Cargo.lock and cache are up to date
frozen: bool,
#[structopt(long = "locked")]
/// Require Cargo.lock is up to date
locked: bool,
#[structopt(short = "Z", value_name = "FLAG")]
/// Unstable (nightly-only) flags to Cargo
unstable_flags: Vec<String>,
}
enum Charset {
Utf8,
Ascii,
}
#[derive(Clone, Copy)]
enum Prefix {
None,
Indent,
Depth,
}
impl FromStr for Charset {
type Err = &'static str;
fn from_str(s: &str) -> Result<Charset, &'static str> {
match s {
"utf8" => Ok(Charset::Utf8),
"ascii" => Ok(Charset::Ascii),
_ => Err("invalid charset"),
}
}
}
struct Symbols {
down: &'static str,
tee: &'static str,
ell: &'static str,
right: &'static str,
}
static UTF8_SYMBOLS: Symbols = Symbols {
down: "│",
tee: "├",
ell: "└",
right: "─",
};
static ASCII_SYMBOLS: Symbols = Symbols {
down: "|",
tee: "|",
ell: "`",
right: "-",
};
fn main() {
env_logger::init();
let mut config = match Config::default() {
Ok(cfg) => cfg,
Err(e) => {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
};
let Opts::Tree(args) = Opts::from_args();
if let Err(e) = real_main(args, &mut config) {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
}
fn real_main(args: Args, config: &mut Config) -> CliResult {
config.configure(
args.verbose,
args.quiet,
&args.color,
args.frozen,
args.locked,
&args.target_dir,
&args.unstable_flags,
)?;
let workspace = workspace(config, args.manifest_path)?;
let package = workspace.current()?;
let mut registry = registry(config, &package)?;
let (packages, resolve) = resolve(
&mut registry,
&workspace,
args.features,
args.all_features,
args.no_default_features,
args.no_dev_dependencies,
)?;
let ids = packages.package_ids().collect::<Vec<_>>();
let packages = registry.get(&ids)?;
let root = match args.package {
Some(ref pkg) => resolve.query(pkg)?,
None => package.package_id(),
};
let rustc = config.rustc(Some(&workspace))?;
let target = if args.all_targets {
None
} else {
Some(args.target.as_ref().unwrap_or(&rustc.host).as_str())
};
let format = Pattern::new(&args.format).map_err(|e| failure::err_msg(e.to_string()))?;
let cfgs = get_cfgs(&rustc, &args.target)?;
let graph = build_graph(
&resolve,
&packages,
package.package_id(),
target,
cfgs.as_ref().map(|r| &**r),
)?;
let direction = if args.invert || args.duplicates {
EdgeDirection::Incoming
} else {
EdgeDirection::Outgoing
};
let symbols = match args.charset {
Charset::Ascii => &ASCII_SYMBOLS,
Charset::Utf8 => &UTF8_SYMBOLS,
};
let prefix = if args.prefix_depth {
Prefix::Depth
} else if args.no_indent {
Prefix::None
} else {
Prefix::Indent
};
if args.duplicates {
let dups = find_duplicates(&graph);
for dup in &dups {
print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
println!();
}
} else {
print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
}
Ok(())
}
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
let mut counts = HashMap::new();
// Count by name only. Source and version are irrelevant here.
for package in graph.nodes.keys() {
*counts.entry(package.name()).or_insert(0) += 1;
}
// Theoretically inefficient, but in practice we're only listing duplicates and
// there won't be enough dependencies for it to matter.
let mut dup_ids = Vec::new();
for name in counts.drain().filter(|&(_, v)| v > 1).map(|(k, _)| k) {
dup_ids.extend(graph.nodes.keys().filter(|p| p.name() == name));
}
dup_ids.sort();
dup_ids
}
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
let mut process = util::process(&rustc.path);
process.arg("--print=cfg").env_remove("RUST_LOG");
if let Some(ref s) = *target {
process.arg("--target").arg(s);
}
let output = match process.exec_with_output() {
Ok(output) => output,
Err(e) => return Err(e),
};
let output = str::from_utf8(&output.stdout).unwrap();
let lines = output.lines();
Ok(Some(
lines.map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?,
))
}
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
let root = match manifest_path {
Some(path) => path,
None => important_paths::find_root_manifest_for_wd(config.cwd())?,
};
Workspace::new(&root, config)
}
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
let mut registry = PackageRegistry::new(config)?;
registry.add_sources(Some(package.package_id().source_id().clone()))?;
Ok(registry)
}
fn resolve<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
workspace: &'a Workspace<'cfg>,
features: Option<String>,
all_features: bool,
no_default_features: bool,
no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
let (packages, resolve) = ops::resolve_ws(workspace)?;
let method = Method::Required {
dev_deps: !no_dev_dependencies,
features: &features,
all_features,
uses_default_features: !no_default_features,
};
let resolve = ops::resolve_with_previous(
registry,
workspace,
method,
Some(&resolve),
None,
&[],
true,
true,
)?;
Ok((packages, resolve))
}
struct Node<'a> {
id: PackageId,
metadata: &'a ManifestMetadata,
}
struct Graph<'a> {
graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
fn build_graph<'a>(
resolve: &'a Resolve,
packages: &'a PackageSet<'_>,
root: PackageId,
target: Option<&str>,
cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
let mut graph = Graph {
graph: petgraph::Graph::new(),
nodes: HashMap::new(),
};
let node = Node {
id: root.clone(),
metadata: packages.get_one(root)?.manifest().metadata(),
};
graph.nodes.insert(root.clone(), graph.graph.add_node(node));
let mut pending = vec![root];
while let Some(pkg_id) = pending.pop() {
let idx = graph.nodes[&pkg_id];
let pkg = packages.get_one(pkg_id)?;
for raw_dep_id in resolve.deps_not_replaced(pkg_id) {
let it = pkg
.dependencies()
.iter()
.filter(|d| d.matches_ignoring_source(raw_dep_id))
.filter(|d| {
d.platform()
.and_then(|p| target.map(|t| p.matches(t, cfgs)))
.unwrap_or(true)
});
let dep_id = match resolve.replacement(raw_dep_id) {
Some(id) => id,
None => raw_dep_id,
};
for dep in it {
let dep_idx = match graph.nodes.entry(dep_id) {
Entry::Occupied(e) => *e.get(),
Entry::Vacant(e) => {
pending.push(dep_id);
let node = Node {
id: dep_id,
metadata: packages.get_one(dep_id)?.manifest().metadata(),
};
*e.insert(graph.graph.add_node(node))
}
};
graph.graph.add_edge(idx, dep_idx, dep.kind());
}
}
}
Ok(graph)
}
fn print_tree<'a>(
package: &'a PackageId,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
prefix: Prefix,
all: bool,
) -> CargoResult<()> {
let mut visited_deps = HashSet::new();
let mut levels_continue = vec![];
let package = match graph.nodes.get(package) {
Some(package) => package,
None => bail!("package {} not found", package),
};
let node = &graph.graph[*package];
print_dependency(
node,
&graph,
format,
direction,
symbols,
&mut visited_deps,
&mut levels_continue,
prefix,
all,
);
Ok(())
}
fn print_dependency<'a>(
package: &Node<'a>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
let new = all || visited_deps.insert(package.id);
let star = if new { "" } else { " (*)" };
match prefix {
Prefix::Depth => print!("{} ", levels_continue.len()),
Prefix::Indent => {
if let Some((&last_continues, rest)) = levels_continue.split_last() {
for &continues in rest {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
let c = if last_continues {
symbols.tee
} else {
symbols.ell
};
print!("{0}{1}{1} ", c, symbols.right);
}
}
Prefix::None => (),
}
println!("{}{}", format.display(&package.id, package.metadata), star);
if !new {
return;
}
let mut normal = vec![];
let mut build = vec![];
let mut development = vec![];
for edge in graph
.graph
.edges_directed(graph.nodes[&package.id], direction)
{
let dep = match direction {
EdgeDirection::Incoming => &graph.graph[edge.source()],
EdgeDirection::Outgoing => &graph.graph[edge.target()],
};
match *edge.weight() {
Kind::Normal => normal.push(dep),
Kind::Build => build.push(dep),
Kind::Development => development.push(dep),
}
}
print_dependency_kind(
Kind::Normal,
normal,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Build,
build,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Development,
development,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
}
fn print_dependency_kind<'a>(
kind: Kind,
mut deps: Vec<&Node<'a>>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
if deps.is_empty() {
| Resolve uses Hash data types internally but we want consistent output ordering
deps.sort_by_key(|n| n.id);
let name = match kind {
Kind::Normal => None,
Kind::Build => Some("[build-dependencies]"),
Kind::Development => Some("[dev-dependencies]"),
};
if let Prefix::Indent = prefix {
if let Some(name) = name {
for &continues in &**levels_continue {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
println!("{}", name);
}
}
let mut it = deps.iter().peekable();
while let Some(dependency) = it.next() {
levels_continue.push(it.peek().is_some());
print_dependency(
dependency,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
levels_continue.pop();
}
}
| return;
}
// | conditional_block |
Liquid.go | /**
* Liquid.go - Go+SDL port V1 Robert Rasmay (2013)
* Liquid.go - Go+SDL port V2 Robert Ramsay (2021)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
*
* JS version:
* Copyright Stephen Sinclair (radarsat1) (http://www.music.mcgill.ca/~sinclair)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://www.music.mcgill.ca/~sinclair/blog
*
* Flash version:
* Copyright iunpin ( http://wonderfl.net/user/iunpin )
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://wonderfl.net/c/6eu4
*
*
* Original Java version:
* http://grantkot.com/MPM/Liquid.html
*/
package main
import (
"fmt"
"image/color"
"math/rand"
"time"
"github.com/veandco/go-sdl2/sdl"
)
/* Material
* Some of these parameters are hard to explain in one or two sentences
* (and a couple I made up) so I'll also link you to their corresponding
* Wikipedia pages. One object I like to compare fluids with is springs.
* Everybody is familiar with springs. If you pull on them they'll try to go
* back to their original shape. Some springs are stronger and some are weaker
* (stiffness and elasticity). Some springs will continue to bounce back and
* forth for a long time, while others will quickly slow down and stop (bulk
* viscosity and viscosity). If you pull hard enough the spring will break.
*
* Density - Target density for the particles. Higher density makes particles
* want to be closer together.
*
* Stiffness - How compressible the fluid is.
*
* Bulk viscosity - Kind of like damping. Another effect it will have is that
* it'll smooth out shockwaves.
*
* Elasticity - How fast the fluid will try to return to its original shape.
*
* Viscosity - Kind of like bulk viscosity only this operates on the shear
* components.
*
* Yield rate - How fast the fluid forgets its shape or melts away. Only
* affects things when elasticity is non-zero.
*
* Gravity - How much the particles will accelerate downwards.
*
* Smoothing - Smooths the velocity field. Will make things more stable. It is
* also useful to have a high smoothing value when simulating elastic
* materials.
*/
type Material struct {
m, rd, k, v, d, g float32
}
type Node struct {
m, d, gx, gy, u, v, ax, ay float32
active bool
}
// Particle Particles are value holders that manage the mathematical and physical
// attributes of an object
type Particle struct {
material *Material
x, y, u, v, cx, cy float32
px, py, gx, gy [3]float32
color color.Color
}
func MakeParticle(material *Material, x, y, u, v float32) *Particle {
return &Particle{
material: material,
x: x,
y: y,
u: u,
v: v,
color: color.RGBA{B: 255, A: 255}}
}
type MouseState struct {
pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
}
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi
n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
}
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) | () {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
p.u += phi * n.ax
p.v += phi * n.ay
}
}
mu = p.material.m * p.u
mv = p.material.m * p.v
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.u += phi * mu
n.v += phi * mv
}
}
}
}
func (l *Liquid) _step4() {
var gu, gv float32
for _, p := range l.particles {
gu = 0.0
gv = 0.0
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
gu += phi * n.u
gv += phi * n.v
}
}
p.x += gu
p.y += gv
p.u += 1.0 * (gu - p.u)
p.v += 1.0 * (gv - p.v)
// bounce off walls
if p.x < 1.0 {
p.x = 1.0 + rand.Float32()*0.01
p.u = 0.0
} else if p.x > l.width-2 {
p.x = l.width - 3 - rand.Float32()*0.01
p.u = 0.0
}
if p.y < 1.0 {
p.y = 1.0 + rand.Float32()*0.01
p.v = 0.0
} else if p.y > l.height-2 {
p.y = l.height - 3 - rand.Float32()*0.01
p.v = 0.0
}
}
}
func (l *Liquid) simulate(mouse MouseState) {
drag := false
mdx := float32(0.0)
mdy := float32(0.0)
if mouse.pressed && l.mouse.pressed {
drag = true
mdx = mouse.x - l.mouse.x
mdy = mouse.y - l.mouse.y
}
l.mouse = mouse
// Set all nodes back to empty state
l.grid.Reset()
l._step1()
l._density_summary(drag, mdx, mdy)
// Apply gravity ?
l.grid.Each(func(n *Node) {
n.ax /= n.m
n.ay /= n.m
n.ay += 0.03
})
l._step3()
// Apply pressure ?
l.grid.Each(func(n *Node) {
n.u /= n.m
n.v /= n.m
})
l._step4()
}
func Abs(val float32) float32 {
if val < 0 {
return val * -1.0
}
return val
}
func renderloop(stats *Stats, renderer *sdl.Renderer, l *Liquid, mouse *MouseState, done chan bool) {
for stats.running {
stats.frames++
// Calculate next simulation state
stats.t = time.Now()
l.simulate(*mouse)
stats.simulateSeconds += time.Since(stats.t).Nanoseconds()
// draw state
stats.t = time.Now()
_ = renderer.SetDrawColor(0, 0, 0, 255)
_ = renderer.Clear()
_ = renderer.SetDrawColor(0, 0, 255, 255)
for _, p := range l.particles {
_ = renderer.DrawLineF(4*p.x, 4*p.y, 4*(p.x-p.u), 4*(p.y-p.v))
}
renderer.Present()
stats.drawSeconds += time.Since(stats.t).Nanoseconds()
}
done <- true
}
type Stats struct {
t time.Time
frames int
simulateSeconds int64
drawSeconds int64
running bool
}
// SdlMain /**
func SdlMain(l *Liquid) {
if err := sdl.Init(sdl.INIT_EVERYTHING); err != nil {
panic(err)
}
defer sdl.Quit()
var (
window *sdl.Window
renderer *sdl.Renderer
err error
stats = Stats{running: true}
mouse = &MouseState{}
)
if window, err = sdl.CreateWindow(
"test", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,
int32(l.width)*4, int32(l.height)*4, sdl.WINDOW_SHOWN|sdl.WINDOW_ALLOW_HIGHDPI); err != nil {
panic(err)
}
defer window.Destroy()
if _, err = window.GetSurface(); err != nil {
panic(err)
}
if renderer, err = window.GetRenderer(); err != nil {
panic(err)
}
//mch := make(chan MouseState)
start := time.Now()
renderDone := make(chan bool)
go renderloop(&stats, renderer, l, mouse, renderDone)
for stats.running {
for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
switch e := event.(type) {
case *sdl.QuitEvent:
stats.running = false
break
case *sdl.MouseButtonEvent:
if e.State == sdl.PRESSED {
fmt.Println("Mouse pressed")
mouse.pressed = true
}
if e.State == sdl.RELEASED {
fmt.Println("Mouse released")
mouse.pressed = false
}
case *sdl.MouseMotionEvent:
fmt.Printf("Mouse motion = (%d, %d)\n", e.X/4, e.Y/4)
mouse.x = float32(e.X / 4)
mouse.y = float32(e.Y / 4)
}
// eventtime += time.Since(t1).Nanoseconds()
}
}
fmt.Printf("%v frames\n", stats.frames)
fmt.Printf("%v frames/s\n", float64(stats.frames)/time.Since(start).Seconds())
// fmt.Printf("%v time polling events\n", time.Duration(eventtime))
fmt.Printf("%v time simulating\n", time.Duration(stats.simulateSeconds))
fmt.Printf("%v time drawing\n", time.Duration(stats.drawSeconds))
// Wait for last render loop to finish before deferred functions above get called.
<-renderDone
}
/*
import argparse
PARSER = argparse.ArgumentParser(
prog='Liquid.py',
description='Material Point Method liquid simulation',
)
PARSER.add_argument('--width',
help='The width of the simulation area', default=100)
PARSER.add_argument('--height',
help='The height of the simulation area', default=100)
PARSER.add_argument('--columns',
help='The number of particle columns', default=50)
PARSER.add_argument('--rows',
help='The number of particle rows', default=80)
PARSER.add_argument('--n',
help='The number of iterations to run the simulation.',
default=200)
PARSER.add_argument('-i', '--interactive',
help='Run the simulation interactively with pygame or pyglet',
choices=['pygame', 'pyglet'])
ARGS = PARSER.parse_args()
*/
func main() {
liquid := MakeLiquid(100, 100, 100, 100)
SdlMain(liquid)
}
| _step3 | identifier_name |
Liquid.go | /**
* Liquid.go - Go+SDL port V1 Robert Rasmay (2013)
* Liquid.go - Go+SDL port V2 Robert Ramsay (2021)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
*
* JS version:
* Copyright Stephen Sinclair (radarsat1) (http://www.music.mcgill.ca/~sinclair)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://www.music.mcgill.ca/~sinclair/blog
*
* Flash version:
* Copyright iunpin ( http://wonderfl.net/user/iunpin )
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://wonderfl.net/c/6eu4
*
*
* Original Java version:
* http://grantkot.com/MPM/Liquid.html
*/
package main
import (
"fmt"
"image/color"
"math/rand"
"time"
"github.com/veandco/go-sdl2/sdl"
)
/* Material
* Some of these parameters are hard to explain in one or two sentences
* (and a couple I made up) so I'll also link you to their corresponding
* Wikipedia pages. One object I like to compare fluids with is springs.
* Everybody is familiar with springs. If you pull on them they'll try to go
* back to their original shape. Some springs are stronger and some are weaker
* (stiffness and elasticity). Some springs will continue to bounce back and
* forth for a long time, while others will quickly slow down and stop (bulk
* viscosity and viscosity). If you pull hard enough the spring will break.
*
* Density - Target density for the particles. Higher density makes particles
* want to be closer together.
*
* Stiffness - How compressible the fluid is.
*
* Bulk viscosity - Kind of like damping. Another effect it will have is that
* it'll smooth out shockwaves.
*
* Elasticity - How fast the fluid will try to return to its original shape.
*
* Viscosity - Kind of like bulk viscosity only this operates on the shear
* components.
*
* Yield rate - How fast the fluid forgets its shape or melts away. Only
* affects things when elasticity is non-zero.
*
* Gravity - How much the particles will accelerate downwards.
*
* Smoothing - Smooths the velocity field. Will make things more stable. It is
* also useful to have a high smoothing value when simulating elastic
* materials.
*/
type Material struct {
m, rd, k, v, d, g float32
}
type Node struct {
m, d, gx, gy, u, v, ax, ay float32
active bool
}
// Particle Particles are value holders that manage the mathematical and physical
// attributes of an object
type Particle struct {
material *Material
x, y, u, v, cx, cy float32
px, py, gx, gy [3]float32
color color.Color
}
func MakeParticle(material *Material, x, y, u, v float32) *Particle {
return &Particle{
material: material,
x: x,
y: y,
u: u,
v: v,
color: color.RGBA{B: 255, A: 255}}
}
type MouseState struct {
pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
}
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi | n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
}
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) _step3() {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
p.u += phi * n.ax
p.v += phi * n.ay
}
}
mu = p.material.m * p.u
mv = p.material.m * p.v
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.u += phi * mu
n.v += phi * mv
}
}
}
}
func (l *Liquid) _step4() {
var gu, gv float32
for _, p := range l.particles {
gu = 0.0
gv = 0.0
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
gu += phi * n.u
gv += phi * n.v
}
}
p.x += gu
p.y += gv
p.u += 1.0 * (gu - p.u)
p.v += 1.0 * (gv - p.v)
// bounce off walls
if p.x < 1.0 {
p.x = 1.0 + rand.Float32()*0.01
p.u = 0.0
} else if p.x > l.width-2 {
p.x = l.width - 3 - rand.Float32()*0.01
p.u = 0.0
}
if p.y < 1.0 {
p.y = 1.0 + rand.Float32()*0.01
p.v = 0.0
} else if p.y > l.height-2 {
p.y = l.height - 3 - rand.Float32()*0.01
p.v = 0.0
}
}
}
func (l *Liquid) simulate(mouse MouseState) {
drag := false
mdx := float32(0.0)
mdy := float32(0.0)
if mouse.pressed && l.mouse.pressed {
drag = true
mdx = mouse.x - l.mouse.x
mdy = mouse.y - l.mouse.y
}
l.mouse = mouse
// Set all nodes back to empty state
l.grid.Reset()
l._step1()
l._density_summary(drag, mdx, mdy)
// Apply gravity ?
l.grid.Each(func(n *Node) {
n.ax /= n.m
n.ay /= n.m
n.ay += 0.03
})
l._step3()
// Apply pressure ?
l.grid.Each(func(n *Node) {
n.u /= n.m
n.v /= n.m
})
l._step4()
}
func Abs(val float32) float32 {
if val < 0 {
return val * -1.0
}
return val
}
func renderloop(stats *Stats, renderer *sdl.Renderer, l *Liquid, mouse *MouseState, done chan bool) {
for stats.running {
stats.frames++
// Calculate next simulation state
stats.t = time.Now()
l.simulate(*mouse)
stats.simulateSeconds += time.Since(stats.t).Nanoseconds()
// draw state
stats.t = time.Now()
_ = renderer.SetDrawColor(0, 0, 0, 255)
_ = renderer.Clear()
_ = renderer.SetDrawColor(0, 0, 255, 255)
for _, p := range l.particles {
_ = renderer.DrawLineF(4*p.x, 4*p.y, 4*(p.x-p.u), 4*(p.y-p.v))
}
renderer.Present()
stats.drawSeconds += time.Since(stats.t).Nanoseconds()
}
done <- true
}
type Stats struct {
t time.Time
frames int
simulateSeconds int64
drawSeconds int64
running bool
}
// SdlMain /**
func SdlMain(l *Liquid) {
if err := sdl.Init(sdl.INIT_EVERYTHING); err != nil {
panic(err)
}
defer sdl.Quit()
var (
window *sdl.Window
renderer *sdl.Renderer
err error
stats = Stats{running: true}
mouse = &MouseState{}
)
if window, err = sdl.CreateWindow(
"test", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,
int32(l.width)*4, int32(l.height)*4, sdl.WINDOW_SHOWN|sdl.WINDOW_ALLOW_HIGHDPI); err != nil {
panic(err)
}
defer window.Destroy()
if _, err = window.GetSurface(); err != nil {
panic(err)
}
if renderer, err = window.GetRenderer(); err != nil {
panic(err)
}
//mch := make(chan MouseState)
start := time.Now()
renderDone := make(chan bool)
go renderloop(&stats, renderer, l, mouse, renderDone)
for stats.running {
for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
switch e := event.(type) {
case *sdl.QuitEvent:
stats.running = false
break
case *sdl.MouseButtonEvent:
if e.State == sdl.PRESSED {
fmt.Println("Mouse pressed")
mouse.pressed = true
}
if e.State == sdl.RELEASED {
fmt.Println("Mouse released")
mouse.pressed = false
}
case *sdl.MouseMotionEvent:
fmt.Printf("Mouse motion = (%d, %d)\n", e.X/4, e.Y/4)
mouse.x = float32(e.X / 4)
mouse.y = float32(e.Y / 4)
}
// eventtime += time.Since(t1).Nanoseconds()
}
}
fmt.Printf("%v frames\n", stats.frames)
fmt.Printf("%v frames/s\n", float64(stats.frames)/time.Since(start).Seconds())
// fmt.Printf("%v time polling events\n", time.Duration(eventtime))
fmt.Printf("%v time simulating\n", time.Duration(stats.simulateSeconds))
fmt.Printf("%v time drawing\n", time.Duration(stats.drawSeconds))
// Wait for last render loop to finish before deferred functions above get called.
<-renderDone
}
/*
import argparse
PARSER = argparse.ArgumentParser(
prog='Liquid.py',
description='Material Point Method liquid simulation',
)
PARSER.add_argument('--width',
help='The width of the simulation area', default=100)
PARSER.add_argument('--height',
help='The height of the simulation area', default=100)
PARSER.add_argument('--columns',
help='The number of particle columns', default=50)
PARSER.add_argument('--rows',
help='The number of particle rows', default=80)
PARSER.add_argument('--n',
help='The number of iterations to run the simulation.',
default=200)
PARSER.add_argument('-i', '--interactive',
help='Run the simulation interactively with pygame or pyglet',
choices=['pygame', 'pyglet'])
ARGS = PARSER.parse_args()
*/
func main() {
liquid := MakeLiquid(100, 100, 100, 100)
SdlMain(liquid)
} | random_line_split | |
Liquid.go | /**
* Liquid.go - Go+SDL port V1 Robert Rasmay (2013)
* Liquid.go - Go+SDL port V2 Robert Ramsay (2021)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
*
* JS version:
* Copyright Stephen Sinclair (radarsat1) (http://www.music.mcgill.ca/~sinclair)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://www.music.mcgill.ca/~sinclair/blog
*
* Flash version:
* Copyright iunpin ( http://wonderfl.net/user/iunpin )
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://wonderfl.net/c/6eu4
*
*
* Original Java version:
* http://grantkot.com/MPM/Liquid.html
*/
package main
import (
"fmt"
"image/color"
"math/rand"
"time"
"github.com/veandco/go-sdl2/sdl"
)
/* Material
* Some of these parameters are hard to explain in one or two sentences
* (and a couple I made up) so I'll also link you to their corresponding
* Wikipedia pages. One object I like to compare fluids with is springs.
* Everybody is familiar with springs. If you pull on them they'll try to go
* back to their original shape. Some springs are stronger and some are weaker
* (stiffness and elasticity). Some springs will continue to bounce back and
* forth for a long time, while others will quickly slow down and stop (bulk
* viscosity and viscosity). If you pull hard enough the spring will break.
*
* Density - Target density for the particles. Higher density makes particles
* want to be closer together.
*
* Stiffness - How compressible the fluid is.
*
* Bulk viscosity - Kind of like damping. Another effect it will have is that
* it'll smooth out shockwaves.
*
* Elasticity - How fast the fluid will try to return to its original shape.
*
* Viscosity - Kind of like bulk viscosity only this operates on the shear
* components.
*
* Yield rate - How fast the fluid forgets its shape or melts away. Only
* affects things when elasticity is non-zero.
*
* Gravity - How much the particles will accelerate downwards.
*
* Smoothing - Smooths the velocity field. Will make things more stable. It is
* also useful to have a high smoothing value when simulating elastic
* materials.
*/
type Material struct {
m, rd, k, v, d, g float32
}
type Node struct {
m, d, gx, gy, u, v, ax, ay float32
active bool
}
// Particle Particles are value holders that manage the mathematical and physical
// attributes of an object
type Particle struct {
material *Material
x, y, u, v, cx, cy float32
px, py, gx, gy [3]float32
color color.Color
}
func MakeParticle(material *Material, x, y, u, v float32) *Particle {
return &Particle{
material: material,
x: x,
y: y,
u: u,
v: v,
color: color.RGBA{B: 255, A: 255}}
}
type MouseState struct {
pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
}
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi
n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag |
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) _step3() {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
p.u += phi * n.ax
p.v += phi * n.ay
}
}
mu = p.material.m * p.u
mv = p.material.m * p.v
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.u += phi * mu
n.v += phi * mv
}
}
}
}
func (l *Liquid) _step4() {
var gu, gv float32
for _, p := range l.particles {
gu = 0.0
gv = 0.0
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
gu += phi * n.u
gv += phi * n.v
}
}
p.x += gu
p.y += gv
p.u += 1.0 * (gu - p.u)
p.v += 1.0 * (gv - p.v)
// bounce off walls
if p.x < 1.0 {
p.x = 1.0 + rand.Float32()*0.01
p.u = 0.0
} else if p.x > l.width-2 {
p.x = l.width - 3 - rand.Float32()*0.01
p.u = 0.0
}
if p.y < 1.0 {
p.y = 1.0 + rand.Float32()*0.01
p.v = 0.0
} else if p.y > l.height-2 {
p.y = l.height - 3 - rand.Float32()*0.01
p.v = 0.0
}
}
}
func (l *Liquid) simulate(mouse MouseState) {
drag := false
mdx := float32(0.0)
mdy := float32(0.0)
if mouse.pressed && l.mouse.pressed {
drag = true
mdx = mouse.x - l.mouse.x
mdy = mouse.y - l.mouse.y
}
l.mouse = mouse
// Set all nodes back to empty state
l.grid.Reset()
l._step1()
l._density_summary(drag, mdx, mdy)
// Apply gravity ?
l.grid.Each(func(n *Node) {
n.ax /= n.m
n.ay /= n.m
n.ay += 0.03
})
l._step3()
// Apply pressure ?
l.grid.Each(func(n *Node) {
n.u /= n.m
n.v /= n.m
})
l._step4()
}
func Abs(val float32) float32 {
if val < 0 {
return val * -1.0
}
return val
}
func renderloop(stats *Stats, renderer *sdl.Renderer, l *Liquid, mouse *MouseState, done chan bool) {
for stats.running {
stats.frames++
// Calculate next simulation state
stats.t = time.Now()
l.simulate(*mouse)
stats.simulateSeconds += time.Since(stats.t).Nanoseconds()
// draw state
stats.t = time.Now()
_ = renderer.SetDrawColor(0, 0, 0, 255)
_ = renderer.Clear()
_ = renderer.SetDrawColor(0, 0, 255, 255)
for _, p := range l.particles {
_ = renderer.DrawLineF(4*p.x, 4*p.y, 4*(p.x-p.u), 4*(p.y-p.v))
}
renderer.Present()
stats.drawSeconds += time.Since(stats.t).Nanoseconds()
}
done <- true
}
type Stats struct {
t time.Time
frames int
simulateSeconds int64
drawSeconds int64
running bool
}
// SdlMain /**
func SdlMain(l *Liquid) {
if err := sdl.Init(sdl.INIT_EVERYTHING); err != nil {
panic(err)
}
defer sdl.Quit()
var (
window *sdl.Window
renderer *sdl.Renderer
err error
stats = Stats{running: true}
mouse = &MouseState{}
)
if window, err = sdl.CreateWindow(
"test", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,
int32(l.width)*4, int32(l.height)*4, sdl.WINDOW_SHOWN|sdl.WINDOW_ALLOW_HIGHDPI); err != nil {
panic(err)
}
defer window.Destroy()
if _, err = window.GetSurface(); err != nil {
panic(err)
}
if renderer, err = window.GetRenderer(); err != nil {
panic(err)
}
//mch := make(chan MouseState)
start := time.Now()
renderDone := make(chan bool)
go renderloop(&stats, renderer, l, mouse, renderDone)
for stats.running {
for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
switch e := event.(type) {
case *sdl.QuitEvent:
stats.running = false
break
case *sdl.MouseButtonEvent:
if e.State == sdl.PRESSED {
fmt.Println("Mouse pressed")
mouse.pressed = true
}
if e.State == sdl.RELEASED {
fmt.Println("Mouse released")
mouse.pressed = false
}
case *sdl.MouseMotionEvent:
fmt.Printf("Mouse motion = (%d, %d)\n", e.X/4, e.Y/4)
mouse.x = float32(e.X / 4)
mouse.y = float32(e.Y / 4)
}
// eventtime += time.Since(t1).Nanoseconds()
}
}
fmt.Printf("%v frames\n", stats.frames)
fmt.Printf("%v frames/s\n", float64(stats.frames)/time.Since(start).Seconds())
// fmt.Printf("%v time polling events\n", time.Duration(eventtime))
fmt.Printf("%v time simulating\n", time.Duration(stats.simulateSeconds))
fmt.Printf("%v time drawing\n", time.Duration(stats.drawSeconds))
// Wait for last render loop to finish before deferred functions above get called.
<-renderDone
}
/*
import argparse
PARSER = argparse.ArgumentParser(
prog='Liquid.py',
description='Material Point Method liquid simulation',
)
PARSER.add_argument('--width',
help='The width of the simulation area', default=100)
PARSER.add_argument('--height',
help='The height of the simulation area', default=100)
PARSER.add_argument('--columns',
help='The number of particle columns', default=50)
PARSER.add_argument('--rows',
help='The number of particle rows', default=80)
PARSER.add_argument('--n',
help='The number of iterations to run the simulation.',
default=200)
PARSER.add_argument('-i', '--interactive',
help='Run the simulation interactively with pygame or pyglet',
choices=['pygame', 'pyglet'])
ARGS = PARSER.parse_args()
*/
func main() {
liquid := MakeLiquid(100, 100, 100, 100)
SdlMain(liquid)
}
| {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
} | conditional_block |
Liquid.go | /**
* Liquid.go - Go+SDL port V1 Robert Rasmay (2013)
* Liquid.go - Go+SDL port V2 Robert Ramsay (2021)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
*
* JS version:
* Copyright Stephen Sinclair (radarsat1) (http://www.music.mcgill.ca/~sinclair)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://www.music.mcgill.ca/~sinclair/blog
*
* Flash version:
* Copyright iunpin ( http://wonderfl.net/user/iunpin )
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://wonderfl.net/c/6eu4
*
*
* Original Java version:
* http://grantkot.com/MPM/Liquid.html
*/
package main
import (
"fmt"
"image/color"
"math/rand"
"time"
"github.com/veandco/go-sdl2/sdl"
)
/* Material
* Some of these parameters are hard to explain in one or two sentences
* (and a couple I made up) so I'll also link you to their corresponding
* Wikipedia pages. One object I like to compare fluids with is springs.
* Everybody is familiar with springs. If you pull on them they'll try to go
* back to their original shape. Some springs are stronger and some are weaker
* (stiffness and elasticity). Some springs will continue to bounce back and
* forth for a long time, while others will quickly slow down and stop (bulk
* viscosity and viscosity). If you pull hard enough the spring will break.
*
* Density - Target density for the particles. Higher density makes particles
* want to be closer together.
*
* Stiffness - How compressible the fluid is.
*
* Bulk viscosity - Kind of like damping. Another effect it will have is that
* it'll smooth out shockwaves.
*
* Elasticity - How fast the fluid will try to return to its original shape.
*
* Viscosity - Kind of like bulk viscosity only this operates on the shear
* components.
*
* Yield rate - How fast the fluid forgets its shape or melts away. Only
* affects things when elasticity is non-zero.
*
* Gravity - How much the particles will accelerate downwards.
*
* Smoothing - Smooths the velocity field. Will make things more stable. It is
* also useful to have a high smoothing value when simulating elastic
* materials.
*/
type Material struct {
m, rd, k, v, d, g float32
}
type Node struct {
m, d, gx, gy, u, v, ax, ay float32
active bool
}
// Particle Particles are value holders that manage the mathematical and physical
// attributes of an object
type Particle struct {
material *Material
x, y, u, v, cx, cy float32
px, py, gx, gy [3]float32
color color.Color
}
func MakeParticle(material *Material, x, y, u, v float32) *Particle {
return &Particle{
material: material,
x: x,
y: y,
u: u,
v: v,
color: color.RGBA{B: 255, A: 255}}
}
type MouseState struct {
pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) |
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi
n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
}
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) _step3() {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
p.u += phi * n.ax
p.v += phi * n.ay
}
}
mu = p.material.m * p.u
mv = p.material.m * p.v
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.u += phi * mu
n.v += phi * mv
}
}
}
}
func (l *Liquid) _step4() {
var gu, gv float32
for _, p := range l.particles {
gu = 0.0
gv = 0.0
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
gu += phi * n.u
gv += phi * n.v
}
}
p.x += gu
p.y += gv
p.u += 1.0 * (gu - p.u)
p.v += 1.0 * (gv - p.v)
// bounce off walls
if p.x < 1.0 {
p.x = 1.0 + rand.Float32()*0.01
p.u = 0.0
} else if p.x > l.width-2 {
p.x = l.width - 3 - rand.Float32()*0.01
p.u = 0.0
}
if p.y < 1.0 {
p.y = 1.0 + rand.Float32()*0.01
p.v = 0.0
} else if p.y > l.height-2 {
p.y = l.height - 3 - rand.Float32()*0.01
p.v = 0.0
}
}
}
func (l *Liquid) simulate(mouse MouseState) {
drag := false
mdx := float32(0.0)
mdy := float32(0.0)
if mouse.pressed && l.mouse.pressed {
drag = true
mdx = mouse.x - l.mouse.x
mdy = mouse.y - l.mouse.y
}
l.mouse = mouse
// Set all nodes back to empty state
l.grid.Reset()
l._step1()
l._density_summary(drag, mdx, mdy)
// Apply gravity ?
l.grid.Each(func(n *Node) {
n.ax /= n.m
n.ay /= n.m
n.ay += 0.03
})
l._step3()
// Apply pressure ?
l.grid.Each(func(n *Node) {
n.u /= n.m
n.v /= n.m
})
l._step4()
}
func Abs(val float32) float32 {
if val < 0 {
return val * -1.0
}
return val
}
func renderloop(stats *Stats, renderer *sdl.Renderer, l *Liquid, mouse *MouseState, done chan bool) {
for stats.running {
stats.frames++
// Calculate next simulation state
stats.t = time.Now()
l.simulate(*mouse)
stats.simulateSeconds += time.Since(stats.t).Nanoseconds()
// draw state
stats.t = time.Now()
_ = renderer.SetDrawColor(0, 0, 0, 255)
_ = renderer.Clear()
_ = renderer.SetDrawColor(0, 0, 255, 255)
for _, p := range l.particles {
_ = renderer.DrawLineF(4*p.x, 4*p.y, 4*(p.x-p.u), 4*(p.y-p.v))
}
renderer.Present()
stats.drawSeconds += time.Since(stats.t).Nanoseconds()
}
done <- true
}
type Stats struct {
t time.Time
frames int
simulateSeconds int64
drawSeconds int64
running bool
}
// SdlMain /**
func SdlMain(l *Liquid) {
if err := sdl.Init(sdl.INIT_EVERYTHING); err != nil {
panic(err)
}
defer sdl.Quit()
var (
window *sdl.Window
renderer *sdl.Renderer
err error
stats = Stats{running: true}
mouse = &MouseState{}
)
if window, err = sdl.CreateWindow(
"test", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,
int32(l.width)*4, int32(l.height)*4, sdl.WINDOW_SHOWN|sdl.WINDOW_ALLOW_HIGHDPI); err != nil {
panic(err)
}
defer window.Destroy()
if _, err = window.GetSurface(); err != nil {
panic(err)
}
if renderer, err = window.GetRenderer(); err != nil {
panic(err)
}
//mch := make(chan MouseState)
start := time.Now()
renderDone := make(chan bool)
go renderloop(&stats, renderer, l, mouse, renderDone)
for stats.running {
for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
switch e := event.(type) {
case *sdl.QuitEvent:
stats.running = false
break
case *sdl.MouseButtonEvent:
if e.State == sdl.PRESSED {
fmt.Println("Mouse pressed")
mouse.pressed = true
}
if e.State == sdl.RELEASED {
fmt.Println("Mouse released")
mouse.pressed = false
}
case *sdl.MouseMotionEvent:
fmt.Printf("Mouse motion = (%d, %d)\n", e.X/4, e.Y/4)
mouse.x = float32(e.X / 4)
mouse.y = float32(e.Y / 4)
}
// eventtime += time.Since(t1).Nanoseconds()
}
}
fmt.Printf("%v frames\n", stats.frames)
fmt.Printf("%v frames/s\n", float64(stats.frames)/time.Since(start).Seconds())
// fmt.Printf("%v time polling events\n", time.Duration(eventtime))
fmt.Printf("%v time simulating\n", time.Duration(stats.simulateSeconds))
fmt.Printf("%v time drawing\n", time.Duration(stats.drawSeconds))
// Wait for last render loop to finish before deferred functions above get called.
<-renderDone
}
/*
import argparse
PARSER = argparse.ArgumentParser(
prog='Liquid.py',
description='Material Point Method liquid simulation',
)
PARSER.add_argument('--width',
help='The width of the simulation area', default=100)
PARSER.add_argument('--height',
help='The height of the simulation area', default=100)
PARSER.add_argument('--columns',
help='The number of particle columns', default=50)
PARSER.add_argument('--rows',
help='The number of particle rows', default=80)
PARSER.add_argument('--n',
help='The number of iterations to run the simulation.',
default=200)
PARSER.add_argument('-i', '--interactive',
help='Run the simulation interactively with pygame or pyglet',
choices=['pygame', 'pyglet'])
ARGS = PARSER.parse_args()
*/
func main() {
liquid := MakeLiquid(100, 100, 100, 100)
SdlMain(liquid)
}
| {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
} | identifier_body |
arena.rs | // Copyright 2019 Fullstop000 <fullstop1005@gmail.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::util::slice::Slice;
use std::mem;
use std::slice;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use super::skiplist::{Node, MAX_HEIGHT, MAX_NODE_SIZE};
pub trait Arena {
/// Allocate memory for a node by given height.
/// This method allocates a Node size + height * ptr ( u64 ) memory area.
// TODO: define the potential errors and return Result<Error, *mut Node> instead of raw pointer
fn alloc_node(&self, height: usize) -> *mut Node;
/// Copy bytes data of the Slice into arena directly and return the starting offset
fn alloc_bytes(&self, data: &Slice) -> u32;
/// Get in memory arena bytes as Slice from start point to start + offset
fn get(&self, offset: usize, count: usize) -> Slice;
/// Return bool to indicate whether there is enough room for given size
/// If false, use a new arena for allocating and flush the old.
fn has_room_for(&self, size: usize) -> bool;
/// Return the size of memory that allocated
fn size(&self) -> usize;
/// Return the size of memory that has been allocated.
fn memory_used(&self) -> usize;
}
// TODO: implement CommonArena: https://github.com/google/leveldb/blob/master/util/arena.cc
/// AggressiveArena is a memory pool for allocating and handling Node memory dynamically.
/// Unlike CommonArena, this simplify the memory handling by aggressively pre-allocating
/// the total fixed memory so it's caller's responsibility to ensure the room before allocating.
pub struct AggressiveArena {
// indicates that how many memories has been allocated actually
pub(super) offset: AtomicUsize,
pub(super) mem: Vec<u8>,
}
impl AggressiveArena {
/// Create an AggressiveArena with given cap.
/// This function will allocate a cap size memory block directly for further usage
pub fn new(cap: usize) -> AggressiveArena {
AggressiveArena {
offset: AtomicUsize::new(0),
mem: Vec::<u8>::with_capacity(cap),
}
}
/// For test
pub(super) fn display_all(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.mem.capacity());
unsafe {
let ptr = self.mem.as_ptr();
for i in 0..self.offset.load(Ordering::Acquire) {
let p = ptr.add(i) as *mut u8;
result.push(*p)
}
}
result
}
}
impl Arena for AggressiveArena {
fn alloc_node(&self, height: usize) -> *mut Node {
let ptr_size = mem::size_of::<*mut u8>();
// truncate node size to reduce waste
let used_node_size = MAX_NODE_SIZE - (MAX_HEIGHT - height) * ptr_size;
let n = self.offset.fetch_add(used_node_size, Ordering::SeqCst);
unsafe {
let node_ptr = self.mem.as_ptr().add(n) as *mut u8;
// get the actually to-be-used memory of node and spilt it into 2 parts:
// node part: the Node struct
// nexts part: the pre allocated memory used by elements of next_nodes
let (node_part, nexts_part) = slice::from_raw_parts_mut(node_ptr, used_node_size)
.split_at_mut(used_node_size - height * ptr_size);
#[allow(clippy::cast_ptr_alignment)]
let node = node_part.as_mut_ptr() as *mut Node;
// FIXME: Box::from_raw can be unsafe when releasing memory
#[allow(clippy::cast_ptr_alignment)]
let next_nodes = Box::from_raw(slice::from_raw_parts_mut(
nexts_part.as_mut_ptr() as *mut AtomicPtr<Node>,
height,
));
(*node).height = height;
(*node).next_nodes = next_nodes;
node
}
}
fn alloc_bytes(&self, data: &Slice) -> u32 {
let start = self.offset.fetch_add(data.size(), Ordering::SeqCst);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *mut u8;
for (i, b) in data.to_slice().iter().enumerate() {
let p = ptr.add(i) as *mut u8;
(*p) = *b;
}
}
start as u32
}
fn get(&self, start: usize, count: usize) -> Slice {
let o = self.offset.load(Ordering::Acquire);
invarint!(
start + count <= o,
"[arena] try to get data from [{}] to [{}] but max count is [{}]",
start,
start + count,
o,
);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *const u8;
Slice::new(ptr, count)
}
}
#[inline]
fn has_room_for(&self, size: usize) -> bool {
self.size() - self.memory_used() >= size
}
#[inline]
fn size(&self) -> usize {
self.mem.capacity()
}
#[inline]
fn memory_used(&self) -> usize {
self.offset.load(Ordering::Acquire)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use std::thread;
fn new_default_arena() -> AggressiveArena {
AggressiveArena::new(64 << 20)
}
#[test]
fn test_new_arena() {
let cap = 200;
let arena = AggressiveArena::new(cap);
assert_eq!(arena.memory_used(), 0);
assert_eq!(arena.size(), cap);
}
#[test]
fn test_alloc_single_node() {
let arena = new_default_arena();
let node = arena.alloc_node(MAX_HEIGHT);
unsafe {
assert_eq!((*node).height, MAX_HEIGHT);
assert_eq!((*node).next_nodes.len(), MAX_HEIGHT);
assert_eq!((*node).key_size, 0);
assert_eq!((*node).key_offset, 0);
assert_eq!((*node).value_size, 0);
assert_eq!((*node).value_offset, 0);
// dereference and assigning should work
let u8_ptr = node as *mut u8;
(*node).key_offset = 1;
let key_offset_ptr = u8_ptr.add(0);
assert_eq!(*key_offset_ptr, 1);
(*node).key_size = 2;
let key_size_ptr = u8_ptr.add(8);
assert_eq!(*key_size_ptr, 2);
(*node).value_offset = 3;
let value_offset_ptr = u8_ptr.add(16);
assert_eq!(*value_offset_ptr, 3);
(*node).value_size = 4;
let value_size_ptr = u8_ptr.add(24);
assert_eq!(*value_size_ptr, 4);
// the value of data ptr in 'next_nodes' slice must be the beginning pointer of first element
let next_nodes_ptr = u8_ptr
.add(mem::size_of::<Node>() - mem::size_of::<Box<[AtomicPtr<Node>]>>())
as *mut u64;
let first_element_ptr = u8_ptr.add(mem::size_of::<Node>());
assert_eq!(
"0x".to_owned() + &format!("{:x}", *next_nodes_ptr),
format!("{:?}", first_element_ptr)
);
}
}
#[test]
fn test_alloc_nodes() {
let arena = new_default_arena();
let node1 = arena.alloc_node(4);
let node2 = arena.alloc_node(MAX_HEIGHT);
unsafe {
// node1 and node2 should be neighbor in memory
let struct_tail = node1.add(1) as *mut *mut Node;
let nexts_tail = struct_tail.add(4);
assert_eq!(nexts_tail as *mut Node, node2);
};
}
#[test]
fn test_simple_alloc_bytes() {
let mut arena = AggressiveArena::new(100);
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let offset = arena.alloc_bytes(&Slice::from(&input));
unsafe {
let ptr = arena.mem.as_mut_ptr().add(offset as usize) as *mut u8;
for (i, b) in input.clone().iter().enumerate() {
let p = ptr.add(i);
assert_eq!(*p, *b);
}
}
}
#[test]
fn test_alloc_bytes_concurrency() {
let arena = Arc::new(AggressiveArena::new(500));
let results = Arc::new(Mutex::new(vec![]));
let mut tests = vec![vec![1u8, 2, 3, 4, 5], vec![6u8, 7, 8, 9], vec![10u8, 11]];
for t in tests
.drain(..)
.map(|test| {
let cloned_arena = arena.clone();
let cloned_results = results.clone();
thread::spawn(move || {
let offset = cloned_arena.alloc_bytes(&Slice::from(test.as_slice())) as usize;
// start position in arena, origin test data
cloned_results.lock().unwrap().push((offset, test));
})
})
.collect::<Vec<_>>()
{
t.join().unwrap();
}
let mem_ptr = arena.mem.as_ptr();
for (offset, expect) in results.lock().unwrap().drain(..) {
// compare result and expect byte by byte
unsafe {
let ptr = mem_ptr.add(offset) as *mut u8;
for (i, b) in expect.iter().enumerate() {
let inmem_b = ptr.add(i);
assert_eq!(*inmem_b, *b);
}
}
}
}
#[test]
fn test_get() {
let arena = new_default_arena();
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let start = arena.alloc_bytes(&Slice::from(input.as_slice()));
let result = arena.get(start as usize, 5);
for (b1, b2) in input.iter().zip(result.to_slice()) {
assert_eq!(*b1, *b2);
}
}
#[test]
fn test_memory_used() {
let arena = new_default_arena(); |
#[test]
fn test_has_room_for() {
let arena = AggressiveArena::new(1);
assert_eq!(arena.has_room_for(100), false);
}
} | arena.alloc_node(MAX_HEIGHT); // 152
arena.alloc_node(1); // 64
arena.alloc_bytes(&Slice::from(vec![1u8, 2u8, 3u8, 4u8].as_slice())); // 4
assert_eq!(152 + 64 + 4, arena.memory_used())
} | random_line_split |
arena.rs | // Copyright 2019 Fullstop000 <fullstop1005@gmail.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::util::slice::Slice;
use std::mem;
use std::slice;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use super::skiplist::{Node, MAX_HEIGHT, MAX_NODE_SIZE};
pub trait Arena {
/// Allocate memory for a node by given height.
/// This method allocates a Node size + height * ptr ( u64 ) memory area.
// TODO: define the potential errors and return Result<Error, *mut Node> instead of raw pointer
fn alloc_node(&self, height: usize) -> *mut Node;
/// Copy bytes data of the Slice into arena directly and return the starting offset
fn alloc_bytes(&self, data: &Slice) -> u32;
/// Get in memory arena bytes as Slice from start point to start + offset
fn get(&self, offset: usize, count: usize) -> Slice;
/// Return bool to indicate whether there is enough room for given size
/// If false, use a new arena for allocating and flush the old.
fn has_room_for(&self, size: usize) -> bool;
/// Return the size of memory that allocated
fn size(&self) -> usize;
/// Return the size of memory that has been allocated.
fn memory_used(&self) -> usize;
}
// TODO: implement CommonArena: https://github.com/google/leveldb/blob/master/util/arena.cc
/// AggressiveArena is a memory pool for allocating and handling Node memory dynamically.
/// Unlike CommonArena, this simplify the memory handling by aggressively pre-allocating
/// the total fixed memory so it's caller's responsibility to ensure the room before allocating.
pub struct AggressiveArena {
// indicates that how many memories has been allocated actually
pub(super) offset: AtomicUsize,
pub(super) mem: Vec<u8>,
}
impl AggressiveArena {
/// Create an AggressiveArena with given cap.
/// This function will allocate a cap size memory block directly for further usage
pub fn new(cap: usize) -> AggressiveArena {
AggressiveArena {
offset: AtomicUsize::new(0),
mem: Vec::<u8>::with_capacity(cap),
}
}
/// For test
pub(super) fn display_all(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.mem.capacity());
unsafe {
let ptr = self.mem.as_ptr();
for i in 0..self.offset.load(Ordering::Acquire) {
let p = ptr.add(i) as *mut u8;
result.push(*p)
}
}
result
}
}
impl Arena for AggressiveArena {
fn alloc_node(&self, height: usize) -> *mut Node {
let ptr_size = mem::size_of::<*mut u8>();
// truncate node size to reduce waste
let used_node_size = MAX_NODE_SIZE - (MAX_HEIGHT - height) * ptr_size;
let n = self.offset.fetch_add(used_node_size, Ordering::SeqCst);
unsafe {
let node_ptr = self.mem.as_ptr().add(n) as *mut u8;
// get the actually to-be-used memory of node and spilt it into 2 parts:
// node part: the Node struct
// nexts part: the pre allocated memory used by elements of next_nodes
let (node_part, nexts_part) = slice::from_raw_parts_mut(node_ptr, used_node_size)
.split_at_mut(used_node_size - height * ptr_size);
#[allow(clippy::cast_ptr_alignment)]
let node = node_part.as_mut_ptr() as *mut Node;
// FIXME: Box::from_raw can be unsafe when releasing memory
#[allow(clippy::cast_ptr_alignment)]
let next_nodes = Box::from_raw(slice::from_raw_parts_mut(
nexts_part.as_mut_ptr() as *mut AtomicPtr<Node>,
height,
));
(*node).height = height;
(*node).next_nodes = next_nodes;
node
}
}
fn alloc_bytes(&self, data: &Slice) -> u32 {
let start = self.offset.fetch_add(data.size(), Ordering::SeqCst);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *mut u8;
for (i, b) in data.to_slice().iter().enumerate() {
let p = ptr.add(i) as *mut u8;
(*p) = *b;
}
}
start as u32
}
fn get(&self, start: usize, count: usize) -> Slice {
let o = self.offset.load(Ordering::Acquire);
invarint!(
start + count <= o,
"[arena] try to get data from [{}] to [{}] but max count is [{}]",
start,
start + count,
o,
);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *const u8;
Slice::new(ptr, count)
}
}
#[inline]
fn has_room_for(&self, size: usize) -> bool {
self.size() - self.memory_used() >= size
}
#[inline]
fn size(&self) -> usize {
self.mem.capacity()
}
#[inline]
fn memory_used(&self) -> usize {
self.offset.load(Ordering::Acquire)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use std::thread;
fn new_default_arena() -> AggressiveArena {
AggressiveArena::new(64 << 20)
}
#[test]
fn test_new_arena() {
let cap = 200;
let arena = AggressiveArena::new(cap);
assert_eq!(arena.memory_used(), 0);
assert_eq!(arena.size(), cap);
}
#[test]
fn test_alloc_single_node() {
let arena = new_default_arena();
let node = arena.alloc_node(MAX_HEIGHT);
unsafe {
assert_eq!((*node).height, MAX_HEIGHT);
assert_eq!((*node).next_nodes.len(), MAX_HEIGHT);
assert_eq!((*node).key_size, 0);
assert_eq!((*node).key_offset, 0);
assert_eq!((*node).value_size, 0);
assert_eq!((*node).value_offset, 0);
// dereference and assigning should work
let u8_ptr = node as *mut u8;
(*node).key_offset = 1;
let key_offset_ptr = u8_ptr.add(0);
assert_eq!(*key_offset_ptr, 1);
(*node).key_size = 2;
let key_size_ptr = u8_ptr.add(8);
assert_eq!(*key_size_ptr, 2);
(*node).value_offset = 3;
let value_offset_ptr = u8_ptr.add(16);
assert_eq!(*value_offset_ptr, 3);
(*node).value_size = 4;
let value_size_ptr = u8_ptr.add(24);
assert_eq!(*value_size_ptr, 4);
// the value of data ptr in 'next_nodes' slice must be the beginning pointer of first element
let next_nodes_ptr = u8_ptr
.add(mem::size_of::<Node>() - mem::size_of::<Box<[AtomicPtr<Node>]>>())
as *mut u64;
let first_element_ptr = u8_ptr.add(mem::size_of::<Node>());
assert_eq!(
"0x".to_owned() + &format!("{:x}", *next_nodes_ptr),
format!("{:?}", first_element_ptr)
);
}
}
#[test]
fn test_alloc_nodes() {
let arena = new_default_arena();
let node1 = arena.alloc_node(4);
let node2 = arena.alloc_node(MAX_HEIGHT);
unsafe {
// node1 and node2 should be neighbor in memory
let struct_tail = node1.add(1) as *mut *mut Node;
let nexts_tail = struct_tail.add(4);
assert_eq!(nexts_tail as *mut Node, node2);
};
}
#[test]
fn test_simple_alloc_bytes() {
let mut arena = AggressiveArena::new(100);
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let offset = arena.alloc_bytes(&Slice::from(&input));
unsafe {
let ptr = arena.mem.as_mut_ptr().add(offset as usize) as *mut u8;
for (i, b) in input.clone().iter().enumerate() {
let p = ptr.add(i);
assert_eq!(*p, *b);
}
}
}
#[test]
fn | () {
let arena = Arc::new(AggressiveArena::new(500));
let results = Arc::new(Mutex::new(vec![]));
let mut tests = vec![vec![1u8, 2, 3, 4, 5], vec![6u8, 7, 8, 9], vec![10u8, 11]];
for t in tests
.drain(..)
.map(|test| {
let cloned_arena = arena.clone();
let cloned_results = results.clone();
thread::spawn(move || {
let offset = cloned_arena.alloc_bytes(&Slice::from(test.as_slice())) as usize;
// start position in arena, origin test data
cloned_results.lock().unwrap().push((offset, test));
})
})
.collect::<Vec<_>>()
{
t.join().unwrap();
}
let mem_ptr = arena.mem.as_ptr();
for (offset, expect) in results.lock().unwrap().drain(..) {
// compare result and expect byte by byte
unsafe {
let ptr = mem_ptr.add(offset) as *mut u8;
for (i, b) in expect.iter().enumerate() {
let inmem_b = ptr.add(i);
assert_eq!(*inmem_b, *b);
}
}
}
}
#[test]
fn test_get() {
let arena = new_default_arena();
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let start = arena.alloc_bytes(&Slice::from(input.as_slice()));
let result = arena.get(start as usize, 5);
for (b1, b2) in input.iter().zip(result.to_slice()) {
assert_eq!(*b1, *b2);
}
}
#[test]
fn test_memory_used() {
let arena = new_default_arena();
arena.alloc_node(MAX_HEIGHT); // 152
arena.alloc_node(1); // 64
arena.alloc_bytes(&Slice::from(vec![1u8, 2u8, 3u8, 4u8].as_slice())); // 4
assert_eq!(152 + 64 + 4, arena.memory_used())
}
#[test]
fn test_has_room_for() {
let arena = AggressiveArena::new(1);
assert_eq!(arena.has_room_for(100), false);
}
}
| test_alloc_bytes_concurrency | identifier_name |
arena.rs | // Copyright 2019 Fullstop000 <fullstop1005@gmail.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::util::slice::Slice;
use std::mem;
use std::slice;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use super::skiplist::{Node, MAX_HEIGHT, MAX_NODE_SIZE};
pub trait Arena {
/// Allocate memory for a node by given height.
/// This method allocates a Node size + height * ptr ( u64 ) memory area.
// TODO: define the potential errors and return Result<Error, *mut Node> instead of raw pointer
fn alloc_node(&self, height: usize) -> *mut Node;
/// Copy bytes data of the Slice into arena directly and return the starting offset
fn alloc_bytes(&self, data: &Slice) -> u32;
/// Get in memory arena bytes as Slice from start point to start + offset
fn get(&self, offset: usize, count: usize) -> Slice;
/// Return bool to indicate whether there is enough room for given size
/// If false, use a new arena for allocating and flush the old.
fn has_room_for(&self, size: usize) -> bool;
/// Return the size of memory that allocated
fn size(&self) -> usize;
/// Return the size of memory that has been allocated.
fn memory_used(&self) -> usize;
}
// TODO: implement CommonArena: https://github.com/google/leveldb/blob/master/util/arena.cc
/// AggressiveArena is a memory pool for allocating and handling Node memory dynamically.
/// Unlike CommonArena, this simplify the memory handling by aggressively pre-allocating
/// the total fixed memory so it's caller's responsibility to ensure the room before allocating.
pub struct AggressiveArena {
// indicates that how many memories has been allocated actually
pub(super) offset: AtomicUsize,
pub(super) mem: Vec<u8>,
}
impl AggressiveArena {
/// Create an AggressiveArena with given cap.
/// This function will allocate a cap size memory block directly for further usage
pub fn new(cap: usize) -> AggressiveArena {
AggressiveArena {
offset: AtomicUsize::new(0),
mem: Vec::<u8>::with_capacity(cap),
}
}
/// For test
pub(super) fn display_all(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.mem.capacity());
unsafe {
let ptr = self.mem.as_ptr();
for i in 0..self.offset.load(Ordering::Acquire) {
let p = ptr.add(i) as *mut u8;
result.push(*p)
}
}
result
}
}
impl Arena for AggressiveArena {
fn alloc_node(&self, height: usize) -> *mut Node |
fn alloc_bytes(&self, data: &Slice) -> u32 {
let start = self.offset.fetch_add(data.size(), Ordering::SeqCst);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *mut u8;
for (i, b) in data.to_slice().iter().enumerate() {
let p = ptr.add(i) as *mut u8;
(*p) = *b;
}
}
start as u32
}
fn get(&self, start: usize, count: usize) -> Slice {
let o = self.offset.load(Ordering::Acquire);
invarint!(
start + count <= o,
"[arena] try to get data from [{}] to [{}] but max count is [{}]",
start,
start + count,
o,
);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *const u8;
Slice::new(ptr, count)
}
}
#[inline]
fn has_room_for(&self, size: usize) -> bool {
self.size() - self.memory_used() >= size
}
#[inline]
fn size(&self) -> usize {
self.mem.capacity()
}
#[inline]
fn memory_used(&self) -> usize {
self.offset.load(Ordering::Acquire)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use std::thread;
fn new_default_arena() -> AggressiveArena {
AggressiveArena::new(64 << 20)
}
#[test]
fn test_new_arena() {
let cap = 200;
let arena = AggressiveArena::new(cap);
assert_eq!(arena.memory_used(), 0);
assert_eq!(arena.size(), cap);
}
#[test]
fn test_alloc_single_node() {
let arena = new_default_arena();
let node = arena.alloc_node(MAX_HEIGHT);
unsafe {
assert_eq!((*node).height, MAX_HEIGHT);
assert_eq!((*node).next_nodes.len(), MAX_HEIGHT);
assert_eq!((*node).key_size, 0);
assert_eq!((*node).key_offset, 0);
assert_eq!((*node).value_size, 0);
assert_eq!((*node).value_offset, 0);
// dereference and assigning should work
let u8_ptr = node as *mut u8;
(*node).key_offset = 1;
let key_offset_ptr = u8_ptr.add(0);
assert_eq!(*key_offset_ptr, 1);
(*node).key_size = 2;
let key_size_ptr = u8_ptr.add(8);
assert_eq!(*key_size_ptr, 2);
(*node).value_offset = 3;
let value_offset_ptr = u8_ptr.add(16);
assert_eq!(*value_offset_ptr, 3);
(*node).value_size = 4;
let value_size_ptr = u8_ptr.add(24);
assert_eq!(*value_size_ptr, 4);
// the value of data ptr in 'next_nodes' slice must be the beginning pointer of first element
let next_nodes_ptr = u8_ptr
.add(mem::size_of::<Node>() - mem::size_of::<Box<[AtomicPtr<Node>]>>())
as *mut u64;
let first_element_ptr = u8_ptr.add(mem::size_of::<Node>());
assert_eq!(
"0x".to_owned() + &format!("{:x}", *next_nodes_ptr),
format!("{:?}", first_element_ptr)
);
}
}
#[test]
fn test_alloc_nodes() {
let arena = new_default_arena();
let node1 = arena.alloc_node(4);
let node2 = arena.alloc_node(MAX_HEIGHT);
unsafe {
// node1 and node2 should be neighbor in memory
let struct_tail = node1.add(1) as *mut *mut Node;
let nexts_tail = struct_tail.add(4);
assert_eq!(nexts_tail as *mut Node, node2);
};
}
#[test]
fn test_simple_alloc_bytes() {
let mut arena = AggressiveArena::new(100);
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let offset = arena.alloc_bytes(&Slice::from(&input));
unsafe {
let ptr = arena.mem.as_mut_ptr().add(offset as usize) as *mut u8;
for (i, b) in input.clone().iter().enumerate() {
let p = ptr.add(i);
assert_eq!(*p, *b);
}
}
}
#[test]
fn test_alloc_bytes_concurrency() {
let arena = Arc::new(AggressiveArena::new(500));
let results = Arc::new(Mutex::new(vec![]));
let mut tests = vec![vec![1u8, 2, 3, 4, 5], vec![6u8, 7, 8, 9], vec![10u8, 11]];
for t in tests
.drain(..)
.map(|test| {
let cloned_arena = arena.clone();
let cloned_results = results.clone();
thread::spawn(move || {
let offset = cloned_arena.alloc_bytes(&Slice::from(test.as_slice())) as usize;
// start position in arena, origin test data
cloned_results.lock().unwrap().push((offset, test));
})
})
.collect::<Vec<_>>()
{
t.join().unwrap();
}
let mem_ptr = arena.mem.as_ptr();
for (offset, expect) in results.lock().unwrap().drain(..) {
// compare result and expect byte by byte
unsafe {
let ptr = mem_ptr.add(offset) as *mut u8;
for (i, b) in expect.iter().enumerate() {
let inmem_b = ptr.add(i);
assert_eq!(*inmem_b, *b);
}
}
}
}
#[test]
fn test_get() {
let arena = new_default_arena();
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let start = arena.alloc_bytes(&Slice::from(input.as_slice()));
let result = arena.get(start as usize, 5);
for (b1, b2) in input.iter().zip(result.to_slice()) {
assert_eq!(*b1, *b2);
}
}
#[test]
fn test_memory_used() {
let arena = new_default_arena();
arena.alloc_node(MAX_HEIGHT); // 152
arena.alloc_node(1); // 64
arena.alloc_bytes(&Slice::from(vec![1u8, 2u8, 3u8, 4u8].as_slice())); // 4
assert_eq!(152 + 64 + 4, arena.memory_used())
}
#[test]
fn test_has_room_for() {
let arena = AggressiveArena::new(1);
assert_eq!(arena.has_room_for(100), false);
}
}
| {
let ptr_size = mem::size_of::<*mut u8>();
// truncate node size to reduce waste
let used_node_size = MAX_NODE_SIZE - (MAX_HEIGHT - height) * ptr_size;
let n = self.offset.fetch_add(used_node_size, Ordering::SeqCst);
unsafe {
let node_ptr = self.mem.as_ptr().add(n) as *mut u8;
// get the actually to-be-used memory of node and spilt it into 2 parts:
// node part: the Node struct
// nexts part: the pre allocated memory used by elements of next_nodes
let (node_part, nexts_part) = slice::from_raw_parts_mut(node_ptr, used_node_size)
.split_at_mut(used_node_size - height * ptr_size);
#[allow(clippy::cast_ptr_alignment)]
let node = node_part.as_mut_ptr() as *mut Node;
// FIXME: Box::from_raw can be unsafe when releasing memory
#[allow(clippy::cast_ptr_alignment)]
let next_nodes = Box::from_raw(slice::from_raw_parts_mut(
nexts_part.as_mut_ptr() as *mut AtomicPtr<Node>,
height,
));
(*node).height = height;
(*node).next_nodes = next_nodes;
node
}
} | identifier_body |
goes.py | # Author: Rishabh Sharma <rishabh.sharma.gunner@gmail.com>
# This module was developed under funding provided by
# Google Summer of Code 2014
import os
from datetime import datetime
from itertools import compress
from urllib.parse import urlsplit
import astropy.units as u
from astropy.time import Time, TimeDelta
from sunpy import config
from sunpy.net.dataretriever import GenericClient
from sunpy.time import TimeRange, parse_time
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.scraper import Scraper
TIME_FORMAT = config.get("general", "time_format")
__all__ = ["XRSClient", "SUVIClient"]
class XRSClient(GenericClient):
"""
Provides access to the GOES XRS fits files archive.
Searches data hosted by the `Solar Data Analysis Center <https://umbra.nascom.nasa.gov/goes/fits/>`__.
Examples
--------
>>> from sunpy.net import Fido, attrs as a
>>> results = Fido.search(a.Time("2016/1/1", "2016/1/2"),
... a.Instrument.xrs) #doctest: +REMOTE_DATA
>>> results #doctest: +REMOTE_DATA
<sunpy.net.fido_factory.UnifiedResponse object at ...>
Results from 1 Provider:
<BLANKLINE>
2 Results from the XRSClient:
Start Time End Time Source Instrument Wavelength
------------------- ------------------- ------ ---------- ----------
2016-01-01 00:00:00 2016-01-01 23:59:59 nasa goes nan
2016-01-02 00:00:00 2016-01-02 23:59:59 nasa goes nan
<BLANKLINE>
<BLANKLINE>
"""
def _get_goes_sat_num(self, date):
"""
Determines the satellite number for a given date.
Parameters
----------
date : `astropy.time.Time`
The date to determine which satellite is active.
"""
goes_operational = {
2: TimeRange("1981-01-01", "1983-04-30"),
5: TimeRange("1983-05-02", "1984-07-31"),
6: TimeRange("1983-06-01", "1994-08-18"),
7: TimeRange("1994-01-01", "1996-08-13"),
8: TimeRange("1996-03-21", "2003-06-18"),
9: TimeRange("1997-01-01", "1998-09-08"),
10: TimeRange("1998-07-10", "2009-12-01"),
11: TimeRange("2006-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
|
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def _get_time_for_url(self, urls):
these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split('Z')[0])
end_time = parse_time(os.path.basename(this_url).split('_e')[1].split('Z')[0])
these_timeranges.append(TimeRange(start_time, end_time))
if this_url.count('/l1b/') > 0: # this is a level 1b data file
start_time = datetime.strptime(os.path.basename(this_url).split('_s')[
1].split('_e')[0][:-1], '%Y%j%H%M%S')
end_time = datetime.strptime(os.path.basename(this_url).split('_e')[
1].split('_c')[0][:-1], '%Y%j%H%M%S')
these_timeranges.append(TimeRange(start_time, end_time))
return these_timeranges
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns urls to the SUVI data for the specified time range.
Parameters
----------
timerange: `sunpy.time.TimeRange`
Time range for which data is to be downloaded.
level : `str`, optional
The level of the data. Possible values are 1b and 2 (default).
wavelength : `astropy.units.Quantity` or `tuple`, optional
Wavelength band. If not given, all wavelengths are returned.
satellitenumber : `int`, optional
GOES satellite number. Must be >= 16. Default is 16.
"""
base_url = "https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes{goes_number}/"
supported_waves = [94, 131, 171, 195, 284, 304]
supported_levels = ("2", "1b")
# these are optional requirements so if not provided assume defaults
# if wavelength is not provided assuming all of them
if "wavelength" in kwargs.keys():
wavelength_input = kwargs.get("wavelength")
if isinstance(wavelength_input, u.Quantity): # not a range
if int(wavelength_input.to_value('Angstrom')) not in supported_waves:
raise ValueError(f"Wavelength {kwargs.get('wavelength')} not supported.")
else:
wavelength = [kwargs.get("wavelength")]
else: # Range was provided
compress_index = [wavelength_input.wavemin <= this_wave <=
wavelength_input.wavemax for this_wave in (supported_waves * u.Angstrom)]
if not any(compress_index):
raise ValueError(
f"Wavelength {wavelength_input} not supported.")
else:
wavelength = list(compress(supported_waves, compress_index)) * u.Angstrom
else: # no wavelength provided return all of them
wavelength = supported_waves * u.Angstrom
# check that the input wavelength can be converted to angstrom
waves = [int(this_wave.to_value('angstrom', equivalencies=u.spectral()))
for this_wave in wavelength]
# use the given satellite number or choose the best one
satellitenumber = int(kwargs.get(
"satellitenumber", self._get_goes_sat_num(timerange.start)))
if satellitenumber < 16:
raise ValueError(f"Satellite number {satellitenumber} not supported.")
# default to the highest level of data
level = str(kwargs.get("level", "2")) # make string in case the input is a number
if level not in supported_levels:
raise ValueError(f"Level {level} is not supported.")
results = []
for this_wave in waves:
if level == "2":
search_pattern = base_url + \
r'l{level}/data/suvi-l{level}-ci{wave:03}/%Y/%m/%d/dr_suvi-l{level}-ci{wave:03}_g{goes_number}_s%Y%m%dT%H%M%SZ_.*\.fits'
elif level == "1b":
if this_wave in [131, 171, 195, 284]:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 304:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-he{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-He{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 94:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
if search_pattern.count('wave_minus1'):
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber, wave_minus1=this_wave-1)
else:
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber)
results.extend(scraper.filelist(timerange))
return results
def _makeimap(self):
"""
Helper Function used to hold information about source.
"""
self.map_['source'] = 'GOES'
self.map_['provider'] = 'NOAA'
self.map_['instrument'] = 'SUVI'
self.map_['physobs'] = 'flux'
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : `tuple`
All specified query objects.
Returns
-------
`bool`
answer as to whether client can service the query.
"""
# Import here to prevent circular imports
from sunpy.net import attrs as a
required = {a.Time, a.Instrument}
optional = {a.Wavelength, a.Level, a.goes.SatelliteNumber}
all_attrs = {type(x) for x in query}
ops = all_attrs - required
# check to ensure that all optional requirements are in approved list
if ops and not all(elem in optional for elem in ops):
return False
# if we get this far we have either Instrument and Time
# or Instrument, Time and Wavelength
check_var_count = 0
for x in query:
if isinstance(x, a.Instrument) and x.value.lower() == 'suvi':
check_var_count += 1
if check_var_count == 1:
return True
else:
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [16, 17]
adict = {attrs.Instrument: [
("SUVI", "The Geostationary Operational Environmental Satellite Program.")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
| raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
) | conditional_block |
goes.py | # Author: Rishabh Sharma <rishabh.sharma.gunner@gmail.com>
# This module was developed under funding provided by
# Google Summer of Code 2014
import os
from datetime import datetime
from itertools import compress
from urllib.parse import urlsplit
import astropy.units as u
from astropy.time import Time, TimeDelta
from sunpy import config
from sunpy.net.dataretriever import GenericClient
from sunpy.time import TimeRange, parse_time
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.scraper import Scraper
TIME_FORMAT = config.get("general", "time_format")
__all__ = ["XRSClient", "SUVIClient"]
class XRSClient(GenericClient):
"""
Provides access to the GOES XRS fits files archive.
Searches data hosted by the `Solar Data Analysis Center <https://umbra.nascom.nasa.gov/goes/fits/>`__.
Examples
--------
>>> from sunpy.net import Fido, attrs as a
>>> results = Fido.search(a.Time("2016/1/1", "2016/1/2"),
... a.Instrument.xrs) #doctest: +REMOTE_DATA
>>> results #doctest: +REMOTE_DATA
<sunpy.net.fido_factory.UnifiedResponse object at ...>
Results from 1 Provider:
<BLANKLINE>
2 Results from the XRSClient:
Start Time End Time Source Instrument Wavelength
------------------- ------------------- ------ ---------- ----------
2016-01-01 00:00:00 2016-01-01 23:59:59 nasa goes nan
2016-01-02 00:00:00 2016-01-02 23:59:59 nasa goes nan
<BLANKLINE>
<BLANKLINE>
"""
def _get_goes_sat_num(self, date):
"""
Determines the satellite number for a given date.
Parameters
----------
date : `astropy.time.Time`
The date to determine which satellite is active.
"""
goes_operational = {
2: TimeRange("1981-01-01", "1983-04-30"),
5: TimeRange("1983-05-02", "1984-07-31"), | 8: TimeRange("1996-03-21", "2003-06-18"),
9: TimeRange("1997-01-01", "1998-09-08"),
10: TimeRange("1998-07-10", "2009-12-01"),
11: TimeRange("2006-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
)
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def _get_time_for_url(self, urls):
these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split('Z')[0])
end_time = parse_time(os.path.basename(this_url).split('_e')[1].split('Z')[0])
these_timeranges.append(TimeRange(start_time, end_time))
if this_url.count('/l1b/') > 0: # this is a level 1b data file
start_time = datetime.strptime(os.path.basename(this_url).split('_s')[
1].split('_e')[0][:-1], '%Y%j%H%M%S')
end_time = datetime.strptime(os.path.basename(this_url).split('_e')[
1].split('_c')[0][:-1], '%Y%j%H%M%S')
these_timeranges.append(TimeRange(start_time, end_time))
return these_timeranges
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns urls to the SUVI data for the specified time range.
Parameters
----------
timerange: `sunpy.time.TimeRange`
Time range for which data is to be downloaded.
level : `str`, optional
The level of the data. Possible values are 1b and 2 (default).
wavelength : `astropy.units.Quantity` or `tuple`, optional
Wavelength band. If not given, all wavelengths are returned.
satellitenumber : `int`, optional
GOES satellite number. Must be >= 16. Default is 16.
"""
base_url = "https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes{goes_number}/"
supported_waves = [94, 131, 171, 195, 284, 304]
supported_levels = ("2", "1b")
# these are optional requirements so if not provided assume defaults
# if wavelength is not provided assuming all of them
if "wavelength" in kwargs.keys():
wavelength_input = kwargs.get("wavelength")
if isinstance(wavelength_input, u.Quantity): # not a range
if int(wavelength_input.to_value('Angstrom')) not in supported_waves:
raise ValueError(f"Wavelength {kwargs.get('wavelength')} not supported.")
else:
wavelength = [kwargs.get("wavelength")]
else: # Range was provided
compress_index = [wavelength_input.wavemin <= this_wave <=
wavelength_input.wavemax for this_wave in (supported_waves * u.Angstrom)]
if not any(compress_index):
raise ValueError(
f"Wavelength {wavelength_input} not supported.")
else:
wavelength = list(compress(supported_waves, compress_index)) * u.Angstrom
else: # no wavelength provided return all of them
wavelength = supported_waves * u.Angstrom
# check that the input wavelength can be converted to angstrom
waves = [int(this_wave.to_value('angstrom', equivalencies=u.spectral()))
for this_wave in wavelength]
# use the given satellite number or choose the best one
satellitenumber = int(kwargs.get(
"satellitenumber", self._get_goes_sat_num(timerange.start)))
if satellitenumber < 16:
raise ValueError(f"Satellite number {satellitenumber} not supported.")
# default to the highest level of data
level = str(kwargs.get("level", "2")) # make string in case the input is a number
if level not in supported_levels:
raise ValueError(f"Level {level} is not supported.")
results = []
for this_wave in waves:
if level == "2":
search_pattern = base_url + \
r'l{level}/data/suvi-l{level}-ci{wave:03}/%Y/%m/%d/dr_suvi-l{level}-ci{wave:03}_g{goes_number}_s%Y%m%dT%H%M%SZ_.*\.fits'
elif level == "1b":
if this_wave in [131, 171, 195, 284]:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 304:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-he{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-He{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 94:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
if search_pattern.count('wave_minus1'):
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber, wave_minus1=this_wave-1)
else:
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber)
results.extend(scraper.filelist(timerange))
return results
def _makeimap(self):
"""
Helper Function used to hold information about source.
"""
self.map_['source'] = 'GOES'
self.map_['provider'] = 'NOAA'
self.map_['instrument'] = 'SUVI'
self.map_['physobs'] = 'flux'
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : `tuple`
All specified query objects.
Returns
-------
`bool`
answer as to whether client can service the query.
"""
# Import here to prevent circular imports
from sunpy.net import attrs as a
required = {a.Time, a.Instrument}
optional = {a.Wavelength, a.Level, a.goes.SatelliteNumber}
all_attrs = {type(x) for x in query}
ops = all_attrs - required
# check to ensure that all optional requirements are in approved list
if ops and not all(elem in optional for elem in ops):
return False
# if we get this far we have either Instrument and Time
# or Instrument, Time and Wavelength
check_var_count = 0
for x in query:
if isinstance(x, a.Instrument) and x.value.lower() == 'suvi':
check_var_count += 1
if check_var_count == 1:
return True
else:
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [16, 17]
adict = {attrs.Instrument: [
("SUVI", "The Geostationary Operational Environmental Satellite Program.")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict | 6: TimeRange("1983-06-01", "1994-08-18"),
7: TimeRange("1994-01-01", "1996-08-13"), | random_line_split |
goes.py | # Author: Rishabh Sharma <rishabh.sharma.gunner@gmail.com>
# This module was developed under funding provided by
# Google Summer of Code 2014
import os
from datetime import datetime
from itertools import compress
from urllib.parse import urlsplit
import astropy.units as u
from astropy.time import Time, TimeDelta
from sunpy import config
from sunpy.net.dataretriever import GenericClient
from sunpy.time import TimeRange, parse_time
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.scraper import Scraper
TIME_FORMAT = config.get("general", "time_format")
__all__ = ["XRSClient", "SUVIClient"]
class XRSClient(GenericClient):
"""
Provides access to the GOES XRS fits files archive.
Searches data hosted by the `Solar Data Analysis Center <https://umbra.nascom.nasa.gov/goes/fits/>`__.
Examples
--------
>>> from sunpy.net import Fido, attrs as a
>>> results = Fido.search(a.Time("2016/1/1", "2016/1/2"),
... a.Instrument.xrs) #doctest: +REMOTE_DATA
>>> results #doctest: +REMOTE_DATA
<sunpy.net.fido_factory.UnifiedResponse object at ...>
Results from 1 Provider:
<BLANKLINE>
2 Results from the XRSClient:
Start Time End Time Source Instrument Wavelength
------------------- ------------------- ------ ---------- ----------
2016-01-01 00:00:00 2016-01-01 23:59:59 nasa goes nan
2016-01-02 00:00:00 2016-01-02 23:59:59 nasa goes nan
<BLANKLINE>
<BLANKLINE>
"""
def _get_goes_sat_num(self, date):
"""
Determines the satellite number for a given date.
Parameters
----------
date : `astropy.time.Time`
The date to determine which satellite is active.
"""
goes_operational = {
2: TimeRange("1981-01-01", "1983-04-30"),
5: TimeRange("1983-05-02", "1984-07-31"),
6: TimeRange("1983-06-01", "1994-08-18"),
7: TimeRange("1994-01-01", "1996-08-13"),
8: TimeRange("1996-03-21", "2003-06-18"),
9: TimeRange("1997-01-01", "1998-09-08"),
10: TimeRange("1998-07-10", "2009-12-01"),
11: TimeRange("2006-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
)
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def | (self, urls):
these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split('Z')[0])
end_time = parse_time(os.path.basename(this_url).split('_e')[1].split('Z')[0])
these_timeranges.append(TimeRange(start_time, end_time))
if this_url.count('/l1b/') > 0: # this is a level 1b data file
start_time = datetime.strptime(os.path.basename(this_url).split('_s')[
1].split('_e')[0][:-1], '%Y%j%H%M%S')
end_time = datetime.strptime(os.path.basename(this_url).split('_e')[
1].split('_c')[0][:-1], '%Y%j%H%M%S')
these_timeranges.append(TimeRange(start_time, end_time))
return these_timeranges
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns urls to the SUVI data for the specified time range.
Parameters
----------
timerange: `sunpy.time.TimeRange`
Time range for which data is to be downloaded.
level : `str`, optional
The level of the data. Possible values are 1b and 2 (default).
wavelength : `astropy.units.Quantity` or `tuple`, optional
Wavelength band. If not given, all wavelengths are returned.
satellitenumber : `int`, optional
GOES satellite number. Must be >= 16. Default is 16.
"""
base_url = "https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes{goes_number}/"
supported_waves = [94, 131, 171, 195, 284, 304]
supported_levels = ("2", "1b")
# these are optional requirements so if not provided assume defaults
# if wavelength is not provided assuming all of them
if "wavelength" in kwargs.keys():
wavelength_input = kwargs.get("wavelength")
if isinstance(wavelength_input, u.Quantity): # not a range
if int(wavelength_input.to_value('Angstrom')) not in supported_waves:
raise ValueError(f"Wavelength {kwargs.get('wavelength')} not supported.")
else:
wavelength = [kwargs.get("wavelength")]
else: # Range was provided
compress_index = [wavelength_input.wavemin <= this_wave <=
wavelength_input.wavemax for this_wave in (supported_waves * u.Angstrom)]
if not any(compress_index):
raise ValueError(
f"Wavelength {wavelength_input} not supported.")
else:
wavelength = list(compress(supported_waves, compress_index)) * u.Angstrom
else: # no wavelength provided return all of them
wavelength = supported_waves * u.Angstrom
# check that the input wavelength can be converted to angstrom
waves = [int(this_wave.to_value('angstrom', equivalencies=u.spectral()))
for this_wave in wavelength]
# use the given satellite number or choose the best one
satellitenumber = int(kwargs.get(
"satellitenumber", self._get_goes_sat_num(timerange.start)))
if satellitenumber < 16:
raise ValueError(f"Satellite number {satellitenumber} not supported.")
# default to the highest level of data
level = str(kwargs.get("level", "2")) # make string in case the input is a number
if level not in supported_levels:
raise ValueError(f"Level {level} is not supported.")
results = []
for this_wave in waves:
if level == "2":
search_pattern = base_url + \
r'l{level}/data/suvi-l{level}-ci{wave:03}/%Y/%m/%d/dr_suvi-l{level}-ci{wave:03}_g{goes_number}_s%Y%m%dT%H%M%SZ_.*\.fits'
elif level == "1b":
if this_wave in [131, 171, 195, 284]:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 304:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-he{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-He{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 94:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
if search_pattern.count('wave_minus1'):
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber, wave_minus1=this_wave-1)
else:
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber)
results.extend(scraper.filelist(timerange))
return results
def _makeimap(self):
"""
Helper Function used to hold information about source.
"""
self.map_['source'] = 'GOES'
self.map_['provider'] = 'NOAA'
self.map_['instrument'] = 'SUVI'
self.map_['physobs'] = 'flux'
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : `tuple`
All specified query objects.
Returns
-------
`bool`
answer as to whether client can service the query.
"""
# Import here to prevent circular imports
from sunpy.net import attrs as a
required = {a.Time, a.Instrument}
optional = {a.Wavelength, a.Level, a.goes.SatelliteNumber}
all_attrs = {type(x) for x in query}
ops = all_attrs - required
# check to ensure that all optional requirements are in approved list
if ops and not all(elem in optional for elem in ops):
return False
# if we get this far we have either Instrument and Time
# or Instrument, Time and Wavelength
check_var_count = 0
for x in query:
if isinstance(x, a.Instrument) and x.value.lower() == 'suvi':
check_var_count += 1
if check_var_count == 1:
return True
else:
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [16, 17]
adict = {attrs.Instrument: [
("SUVI", "The Geostationary Operational Environmental Satellite Program.")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
| _get_time_for_url | identifier_name |
goes.py | # Author: Rishabh Sharma <rishabh.sharma.gunner@gmail.com>
# This module was developed under funding provided by
# Google Summer of Code 2014
import os
from datetime import datetime
from itertools import compress
from urllib.parse import urlsplit
import astropy.units as u
from astropy.time import Time, TimeDelta
from sunpy import config
from sunpy.net.dataretriever import GenericClient
from sunpy.time import TimeRange, parse_time
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.scraper import Scraper
TIME_FORMAT = config.get("general", "time_format")
__all__ = ["XRSClient", "SUVIClient"]
class XRSClient(GenericClient):
"""
Provides access to the GOES XRS fits files archive.
Searches data hosted by the `Solar Data Analysis Center <https://umbra.nascom.nasa.gov/goes/fits/>`__.
Examples
--------
>>> from sunpy.net import Fido, attrs as a
>>> results = Fido.search(a.Time("2016/1/1", "2016/1/2"),
... a.Instrument.xrs) #doctest: +REMOTE_DATA
>>> results #doctest: +REMOTE_DATA
<sunpy.net.fido_factory.UnifiedResponse object at ...>
Results from 1 Provider:
<BLANKLINE>
2 Results from the XRSClient:
Start Time End Time Source Instrument Wavelength
------------------- ------------------- ------ ---------- ----------
2016-01-01 00:00:00 2016-01-01 23:59:59 nasa goes nan
2016-01-02 00:00:00 2016-01-02 23:59:59 nasa goes nan
<BLANKLINE>
<BLANKLINE>
"""
def _get_goes_sat_num(self, date):
"""
Determines the satellite number for a given date.
Parameters
----------
date : `astropy.time.Time`
The date to determine which satellite is active.
"""
goes_operational = {
2: TimeRange("1981-01-01", "1983-04-30"),
5: TimeRange("1983-05-02", "1984-07-31"),
6: TimeRange("1983-06-01", "1994-08-18"),
7: TimeRange("1994-01-01", "1996-08-13"),
8: TimeRange("1996-03-21", "2003-06-18"),
9: TimeRange("1997-01-01", "1998-09-08"),
10: TimeRange("1998-07-10", "2009-12-01"),
11: TimeRange("2006-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
)
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def _get_time_for_url(self, urls):
|
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns urls to the SUVI data for the specified time range.
Parameters
----------
timerange: `sunpy.time.TimeRange`
Time range for which data is to be downloaded.
level : `str`, optional
The level of the data. Possible values are 1b and 2 (default).
wavelength : `astropy.units.Quantity` or `tuple`, optional
Wavelength band. If not given, all wavelengths are returned.
satellitenumber : `int`, optional
GOES satellite number. Must be >= 16. Default is 16.
"""
base_url = "https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes{goes_number}/"
supported_waves = [94, 131, 171, 195, 284, 304]
supported_levels = ("2", "1b")
# these are optional requirements so if not provided assume defaults
# if wavelength is not provided assuming all of them
if "wavelength" in kwargs.keys():
wavelength_input = kwargs.get("wavelength")
if isinstance(wavelength_input, u.Quantity): # not a range
if int(wavelength_input.to_value('Angstrom')) not in supported_waves:
raise ValueError(f"Wavelength {kwargs.get('wavelength')} not supported.")
else:
wavelength = [kwargs.get("wavelength")]
else: # Range was provided
compress_index = [wavelength_input.wavemin <= this_wave <=
wavelength_input.wavemax for this_wave in (supported_waves * u.Angstrom)]
if not any(compress_index):
raise ValueError(
f"Wavelength {wavelength_input} not supported.")
else:
wavelength = list(compress(supported_waves, compress_index)) * u.Angstrom
else: # no wavelength provided return all of them
wavelength = supported_waves * u.Angstrom
# check that the input wavelength can be converted to angstrom
waves = [int(this_wave.to_value('angstrom', equivalencies=u.spectral()))
for this_wave in wavelength]
# use the given satellite number or choose the best one
satellitenumber = int(kwargs.get(
"satellitenumber", self._get_goes_sat_num(timerange.start)))
if satellitenumber < 16:
raise ValueError(f"Satellite number {satellitenumber} not supported.")
# default to the highest level of data
level = str(kwargs.get("level", "2")) # make string in case the input is a number
if level not in supported_levels:
raise ValueError(f"Level {level} is not supported.")
results = []
for this_wave in waves:
if level == "2":
search_pattern = base_url + \
r'l{level}/data/suvi-l{level}-ci{wave:03}/%Y/%m/%d/dr_suvi-l{level}-ci{wave:03}_g{goes_number}_s%Y%m%dT%H%M%SZ_.*\.fits'
elif level == "1b":
if this_wave in [131, 171, 195, 284]:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 304:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-he{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-He{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
elif this_wave == 94:
search_pattern = base_url + \
r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
if search_pattern.count('wave_minus1'):
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber, wave_minus1=this_wave-1)
else:
scraper = Scraper(search_pattern, level=level, wave=this_wave,
goes_number=satellitenumber)
results.extend(scraper.filelist(timerange))
return results
def _makeimap(self):
"""
Helper Function used to hold information about source.
"""
self.map_['source'] = 'GOES'
self.map_['provider'] = 'NOAA'
self.map_['instrument'] = 'SUVI'
self.map_['physobs'] = 'flux'
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : `tuple`
All specified query objects.
Returns
-------
`bool`
answer as to whether client can service the query.
"""
# Import here to prevent circular imports
from sunpy.net import attrs as a
required = {a.Time, a.Instrument}
optional = {a.Wavelength, a.Level, a.goes.SatelliteNumber}
all_attrs = {type(x) for x in query}
ops = all_attrs - required
# check to ensure that all optional requirements are in approved list
if ops and not all(elem in optional for elem in ops):
return False
# if we get this far we have either Instrument and Time
# or Instrument, Time and Wavelength
check_var_count = 0
for x in query:
if isinstance(x, a.Instrument) and x.value.lower() == 'suvi':
check_var_count += 1
if check_var_count == 1:
return True
else:
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [16, 17]
adict = {attrs.Instrument: [
("SUVI", "The Geostationary Operational Environmental Satellite Program.")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
| these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split('Z')[0])
end_time = parse_time(os.path.basename(this_url).split('_e')[1].split('Z')[0])
these_timeranges.append(TimeRange(start_time, end_time))
if this_url.count('/l1b/') > 0: # this is a level 1b data file
start_time = datetime.strptime(os.path.basename(this_url).split('_s')[
1].split('_e')[0][:-1], '%Y%j%H%M%S')
end_time = datetime.strptime(os.path.basename(this_url).split('_e')[
1].split('_c')[0][:-1], '%Y%j%H%M%S')
these_timeranges.append(TimeRange(start_time, end_time))
return these_timeranges | identifier_body |
writeToStore.ts | import { SelectionSetNode, FieldNode, DocumentNode } from 'graphql';
import { invariant, InvariantError } from 'ts-invariant';
import { equal } from '@wry/equality';
import {
createFragmentMap,
FragmentMap,
getFragmentFromSelection,
getDefaultValues,
getFragmentDefinitions,
getOperationDefinition,
getTypenameFromResult,
makeReference,
isField,
resultKeyNameFromField,
StoreValue,
StoreObject,
Reference,
isReference,
shouldInclude,
hasDirectives,
cloneDeep,
} from '../../utilities';
import { NormalizedCache, ReadMergeModifyContext, MergeTree } from './types';
import { makeProcessedFieldsMerger, fieldNameFromStoreName, storeValueIsStoreObject } from './helpers';
import { StoreReader } from './readFromStore';
import { InMemoryCache } from './inMemoryCache';
import { EntityStore } from './entityStore';
export interface WriteContext extends ReadMergeModifyContext {
readonly written: {
[dataId: string]: SelectionSetNode[];
};
readonly fragmentMap?: FragmentMap;
// General-purpose deep-merge function for use during writes.
merge<T>(existing: T, incoming: T): T;
};
interface ProcessSelectionSetOptions {
dataId?: string,
result: Record<string, any>;
selectionSet: SelectionSetNode;
context: WriteContext;
mergeTree: MergeTree;
}
export interface WriteToStoreOptions {
query: DocumentNode;
result: Object;
dataId?: string;
store: NormalizedCache;
variables?: Object;
}
export class StoreWriter {
constructor(
public readonly cache: InMemoryCache,
private reader?: StoreReader,
) {}
/**
* Writes the result of a query to the store.
*
* @param result The result object returned for the query document.
*
* @param query The query document whose result we are writing to the store.
*
* @param store The {@link NormalizedCache} used by Apollo for the `data` portion of the store.
*
* @param variables A map from the name of a variable to its value. These variables can be
* referenced by the query document.
*
* @return A `Reference` to the written object.
*/
public writeToStore({
query,
result,
dataId,
store,
variables,
}: WriteToStoreOptions): Reference | undefined {
const operationDefinition = getOperationDefinition(query)!;
const merger = makeProcessedFieldsMerger();
variables = {
...getDefaultValues(operationDefinition),
...variables!,
};
const ref = this.processSelectionSet({
result: result || Object.create(null),
dataId,
selectionSet: operationDefinition.selectionSet,
mergeTree: { map: new Map },
context: {
store,
written: Object.create(null),
merge<T>(existing: T, incoming: T) {
return merger.merge(existing, incoming) as T;
},
variables,
varString: JSON.stringify(variables),
fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
},
});
if (!isReference(ref)) {
throw new InvariantError(`Could not identify object ${JSON.stringify(result)}`);
}
// Any IDs written explicitly to the cache will be retained as
// reachable root IDs for garbage collection purposes. Although this
// logic includes root IDs like ROOT_QUERY and ROOT_MUTATION, their
// retainment counts are effectively ignored because cache.gc() always
// includes them in its root ID set.
store.retain(ref.__ref);
return ref;
}
private processSelectionSet({
dataId,
result,
selectionSet,
context,
// This object allows processSelectionSet to report useful information
// to its callers without explicitly returning that information.
mergeTree,
}: ProcessSelectionSetOptions): StoreObject | Reference {
const { policies } = this.cache;
// Identify the result object, even if dataId was already provided,
// since we always need keyObject below.
const [id, keyObject] = policies.identify(
result, selectionSet, context.fragmentMap);
// If dataId was not provided, fall back to the id just generated by
// policies.identify.
dataId = dataId || id;
if ("string" === typeof dataId) {
// Avoid processing the same entity object using the same selection
// set more than once. We use an array instead of a Set since most
// entity IDs will be written using only one selection set, so the
// size of this array is likely to be very small, meaning indexOf is
// likely to be faster than Set.prototype.has.
const sets = context.written[dataId] || (context.written[dataId] = []);
const ref = makeReference(dataId);
if (sets.indexOf(selectionSet) >= 0) return ref;
sets.push(selectionSet);
// If we're about to write a result object into the store, but we
// happen to know that the exact same (===) result object would be
// returned if we were to reread the result with the same inputs,
// then we can skip the rest of the processSelectionSet work for
// this object, and immediately return a Reference to it.
if (this.reader && this.reader.isFresh(
result,
ref,
selectionSet,
context,
)) {
return ref;
}
}
// This variable will be repeatedly updated using context.merge to
// accumulate all fields that need to be written into the store.
let incomingFields: StoreObject = Object.create(null);
// Write any key fields that were used during identification, even if
// they were not mentioned in the original query.
if (keyObject) {
incomingFields = context.merge(incomingFields, keyObject);
}
// If typename was not passed in, infer it. Note that typename is
// always passed in for tricky-to-infer cases such as "Query" for
// ROOT_QUERY.
const typename: string | undefined =
(dataId && policies.rootTypenamesById[dataId]) ||
getTypenameFromResult(result, selectionSet, context.fragmentMap) ||
(dataId && context.store.get(dataId, "__typename") as string);
if ("string" === typeof typename) {
incomingFields.__typename = typename;
}
const workSet = new Set(selectionSet.selections);
workSet.forEach(selection => {
if (!shouldInclude(selection, context.variables)) return;
if (isField(selection)) {
const resultFieldKey = resultKeyNameFromField(selection);
const value = result[resultFieldKey];
if (typeof value !== 'undefined') {
const storeFieldName = policies.getStoreFieldName({
typename,
fieldName: selection.name.value,
field: selection,
variables: context.variables,
});
const childTree = getChildMergeTree(mergeTree, storeFieldName);
let incomingValue =
this.processFieldValue(value, selection, context, childTree);
const childTypename = selection.selectionSet
&& context.store.getFieldValue<string>(incomingValue as StoreObject, "__typename")
|| void 0;
const merge = policies.getMergeFunction(
typename,
selection.name.value,
childTypename,
);
if (merge) {
childTree.info = {
// TODO Check compatibility against any existing
// childTree.field?
field: selection,
typename,
merge,
};
} else {
maybeRecycleChildMergeTree(mergeTree, storeFieldName);
}
incomingFields = context.merge(incomingFields, {
[storeFieldName]: incomingValue,
});
} else if (
policies.usingPossibleTypes &&
!hasDirectives(["defer", "client"], selection)
) {
throw new InvariantError(
`Missing field '${resultFieldKey}' in ${JSON.stringify(
result,
null,
2,
).substring(0, 100)}`,
);
}
} else {
// This is not a field, so it must be a fragment, either inline or named
const fragment = getFragmentFromSelection(
selection,
context.fragmentMap,
);
if (fragment &&
// By passing result and context.variables, we enable
// policies.fragmentMatches to bend the rules when typename is
// not a known subtype of the fragment type condition, but the
// result object contains all the keys requested by the
// fragment, which strongly suggests the fragment probably
// matched. This fuzzy matching behavior must be enabled by
// including a regular expression string (such as ".*" or
// "Prefix.*" or ".*Suffix") in the possibleTypes array for
// specific supertypes; otherwise, all matching remains exact.
// Fuzzy matches are remembered by the Policies object and
// later used when reading from the cache. Since there is no
// incoming result object to check when reading, reading does
// not involve the same fuzzy inference, so the StoreReader
// class calls policies.fragmentMatches without passing result
// or context.variables. The flexibility of fuzzy matching
// allows existing clients to accommodate previously unknown
// __typename strings produced by server/schema changes, which
// would otherwise be breaking changes.
policies.fragmentMatches(fragment, typename, result, context.variables)) {
fragment.selectionSet.selections.forEach(workSet.add, workSet);
}
}
});
if ("string" === typeof dataId) {
const entityRef = makeReference(dataId);
if (mergeTree.map.size) {
incomingFields = this.applyMerges(mergeTree, entityRef, incomingFields, context);
}
if (process.env.NODE_ENV !== "production") {
const hasSelectionSet = (storeFieldName: string) => | fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
const fieldsWithSelectionSets = new Set<string>();
workSet.forEach(selection => {
if (isField(selection) && selection.selectionSet) {
fieldsWithSelectionSets.add(selection.name.value);
}
});
const hasMergeFunction = (storeFieldName: string) => {
const childTree = mergeTree.map.get(storeFieldName);
return Boolean(childTree && childTree.info && childTree.info.merge);
};
Object.keys(incomingFields).forEach(storeFieldName => {
// If a merge function was defined for this field, trust that it
// did the right thing about (not) clobbering data. If the field
// has no selection set, it's a scalar field, so it doesn't need
// a merge function (even if it's an object, like JSON data).
if (hasSelectionSet(storeFieldName) &&
!hasMergeFunction(storeFieldName)) {
warnAboutDataLoss(
entityRef,
incomingFields,
storeFieldName,
context.store,
);
}
});
}
context.store.merge(dataId, incomingFields);
return entityRef;
}
return incomingFields;
}
private processFieldValue(
value: any,
field: FieldNode,
context: WriteContext,
mergeTree: MergeTree,
): StoreValue {
if (!field.selectionSet || value === null) {
// In development, we need to clone scalar values so that they can be
// safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
// it's cheaper to store the scalar values directly in the cache.
return process.env.NODE_ENV === 'production' ? value : cloneDeep(value);
}
if (Array.isArray(value)) {
return value.map((item, i) => {
const value = this.processFieldValue(
item, field, context, getChildMergeTree(mergeTree, i));
maybeRecycleChildMergeTree(mergeTree, i);
return value;
});
}
return this.processSelectionSet({
result: value,
selectionSet: field.selectionSet,
context,
mergeTree,
});
}
private applyMerges<T extends StoreValue>(
mergeTree: MergeTree,
existing: StoreValue,
incoming: T,
context: ReadMergeModifyContext,
getStorageArgs?: Parameters<EntityStore["getStorage"]>,
): T {
if (mergeTree.map.size && !isReference(incoming)) {
const e: StoreObject | Reference | undefined = (
// Items in the same position in different arrays are not
// necessarily related to each other, so when incoming is an array
// we process its elements as if there was no existing data.
!Array.isArray(incoming) &&
// Likewise, existing must be either a Reference or a StoreObject
// in order for its fields to be safe to merge with the fields of
// the incoming object.
(isReference(existing) || storeValueIsStoreObject(existing))
) ? existing : void 0;
// This narrowing is implied by mergeTree.map.size > 0 and
// !isReference(incoming), though TypeScript understandably cannot
// hope to infer this type.
const i = incoming as StoreObject | StoreValue[];
// The options.storage objects provided to read and merge functions
// are derived from the identity of the parent object plus a
// sequence of storeFieldName strings/numbers identifying the nested
// field name path of each field value to be merged.
if (e && !getStorageArgs) {
getStorageArgs = [isReference(e) ? e.__ref : e];
}
// It's possible that applying merge functions to this subtree will
// not change the incoming data, so this variable tracks the fields
// that did change, so we can create a new incoming object when (and
// only when) at least one incoming field has changed. We use a Map
// to preserve the type of numeric keys.
let changedFields: Map<string | number, StoreValue> | undefined;
const getValue = (
from: typeof e | typeof i,
name: string | number,
): StoreValue => {
return Array.isArray(from)
? (typeof name === "number" ? from[name] : void 0)
: context.store.getFieldValue(from, String(name))
};
mergeTree.map.forEach((childTree, storeFieldName) => {
if (getStorageArgs) {
getStorageArgs.push(storeFieldName);
}
const eVal = getValue(e, storeFieldName);
const iVal = getValue(i, storeFieldName);
const aVal = this.applyMerges(
childTree,
eVal,
iVal,
context,
getStorageArgs,
);
if (aVal !== iVal) {
changedFields = changedFields || new Map;
changedFields.set(storeFieldName, aVal);
}
if (getStorageArgs) {
invariant(getStorageArgs.pop() === storeFieldName);
}
});
if (changedFields) {
// Shallow clone i so we can add changed fields to it.
incoming = (Array.isArray(i) ? i.slice(0) : { ...i }) as T;
changedFields.forEach((value, name) => {
(incoming as any)[name] = value;
});
}
}
if (mergeTree.info) {
return this.cache.policies.runMergeFunction(
existing,
incoming,
mergeTree.info,
context,
getStorageArgs && context.store.getStorage(...getStorageArgs),
);
}
return incoming;
}
}
const emptyMergeTreePool: MergeTree[] = [];
function getChildMergeTree(
{ map }: MergeTree,
name: string | number,
): MergeTree {
if (!map.has(name)) {
map.set(name, emptyMergeTreePool.pop() || { map: new Map });
}
return map.get(name)!;
}
function maybeRecycleChildMergeTree(
{ map }: MergeTree,
name: string | number,
) {
const childTree = map.get(name);
if (childTree &&
!childTree.info &&
!childTree.map.size) {
emptyMergeTreePool.push(childTree);
map.delete(name);
}
}
const warnings = new Set<string>();
// Note that this function is unused in production, and thus should be
// pruned by any well-configured minifier.
function warnAboutDataLoss(
existingRef: Reference,
incomingObj: StoreObject,
storeFieldName: string,
store: NormalizedCache,
) {
const getChild = (objOrRef: StoreObject | Reference): StoreObject | false => {
const child = store.getFieldValue<StoreObject>(objOrRef, storeFieldName);
return typeof child === "object" && child;
};
const existing = getChild(existingRef);
if (!existing) return;
const incoming = getChild(incomingObj);
if (!incoming) return;
// It's always safe to replace a reference, since it refers to data
// safely stored elsewhere.
if (isReference(existing)) return;
// If the values are structurally equivalent, we do not need to worry
// about incoming replacing existing.
if (equal(existing, incoming)) return;
// If we're replacing every key of the existing object, then the
// existing data would be overwritten even if the objects were
// normalized, so warning would not be helpful here.
if (Object.keys(existing).every(
key => store.getFieldValue(incoming, key) !== void 0)) {
return;
}
const parentType =
store.getFieldValue<string>(existingRef, "__typename") ||
store.getFieldValue<string>(incomingObj, "__typename");
const fieldName = fieldNameFromStoreName(storeFieldName);
const typeDotName = `${parentType}.${fieldName}`;
// Avoid warning more than once for the same type and field name.
if (warnings.has(typeDotName)) return;
warnings.add(typeDotName);
const childTypenames: string[] = [];
// Arrays do not have __typename fields, and always need a custom merge
// function, even if their elements are normalized entities.
if (!Array.isArray(existing) &&
!Array.isArray(incoming)) {
[existing, incoming].forEach(child => {
const typename = store.getFieldValue(child, "__typename");
if (typeof typename === "string" &&
!childTypenames.includes(typename)) {
childTypenames.push(typename);
}
});
}
invariant.warn(
`Cache data may be lost when replacing the ${fieldName} field of a ${parentType} object.
To address this problem (which is not a bug in Apollo Client), ${
childTypenames.length
? "either ensure all objects of type " +
childTypenames.join(" and ") + " have an ID or a custom merge function, or "
: ""
}define a custom merge function for the ${
typeDotName
} field, so InMemoryCache can safely merge these objects:
existing: ${JSON.stringify(existing).slice(0, 1000)}
incoming: ${JSON.stringify(incoming).slice(0, 1000)}
For more information about these options, please refer to the documentation:
* Ensuring entity objects have IDs: https://go.apollo.dev/c/generating-unique-identifiers
* Defining custom merge functions: https://go.apollo.dev/c/merging-non-normalized-objects
`);
} | random_line_split | |
writeToStore.ts | import { SelectionSetNode, FieldNode, DocumentNode } from 'graphql';
import { invariant, InvariantError } from 'ts-invariant';
import { equal } from '@wry/equality';
import {
createFragmentMap,
FragmentMap,
getFragmentFromSelection,
getDefaultValues,
getFragmentDefinitions,
getOperationDefinition,
getTypenameFromResult,
makeReference,
isField,
resultKeyNameFromField,
StoreValue,
StoreObject,
Reference,
isReference,
shouldInclude,
hasDirectives,
cloneDeep,
} from '../../utilities';
import { NormalizedCache, ReadMergeModifyContext, MergeTree } from './types';
import { makeProcessedFieldsMerger, fieldNameFromStoreName, storeValueIsStoreObject } from './helpers';
import { StoreReader } from './readFromStore';
import { InMemoryCache } from './inMemoryCache';
import { EntityStore } from './entityStore';
export interface WriteContext extends ReadMergeModifyContext {
readonly written: {
[dataId: string]: SelectionSetNode[];
};
readonly fragmentMap?: FragmentMap;
// General-purpose deep-merge function for use during writes.
merge<T>(existing: T, incoming: T): T;
};
interface ProcessSelectionSetOptions {
dataId?: string,
result: Record<string, any>;
selectionSet: SelectionSetNode;
context: WriteContext;
mergeTree: MergeTree;
}
export interface WriteToStoreOptions {
query: DocumentNode;
result: Object;
dataId?: string;
store: NormalizedCache;
variables?: Object;
}
export class StoreWriter {
constructor(
public readonly cache: InMemoryCache,
private reader?: StoreReader,
) {}
/**
* Writes the result of a query to the store.
*
* @param result The result object returned for the query document.
*
* @param query The query document whose result we are writing to the store.
*
* @param store The {@link NormalizedCache} used by Apollo for the `data` portion of the store.
*
* @param variables A map from the name of a variable to its value. These variables can be
* referenced by the query document.
*
* @return A `Reference` to the written object.
*/
public writeToStore({
query,
result,
dataId,
store,
variables,
}: WriteToStoreOptions): Reference | undefined {
const operationDefinition = getOperationDefinition(query)!;
const merger = makeProcessedFieldsMerger();
variables = {
...getDefaultValues(operationDefinition),
...variables!,
};
const ref = this.processSelectionSet({
result: result || Object.create(null),
dataId,
selectionSet: operationDefinition.selectionSet,
mergeTree: { map: new Map },
context: {
store,
written: Object.create(null),
merge<T>(existing: T, incoming: T) {
return merger.merge(existing, incoming) as T;
},
variables,
varString: JSON.stringify(variables),
fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
},
});
if (!isReference(ref)) {
throw new InvariantError(`Could not identify object ${JSON.stringify(result)}`);
}
// Any IDs written explicitly to the cache will be retained as
// reachable root IDs for garbage collection purposes. Although this
// logic includes root IDs like ROOT_QUERY and ROOT_MUTATION, their
// retainment counts are effectively ignored because cache.gc() always
// includes them in its root ID set.
store.retain(ref.__ref);
return ref;
}
private processSelectionSet({
dataId,
result,
selectionSet,
context,
// This object allows processSelectionSet to report useful information
// to its callers without explicitly returning that information.
mergeTree,
}: ProcessSelectionSetOptions): StoreObject | Reference {
const { policies } = this.cache;
// Identify the result object, even if dataId was already provided,
// since we always need keyObject below.
const [id, keyObject] = policies.identify(
result, selectionSet, context.fragmentMap);
// If dataId was not provided, fall back to the id just generated by
// policies.identify.
dataId = dataId || id;
if ("string" === typeof dataId) {
// Avoid processing the same entity object using the same selection
// set more than once. We use an array instead of a Set since most
// entity IDs will be written using only one selection set, so the
// size of this array is likely to be very small, meaning indexOf is
// likely to be faster than Set.prototype.has.
const sets = context.written[dataId] || (context.written[dataId] = []);
const ref = makeReference(dataId);
if (sets.indexOf(selectionSet) >= 0) return ref;
sets.push(selectionSet);
// If we're about to write a result object into the store, but we
// happen to know that the exact same (===) result object would be
// returned if we were to reread the result with the same inputs,
// then we can skip the rest of the processSelectionSet work for
// this object, and immediately return a Reference to it.
if (this.reader && this.reader.isFresh(
result,
ref,
selectionSet,
context,
)) {
return ref;
}
}
// This variable will be repeatedly updated using context.merge to
// accumulate all fields that need to be written into the store.
let incomingFields: StoreObject = Object.create(null);
// Write any key fields that were used during identification, even if
// they were not mentioned in the original query.
if (keyObject) {
incomingFields = context.merge(incomingFields, keyObject);
}
// If typename was not passed in, infer it. Note that typename is
// always passed in for tricky-to-infer cases such as "Query" for
// ROOT_QUERY.
const typename: string | undefined =
(dataId && policies.rootTypenamesById[dataId]) ||
getTypenameFromResult(result, selectionSet, context.fragmentMap) ||
(dataId && context.store.get(dataId, "__typename") as string);
if ("string" === typeof typename) {
incomingFields.__typename = typename;
}
const workSet = new Set(selectionSet.selections);
workSet.forEach(selection => {
if (!shouldInclude(selection, context.variables)) return;
if (isField(selection)) {
const resultFieldKey = resultKeyNameFromField(selection);
const value = result[resultFieldKey];
if (typeof value !== 'undefined') {
const storeFieldName = policies.getStoreFieldName({
typename,
fieldName: selection.name.value,
field: selection,
variables: context.variables,
});
const childTree = getChildMergeTree(mergeTree, storeFieldName);
let incomingValue =
this.processFieldValue(value, selection, context, childTree);
const childTypename = selection.selectionSet
&& context.store.getFieldValue<string>(incomingValue as StoreObject, "__typename")
|| void 0;
const merge = policies.getMergeFunction(
typename,
selection.name.value,
childTypename,
);
if (merge) {
childTree.info = {
// TODO Check compatibility against any existing
// childTree.field?
field: selection,
typename,
merge,
};
} else {
maybeRecycleChildMergeTree(mergeTree, storeFieldName);
}
incomingFields = context.merge(incomingFields, {
[storeFieldName]: incomingValue,
});
} else if (
policies.usingPossibleTypes &&
!hasDirectives(["defer", "client"], selection)
) {
throw new InvariantError(
`Missing field '${resultFieldKey}' in ${JSON.stringify(
result,
null,
2,
).substring(0, 100)}`,
);
}
} else {
// This is not a field, so it must be a fragment, either inline or named
const fragment = getFragmentFromSelection(
selection,
context.fragmentMap,
);
if (fragment &&
// By passing result and context.variables, we enable
// policies.fragmentMatches to bend the rules when typename is
// not a known subtype of the fragment type condition, but the
// result object contains all the keys requested by the
// fragment, which strongly suggests the fragment probably
// matched. This fuzzy matching behavior must be enabled by
// including a regular expression string (such as ".*" or
// "Prefix.*" or ".*Suffix") in the possibleTypes array for
// specific supertypes; otherwise, all matching remains exact.
// Fuzzy matches are remembered by the Policies object and
// later used when reading from the cache. Since there is no
// incoming result object to check when reading, reading does
// not involve the same fuzzy inference, so the StoreReader
// class calls policies.fragmentMatches without passing result
// or context.variables. The flexibility of fuzzy matching
// allows existing clients to accommodate previously unknown
// __typename strings produced by server/schema changes, which
// would otherwise be breaking changes.
policies.fragmentMatches(fragment, typename, result, context.variables)) {
fragment.selectionSet.selections.forEach(workSet.add, workSet);
}
}
});
if ("string" === typeof dataId) {
const entityRef = makeReference(dataId);
if (mergeTree.map.size) {
incomingFields = this.applyMerges(mergeTree, entityRef, incomingFields, context);
}
if (process.env.NODE_ENV !== "production") {
const hasSelectionSet = (storeFieldName: string) =>
fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
const fieldsWithSelectionSets = new Set<string>();
workSet.forEach(selection => {
if (isField(selection) && selection.selectionSet) {
fieldsWithSelectionSets.add(selection.name.value);
}
});
const hasMergeFunction = (storeFieldName: string) => {
const childTree = mergeTree.map.get(storeFieldName);
return Boolean(childTree && childTree.info && childTree.info.merge);
};
Object.keys(incomingFields).forEach(storeFieldName => {
// If a merge function was defined for this field, trust that it
// did the right thing about (not) clobbering data. If the field
// has no selection set, it's a scalar field, so it doesn't need
// a merge function (even if it's an object, like JSON data).
if (hasSelectionSet(storeFieldName) &&
!hasMergeFunction(storeFieldName)) {
warnAboutDataLoss(
entityRef,
incomingFields,
storeFieldName,
context.store,
);
}
});
}
context.store.merge(dataId, incomingFields);
return entityRef;
}
return incomingFields;
}
private processFieldValue(
value: any,
field: FieldNode,
context: WriteContext,
mergeTree: MergeTree,
): StoreValue {
if (!field.selectionSet || value === null) {
// In development, we need to clone scalar values so that they can be
// safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
// it's cheaper to store the scalar values directly in the cache.
return process.env.NODE_ENV === 'production' ? value : cloneDeep(value);
}
if (Array.isArray(value)) {
return value.map((item, i) => {
const value = this.processFieldValue(
item, field, context, getChildMergeTree(mergeTree, i));
maybeRecycleChildMergeTree(mergeTree, i);
return value;
});
}
return this.processSelectionSet({
result: value,
selectionSet: field.selectionSet,
context,
mergeTree,
});
}
private applyMerges<T extends StoreValue>(
mergeTree: MergeTree,
existing: StoreValue,
incoming: T,
context: ReadMergeModifyContext,
getStorageArgs?: Parameters<EntityStore["getStorage"]>,
): T {
if (mergeTree.map.size && !isReference(incoming)) {
const e: StoreObject | Reference | undefined = (
// Items in the same position in different arrays are not
// necessarily related to each other, so when incoming is an array
// we process its elements as if there was no existing data.
!Array.isArray(incoming) &&
// Likewise, existing must be either a Reference or a StoreObject
// in order for its fields to be safe to merge with the fields of
// the incoming object.
(isReference(existing) || storeValueIsStoreObject(existing))
) ? existing : void 0;
// This narrowing is implied by mergeTree.map.size > 0 and
// !isReference(incoming), though TypeScript understandably cannot
// hope to infer this type.
const i = incoming as StoreObject | StoreValue[];
// The options.storage objects provided to read and merge functions
// are derived from the identity of the parent object plus a
// sequence of storeFieldName strings/numbers identifying the nested
// field name path of each field value to be merged.
if (e && !getStorageArgs) {
getStorageArgs = [isReference(e) ? e.__ref : e];
}
// It's possible that applying merge functions to this subtree will
// not change the incoming data, so this variable tracks the fields
// that did change, so we can create a new incoming object when (and
// only when) at least one incoming field has changed. We use a Map
// to preserve the type of numeric keys.
let changedFields: Map<string | number, StoreValue> | undefined;
const getValue = (
from: typeof e | typeof i,
name: string | number,
): StoreValue => {
return Array.isArray(from)
? (typeof name === "number" ? from[name] : void 0)
: context.store.getFieldValue(from, String(name))
};
mergeTree.map.forEach((childTree, storeFieldName) => {
if (getStorageArgs) {
getStorageArgs.push(storeFieldName);
}
const eVal = getValue(e, storeFieldName);
const iVal = getValue(i, storeFieldName);
const aVal = this.applyMerges(
childTree,
eVal,
iVal,
context,
getStorageArgs,
);
if (aVal !== iVal) {
changedFields = changedFields || new Map;
changedFields.set(storeFieldName, aVal);
}
if (getStorageArgs) {
invariant(getStorageArgs.pop() === storeFieldName);
}
});
if (changedFields) {
// Shallow clone i so we can add changed fields to it.
incoming = (Array.isArray(i) ? i.slice(0) : { ...i }) as T;
changedFields.forEach((value, name) => {
(incoming as any)[name] = value;
});
}
}
if (mergeTree.info) {
return this.cache.policies.runMergeFunction(
existing,
incoming,
mergeTree.info,
context,
getStorageArgs && context.store.getStorage(...getStorageArgs),
);
}
return incoming;
}
}
const emptyMergeTreePool: MergeTree[] = [];
function getChildMergeTree(
{ map }: MergeTree,
name: string | number,
): MergeTree {
if (!map.has(name)) {
map.set(name, emptyMergeTreePool.pop() || { map: new Map });
}
return map.get(name)!;
}
function maybeRecycleChildMergeTree(
{ map }: MergeTree,
name: string | number,
) {
const childTree = map.get(name);
if (childTree &&
!childTree.info &&
!childTree.map.size) {
emptyMergeTreePool.push(childTree);
map.delete(name);
}
}
const warnings = new Set<string>();
// Note that this function is unused in production, and thus should be
// pruned by any well-configured minifier.
function warnAboutDataLoss(
existingRef: Reference,
incomingObj: StoreObject,
storeFieldName: string,
store: NormalizedCache,
) | {
const getChild = (objOrRef: StoreObject | Reference): StoreObject | false => {
const child = store.getFieldValue<StoreObject>(objOrRef, storeFieldName);
return typeof child === "object" && child;
};
const existing = getChild(existingRef);
if (!existing) return;
const incoming = getChild(incomingObj);
if (!incoming) return;
// It's always safe to replace a reference, since it refers to data
// safely stored elsewhere.
if (isReference(existing)) return;
// If the values are structurally equivalent, we do not need to worry
// about incoming replacing existing.
if (equal(existing, incoming)) return;
// If we're replacing every key of the existing object, then the
// existing data would be overwritten even if the objects were
// normalized, so warning would not be helpful here.
if (Object.keys(existing).every(
key => store.getFieldValue(incoming, key) !== void 0)) {
return;
}
const parentType =
store.getFieldValue<string>(existingRef, "__typename") ||
store.getFieldValue<string>(incomingObj, "__typename");
const fieldName = fieldNameFromStoreName(storeFieldName);
const typeDotName = `${parentType}.${fieldName}`;
// Avoid warning more than once for the same type and field name.
if (warnings.has(typeDotName)) return;
warnings.add(typeDotName);
const childTypenames: string[] = [];
// Arrays do not have __typename fields, and always need a custom merge
// function, even if their elements are normalized entities.
if (!Array.isArray(existing) &&
!Array.isArray(incoming)) {
[existing, incoming].forEach(child => {
const typename = store.getFieldValue(child, "__typename");
if (typeof typename === "string" &&
!childTypenames.includes(typename)) {
childTypenames.push(typename);
}
});
}
invariant.warn(
`Cache data may be lost when replacing the ${fieldName} field of a ${parentType} object.
To address this problem (which is not a bug in Apollo Client), ${
childTypenames.length
? "either ensure all objects of type " +
childTypenames.join(" and ") + " have an ID or a custom merge function, or "
: ""
}define a custom merge function for the ${
typeDotName
} field, so InMemoryCache can safely merge these objects:
existing: ${JSON.stringify(existing).slice(0, 1000)}
incoming: ${JSON.stringify(incoming).slice(0, 1000)}
For more information about these options, please refer to the documentation:
* Ensuring entity objects have IDs: https://go.apollo.dev/c/generating-unique-identifiers
* Defining custom merge functions: https://go.apollo.dev/c/merging-non-normalized-objects
`);
} | identifier_body | |
writeToStore.ts | import { SelectionSetNode, FieldNode, DocumentNode } from 'graphql';
import { invariant, InvariantError } from 'ts-invariant';
import { equal } from '@wry/equality';
import {
createFragmentMap,
FragmentMap,
getFragmentFromSelection,
getDefaultValues,
getFragmentDefinitions,
getOperationDefinition,
getTypenameFromResult,
makeReference,
isField,
resultKeyNameFromField,
StoreValue,
StoreObject,
Reference,
isReference,
shouldInclude,
hasDirectives,
cloneDeep,
} from '../../utilities';
import { NormalizedCache, ReadMergeModifyContext, MergeTree } from './types';
import { makeProcessedFieldsMerger, fieldNameFromStoreName, storeValueIsStoreObject } from './helpers';
import { StoreReader } from './readFromStore';
import { InMemoryCache } from './inMemoryCache';
import { EntityStore } from './entityStore';
export interface WriteContext extends ReadMergeModifyContext {
readonly written: {
[dataId: string]: SelectionSetNode[];
};
readonly fragmentMap?: FragmentMap;
// General-purpose deep-merge function for use during writes.
merge<T>(existing: T, incoming: T): T;
};
interface ProcessSelectionSetOptions {
dataId?: string,
result: Record<string, any>;
selectionSet: SelectionSetNode;
context: WriteContext;
mergeTree: MergeTree;
}
export interface WriteToStoreOptions {
query: DocumentNode;
result: Object;
dataId?: string;
store: NormalizedCache;
variables?: Object;
}
export class StoreWriter {
constructor(
public readonly cache: InMemoryCache,
private reader?: StoreReader,
) {}
/**
* Writes the result of a query to the store.
*
* @param result The result object returned for the query document.
*
* @param query The query document whose result we are writing to the store.
*
* @param store The {@link NormalizedCache} used by Apollo for the `data` portion of the store.
*
* @param variables A map from the name of a variable to its value. These variables can be
* referenced by the query document.
*
* @return A `Reference` to the written object.
*/
public writeToStore({
query,
result,
dataId,
store,
variables,
}: WriteToStoreOptions): Reference | undefined {
const operationDefinition = getOperationDefinition(query)!;
const merger = makeProcessedFieldsMerger();
variables = {
...getDefaultValues(operationDefinition),
...variables!,
};
const ref = this.processSelectionSet({
result: result || Object.create(null),
dataId,
selectionSet: operationDefinition.selectionSet,
mergeTree: { map: new Map },
context: {
store,
written: Object.create(null),
merge<T>(existing: T, incoming: T) {
return merger.merge(existing, incoming) as T;
},
variables,
varString: JSON.stringify(variables),
fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
},
});
if (!isReference(ref)) {
throw new InvariantError(`Could not identify object ${JSON.stringify(result)}`);
}
// Any IDs written explicitly to the cache will be retained as
// reachable root IDs for garbage collection purposes. Although this
// logic includes root IDs like ROOT_QUERY and ROOT_MUTATION, their
// retainment counts are effectively ignored because cache.gc() always
// includes them in its root ID set.
store.retain(ref.__ref);
return ref;
}
private processSelectionSet({
dataId,
result,
selectionSet,
context,
// This object allows processSelectionSet to report useful information
// to its callers without explicitly returning that information.
mergeTree,
}: ProcessSelectionSetOptions): StoreObject | Reference {
const { policies } = this.cache;
// Identify the result object, even if dataId was already provided,
// since we always need keyObject below.
const [id, keyObject] = policies.identify(
result, selectionSet, context.fragmentMap);
// If dataId was not provided, fall back to the id just generated by
// policies.identify.
dataId = dataId || id;
if ("string" === typeof dataId) {
// Avoid processing the same entity object using the same selection
// set more than once. We use an array instead of a Set since most
// entity IDs will be written using only one selection set, so the
// size of this array is likely to be very small, meaning indexOf is
// likely to be faster than Set.prototype.has.
const sets = context.written[dataId] || (context.written[dataId] = []);
const ref = makeReference(dataId);
if (sets.indexOf(selectionSet) >= 0) return ref;
sets.push(selectionSet);
// If we're about to write a result object into the store, but we
// happen to know that the exact same (===) result object would be
// returned if we were to reread the result with the same inputs,
// then we can skip the rest of the processSelectionSet work for
// this object, and immediately return a Reference to it.
if (this.reader && this.reader.isFresh(
result,
ref,
selectionSet,
context,
)) {
return ref;
}
}
// This variable will be repeatedly updated using context.merge to
// accumulate all fields that need to be written into the store.
let incomingFields: StoreObject = Object.create(null);
// Write any key fields that were used during identification, even if
// they were not mentioned in the original query.
if (keyObject) {
incomingFields = context.merge(incomingFields, keyObject);
}
// If typename was not passed in, infer it. Note that typename is
// always passed in for tricky-to-infer cases such as "Query" for
// ROOT_QUERY.
const typename: string | undefined =
(dataId && policies.rootTypenamesById[dataId]) ||
getTypenameFromResult(result, selectionSet, context.fragmentMap) ||
(dataId && context.store.get(dataId, "__typename") as string);
if ("string" === typeof typename) {
incomingFields.__typename = typename;
}
const workSet = new Set(selectionSet.selections);
workSet.forEach(selection => {
if (!shouldInclude(selection, context.variables)) return;
if (isField(selection)) {
const resultFieldKey = resultKeyNameFromField(selection);
const value = result[resultFieldKey];
if (typeof value !== 'undefined') {
const storeFieldName = policies.getStoreFieldName({
typename,
fieldName: selection.name.value,
field: selection,
variables: context.variables,
});
const childTree = getChildMergeTree(mergeTree, storeFieldName);
let incomingValue =
this.processFieldValue(value, selection, context, childTree);
const childTypename = selection.selectionSet
&& context.store.getFieldValue<string>(incomingValue as StoreObject, "__typename")
|| void 0;
const merge = policies.getMergeFunction(
typename,
selection.name.value,
childTypename,
);
if (merge) {
childTree.info = {
// TODO Check compatibility against any existing
// childTree.field?
field: selection,
typename,
merge,
};
} else {
maybeRecycleChildMergeTree(mergeTree, storeFieldName);
}
incomingFields = context.merge(incomingFields, {
[storeFieldName]: incomingValue,
});
} else if (
policies.usingPossibleTypes &&
!hasDirectives(["defer", "client"], selection)
) {
throw new InvariantError(
`Missing field '${resultFieldKey}' in ${JSON.stringify(
result,
null,
2,
).substring(0, 100)}`,
);
}
} else {
// This is not a field, so it must be a fragment, either inline or named
const fragment = getFragmentFromSelection(
selection,
context.fragmentMap,
);
if (fragment &&
// By passing result and context.variables, we enable
// policies.fragmentMatches to bend the rules when typename is
// not a known subtype of the fragment type condition, but the
// result object contains all the keys requested by the
// fragment, which strongly suggests the fragment probably
// matched. This fuzzy matching behavior must be enabled by
// including a regular expression string (such as ".*" or
// "Prefix.*" or ".*Suffix") in the possibleTypes array for
// specific supertypes; otherwise, all matching remains exact.
// Fuzzy matches are remembered by the Policies object and
// later used when reading from the cache. Since there is no
// incoming result object to check when reading, reading does
// not involve the same fuzzy inference, so the StoreReader
// class calls policies.fragmentMatches without passing result
// or context.variables. The flexibility of fuzzy matching
// allows existing clients to accommodate previously unknown
// __typename strings produced by server/schema changes, which
// would otherwise be breaking changes.
policies.fragmentMatches(fragment, typename, result, context.variables)) {
fragment.selectionSet.selections.forEach(workSet.add, workSet);
}
}
});
if ("string" === typeof dataId) {
const entityRef = makeReference(dataId);
if (mergeTree.map.size) {
incomingFields = this.applyMerges(mergeTree, entityRef, incomingFields, context);
}
if (process.env.NODE_ENV !== "production") {
const hasSelectionSet = (storeFieldName: string) =>
fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
const fieldsWithSelectionSets = new Set<string>();
workSet.forEach(selection => {
if (isField(selection) && selection.selectionSet) {
fieldsWithSelectionSets.add(selection.name.value);
}
});
const hasMergeFunction = (storeFieldName: string) => {
const childTree = mergeTree.map.get(storeFieldName);
return Boolean(childTree && childTree.info && childTree.info.merge);
};
Object.keys(incomingFields).forEach(storeFieldName => {
// If a merge function was defined for this field, trust that it
// did the right thing about (not) clobbering data. If the field
// has no selection set, it's a scalar field, so it doesn't need
// a merge function (even if it's an object, like JSON data).
if (hasSelectionSet(storeFieldName) &&
!hasMergeFunction(storeFieldName)) {
warnAboutDataLoss(
entityRef,
incomingFields,
storeFieldName,
context.store,
);
}
});
}
context.store.merge(dataId, incomingFields);
return entityRef;
}
return incomingFields;
}
private processFieldValue(
value: any,
field: FieldNode,
context: WriteContext,
mergeTree: MergeTree,
): StoreValue {
if (!field.selectionSet || value === null) {
// In development, we need to clone scalar values so that they can be
// safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
// it's cheaper to store the scalar values directly in the cache.
return process.env.NODE_ENV === 'production' ? value : cloneDeep(value);
}
if (Array.isArray(value)) {
return value.map((item, i) => {
const value = this.processFieldValue(
item, field, context, getChildMergeTree(mergeTree, i));
maybeRecycleChildMergeTree(mergeTree, i);
return value;
});
}
return this.processSelectionSet({
result: value,
selectionSet: field.selectionSet,
context,
mergeTree,
});
}
private applyMerges<T extends StoreValue>(
mergeTree: MergeTree,
existing: StoreValue,
incoming: T,
context: ReadMergeModifyContext,
getStorageArgs?: Parameters<EntityStore["getStorage"]>,
): T {
if (mergeTree.map.size && !isReference(incoming)) {
const e: StoreObject | Reference | undefined = (
// Items in the same position in different arrays are not
// necessarily related to each other, so when incoming is an array
// we process its elements as if there was no existing data.
!Array.isArray(incoming) &&
// Likewise, existing must be either a Reference or a StoreObject
// in order for its fields to be safe to merge with the fields of
// the incoming object.
(isReference(existing) || storeValueIsStoreObject(existing))
) ? existing : void 0;
// This narrowing is implied by mergeTree.map.size > 0 and
// !isReference(incoming), though TypeScript understandably cannot
// hope to infer this type.
const i = incoming as StoreObject | StoreValue[];
// The options.storage objects provided to read and merge functions
// are derived from the identity of the parent object plus a
// sequence of storeFieldName strings/numbers identifying the nested
// field name path of each field value to be merged.
if (e && !getStorageArgs) {
getStorageArgs = [isReference(e) ? e.__ref : e];
}
// It's possible that applying merge functions to this subtree will
// not change the incoming data, so this variable tracks the fields
// that did change, so we can create a new incoming object when (and
// only when) at least one incoming field has changed. We use a Map
// to preserve the type of numeric keys.
let changedFields: Map<string | number, StoreValue> | undefined;
const getValue = (
from: typeof e | typeof i,
name: string | number,
): StoreValue => {
return Array.isArray(from)
? (typeof name === "number" ? from[name] : void 0)
: context.store.getFieldValue(from, String(name))
};
mergeTree.map.forEach((childTree, storeFieldName) => {
if (getStorageArgs) {
getStorageArgs.push(storeFieldName);
}
const eVal = getValue(e, storeFieldName);
const iVal = getValue(i, storeFieldName);
const aVal = this.applyMerges(
childTree,
eVal,
iVal,
context,
getStorageArgs,
);
if (aVal !== iVal) {
changedFields = changedFields || new Map;
changedFields.set(storeFieldName, aVal);
}
if (getStorageArgs) {
invariant(getStorageArgs.pop() === storeFieldName);
}
});
if (changedFields) {
// Shallow clone i so we can add changed fields to it.
incoming = (Array.isArray(i) ? i.slice(0) : { ...i }) as T;
changedFields.forEach((value, name) => {
(incoming as any)[name] = value;
});
}
}
if (mergeTree.info) {
return this.cache.policies.runMergeFunction(
existing,
incoming,
mergeTree.info,
context,
getStorageArgs && context.store.getStorage(...getStorageArgs),
);
}
return incoming;
}
}
const emptyMergeTreePool: MergeTree[] = [];
function getChildMergeTree(
{ map }: MergeTree,
name: string | number,
): MergeTree {
if (!map.has(name)) {
map.set(name, emptyMergeTreePool.pop() || { map: new Map });
}
return map.get(name)!;
}
function maybeRecycleChildMergeTree(
{ map }: MergeTree,
name: string | number,
) {
const childTree = map.get(name);
if (childTree &&
!childTree.info &&
!childTree.map.size) {
emptyMergeTreePool.push(childTree);
map.delete(name);
}
}
const warnings = new Set<string>();
// Note that this function is unused in production, and thus should be
// pruned by any well-configured minifier.
function | (
existingRef: Reference,
incomingObj: StoreObject,
storeFieldName: string,
store: NormalizedCache,
) {
const getChild = (objOrRef: StoreObject | Reference): StoreObject | false => {
const child = store.getFieldValue<StoreObject>(objOrRef, storeFieldName);
return typeof child === "object" && child;
};
const existing = getChild(existingRef);
if (!existing) return;
const incoming = getChild(incomingObj);
if (!incoming) return;
// It's always safe to replace a reference, since it refers to data
// safely stored elsewhere.
if (isReference(existing)) return;
// If the values are structurally equivalent, we do not need to worry
// about incoming replacing existing.
if (equal(existing, incoming)) return;
// If we're replacing every key of the existing object, then the
// existing data would be overwritten even if the objects were
// normalized, so warning would not be helpful here.
if (Object.keys(existing).every(
key => store.getFieldValue(incoming, key) !== void 0)) {
return;
}
const parentType =
store.getFieldValue<string>(existingRef, "__typename") ||
store.getFieldValue<string>(incomingObj, "__typename");
const fieldName = fieldNameFromStoreName(storeFieldName);
const typeDotName = `${parentType}.${fieldName}`;
// Avoid warning more than once for the same type and field name.
if (warnings.has(typeDotName)) return;
warnings.add(typeDotName);
const childTypenames: string[] = [];
// Arrays do not have __typename fields, and always need a custom merge
// function, even if their elements are normalized entities.
if (!Array.isArray(existing) &&
!Array.isArray(incoming)) {
[existing, incoming].forEach(child => {
const typename = store.getFieldValue(child, "__typename");
if (typeof typename === "string" &&
!childTypenames.includes(typename)) {
childTypenames.push(typename);
}
});
}
invariant.warn(
`Cache data may be lost when replacing the ${fieldName} field of a ${parentType} object.
To address this problem (which is not a bug in Apollo Client), ${
childTypenames.length
? "either ensure all objects of type " +
childTypenames.join(" and ") + " have an ID or a custom merge function, or "
: ""
}define a custom merge function for the ${
typeDotName
} field, so InMemoryCache can safely merge these objects:
existing: ${JSON.stringify(existing).slice(0, 1000)}
incoming: ${JSON.stringify(incoming).slice(0, 1000)}
For more information about these options, please refer to the documentation:
* Ensuring entity objects have IDs: https://go.apollo.dev/c/generating-unique-identifiers
* Defining custom merge functions: https://go.apollo.dev/c/merging-non-normalized-objects
`);
}
| warnAboutDataLoss | identifier_name |
service.go | package auth
import (
"database/sql"
"encoding/base64"
"errors"
"html/template"
"net/mail"
"strings"
"time"
"github.com/bryanjeal/go-helpers"
"github.com/bryanjeal/go-nonce"
tmpl "github.com/bryanjeal/go-tmpl"
// handle mysql database
_ "github.com/go-sql-driver/mysql"
// handle sqlite3 database
_ "github.com/mattn/go-sqlite3"
"github.com/golang/glog"
"github.com/jmoiron/sqlx"
"github.com/markbates/goth"
"github.com/satori/go.uuid"
"gopkg.in/mailgun/mailgun-go.v1"
)
// Errors
var (
ErrInconsistentIDs = errors.New("inconsistent IDs")
ErrAlreadyExists = errors.New("already exists")
ErrUserNotFound = errors.New("user not found")
ErrInvalidID = errors.New("null id")
ErrInvalidPassword = errors.New("password cannot blank or all spaces")
ErrInvalidName = errors.New("name cannot be blank or all spaces")
ErrIncorrectAuth = errors.New("incorrect email or password")
ErrTodo = errors.New("unimplemented feature or function")
)
// Service is the interface that provides auth methods.
type Service interface {
// NewUserLocal registers a new user by a local account (email and password)
NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error)
// NewUserProvider registers a new user by some oAuth Provider
NewUserProvider(user goth.User, isSuperuser bool) (User, error)
// UserAddProvider associates a new oAuth Provider with the user account
UserAddProvider(id uuid.UUID, user goth.User) (User, error)
// GetUser gets a user account by their ID
GetUser(id uuid.UUID) (User, error)
// UpdateUser update the user's details
UpdateUser(u User) (User, error)
// DeleteUser flag a user as deleted
DeleteUser(id uuid.UUID) (User, error)
// AuthenticateUser logs in a Local User with an email and password
AuthenticateUser(email, password string) (User, error)
// Start the Password Reset process
BeginPasswordReset(email string) error
// Complete the Password Reset process
CompletePasswordReset(token, email, password string) (User, error)
}
// authService satisfies the auth.Service interface
type authService struct {
db *sqlx.DB
mg mailgun.Mailgun
nonce nonce.Service
tpl *tmpl.TplSys
}
// NewService creates an Auth Service that connects to provided DB information
func NewService(db *sqlx.DB, mg mailgun.Mailgun, nonce nonce.Service, tpl *tmpl.TplSys) Service {
s := &authService{
db: db,
mg: mg,
nonce: nonce,
tpl: tpl,
}
// TODO
// Move hardcoded Template Strings to templates.go
template.Must(s.tpl.AddTemplate("auth.baseHTMLEmailTemplate", "", baseHTMLEmailTemplate))
template.Must(s.tpl.AddTemplate("auth.NewUserEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Welcome New User{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Welcome to our service. Thank you for signing up.<br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Forgot your password? No problem! <br/> <br/> To reset your password, click the following link: <br/> <a href="https://www.example.com/auth/password-reset/%recipient.token%">Reset Password</a> <br/> <br/> If you did not request to have your password reset you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
return s
}
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
if err == nil {
return User{}, ErrAlreadyExists
} else if err != sql.ErrNoRows {
return User{}, err
}
// get current time
t := time.Now()
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
// TODO:
// Have users activate their account via an email
u := User{
Email: email,
Password: hashedB64,
FirstName: firstName,
LastName: lastName,
IsSuperuser: isSuperuser,
IsActive: true,
IsDeleted: false,
CreatedAt: t,
UpdatedAt: t,
DeletedAt: time.Time{},
AvatarURL: "",
newPassword: true,
rawPassword: password,
}
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) GetUser(id uuid.UUID) (User, error) {
if id == uuid.Nil {
return User{}, ErrInvalidID
}
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
}
return u, nil
}
func (s *authService) UpdateUser(u User) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", u.Email)
if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
} else if err != nil {
return User{}, err
}
if !uuid.Equal(eUser.ID, u.ID) {
return User{}, ErrInconsistentIDs
}
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
u, err := s.GetUser(id)
if err != nil {
return User{}, err
}
u.IsDeleted = true
u.DeletedAt = time.Now()
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) AuthenticateUser(email, password string) (User, error) {
// Check Email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Check Password
p := strings.TrimSpace(password)
if len(p) == 0 {
return User{}, ErrInvalidPassword
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// check password
hashed, err := base64.StdEncoding.DecodeString(u.Password)
if err != nil {
return User{}, err
}
err = helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password))
if err != nil {
return User{}, ErrIncorrectAuth
}
return u, nil
}
func (s *authService) BeginPasswordReset(email string) error {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return err
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return err
}
// create nonce for reset token
n, err := s.nonce.New("auth.PasswordReset", u.ID, time.Hour*3)
if err != nil {
return err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
if err != nil {
return err
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"token": n.Token,
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
return err
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
return err
}
return nil
}
func (s *authService) CompletePasswordReset(token, email, password string) (User, error) {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Get User
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// Check and Use Token
_, err = s.nonce.CheckThenConsume(token, "auth.PasswordReset", u.ID)
if err != nil {
return User{}, err
}
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
u.Password = hashedB64
u.newPassword = true
u.rawPassword = password
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetConfirmEmail.From, PasswordResetConfirmEmail.Subject, PasswordResetConfirmEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetConfirmEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
// getUserByEmail gets a user from the database by email address
func (s *authService) | (email string) (User, error) {
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE email=$1", email)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrIncorrectAuth
}
return u, nil
}
// saveUser saves a new user to the database or updates an existing user
func (s *authService) saveUser(u *User) error {
if err := u.Validate(); err != nil {
return err
}
var sqlExec string
// if id is nil then it is a new user
if u.ID == uuid.Nil {
// generate ID
u.ID = uuid.NewV4()
sqlExec = `INSERT INTO user
(id, email, password, firstname, lastname, is_superuser, is_active, is_deleted, created_at, updated_at, deleted_at, avatar_url)
VALUES (:id, :email, :password, :firstname, :lastname, :is_superuser, :is_active, :is_deleted, :created_at, :updated_at, :deleted_at, :avatar_url)`
} else {
sqlExec = `UPDATE user SET email=:email, password=:password, firstname=:firstname, lastname=:lastname, is_superuser=:is_superuser,
is_active=:is_active, is_deleted=:is_deleted, created_at=:created_at, updated_at=:updated_at, deleted_at=:deleted_at, avatar_url=:avatar_url WHERE id=:id`
}
tx, err := s.db.Beginx()
if err != nil {
return err
}
_, err = tx.NamedExec(sqlExec, &u)
if err != nil {
tx.Rollback()
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
| getUserByEmail | identifier_name |
service.go | package auth
import (
"database/sql"
"encoding/base64"
"errors"
"html/template"
"net/mail"
"strings"
"time"
"github.com/bryanjeal/go-helpers"
"github.com/bryanjeal/go-nonce"
tmpl "github.com/bryanjeal/go-tmpl"
// handle mysql database
_ "github.com/go-sql-driver/mysql"
// handle sqlite3 database
_ "github.com/mattn/go-sqlite3"
"github.com/golang/glog"
"github.com/jmoiron/sqlx"
"github.com/markbates/goth"
"github.com/satori/go.uuid"
"gopkg.in/mailgun/mailgun-go.v1"
)
// Errors
var (
ErrInconsistentIDs = errors.New("inconsistent IDs")
ErrAlreadyExists = errors.New("already exists")
ErrUserNotFound = errors.New("user not found")
ErrInvalidID = errors.New("null id")
ErrInvalidPassword = errors.New("password cannot blank or all spaces")
ErrInvalidName = errors.New("name cannot be blank or all spaces")
ErrIncorrectAuth = errors.New("incorrect email or password")
ErrTodo = errors.New("unimplemented feature or function")
)
// Service is the interface that provides auth methods.
type Service interface {
// NewUserLocal registers a new user by a local account (email and password)
NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error)
// NewUserProvider registers a new user by some oAuth Provider
NewUserProvider(user goth.User, isSuperuser bool) (User, error)
// UserAddProvider associates a new oAuth Provider with the user account
UserAddProvider(id uuid.UUID, user goth.User) (User, error)
// GetUser gets a user account by their ID
GetUser(id uuid.UUID) (User, error)
// UpdateUser update the user's details
UpdateUser(u User) (User, error)
// DeleteUser flag a user as deleted
DeleteUser(id uuid.UUID) (User, error)
// AuthenticateUser logs in a Local User with an email and password
AuthenticateUser(email, password string) (User, error)
// Start the Password Reset process
BeginPasswordReset(email string) error
// Complete the Password Reset process
CompletePasswordReset(token, email, password string) (User, error)
}
// authService satisfies the auth.Service interface
type authService struct {
db *sqlx.DB
mg mailgun.Mailgun
nonce nonce.Service
tpl *tmpl.TplSys
}
// NewService creates an Auth Service that connects to provided DB information
func NewService(db *sqlx.DB, mg mailgun.Mailgun, nonce nonce.Service, tpl *tmpl.TplSys) Service {
s := &authService{
db: db,
mg: mg,
nonce: nonce,
tpl: tpl,
}
// TODO
// Move hardcoded Template Strings to templates.go
template.Must(s.tpl.AddTemplate("auth.baseHTMLEmailTemplate", "", baseHTMLEmailTemplate))
template.Must(s.tpl.AddTemplate("auth.NewUserEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Welcome New User{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Welcome to our service. Thank you for signing up.<br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Forgot your password? No problem! <br/> <br/> To reset your password, click the following link: <br/> <a href="https://www.example.com/auth/password-reset/%recipient.token%">Reset Password</a> <br/> <br/> If you did not request to have your password reset you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
return s
}
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
if err == nil {
return User{}, ErrAlreadyExists
} else if err != sql.ErrNoRows {
return User{}, err
}
// get current time
t := time.Now()
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
// TODO:
// Have users activate their account via an email
u := User{
Email: email,
Password: hashedB64,
FirstName: firstName,
LastName: lastName,
IsSuperuser: isSuperuser,
IsActive: true,
IsDeleted: false,
CreatedAt: t,
UpdatedAt: t,
DeletedAt: time.Time{},
AvatarURL: "",
newPassword: true,
rawPassword: password,
}
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) GetUser(id uuid.UUID) (User, error) {
if id == uuid.Nil {
return User{}, ErrInvalidID
}
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
}
return u, nil
}
func (s *authService) UpdateUser(u User) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", u.Email)
if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
} else if err != nil {
return User{}, err
}
if !uuid.Equal(eUser.ID, u.ID) {
return User{}, ErrInconsistentIDs
}
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
u, err := s.GetUser(id)
if err != nil {
return User{}, err
}
u.IsDeleted = true
u.DeletedAt = time.Now()
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) AuthenticateUser(email, password string) (User, error) {
// Check Email
e, err := mail.ParseAddress(email)
if err != nil |
// Check Password
p := strings.TrimSpace(password)
if len(p) == 0 {
return User{}, ErrInvalidPassword
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// check password
hashed, err := base64.StdEncoding.DecodeString(u.Password)
if err != nil {
return User{}, err
}
err = helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password))
if err != nil {
return User{}, ErrIncorrectAuth
}
return u, nil
}
func (s *authService) BeginPasswordReset(email string) error {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return err
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return err
}
// create nonce for reset token
n, err := s.nonce.New("auth.PasswordReset", u.ID, time.Hour*3)
if err != nil {
return err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
if err != nil {
return err
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"token": n.Token,
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
return err
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
return err
}
return nil
}
func (s *authService) CompletePasswordReset(token, email, password string) (User, error) {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Get User
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// Check and Use Token
_, err = s.nonce.CheckThenConsume(token, "auth.PasswordReset", u.ID)
if err != nil {
return User{}, err
}
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
u.Password = hashedB64
u.newPassword = true
u.rawPassword = password
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetConfirmEmail.From, PasswordResetConfirmEmail.Subject, PasswordResetConfirmEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetConfirmEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
// getUserByEmail gets a user from the database by email address
func (s *authService) getUserByEmail(email string) (User, error) {
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE email=$1", email)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrIncorrectAuth
}
return u, nil
}
// saveUser saves a new user to the database or updates an existing user
func (s *authService) saveUser(u *User) error {
if err := u.Validate(); err != nil {
return err
}
var sqlExec string
// if id is nil then it is a new user
if u.ID == uuid.Nil {
// generate ID
u.ID = uuid.NewV4()
sqlExec = `INSERT INTO user
(id, email, password, firstname, lastname, is_superuser, is_active, is_deleted, created_at, updated_at, deleted_at, avatar_url)
VALUES (:id, :email, :password, :firstname, :lastname, :is_superuser, :is_active, :is_deleted, :created_at, :updated_at, :deleted_at, :avatar_url)`
} else {
sqlExec = `UPDATE user SET email=:email, password=:password, firstname=:firstname, lastname=:lastname, is_superuser=:is_superuser,
is_active=:is_active, is_deleted=:is_deleted, created_at=:created_at, updated_at=:updated_at, deleted_at=:deleted_at, avatar_url=:avatar_url WHERE id=:id`
}
tx, err := s.db.Beginx()
if err != nil {
return err
}
_, err = tx.NamedExec(sqlExec, &u)
if err != nil {
tx.Rollback()
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
| {
return User{}, err
} | conditional_block |
service.go | package auth
import (
"database/sql"
"encoding/base64"
"errors"
"html/template"
"net/mail"
"strings"
"time"
"github.com/bryanjeal/go-helpers"
"github.com/bryanjeal/go-nonce"
tmpl "github.com/bryanjeal/go-tmpl"
// handle mysql database
_ "github.com/go-sql-driver/mysql"
// handle sqlite3 database
_ "github.com/mattn/go-sqlite3"
"github.com/golang/glog"
"github.com/jmoiron/sqlx"
"github.com/markbates/goth"
"github.com/satori/go.uuid"
"gopkg.in/mailgun/mailgun-go.v1"
)
// Errors
var (
ErrInconsistentIDs = errors.New("inconsistent IDs")
ErrAlreadyExists = errors.New("already exists")
ErrUserNotFound = errors.New("user not found")
ErrInvalidID = errors.New("null id")
ErrInvalidPassword = errors.New("password cannot blank or all spaces")
ErrInvalidName = errors.New("name cannot be blank or all spaces")
ErrIncorrectAuth = errors.New("incorrect email or password")
ErrTodo = errors.New("unimplemented feature or function")
)
// Service is the interface that provides auth methods.
type Service interface {
// NewUserLocal registers a new user by a local account (email and password)
NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error)
// NewUserProvider registers a new user by some oAuth Provider
NewUserProvider(user goth.User, isSuperuser bool) (User, error)
// UserAddProvider associates a new oAuth Provider with the user account
UserAddProvider(id uuid.UUID, user goth.User) (User, error)
// GetUser gets a user account by their ID
GetUser(id uuid.UUID) (User, error)
// UpdateUser update the user's details
UpdateUser(u User) (User, error)
// DeleteUser flag a user as deleted
DeleteUser(id uuid.UUID) (User, error)
// AuthenticateUser logs in a Local User with an email and password
AuthenticateUser(email, password string) (User, error)
// Start the Password Reset process
BeginPasswordReset(email string) error
// Complete the Password Reset process
CompletePasswordReset(token, email, password string) (User, error)
}
// authService satisfies the auth.Service interface
type authService struct {
db *sqlx.DB
mg mailgun.Mailgun
nonce nonce.Service
tpl *tmpl.TplSys
}
// NewService creates an Auth Service that connects to provided DB information
func NewService(db *sqlx.DB, mg mailgun.Mailgun, nonce nonce.Service, tpl *tmpl.TplSys) Service {
s := &authService{
db: db,
mg: mg,
nonce: nonce,
tpl: tpl,
}
// TODO
// Move hardcoded Template Strings to templates.go
template.Must(s.tpl.AddTemplate("auth.baseHTMLEmailTemplate", "", baseHTMLEmailTemplate))
template.Must(s.tpl.AddTemplate("auth.NewUserEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Welcome New User{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Welcome to our service. Thank you for signing up.<br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Forgot your password? No problem! <br/> <br/> To reset your password, click the following link: <br/> <a href="https://www.example.com/auth/password-reset/%recipient.token%">Reset Password</a> <br/> <br/> If you did not request to have your password reset you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
return s
}
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
if err == nil {
return User{}, ErrAlreadyExists
} else if err != sql.ErrNoRows {
return User{}, err
}
// get current time
t := time.Now()
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
// TODO:
// Have users activate their account via an email
u := User{
Email: email,
Password: hashedB64,
FirstName: firstName,
LastName: lastName,
IsSuperuser: isSuperuser,
IsActive: true,
IsDeleted: false,
CreatedAt: t,
UpdatedAt: t,
DeletedAt: time.Time{},
AvatarURL: "",
newPassword: true,
rawPassword: password,
}
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) GetUser(id uuid.UUID) (User, error) {
if id == uuid.Nil {
return User{}, ErrInvalidID
}
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
}
return u, nil
}
func (s *authService) UpdateUser(u User) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", u.Email)
if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
} else if err != nil {
return User{}, err
}
if !uuid.Equal(eUser.ID, u.ID) {
return User{}, ErrInconsistentIDs
}
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
u, err := s.GetUser(id)
if err != nil {
return User{}, err
}
u.IsDeleted = true
u.DeletedAt = time.Now()
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) AuthenticateUser(email, password string) (User, error) {
// Check Email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Check Password
p := strings.TrimSpace(password)
if len(p) == 0 {
return User{}, ErrInvalidPassword
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// check password
hashed, err := base64.StdEncoding.DecodeString(u.Password)
if err != nil {
return User{}, err
}
err = helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password))
if err != nil {
return User{}, ErrIncorrectAuth
}
return u, nil
}
func (s *authService) BeginPasswordReset(email string) error {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return err
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return err
}
// create nonce for reset token
n, err := s.nonce.New("auth.PasswordReset", u.ID, time.Hour*3)
if err != nil {
return err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
if err != nil {
return err
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"token": n.Token,
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
return err
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
return err
}
return nil
}
func (s *authService) CompletePasswordReset(token, email, password string) (User, error) {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Get User
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// Check and Use Token
_, err = s.nonce.CheckThenConsume(token, "auth.PasswordReset", u.ID)
if err != nil {
return User{}, err
}
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
u.Password = hashedB64
u.newPassword = true
u.rawPassword = password
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetConfirmEmail.From, PasswordResetConfirmEmail.Subject, PasswordResetConfirmEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetConfirmEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
// getUserByEmail gets a user from the database by email address
func (s *authService) getUserByEmail(email string) (User, error) {
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE email=$1", email)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrIncorrectAuth
}
return u, nil
}
// saveUser saves a new user to the database or updates an existing user
func (s *authService) saveUser(u *User) error { | if err := u.Validate(); err != nil {
return err
}
var sqlExec string
// if id is nil then it is a new user
if u.ID == uuid.Nil {
// generate ID
u.ID = uuid.NewV4()
sqlExec = `INSERT INTO user
(id, email, password, firstname, lastname, is_superuser, is_active, is_deleted, created_at, updated_at, deleted_at, avatar_url)
VALUES (:id, :email, :password, :firstname, :lastname, :is_superuser, :is_active, :is_deleted, :created_at, :updated_at, :deleted_at, :avatar_url)`
} else {
sqlExec = `UPDATE user SET email=:email, password=:password, firstname=:firstname, lastname=:lastname, is_superuser=:is_superuser,
is_active=:is_active, is_deleted=:is_deleted, created_at=:created_at, updated_at=:updated_at, deleted_at=:deleted_at, avatar_url=:avatar_url WHERE id=:id`
}
tx, err := s.db.Beginx()
if err != nil {
return err
}
_, err = tx.NamedExec(sqlExec, &u)
if err != nil {
tx.Rollback()
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
} | random_line_split | |
service.go | package auth
import (
"database/sql"
"encoding/base64"
"errors"
"html/template"
"net/mail"
"strings"
"time"
"github.com/bryanjeal/go-helpers"
"github.com/bryanjeal/go-nonce"
tmpl "github.com/bryanjeal/go-tmpl"
// handle mysql database
_ "github.com/go-sql-driver/mysql"
// handle sqlite3 database
_ "github.com/mattn/go-sqlite3"
"github.com/golang/glog"
"github.com/jmoiron/sqlx"
"github.com/markbates/goth"
"github.com/satori/go.uuid"
"gopkg.in/mailgun/mailgun-go.v1"
)
// Errors
var (
ErrInconsistentIDs = errors.New("inconsistent IDs")
ErrAlreadyExists = errors.New("already exists")
ErrUserNotFound = errors.New("user not found")
ErrInvalidID = errors.New("null id")
ErrInvalidPassword = errors.New("password cannot blank or all spaces")
ErrInvalidName = errors.New("name cannot be blank or all spaces")
ErrIncorrectAuth = errors.New("incorrect email or password")
ErrTodo = errors.New("unimplemented feature or function")
)
// Service is the interface that provides auth methods.
type Service interface {
// NewUserLocal registers a new user by a local account (email and password)
NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error)
// NewUserProvider registers a new user by some oAuth Provider
NewUserProvider(user goth.User, isSuperuser bool) (User, error)
// UserAddProvider associates a new oAuth Provider with the user account
UserAddProvider(id uuid.UUID, user goth.User) (User, error)
// GetUser gets a user account by their ID
GetUser(id uuid.UUID) (User, error)
// UpdateUser update the user's details
UpdateUser(u User) (User, error)
// DeleteUser flag a user as deleted
DeleteUser(id uuid.UUID) (User, error)
// AuthenticateUser logs in a Local User with an email and password
AuthenticateUser(email, password string) (User, error)
// Start the Password Reset process
BeginPasswordReset(email string) error
// Complete the Password Reset process
CompletePasswordReset(token, email, password string) (User, error)
}
// authService satisfies the auth.Service interface
type authService struct {
db *sqlx.DB
mg mailgun.Mailgun
nonce nonce.Service
tpl *tmpl.TplSys
}
// NewService creates an Auth Service that connects to provided DB information
func NewService(db *sqlx.DB, mg mailgun.Mailgun, nonce nonce.Service, tpl *tmpl.TplSys) Service {
s := &authService{
db: db,
mg: mg,
nonce: nonce,
tpl: tpl,
}
// TODO
// Move hardcoded Template Strings to templates.go
template.Must(s.tpl.AddTemplate("auth.baseHTMLEmailTemplate", "", baseHTMLEmailTemplate))
template.Must(s.tpl.AddTemplate("auth.NewUserEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Welcome New User{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Welcome to our service. Thank you for signing up.<br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Forgot your password? No problem! <br/> <br/> To reset your password, click the following link: <br/> <a href="https://www.example.com/auth/password-reset/%recipient.token%">Reset Password</a> <br/> <br/> If you did not request to have your password reset you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
return s
}
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
if err == nil {
return User{}, ErrAlreadyExists
} else if err != sql.ErrNoRows {
return User{}, err
}
// get current time
t := time.Now()
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
// TODO:
// Have users activate their account via an email
u := User{
Email: email,
Password: hashedB64,
FirstName: firstName,
LastName: lastName,
IsSuperuser: isSuperuser,
IsActive: true,
IsDeleted: false,
CreatedAt: t,
UpdatedAt: t,
DeletedAt: time.Time{},
AvatarURL: "",
newPassword: true,
rawPassword: password,
}
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) GetUser(id uuid.UUID) (User, error) |
func (s *authService) UpdateUser(u User) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", u.Email)
if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
} else if err != nil {
return User{}, err
}
if !uuid.Equal(eUser.ID, u.ID) {
return User{}, ErrInconsistentIDs
}
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
u, err := s.GetUser(id)
if err != nil {
return User{}, err
}
u.IsDeleted = true
u.DeletedAt = time.Now()
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) AuthenticateUser(email, password string) (User, error) {
// Check Email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Check Password
p := strings.TrimSpace(password)
if len(p) == 0 {
return User{}, ErrInvalidPassword
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// check password
hashed, err := base64.StdEncoding.DecodeString(u.Password)
if err != nil {
return User{}, err
}
err = helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password))
if err != nil {
return User{}, ErrIncorrectAuth
}
return u, nil
}
func (s *authService) BeginPasswordReset(email string) error {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return err
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return err
}
// create nonce for reset token
n, err := s.nonce.New("auth.PasswordReset", u.ID, time.Hour*3)
if err != nil {
return err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
if err != nil {
return err
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"token": n.Token,
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
return err
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
return err
}
return nil
}
func (s *authService) CompletePasswordReset(token, email, password string) (User, error) {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Get User
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// Check and Use Token
_, err = s.nonce.CheckThenConsume(token, "auth.PasswordReset", u.ID)
if err != nil {
return User{}, err
}
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
u.Password = hashedB64
u.newPassword = true
u.rawPassword = password
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetConfirmEmail.From, PasswordResetConfirmEmail.Subject, PasswordResetConfirmEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetConfirmEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
// getUserByEmail gets a user from the database by email address
func (s *authService) getUserByEmail(email string) (User, error) {
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE email=$1", email)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrIncorrectAuth
}
return u, nil
}
// saveUser saves a new user to the database or updates an existing user
func (s *authService) saveUser(u *User) error {
if err := u.Validate(); err != nil {
return err
}
var sqlExec string
// if id is nil then it is a new user
if u.ID == uuid.Nil {
// generate ID
u.ID = uuid.NewV4()
sqlExec = `INSERT INTO user
(id, email, password, firstname, lastname, is_superuser, is_active, is_deleted, created_at, updated_at, deleted_at, avatar_url)
VALUES (:id, :email, :password, :firstname, :lastname, :is_superuser, :is_active, :is_deleted, :created_at, :updated_at, :deleted_at, :avatar_url)`
} else {
sqlExec = `UPDATE user SET email=:email, password=:password, firstname=:firstname, lastname=:lastname, is_superuser=:is_superuser,
is_active=:is_active, is_deleted=:is_deleted, created_at=:created_at, updated_at=:updated_at, deleted_at=:deleted_at, avatar_url=:avatar_url WHERE id=:id`
}
tx, err := s.db.Beginx()
if err != nil {
return err
}
_, err = tx.NamedExec(sqlExec, &u)
if err != nil {
tx.Rollback()
return err
}
err = tx.Commit()
if err != nil {
return err
}
return nil
}
| {
if id == uuid.Nil {
return User{}, ErrInvalidID
}
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
}
return u, nil
} | identifier_body |
pkg_util.go | package pkg_util
import (
"fmt"
"os"
"zip_util"
"time"
"strings"
"os/exec"
"log"
"io"
"path/filepath"
"errors"
)
var (
LOCAL_PATH1 = os.TempDir() + "\\test_file.zip" // 下载到本地的路径1
LOCAL_PATH2 = "d:\\test_file.zip" // 下载到本地的路径2
UNZIP_PATH1 = os.TempDir() + "\\hbn_pkg\\" // 更新包解压路径1
UNZIP_PATH2 = "d:\\hbn_pkg\\" // 更新包解压路径1
//TOMCAT_PROCESS_PREFIX = "tomcat" // tomcat进程前缀
//TOMCAT_W_PROCESS_SUFFIX = "w.exe" // tomcatx进程后缀,形式:w.exe
PKG_CFGFILE_PATH_ARR = []string{// 项目配置文件地址,
"webapps\\agent\\WEB-INF\\log4j.properties",
"webapps\\agent\\WEB-INF\\classes\\config.properties",
"webapps\\agent\\WEB-INF\\classes\\openoffice.properties"}
)
type TomcatInfo struct {
ProcessName string // tomcat6.ext
ProcessPath string // d:\xx\tomcat6\bin\tomcat6.exe
ProcessHome string // d:\xx\tomcat6\
PackageBackPath string // 更新前项目备份路径
PackageDir string // 项目存放路径 d:\xx\tomcat6\webapps\agent
PackageFileName string // 更新包名称 agent_1114
PackageBackFileName string // 备份包名称 201712081536更新前备份.zip
ConfigFileBackupDir string // 配置文件临时目录d:\xx\tomcat6\temp_config
NewPackageDir string // 新包地址
Update bool // 是否需要更新
Complete bool // 更新完成
}
/**
备份tomcat目录下项目
*/
func BackupCurrentPackage(tomcatArr []*TomcatInfo) error {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
if(len(tempArr) == 0) {
fmt.Println("当前系统未运行tomcat实例,无法更新");
//os.Exit(1);
var e = errors.New("当前系统未运行tomcat实例,无法更新")
return e;
}
for i := range tempArr {
var tomcatInfo = tempArr[i];
if !tomcatInfo.Update {
continue;
}
tomcatInfo.ConfigFileBackupDir = tomcatInfo.ProcessHome+ "pkg_cfg\\"
_, stat_err := os.Stat(tomcatInfo.ProcessHome+ "pkg_cfg\\");
if stat_err != nil && os.IsNotExist(stat_err) {
var mkdir_err = os.MkdirAll(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if mkdir_err != nil {
return stat_err;
}
} else if stat_err != nil {
return stat_err;
}
tomcatWebappDirFile, err := os.Open(tomcatInfo.PackageDir);
var tomcatWebappPath = []*os.File{tomcatWebappDirFile };
if err != nil {
fmt.Println(tomcatInfo.ProcessName + "备份失败")
fmt.Println(err)
os.Exit(1);
}
var tomcatBckupPath = tomcatInfo.PackageBackPath;
_, stateErr := os.Stat(tomcatBckupPath)
if stateErr != nil {
direrr := os.Mkdir("" + tomcatBckupPath, 0777);
//direrr := os.MkdirAll("D:\\Program Files\\Apache Software Foundation\\apache-tomcat-8.0.39\\backup", 0777)
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录失败");
//os.Exit(1);
return direrr;
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录成功");
}
}
// 备份tomcat目录下当前项目
// time.Now().Format("200601021504")
var now = time.Now();
var backupFileName = now.Format("200601021504") + "更新前备份";
ziperr := zip_util.Zip(tomcatWebappPath, tomcatBckupPath + backupFileName + ".zip");
if ziperr == nil {
tomcatInfo.PackageBackFileName = backupFileName + ".zip";
fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件成功:" + tomcatInfo.PackageBackFileName);
}
// 备份tomcat目录下项目配置文件,如config.properties、log4j.xml、openoffice.properties
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
// 备份目录不存在则创建备份目录
_, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\");
if stateErr != nil {
direrr := os.Mkdir(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败");
os.Exit(1);
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功");
}
}
written, copyErr := copyFile(tomcatInfo.ProcessHome+ "pkg_cfg\\" + cfgFileName, cfgFilePath);
if copyErr != nil {
fmt.Println(copyErr)
fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败");
os.Exit(1);
}
fmt.Println("复制" + tomcatInfo.ProcessName+ "配置文件成功,文件:" + cfgFileName + ",大小:", written, "byte");
}
}
//fmt.Println(tomcatArr);
return nil;
}
/**
获取tomcat信息
*/
func GetTomcatArray(
tomcatPrefix string,
tomcatSuffix string) [] TomcatInfo {
//out, err := exec.Command("cmd", "/C", "tasklist ").Output()
out, err := exec.Command("cmd", "/C", "tasklist").Output()
if err != nil {
log.Fatal(err)
}
//fmt.Printf(string(out))
var processStrList[] string = strings.Split(string(out), "\r\n");
var tomcatArr []TomcatInfo;
for i := range processStrList {
if(strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix)) {
//fmt.Println(i)
//fmt.Println(processStrList[i])
var processName = strings.Split(processStrList[i], " ")[0];
if ! strings.HasSuffix(processName, tomcatSuffix) {
out2, err2 := exec.Command("cmd", "/C", "wmic process where name='" + processName + "' get ExecutablePath").Output()
if err2 == nil {
// TODO
var fileDirectoryArr[] string = strings.Split(strings.Split(string(out2), "\r\n", )[1], "\\");
if(len(fileDirectoryArr) < 2) {
continue;
}
var parentDirectoryArr = fileDirectoryArr[0: len(fileDirectoryArr) - 2];
var tomcatInfo TomcatInfo;
tomcatInfo.ProcessName = processName;
tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\";
tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName;
tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\";
tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\";
tomcatArr = append(tomcatArr, tomcatInfo);
} else {
fmt.Println(err2)
}
//fmt.Println("------------------------------------------------------")
}
}
}
return tomcatArr;
//fmt.Println(TOMCAT_PROCESS_MAP)
}
/**
确认tomcat
*/
func ConfirmTomcat(tomcatArr []* TomcatInfo) {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
for i := range tempArr {
var tomcatInfo = tempArr[i];
if tomcatInfo == nil {
continue;
}
// 当前tomcat是否需要更新
for true {
var update string
fmt.Print("是否需要更新 " + tomcatInfo.ProcessName + "(0:否;1:是): ");
fmt.Scanln(&update);
if(update == "1" || update == "0") {
tomcatInfo.Update = (update == "1");
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
// 当前tomcat需要的更新包
for tomcatInfo.Update {
var pkg string
fmt.Print(tomcatInfo.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ");
fmt.Scanln(&pkg);
if(pkg == "0") {
tomcatInfo.PackageFileName = "tyb";
break;
} else if(pkg == "1") {
tomcatInfo.PackageFileName = "agent_1114";
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
}
//fmt.Println("confirm complete.")
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
func copyDir(src string, dest string) error {
src_original := src;
err := filepath.Walk(src, func(src string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if f.IsDir() {
//fmt.Println(f.Name())
//copyDir(f.Name(), dest+"/"+f.Name())
if(src != src_original) {
var temp_str = strings.Replace(src, src_original, dest, 1);
os.MkdirAll(temp_str, 0777);
}
} else {
//fmt.Println(src);
//fmt.Println(src_original);
//fmt.Println(dest);
//fmt.Println("--------------------------------------------------------------------------------")
dest_new := strings.Replace(src, src_original, dest, -1);
//fmt.Println(dest_new);
//fmt.Println("拷贝文件:" + src + " > " + dest_new);
os.Create(dest_new);
copyFile(dest_new, src);
}
//println(path)
return nil
}) | }
return nil;
}
/**
替换包
*/
func ReplacePkg(tomcatInfo TomcatInfo) error {
var stopErr = stopTomcat(tomcatInfo);
if stopErr != nil {
fmt.Println("停止" + tomcatInfo.ProcessName + "出错!");
} else {
fmt.Println("停止" + tomcatInfo.ProcessName + "成功。");
}
//var destDir = tomcatInfo.ProcessHome + "webapps\\agent";
var destDir = tomcatInfo.ProcessHome + "webapps";
// 删除webapps\agent
rem_err := os.RemoveAll(destDir + "\\agent");
if rem_err != nil {
fmt.Println("移除目录出错:" + destDir + "\\agent");
fmt.Println(rem_err)
return rem_err;
}
copy_err := copyDir(tomcatInfo.NewPackageDir, destDir);
if copy_err != nil {
fmt.Println("拷贝目录出错:" + destDir);
return copy_err;
}
// 还原配置文件
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
write_len, copy_err2 := copyFile(cfgFilePath, tomcatInfo.ConfigFileBackupDir + cfgFileName);
if copy_err2 != nil || write_len == 0 {
fmt.Println("还原配置文件出错:" + tomcatInfo.ConfigFileBackupDir + cfgFileName);
return copy_err2;
}
}
var startErr = startTomcat(tomcatInfo);
if startErr != nil {
fmt.Println("启动" + tomcatInfo.ProcessName + "出错!");
} else {
fmt.Println("启动" + tomcatInfo.ProcessName + "成功。");
}
return nil;
}
/**
停止tomcat
*/
func stopTomcat(tomcatInfo TomcatInfo) error {
var processName = strings.Split(tomcatInfo.ProcessName, ".")[0]
_, err := exec.Command("cmd", "/C", "net stop " + processName + " && taskkill /f /im " + tomcatInfo.ProcessHome).Output();
if err != nil {
return err;
}
return nil;
}
/**
启动tomcat
*/
func startTomcat(tomcatInfo TomcatInfo) error {
var processName = strings.Split(tomcatInfo.ProcessName, ".")[0]
_, err := exec.Command("cmd", "/C", "net start " + processName).Output();
if err != nil {
return err;
}
return nil;
} |
if err != nil {
fmt.Printf("filepath.Walk() returned %v\n", err);
return err; | random_line_split |
pkg_util.go | package pkg_util
import (
"fmt"
"os"
"zip_util"
"time"
"strings"
"os/exec"
"log"
"io"
"path/filepath"
"errors"
)
var (
LOCAL_PATH1 = os.TempDir() + "\\test_file.zip" // 下载到本地的路径1
LOCAL_PATH2 = "d:\\test_file.zip" // 下载到本地的路径2
UNZIP_PATH1 = os.TempDir() + "\\hbn_pkg\\" // 更新包解压路径1
UNZIP_PATH2 = "d:\\hbn_pkg\\" // 更新包解压路径1
//TOMCAT_PROCESS_PREFIX = "tomcat" // tomcat进程前缀
//TOMCAT_W_PROCESS_SUFFIX = "w.exe" // tomcatx进程后缀,形式:w.exe
PKG_CFGFILE_PATH_ARR = []string{// 项目配置文件地址,
"webapps\\agent\\WEB-INF\\log4j.properties",
"webapps\\agent\\WEB-INF\\classes\\config.properties",
"webapps\\agent\\WEB-INF\\classes\\openoffice.properties"}
)
type TomcatInfo struct {
ProcessName string // tomcat6.ext
ProcessPath string // d:\xx\tomcat6\bin\tomcat6.exe
ProcessHome string // d:\xx\tomcat6\
PackageBackPath string // 更新前项目备份路径
PackageDir string // 项目存放路径 d:\xx\tomcat6\webapps\agent
PackageFileName string // 更新包名称 agent_1114
PackageBackFileName string // 备份包名称 201712081536更新前备份.zip
ConfigFileBackupDir string // 配置文件临时目录d:\xx\tomcat6\temp_config
NewPackageDir string // 新包地址
Update bool // 是否需要更新
Complete bool // 更新完成
}
/**
备份tomcat目录下项目
*/
func BackupCurrentPackage(tomcatArr []*TomcatInfo) error {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
if(len(tempArr) == 0) {
fmt.Println("当前系统未运行tomcat实例,无法更新");
//os.Exit(1);
var e = errors.New("当前系统未运行tomcat实例,无法更新")
return e;
}
for i := range tempArr {
var tomcatInfo = tempArr[i];
if !tomcatInfo.Update {
continue;
}
tomcatInfo.ConfigFileBackupDir = tomcatInfo.ProcessHome+ "pkg_cfg\\"
_, stat_err := os.Stat(tomcatInfo.ProcessHome+ "pkg_cfg\\");
if stat_err != nil && os.IsNotExist(stat_err) {
var mkdir_err = os.MkdirAll(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if mkdir_err != nil {
return stat_err;
}
} else if stat_err != nil {
return stat_err;
}
tomcatWebappDirFile, err := os.Open(tomcatInfo.PackageDir);
var tomcatWebappPath = []*os.File{tomcatWebappDirFile };
if err != nil {
fmt.Println(tomcatInfo.ProcessName + "备份失败")
fmt.Println(err)
os.Exit(1);
}
var tomcatBckupPath = tomcatInfo.PackageBackPath;
_, stateErr := os.Stat(tomcatBckupPath)
if stateErr != nil {
direrr := os.Mkdir("" + tomcatBckupPath, 0777);
//direrr := os.MkdirAll("D:\\Program Files\\Apache Software Foundation\\apache-tomcat-8.0.39\\backup", 0777)
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录失败");
//os.Exit(1);
return direrr;
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录成功");
}
}
// 备份tomcat目录下当前项目
// time.Now().Format("200601021504")
var now = time.Now();
var backupFileName = now.Format("200601021504") + "更新前备份";
ziperr := zip_util.Zip(tomcatWebappPath, tomcatBckupPath + backupFileName + ".zip");
if ziperr == nil {
tomcatInfo.PackageBackFileName = backupFileName + ".zip";
fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件成功:" + tomcatInfo.PackageBackFileName);
}
// 备份tomcat目录下项目配置文件,如config.properties、log4j.xml、openoffice.properties
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
// 备份目录不存在则创建备份目录
_, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\");
if stateErr != nil {
direrr := os.Mkdir(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败");
os.Exit(1);
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功");
}
}
written, copyErr := copyFile(tomcatInfo.ProcessHome+ "pkg_cfg\\" + cfgFileName, cfgFilePath);
if copyErr != nil {
fmt.Println(copyErr)
fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败");
os.Exit(1);
}
fmt.Println("复制" + tomcatInfo.ProcessName+ "配置文件成功,文件:" + cfgFileName + ",大小:", written, "byte");
}
}
//fmt.Println(tomcatArr);
return nil;
}
/**
获取tomcat信息
*/
func GetTomcatArray(
tomcatPrefix string,
tomcatSuffix string) [] TomcatInfo {
//out, err := exec.Command("cmd", "/C", "tasklist ").Output()
out, err := exec.Command("cmd", "/C", "tasklist").Output()
if err != nil {
log.Fatal(err)
}
//fmt.Printf(string(out))
var processStrList[] string = strings.Split(string(out), "\r\n");
var tomcatArr []TomcatInfo;
for i := range processStrList {
if(strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix)) {
//fmt.Println(i)
//fmt.Println(processStrList[i])
var processName = strings.Split(processStrList[i], " ")[0];
if ! strings.HasSuffi | e {
var pkg string
fmt.Print(tomcatInfo.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ");
fmt.Scanln(&pkg);
if(pkg == "0") {
tomcatInfo.PackageFileName = "tyb";
break;
} else if(pkg == "1") {
tomcatInfo.PackageFileName = "agent_1114";
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
}
//fmt.Println("confirm complete.")
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
func copyDir(src string, dest string) error {
src_original := src;
err := filepath.Walk(src, func(src string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if f.IsDir() {
//fmt.Println(f.Name())
//copyDir(f.Name(), dest+"/"+f.Name())
if(src != src_original) {
var temp_str = strings.Replace(src, src_original, dest, 1);
os.MkdirAll(temp_str, 0777);
}
} else {
//fmt.Println(src);
//fmt.Println(src_original);
//fmt.Println(dest);
//fmt.Println("--------------------------------------------------------------------------------")
dest_new := strings.Replace(src, src_original, dest, -1);
//fmt.Println(dest_new);
//fmt.Println("拷贝文件:" + src + " > " + dest_new);
os.Create(dest_new);
copyFile(dest_new, src);
}
//println(path)
return nil
})
if err != nil {
fmt.Printf("filepath.Walk() returned %v\n", err);
return err;
}
return nil;
}
/**
替换包
*/
func ReplacePkg(tomcatInfo TomcatInfo) error {
var stopErr = stopTomcat(tomcatInfo);
if stopErr != nil {
fmt.Println("停止" + tomcatInfo.ProcessName + "出错!");
} else {
fmt.Println("停止" + tomcatInfo.ProcessName + "成功。");
}
//var destDir = tomcatInfo.ProcessHome + "webapps\\agent";
var destDir = tomcatInfo.ProcessHome + "webapps";
// 删除webapps\agent
rem_err := os.RemoveAll(destDir + "\\agent");
if rem_err != nil {
fmt.Println("移除目录出错:" + destDir + "\\agent");
fmt.Println(rem_err)
return rem_err;
}
copy_err := copyDir(tomcatInfo.NewPackageDir, destDir);
if copy_err != nil {
fmt.Println("拷贝目录出错:" + destDir);
return copy_err;
}
// 还原配置文件
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
write_len, copy_err2 := copyFile(cfgFilePath, tomcatInfo.ConfigFileBackupDir + cfgFileName);
if copy_err2 != nil || write_len == 0 {
fmt.Println("还原配置文件出错:" + tomcatInfo.ConfigFileBackupDir + cfgFileName);
return copy_err2;
}
}
var startErr = startTomcat(tomcatInfo);
if startErr != nil {
fmt.Println("启动" + tomcatInfo.ProcessName + "出错!");
} else {
fmt.Println("启动" + tomcatInfo.ProcessName + "成功。");
}
return nil;
}
/**
停止tomcat
*/
func stopTomcat(tomcatInfo TomcatInfo) error {
var processName = strings.Split(tomcatInfo.ProcessName, ".")[0]
_, err := exec.Command("cmd", "/C", "net stop " + processName + " && taskkill /f /im " + tomcatInfo.ProcessHome).Output();
if err != nil {
return err;
}
return nil;
}
/**
启动tomcat
*/
func startTomcat(tomcatInfo TomcatInfo) error {
var processName = strings.Split(tomcatInfo.ProcessName, ".")[0]
_, err := exec.Command("cmd", "/C", "net start " + processName).Output();
if err != nil {
return err;
}
return nil;
}
| x(processName, tomcatSuffix) {
out2, err2 := exec.Command("cmd", "/C", "wmic process where name='" + processName + "' get ExecutablePath").Output()
if err2 == nil {
// TODO
var fileDirectoryArr[] string = strings.Split(strings.Split(string(out2), "\r\n", )[1], "\\");
if(len(fileDirectoryArr) < 2) {
continue;
}
var parentDirectoryArr = fileDirectoryArr[0: len(fileDirectoryArr) - 2];
var tomcatInfo TomcatInfo;
tomcatInfo.ProcessName = processName;
tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\";
tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName;
tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\";
tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\";
tomcatArr = append(tomcatArr, tomcatInfo);
} else {
fmt.Println(err2)
}
//fmt.Println("------------------------------------------------------")
}
}
}
return tomcatArr;
//fmt.Println(TOMCAT_PROCESS_MAP)
}
/**
确认tomcat
*/
func ConfirmTomcat(tomcatArr []* TomcatInfo) {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
for i := range tempArr {
var tomcatInfo = tempArr[i];
if tomcatInfo == nil {
continue;
}
// 当前tomcat是否需要更新
for true {
var update string
fmt.Print("是否需要更新 " + tomcatInfo.ProcessName + "(0:否;1:是): ");
fmt.Scanln(&update);
if(update == "1" || update == "0") {
tomcatInfo.Update = (update == "1");
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
// 当前tomcat需要的更新包
for tomcatInfo.Updat | identifier_body |
pkg_util.go | package pkg_util
import (
"fmt"
"os"
"zip_util"
"time"
"strings"
"os/exec"
"log"
"io"
"path/filepath"
"errors"
)
var (
LOCAL_PATH1 = os.TempDir() + "\\test_file.zip" // 下载到本地的路径1
LOCAL_PATH2 = "d:\\test_file.zip" // 下载到本地的路径2
UNZIP_PATH1 = os.TempDir() + "\\hbn_pkg\\" // 更新包解压路径1
UNZIP_PATH2 = "d:\\hbn_pkg\\" // 更新包解压路径1
//TOMCAT_PROCESS_PREFIX = "tomcat" // tomcat进程前缀
//TOMCAT_W_PROCESS_SUFFIX = "w.exe" // tomcatx进程后缀,形式:w.exe
PKG_CFGFILE_PATH_ARR = []string{// 项目配置文件地址,
"webapps\\agent\\WEB-INF\\log4j.properties",
"webapps\\agent\\WEB-INF\\classes\\config.properties",
"webapps\\agent\\WEB-INF\\classes\\openoffice.properties"}
)
type TomcatInfo struct {
ProcessName string // tomcat6.ext
ProcessPath string // d:\xx\tomcat6\bin\tomcat6.exe
ProcessHome string // d:\xx\tomcat6\
PackageBackPath string // 更新前项目备份路径
PackageDir string // 项目存放路径 d:\xx\tomcat6\webapps\agent
PackageFileName string // 更新包名称 agent_1114
PackageBackFileName string // 备份包名称 201712081536更新前备份.zip
ConfigFileBackupDir string // 配置文件临时目录d:\xx\tomcat6\temp_config
NewPackageDir string // 新包地址
Update bool // 是否需要更新
Complete bool // 更新完成
}
/**
备份tomcat目录下项目
*/
func BackupCurrentPackage(tomcatArr []*TomcatInfo) error {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
if(len(tempArr) == 0) {
fmt.Println("当前系统未运行tomcat实例,无法更新");
//os.Exit(1);
var e = errors.New("当前系统未运行tomcat实例,无法更新")
return e;
}
for i := range tempArr {
var tomcatInfo = tempArr[i];
if !tomcatInfo.Update {
continue;
}
tomcatInfo.ConfigFileBackupDir = tomcatInfo.ProcessHome+ "pkg_cfg\\"
_, stat_err := os.Stat(tomcatInfo.ProcessHome+ "pkg_cfg\\");
if stat_err != nil && os.IsNotExist(stat_err) {
var mkdir_err = os.MkdirAll(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if mkdir_err != nil {
return stat_err;
}
} else if stat_err != nil {
return stat_err;
}
tomcatWebappDirFile, err := os.Open(tomcatInfo.PackageDir);
var tomcatWebappPath = []*os.File{tomcatWebappDirFile };
if err != nil {
fmt.Println(tomcatInfo.ProcessName + "备份失败")
fmt.Println(err)
os.Exit(1);
}
var tomcatBckupPath = tomcatInfo.PackageBackPath;
_, stateErr := os.Stat(tomcatBckupPath)
if stateErr != nil {
direrr := os.Mkdir("" + tomcatBckupPath, 0777);
//direrr := os.MkdirAll("D:\\Program Files\\Apache Software Foundation\\apache-tomcat-8.0.39\\backup", 0777)
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录失败");
//os.Exit(1);
return direrr;
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录成功");
}
}
// 备份tomcat目录下当前项目
// time.Now().Format("200601021504")
var now = time.Now();
var backupFileName = now.Format("200601021504") + "更新前备份";
ziperr := zip_util.Zip(tomcatWebappPath, tomcatBckupPath + backupFileName + ".zip");
if ziperr == nil {
tomcatInfo.PackageBackFileName = backupFileName + ".zip";
fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件成功:" + tomcatInfo.PackageBackFileName);
}
// 备份tomcat目录下项目配置文件,如config.properties、log4j.xml、openoffice.properties
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
// 备份目录不存在则创建备份目录
_, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\");
if stateErr != nil {
direrr := os.Mkdir(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败");
os.Exit(1);
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功");
}
}
written, copyErr := copyFile(tomcatInfo.ProcessHome+ "pkg_cfg\\" + cfgFileName, cfgFilePath);
if copyErr != nil {
fmt.Println(copyErr)
fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败");
os.Exit(1);
}
| il;
}
/**
获取tomcat信息
*/
func GetTomcatArray(
tomcatPrefix string,
tomcatSuffix string) [] TomcatInfo {
//out, err := exec.Command("cmd", "/C", "tasklist ").Output()
out, err := exec.Command("cmd", "/C", "tasklist").Output()
if err != nil {
log.Fatal(err)
}
//fmt.Printf(string(out))
var processStrList[] string = strings.Split(string(out), "\r\n");
var tomcatArr []TomcatInfo;
for i := range processStrList {
if(strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix)) {
//fmt.Println(i)
//fmt.Println(processStrList[i])
var processName = strings.Split(processStrList[i], " ")[0];
if ! strings.HasSuffix(processName, tomcatSuffix) {
out2, err2 := exec.Command("cmd", "/C", "wmic process where name='" + processName + "' get ExecutablePath").Output()
if err2 == nil {
// TODO
var fileDirectoryArr[] string = strings.Split(strings.Split(string(out2), "\r\n", )[1], "\\");
if(len(fileDirectoryArr) < 2) {
continue;
}
var parentDirectoryArr = fileDirectoryArr[0: len(fileDirectoryArr) - 2];
var tomcatInfo TomcatInfo;
tomcatInfo.ProcessName = processName;
tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\";
tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName;
tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\";
tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\";
tomcatArr = append(tomcatArr, tomcatInfo);
} else {
fmt.Println(err2)
}
//fmt.Println("------------------------------------------------------")
}
}
}
return tomcatArr;
//fmt.Println(TOMCAT_PROCESS_MAP)
}
/**
确认tomcat
*/
func ConfirmTomcat(tomcatArr []* TomcatInfo) {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
for i := range tempArr {
var tomcatInfo = tempArr[i];
if tomcatInfo == nil {
continue;
}
// 当前tomcat是否需要更新
for true {
var update string
fmt.Print("是否需要更新 " + tomcatInfo.ProcessName + "(0:否;1:是): ");
fmt.Scanln(&update);
if(update == "1" || update == "0") {
tomcatInfo.Update = (update == "1");
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
// 当前tomcat需要的更新包
for tomcatInfo.Update {
var pkg string
fmt.Print(tomcatInfo.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ");
fmt.Scanln(&pkg);
if(pkg == "0") {
tomcatInfo.PackageFileName = "tyb";
break;
} else if(pkg == "1") {
tomcatInfo.PackageFileName = "agent_1114";
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
}
//fmt.Println("confirm complete.")
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
func copyDir(src string, dest string) error {
src_original := src;
err := filepath.Walk(src, func(src string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if f.IsDir() {
//fmt.Println(f.Name())
//copyDir(f.Name(), dest+"/"+f.Name())
if(src != src_original) {
var temp_str = strings.Replace(src, src_original, dest, 1);
os.MkdirAll(temp_str, 0777);
}
} else {
//fmt.Println(src);
//fmt.Println(src_original);
//fmt.Println(dest);
//fmt.Println("--------------------------------------------------------------------------------")
dest_new := strings.Replace(src, src_original, dest, -1);
//fmt.Println(dest_new);
//fmt.Println("拷贝文件:" + src + " > " + dest_new);
os.Create(dest_new);
copyFile(dest_new, src);
}
//println(path)
return nil
})
if err != nil {
fmt.Printf("filepath.Walk() returned %v\n", err);
return err;
}
return nil;
}
/**
替换包
*/
func ReplacePkg(tomcatInfo TomcatInfo) error {
var stopErr = stopTomcat(tomcatInfo);
if stopErr != nil {
fmt.Println("停止" + tomcatInfo.ProcessName + "出错!");
} else {
fmt.Println("停止" + tomcatInfo.ProcessName + "成功。");
}
//var destDir = tomcatInfo.ProcessHome + "webapps\\agent";
var destDir = tomcatInfo.ProcessHome + "webapps";
// 删除webapps\agent
rem_err := os.RemoveAll(destDir + "\\agent");
if rem_err != nil {
fmt.Println("移除目录出错:" + destDir + "\\agent");
fmt.Println(rem_err)
return rem_err;
}
copy_err := copyDir(tomcatInfo.NewPackageDir, destDir);
if copy_err != nil {
fmt.Println("拷贝目录出错:" + destDir);
return copy_err;
}
// 还原配置文件
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
write_len, copy_err2 := copyFile(cfgFilePath, tomcatInfo.ConfigFileBackupDir + cfgFileName);
if copy_err2 != nil || write_len == 0 {
fmt.Println("还原配置文件出错:" + tomcatInfo.ConfigFileBackupDir + cfgFileName);
return copy_err2;
}
}
var startErr = startTomcat(tomcatInfo);
if startErr != nil {
fmt.Println("启动" + tomcatInfo.ProcessName + "出错!");
} else {
fmt.Println("启动" + tomcatInfo.ProcessName + "成功。");
}
return nil;
}
/**
停止tomcat
*/
func stopTomcat(tomcatInfo TomcatInfo) error {
var processName = strings.Split(tomcatInfo.ProcessName, ".")[0]
_, err := exec.Command("cmd", "/C", "net stop " + processName + " && taskkill /f /im " + tomcatInfo.ProcessHome).Output();
if err != nil {
return err;
}
return nil;
}
/**
启动tomcat
*/
func startTomcat(tomcatInfo TomcatInfo) error {
var processName = strings.Split(tomcatInfo.ProcessName, ".")[0]
_, err := exec.Command("cmd", "/C", "net start " + processName).Output();
if err != nil {
return err;
}
return nil;
}
| fmt.Println("复制" + tomcatInfo.ProcessName+ "配置文件成功,文件:" + cfgFileName + ",大小:", written, "byte");
}
}
//fmt.Println(tomcatArr);
return n | conditional_block |
pkg_util.go | package pkg_util
import (
"fmt"
"os"
"zip_util"
"time"
"strings"
"os/exec"
"log"
"io"
"path/filepath"
"errors"
)
var (
LOCAL_PATH1 = os.TempDir() + "\\test_file.zip" // 下载到本地的路径1
LOCAL_PATH2 = "d:\\test_file.zip" // 下载到本地的路径2
UNZIP_PATH1 = os.TempDir() + "\\hbn_pkg\\" // 更新包解压路径1
UNZIP_PATH2 = "d:\\hbn_pkg\\" // 更新包解压路径1
//TOMCAT_PROCESS_PREFIX = "tomcat" // tomcat进程前缀
//TOMCAT_W_PROCESS_SUFFIX = "w.exe" // tomcatx进程后缀,形式:w.exe
PKG_CFGFILE_PATH_ARR = []string{// 项目配置文件地址,
"webapps\\agent\\WEB-INF\\log4j.properties",
"webapps\\agent\\WEB-INF\\classes\\config.properties",
"webapps\\agent\\WEB-INF\\classes\\openoffice.properties"}
)
type TomcatInfo struct {
ProcessName string // tomcat6.ext
ProcessPath string // d:\xx\tomcat6\bin\tomcat6.exe
ProcessHome string // d:\xx\tomcat6\
PackageBackPath string // 更新前项目备份路径
PackageDir string // 项目存放路径 d:\xx\tomcat6\webapps\agent
PackageFileName string // 更新包名称 agent_1114
PackageBackFileName string // 备份包名称 201712081536更新前备份.zip
ConfigFileBackupDir string // 配置文件临时目录d:\xx\tomcat6\temp_config
NewPackageDir string // 新包地址
Update bool // 是否需要更新
Complete bool // 更新完成
}
/**
备份tomcat目录下项目
*/
// BackupCurrentPackage zips the current webapp of every Tomcat instance
// flagged for update into <ProcessHome>\backup\ and copies its project
// config files into <ProcessHome>\pkg_cfg\.
// Returns an error when no instance is available or a backup step fails.
// NOTE(review): some failure paths still call os.Exit(1); preserved from the
// original behavior rather than converted to error returns.
func BackupCurrentPackage(tomcatArr []*TomcatInfo) error {
	tempArr := tomcatArr
	if len(tempArr) == 0 {
		fmt.Println("当前系统未运行tomcat实例,无法更新")
		return errors.New("当前系统未运行tomcat实例,无法更新")
	}
	for i := range tempArr {
		tomcatInfo := tempArr[i]
		if !tomcatInfo.Update {
			continue
		}
		// Ensure the config backup directory exists.
		tomcatInfo.ConfigFileBackupDir = tomcatInfo.ProcessHome + "pkg_cfg\\"
		_, stat_err := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\")
		if stat_err != nil && os.IsNotExist(stat_err) {
			if mkdir_err := os.MkdirAll(tomcatInfo.ProcessHome+"pkg_cfg\\", 0777); mkdir_err != nil {
				// Bug fix: the original returned stat_err here, masking the
				// actual MkdirAll failure.
				return mkdir_err
			}
		} else if stat_err != nil {
			return stat_err
		}
		tomcatWebappDirFile, err := os.Open(tomcatInfo.PackageDir)
		tomcatWebappPath := []*os.File{tomcatWebappDirFile}
		if err != nil {
			fmt.Println(tomcatInfo.ProcessName + "备份失败")
			fmt.Println(err)
			os.Exit(1)
		}
		// Ensure the zip backup directory exists.
		tomcatBckupPath := tomcatInfo.PackageBackPath
		if _, stateErr := os.Stat(tomcatBckupPath); stateErr != nil {
			direrr := os.Mkdir(""+tomcatBckupPath, 0777)
			if direrr != nil {
				fmt.Println(direrr)
				fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录失败")
				return direrr
			}
			fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录成功")
		}
		// Zip the current webapp as "<timestamp>更新前备份.zip".
		now := time.Now()
		backupFileName := now.Format("200601021504") + "更新前备份"
		ziperr := zip_util.Zip(tomcatWebappPath, tomcatBckupPath+backupFileName+".zip")
		if ziperr != nil {
			// Bug fix: a failed zip used to be ignored silently, letting the
			// update proceed with no backup at all.
			fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件失败")
			return ziperr
		}
		tomcatInfo.PackageBackFileName = backupFileName + ".zip"
		fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件成功:" + tomcatInfo.PackageBackFileName)
		// Back up the project config files (log4j / config / openoffice properties).
		for j := range PKG_CFGFILE_PATH_ARR {
			cfgFilePath := tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[j]
			parts := strings.Split(PKG_CFGFILE_PATH_ARR[j], "\\")
			cfgFileName := parts[len(parts)-1] // bare config file name
			// pkg_cfg was already created above; this second check is kept as
			// a safety net (preserved from the original).
			if _, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\"); stateErr != nil {
				direrr := os.Mkdir(tomcatInfo.ProcessHome+"pkg_cfg\\", 0777)
				if direrr != nil {
					fmt.Println(direrr)
					fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败")
					os.Exit(1)
				} else {
					fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功")
				}
			}
			written, copyErr := copyFile(tomcatInfo.ProcessHome+"pkg_cfg\\"+cfgFileName, cfgFilePath)
			if copyErr != nil {
				fmt.Println(copyErr)
				fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败")
				os.Exit(1)
			}
			fmt.Println("复制"+tomcatInfo.ProcessName+"配置文件成功,文件:"+cfgFileName+",大小:", written, "byte")
		}
	}
	return nil
}
/**
获取tomcat信息
*/
// GetTomcatArray scans the Windows task list for processes whose lowercased
// line starts with tomcatPrefix and whose image name does NOT end with
// tomcatSuffix (used to skip the "…w.exe" tray helper), resolves each one's
// install directory via wmic, and returns the collected TomcatInfo records.
func GetTomcatArray(
	tomcatPrefix string,
	tomcatSuffix string) []TomcatInfo {
	out, err := exec.Command("cmd", "/C", "tasklist").Output()
	if err != nil {
		log.Fatal(err)
	}
	var processStrList []string = strings.Split(string(out), "\r\n")
	var tomcatArr []TomcatInfo
	for i := range processStrList {
		if !strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix) {
			continue
		}
		// First whitespace-delimited token of a tasklist row is the image name.
		processName := strings.Split(processStrList[i], " ")[0]
		if strings.HasSuffix(processName, tomcatSuffix) {
			continue
		}
		out2, err2 := exec.Command("cmd", "/C", "wmic process where name='"+processName+"' get ExecutablePath").Output()
		if err2 != nil {
			fmt.Println(err2)
			continue
		}
		wmicLines := strings.Split(string(out2), "\r\n")
		// Bug fix: guard against wmic output with fewer than two lines, which
		// previously panicked with an index-out-of-range.
		if len(wmicLines) < 2 {
			continue
		}
		fileDirectoryArr := strings.Split(wmicLines[1], "\\")
		if len(fileDirectoryArr) < 2 {
			continue
		}
		// Drop "bin\<exe>" to get the Tomcat home directory.
		parentDirectoryArr := fileDirectoryArr[0 : len(fileDirectoryArr)-2]
		var tomcatInfo TomcatInfo
		tomcatInfo.ProcessName = processName
		tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\"
		tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName
		tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\"
		tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\"
		tomcatArr = append(tomcatArr, tomcatInfo)
	}
	return tomcatArr
}
/**
确认tomcat
*/
// ConfirmTomcat interactively asks, for every detected Tomcat instance,
// whether it should be updated and — if so — with which package, storing the
// answers on the TomcatInfo records in place.
func ConfirmTomcat(tomcatArr []*TomcatInfo) {
	for _, tomcatInfo := range tomcatArr {
		if tomcatInfo == nil {
			continue
		}
		// Keep prompting until a valid 0/1 answer for "update this instance?".
		for {
			var update string
			fmt.Print("是否需要更新 " + tomcatInfo.ProcessName + "(0:否;1:是): ")
			fmt.Scanln(&update)
			if update == "1" || update == "0" {
				tomcatInfo.Update = update == "1"
				break
			}
			fmt.Print("输入有误,请重新输入0或者1! ")
		}
		// When updating, keep prompting for the package choice.
		for tomcatInfo.Update {
			var pkg string
			fmt.Print(tomcatInfo.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ")
			fmt.Scanln(&pkg)
			if pkg == "0" {
				tomcatInfo.PackageFileName = "tyb"
				break
			} else if pkg == "1" {
				tomcatInfo.PackageFileName = "agent_1114"
				break
			}
			fmt.Print("输入有误,请重新输入0或者1! ")
		}
	}
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
// copyDir recursively copies the directory tree rooted at src into dest.
// dest itself is assumed to exist; subdirectories are created as needed.
// Returns the first error encountered during the walk.
func copyDir(src string, dest string) error {
	src_original := src
	err := filepath.Walk(src, func(path string, f os.FileInfo, walkErr error) error {
		if f == nil {
			return walkErr
		}
		// Map the walked path into the destination tree.
		// Bug fix: files used Replace(..., -1), which could corrupt the path
		// if the src fragment reappears deeper in the tree; the prefix is
		// always the first occurrence, so replace exactly once.
		target := strings.Replace(path, src_original, dest, 1)
		if f.IsDir() {
			if path != src_original {
				// Bug fix: MkdirAll errors were silently discarded before.
				if mkErr := os.MkdirAll(target, 0777); mkErr != nil {
					return mkErr
				}
			}
			return nil
		}
		// Create/truncate the target first (mirrors the original os.Create
		// semantics) and close it immediately — the original leaked this
		// file handle for every copied file.
		out, crErr := os.Create(target)
		if crErr != nil {
			return crErr
		}
		out.Close()
		// Bug fix: copyFile errors were ignored before.
		if _, cpErr := copyFile(target, path); cpErr != nil {
			return cpErr
		}
		return nil
	})
	if err != nil {
		fmt.Printf("filepath.Walk() returned %v\n", err)
		return err
	}
	return nil
}
/**
替换包
*/
// ReplacePkg swaps the deployed webapp for the freshly unpacked package:
// stop Tomcat, wipe webapps\agent, copy the new package in, restore the
// backed-up config files, then start Tomcat again.
// Stop failures are reported but not fatal (the service may already be down);
// copy/restore/start failures abort with an error.
func ReplacePkg(tomcatInfo TomcatInfo) error {
	if stopErr := stopTomcat(tomcatInfo); stopErr != nil {
		fmt.Println("停止" + tomcatInfo.ProcessName + "出错!")
	} else {
		fmt.Println("停止" + tomcatInfo.ProcessName + "成功。")
	}
	destDir := tomcatInfo.ProcessHome + "webapps"
	// Remove the old deployment before copying the new one in.
	if remErr := os.RemoveAll(destDir + "\\agent"); remErr != nil {
		fmt.Println("移除目录出错:" + destDir + "\\agent")
		fmt.Println(remErr)
		return remErr
	}
	if copyErr := copyDir(tomcatInfo.NewPackageDir, destDir); copyErr != nil {
		fmt.Println("拷贝目录出错:" + destDir)
		return copyErr
	}
	// Restore the project config files that were backed up before the update.
	for _, relPath := range PKG_CFGFILE_PATH_ARR {
		cfgFilePath := tomcatInfo.ProcessHome + relPath
		parts := strings.Split(relPath, "\\")
		cfgFileName := parts[len(parts)-1] // bare config file name
		writeLen, cfgErr := copyFile(cfgFilePath, tomcatInfo.ConfigFileBackupDir+cfgFileName)
		if cfgErr != nil || writeLen == 0 {
			fmt.Println("还原配置文件出错:" + tomcatInfo.ConfigFileBackupDir + cfgFileName)
			if cfgErr == nil {
				// Bug fix: a zero-byte restore used to fall through and
				// return a nil error even though the failure was printed.
				cfgErr = errors.New("还原配置文件出错(0 byte):" + cfgFileName)
			}
			return cfgErr
		}
	}
	if startErr := startTomcat(tomcatInfo); startErr != nil {
		fmt.Println("启动" + tomcatInfo.ProcessName + "出错!")
		// Bug fix: the start failure used to be swallowed (nil was returned).
		return startErr
	}
	fmt.Println("启动" + tomcatInfo.ProcessName + "成功。")
	return nil
}
/**
停止tomcat
*/
// stopTomcat stops the Tomcat Windows service (service name = process name
// without its extension, e.g. "tomcat6" from "tomcat6.exe") and force-kills
// any leftover process of that image.
func stopTomcat(tomcatInfo TomcatInfo) error {
	serviceName := strings.Split(tomcatInfo.ProcessName, ".")[0]
	// Bug fix: taskkill /im expects the process image name (e.g. "tomcat6.exe");
	// the original passed ProcessHome (the install directory), so the kill
	// never matched anything.
	_, err := exec.Command("cmd", "/C", "net stop "+serviceName+" && taskkill /f /im "+tomcatInfo.ProcessName).Output()
	return err
}
/**
启动tomcat
*/
// startTomcat starts the Tomcat Windows service for the given instance.
// The service name is the process name with its extension stripped
// (e.g. "tomcat6" from "tomcat6.exe").
func startTomcat(tomcatInfo TomcatInfo) error {
	serviceName := strings.Split(tomcatInfo.ProcessName, ".")[0]
	_, err := exec.Command("cmd", "/C", "net start "+serviceName).Output()
	return err
}
| identifier_name | ||
aweMBPicker.py | # coding: UTF-8
# -------------------------
# A proof of concept port of aweControlPicker
# from Maya to Motionbuilder
# -------------------------
from pyfbsdk import *
import pyfbsdk_additions as pyui
from PySide import QtGui
gDeveloperMode = True
def log(*messages):
    '''Wrapper around print statement to control script output.

    Joins all arguments with single spaces (stringifying each) and prints
    the result, but only while gDeveloperMode is enabled.
    '''
    if not gDeveloperMode:
        return
    print(" ".join(str(m) for m in messages))
class Picker(object):
    '''The internal Picker object

    This class stores and manages the state of each Picker.
    There should only be one Picker instance per Picker, and it should be passed
    around to the UI etc.

    State is persisted in the scene as an FBSet (self.pickerObject) carrying
    two custom properties: 'PickerName' (string) and 'Objects' (object list).
    '''
    def __init__(self, name="Picker", objectList=[], pickerObject=None, tab="Pickers"):
        # NOTE(review): mutable default objectList=[] — not mutated here, but
        # worth replacing with None in a behavior-change pass.
        self.pickerObject = self.createPickerObject(name, tab, pickerObject, objectList)

    @property
    def name(self):
        # Persisted name; "Unknown" when the backing scene object is gone.
        if self.pickerObject:
            return self.pickerObject.PropertyList.Find('PickerName').Data
        else:
            return "Unknown"

    @name.setter
    def name(self, value):
        # Keep the custom property and the FBSet's own Name in sync.
        self.pickerObject.PropertyList.Find('PickerName').Data = value
        self.pickerObject.Name = value

    @property
    def tab(self):
        # NOTE(review): no 'Tab' property is ever created in
        # createPickerObject — Find('Tab') presumably returns None and this
        # accessor would fail; confirm before relying on it.
        return self.pickerObject.PropertyList.Find('Tab').Data

    @tab.setter
    def tab(self, value):
        self.pickerObject.PropertyList.Find('Tab').Data = value
        self.pickerObject.Tab = value

    @property
    def objects(self):
        # Materialize the persisted object list as a plain Python list.
        return [o for o in self.pickerObject.PropertyList.Find('Objects')]

    @objects.setter
    def objects(self, objectList):
        # Replace the entire persisted list.
        self.pickerObject.PropertyList.Find('Objects').removeAll()
        for o in objectList:
            self.pickerObject.PropertyList.Find('Objects').append(o)

    def createPickerObject(self, name, tab, pickerObject, objectList=[]):
        '''Creates the Set object used to store the Picker in the Scene
        When used during initPickers(), it doesn't create a new set and
        returns the existing set instead.
        '''
        po = pickerObject
        # NOTE(review): source indentation was lost upstream; grouping the
        # whole set-creation/population block under "if not po" follows the
        # docstring ("doesn't create a new set ... returns the existing set")
        # and avoids re-appending objects to an existing set — confirm.
        if not po:
            po = aweCreateSet(name)
            # search for master set. If none found, create it.
            masterSet = None
            for s in FBSystem().Scene.Sets:
                if s.LongName == "awe:Pickers":
                    masterSet = s
            if not masterSet:
                masterSet = aweCreateSet("awe:Pickers")
            # search for the tab set. If none found, create it.
            tabSet = None
            for s in masterSet.Items:
                if s.ClassName() == 'FBSet' and s.LongName == tab:
                    tabSet = s
            if not tabSet:
                tabSet = aweCreateSet(tab)
            masterSet.ConnectSrc(tabSet)
            tabSet.ConnectSrc(po)
            # Custom properties that persist the picker's state in the scene.
            po.PropertyCreate('PickerName', FBPropertyType.kFBPT_charptr, 'String', False, False, None)
            po.PropertyCreate('Objects', FBPropertyType.kFBPT_object, 'Object', False, False, None)
            po.PropertyList.Find("PickerName").Data = name
            po.Pickable = po.Transformable = False
            for o in objectList:
                po.PropertyList.Find('Objects').append(o)
        # Back-reference and destruction hook, needed for both new and
        # reused sets (initPickers/_monitorSet rely on .picker).
        po.picker = self
        po.OnUnbind.Add(_pickerObjectDestroyed)
        return po

    def rename(self, newName):
        # Thin wrapper over the name setter; returns the applied name.
        self.name = newName
        return self.name

    def select(self):
        '''Selects all objects associated with this Picker

        Returns True on success, False when the backing scene object is gone.
        '''
        if self.pickerObject:
            # Batch the selection change for performance.
            FBBeginChangeAllModels()
            # Deselect everything first so the picker's set becomes the
            # exclusive selection.
            ml = FBModelList()
            FBGetSelectedModels(ml)
            for m in ml:
                m.Selected = False
            for o in self.objects:
                o.Selected = True
            FBEndChangeAllModels()
            return True
        else:
            return False

    def delete(self):
        '''Deletes this Picker's associated pickerObject'''
        if self.pickerObject:
            self.pickerObject.FBDelete()

    def add(self, objectList):
        '''Adds a list of objects to this Picker'''
        objects = self.objects
        objects.extend(objectList)
        # remove duplicates (NOTE: set() does not preserve insertion order)
        tempSet = set(objects)
        self.objects = [o for o in tempSet]
def aweCreateSet(name):
    '''Create an FBSet with the given long name and return it.

    NOTE(review): the original kept an unused disallowedFlags local whose
    consuming loop was commented out; both removed here. Hiding components
    from the UI is handled by hideComponent() instead.
    '''
    Set = FBSet("")
    Set.LongName = name
    return Set
def _createPicker(control,event):
    '''Callback:
    Creates Picker and its UI after prompting for a name

    Snapshots the current model selection; shows an error dialog when
    nothing is selected, otherwise prompts for a name, builds the Picker
    and its button row, and resizes the tool.
    '''
    ml = FBModelList()
    FBGetSelectedModels(ml)
    objSet = []
    for m in ml:
        objSet.append(m)
    if not objSet:
        FBMessageBox("Picker Error", "Error: No Objects selected","OK")
    else:
        # Returns (buttonIndex, value); buttonIndex 1 means "OK" was pressed.
        userInput = FBMessageBoxGetUserValue("Create New Picker", "Name: ", "Picker", FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
        if userInput[0] == 1:
            name = userInput[1]
            picker = Picker(name,objSet)
            createPickerButton(name,picker)
            _toolResize()
def createPickerButton(name,picker):
    '''Creates Picker button UI and associates it with given Picker object

    Each picker row is an FBLayout with two regions:
      - a narrow "option" button on the left that opens the action menu
      - the main picker button filling the rest, which selects the objects
    The finished row is appended to the tool's pickerLayout.
    '''
    box = FBLayout()
    box.picker = picker
    # optionBtn region: fixed 20x25 px, pinned top-left.
    x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft,"")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
    w = FBAddRegionParam(20, FBAttachType.kFBAttachNone,"")
    h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
    box.AddRegion("optionBtnRegion", "optionBtnRegion", x,y,w,h)
    box.optionBtn = FBButton()
    box.optionBtn.Caption = "»"
    #box.optionBtn.Look = FBButtonLook.kFBLookColorChange
    #box.optionBtn.Style = FBButtonStyle.kFB2States
    box.optionBtn.optionBoxVisible = False
    box.optionBtn.picker = picker
    box.optionBtn.OnClick.Add(_toggleOptionMenu)
    box.SetControl("optionBtnRegion", box.optionBtn)
    # picker / optionBox region: everything right of the option button.
    x = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"optionBtnRegion")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
    w = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"")
    h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
    box.AddRegion("pickerBoxRegion", "pickerBoxRegion", x,y,w,h)
    box.pickerBtn = FBButton()
    box.pickerBtn.Caption = name
    box.pickerBtn.picker = picker
    box.pickerBtn.OnClick.Add(_pickerSelect)
    box.SetControl("pickerBoxRegion", box.pickerBtn)
    # Build (but do not show) the option box; cross-link the widgets so the
    # click callbacks can reach the row and its picker.
    box.optionBtn.optionBox = box.optionBox = createOptionBox(box)
    box.pickerBtn.box = box.optionBtn.box = box
    awePickerTool.pickerLayout.Add(box, 25, space=2)
def createOptionBox(parentBox):
    '''Creates a layout that holds a Picker's option UI

    Four equal-width color-coded buttons: add selection (+), remove
    selection (-), rename (ab*) and delete (x). Each button carries a
    reference to the row's Picker so its callback can act on it.
    '''
    optionLayout = pyui.FBHBoxLayout()
    # "+" — add the current selection to the picker.
    addBtn = FBButton()
    addBtn.Caption = "+"
    addBtn.OnClick.Add(_addObjects)
    addBtn.picker = parentBox.picker
    addBtn.Look = FBButtonLook.kFBLookColorChange
    addBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.5,0.3))
    addBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.45,0.25))
    optionLayout.AddRelative(addBtn,0.25,height=25, space=4)
    # "-" — remove the current selection from the picker.
    removeBtn = FBButton()
    removeBtn.Caption = "-"
    removeBtn.Look = FBButtonLook.kFBLookColorChange
    removeBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.2,0.5))
    removeBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.15,0.45))
    removeBtn.OnClick.Add(_removeObjects)
    removeBtn.picker = parentBox.picker
    optionLayout.AddRelative(removeBtn,0.25,height=25, space=2)
    # "ab*" — rename the picker (needs the button to update its caption).
    renameBtn = FBButton()
    renameBtn.Caption = "ab*"
    renameBtn.Look = FBButtonLook.kFBLookColorChange
    renameBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.3,0.4,0.5))
    renameBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.25,0.35,0.45))
    renameBtn.OnClick.Add(_renamePicker)
    renameBtn.picker = parentBox.picker
    renameBtn.pickerButton = parentBox.pickerBtn
    optionLayout.AddRelative(renameBtn,0.25,height=25, space=2)
    # "x" — delete the picker (needs the row to remove it from the layout).
    deleteBtn = FBButton()
    deleteBtn.Caption = "x"
    deleteBtn.Look = FBButtonLook.kFBLookColorChange
    deleteBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.7,0.2,0.3))
    deleteBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.65,0.15,0.25))
    deleteBtn.OnClick.Add(_deletePicker)
    deleteBtn.picker = parentBox.picker
    deleteBtn.box = parentBox
    optionLayout.AddRelative(deleteBtn,0.25,height=25, space=2)
    return optionLayout
def _addObjects(control,event):
    '''Callback: add the currently selected models to the caller's Picker.'''
    ml = FBModelList()
    FBGetSelectedModels(ml)
    control.picker.add(list(ml))
def _removeObjects(control,event):
    '''Callback: drop the currently selected models from the caller's
    Picker, then write the pruned list back to the scene.'''
    ml = FBModelList()
    FBGetSelectedModels(ml)
    remaining = control.picker.objects
    for model in ml:
        if model in remaining:
            remaining.remove(model)
    control.picker.objects = remaining
def _renamePicker(control,event):
    '''Callback: prompt for a new name and apply it to the caller's Picker
    and its button caption. Shows an error when the backing scene object
    no longer exists.'''
    if not control.picker.pickerObject:
        FBMessageBox('Picker Error', "Could not locate Picker Object","OK")
        return
    response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
    # Only apply when OK was pressed and a non-empty name was entered.
    if response == 1 and value:
        control.picker.rename(value)
        control.pickerButton.Caption = value
def _deletePicker(control,event):
    '''Callback: delete the caller's Picker (with confirmation) and remove
    its row from the tool layout.'''
    picker = control.picker
    # Ask for confirmation only while a scene object still backs the picker;
    # an orphaned picker is cleaned up without prompting.
    confirmed = True
    if picker.pickerObject:
        answer = FBMessageBox("Delete Picker", "Are you sure you want to delete %s" % picker.name,"Yes","Cancel")
        confirmed = answer == 1
    if confirmed:
        picker.delete()
        awePickerTool.pickerLayout.Remove(control.box)
        _toolResize()
def _toggleOptionMenu2(control,event):
    '''Callback:
    Swap the picker row's main region between the picker button and the
    option box, tracking visibility on the option button.
    '''
    region = "pickerBoxRegion"
    box = control.box
    showing = box.optionBtn.optionBoxVisible
    # Choose the widget for the new state, then rebuild the region with it.
    if showing:
        log("hiding optionbox")
        replacement = box.pickerBtn
    else:
        log("showing optionbox")
        replacement = box.optionBox
    box.ClearControl(region)
    box.SetControl(region, replacement)
    box.optionBtn.optionBoxVisible = not showing
    box.Refresh(True)
def _toggleOptionMenu(control,event):
    '''Callback:
    Pops up a context menu at the current cursor position with the
    per-picker actions and dispatches the chosen one to its handler.
    '''
    mouse = QtGui.QCursor.pos()
    x = mouse.x()
    y = mouse.y()
    menu = FBGenericMenu()
    menu.InsertLast("Add Selection",1)
    menu.InsertLast("Remove Selection",2)
    menu.InsertLast("Rename Picker",3)
    menu.InsertLast("Delete Picker",4)
    # Execute() blocks until the user picks an item (falsy when dismissed).
    item = menu.Execute(x,y)
    # Bug fix: removed a leftover "print item" debug statement.
    if item:
        if item.Id == 1:
            _addObjects(control,None)
        elif item.Id == 2:
            _removeObjects(control,None)
        elif item.Id == 3:
            _renamePicker(control,None)
        elif item.Id == 4:
            _deletePicker(control,None)
    menu.FBDelete()
def _pickerSelect(control,event):
    '''Callback:
    Selects the scene objects of the Picker bound to the clicked button.
    If the backing scene object is gone, informs the user and removes the
    button row from the tool.
    '''
    if control.picker:
        success = control.picker.select()
        if not success:
            FBMessageBox("Picker Error", "An error occured: couldn't find Picker object.\nDeleting this Picker","OK")
            awePickerTool.pickerLayout.Remove(control.box)
            # NOTE(review): HardSelect() after removing the row — presumably
            # forces a layout refresh; verify against the FBLayout API.
            awePickerTool.pickerLayout.HardSelect()
def initPickers(tool):
    '''Rebuild the tool's picker buttons from the "awe:Pickers" sets
    persisted in the current scene.

    Clears the existing button UI first, then walks
    master set -> tab sets -> one FBSet per picker, recreating a Picker
    (reusing the existing set) and a button row for each.
    '''
    log("initializing pickers")
    log("tool", tool)
    tool.pickerLayout.RemoveAll()
    sets = FBSystem().Scene.Sets
    masterSet = None
    for s in sets:
        if s.LongName == "awe:Pickers":
            masterSet = s
    if masterSet:
        # Keep the bookkeeping sets out of the Navigator UI.
        hideComponent(masterSet,masterSet.Items)
        for t in masterSet.Items:
            for p in t.Items:
                name = p.PropertyList.Find("PickerName").Data
                objects = [o for o in p.PropertyList.Find("Objects")]
                # Passing p reuses the existing set instead of creating one.
                picker = Picker(name,objects,p)
                createPickerButton(name,picker)
    #_toolResize()
    # create the mouse device (abandoned experiment, kept for reference)
    # if hasattr(tool,"mouse") and tool.mouse:
    #     try:
    #         tool.mouse.FBDelete()
    #     except:
    #         pass
    #     tool.mouse = FBCreateObject("Browsing/Templates/Devices","Mouse","pickerMouse")
    #     FBSystem().Scene.Devices.append(tool.mouse)
    #     tool.mouse.Live = tool.mouse.Online = True
def hideComponent(component=None, componentList=None):
    '''Hide scene components from the Navigator by disabling their
    browsable and renamable object flags.

    Accepts a single component and/or a list; list entries are processed
    one by one through a recursive call.
    '''
    if component:
        component.DisableObjectFlags(FBObjectFlag.kFBFlagBrowsable)
        component.DisableObjectFlags(FBObjectFlag.kFBFlagRenamable)
    for c in (componentList or []):
        hideComponent(component=c)
def _pickerObjectDestroyed(object,event):
object.picker.pickerObject = None
def _toolResize(*args):
    '''Recompute the scroll box content size so every picker row fits.

    Wired to the tool's OnResize event and called after adding or removing
    pickers; *args absorbs whatever the event passes.
    '''
    if not awePickerTool:
        return
    log("resizing")
    sb = awePickerTool.scrollBox
    log(sb)
    pl = awePickerTool.pickerLayout
    # Content width: scroll box width minus a margin for the scrollbar.
    sX = sb.RegionPosMaxX - sb.RegionPosMinX - 15
    i = childCount = 0
    log("checking children of pickerLayout")
    # Count rows by probing GetChild until it returns a falsy value.
    box = pl.GetChild(i)
    while box:
        log("found picker box %s" % str(i))
        i += 1
        childCount += 1
        box = pl.GetChild(i)
    log("found %d picker boxes" % childCount)
    # 27 px per row (25 px button + 2 px spacing) plus padding.
    sY = 27 * childCount + 10
    log("computed size Y: ", sY)
    sb.SetContentSize(sX, sY)
def getUIChildren(control, pList=None, tabs=0, firstRun=True):
    '''Recursively loops through all child UI components of control
    Returns list of items found

    Debug helper: also log()s an indented tree of class names.
    pList/tabs/firstRun are internal recursion state; external callers
    pass only control. Only the top-level call returns the list.
    '''
    pList = [] if firstRun else pList
    i = 0
    child = control.GetChild(i)
    # Scroll boxes nest their children under .Content.
    if control.ClassName() == "FBScrollBox":
        child = control.Content.GetChild(i)
    log("----"*tabs, control.ClassName(), control.RegionName if control.ClassName() == "FBLayout" else "")
    while child:
        pList.append(child)
        getUIChildren(child, pList,tabs + 1,False)
        i += 1
        # NOTE(review): for an FBScrollBox this advances via control.GetChild
        # rather than control.Content.GetChild — it likely misses siblings
        # past the first; verify.
        child = control.GetChild(i)
    if firstRun:
        return pList
def restructureAll(control,pList=None,firstRun=True):
    '''Recursively loops through all child layouts of control
    and calls Restructure() and Refresh() on them

    The tree is walked first, collecting every control that has a
    Restructure attribute; the actual Restructure/Refresh calls happen only
    at the end of the top-level invocation.
    '''
    pList = [] if firstRun else pList
    i = 0
    # Scroll boxes nest their children under .Content.
    child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
    if hasattr(control, "Restructure"):
        pList.append(control)
    while child:
        restructureAll(child, pList, False)
        i += 1
        child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
    if firstRun:
        for c in pList:
            c.Restructure(False)
            c.Refresh(True)
            #log(c)
        pList = []
def _fileChange(control,event):
    '''Callback: rebuild the picker UI after a scene file change
    (file new / file open completed).'''
    initPickers(awePickerTool)
def _ | control,event):
FBSystem().Scene.OnChange.RemoveAll()
def _monitorSet(control,event):
    '''Callback:
    Check for manual deletion of a picker object (FBSet).
    If it's the master set, prompt for undo. If it's a picker
    set, notify the associated Picker object
    '''
    if event.Type == FBSceneChangeType.kFBSceneChangeDetach:
        c = event.ChildComponent
        # NOTE(review): Is(44) presumably identifies FBSet in the type
        # hierarchy — confirm against the SDK type ids.
        if c.Is(44) and c.IsSDKComponent():
            if c.LongName == "awe:Pickers":
                FBMessageBox("Picker Error", "Hey! You just deleted the Picker set! Undo that please or I will crash", "OK")
                return
            # A picker set parented (directly) under the master set: tell
            # the Picker its backing scene object is gone.
            for p in c.Parents:
                if p.LongName == "awe:Pickers":
                    if c.picker:
                        c.picker.pickerObject = None
def aweCreateBaseUI(tool):
    '''Build the tool's full widget hierarchy and wire up scene/app events.

    Called once on startup with the (unique) tool instance.
    '''
    # ------------------------------
    # Tool Layout Scheme:
    #
    # -- MainLayout
    # -- |-- Edit Layout
    # -- |-- |-- Add Button
    # -- |-- ScrollBox
    # -- |-- |-- Picker Layout
    # -- |-- |-- |-- Picker Box
    # -- |-- |-- |-- ...
    # ------------------------------
    startX = 175
    startY = 240
    tool.StartSizeX = startX
    tool.StartSizeY = startY
    tool.OnResize.Add(_toolResize)
    # ----------------------
    # Main Layout
    # ----------------------
    x = FBAddRegionParam(5,FBAttachType.kFBAttachLeft,"")
    y = FBAddRegionParam(5,FBAttachType.kFBAttachTop,"")
    w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
    h = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"")
    tool.AddRegion("mainRegion", "mainRegion",x,y,w,h)
    mainLayout = pyui.FBVBoxLayout()
    tool.SetControl("mainRegion", mainLayout)
    # ----------------------
    # Edit region (top): holds the "+" (create picker) button.
    # ----------------------
    x = FBAddRegionParam(20,FBAttachType.kFBAttachLeft,"")
    y = FBAddRegionParam(0,FBAttachType.kFBAttachTop,"")
    w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
    h = FBAddRegionParam(35,FBAttachType.kFBAttachNone,"")
    mainLayout.AddRegion("editRegion", "editRegion", x,y,w,h)
    editLayout = pyui.FBHBoxLayout()
    mainLayout.SetControl("editRegion", editLayout)
    addBtn = FBButton()
    addBtn.Caption = "+"
    editLayout.Add(addBtn, 30, space=0, height=30)
    addBtn.OnClick.Add(_createPicker)
    # ----------------------
    # ScrollBox for Picker List
    # ---------------------
    x = FBAddRegionParam(0,FBAttachType.kFBAttachLeft,"")
    y = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"editRegion")
    w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
    h = FBAddRegionParam(5,FBAttachType.kFBAttachBottom,"")
    mainLayout.AddRegion("pickerScrollBox", "pickerScrollBox", x,y,w,h)
    tool.scrollBox = FBScrollBox()
    tool.scrollBox.SetContentSize(startX,startY)
    mainLayout.SetControl("pickerScrollBox", tool.scrollBox)
    # ----------------------
    # Picker Layout
    # (child of ScrollBox)
    # ---------------------
    x = FBAddRegionParam(0,FBAttachType.kFBAttachLeft,"")
    y = FBAddRegionParam(0,FBAttachType.kFBAttachTop,"")
    w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
    h = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"")
    tool.scrollBox.Content.AddRegion("pickerRegion", "pickerRegion", x,y,w,h)
    tool.pickerLayout = pyui.FBVBoxLayout()
    tool.scrollBox.Content.SetControl("pickerRegion", tool.pickerLayout)
    # clear pickers and rebuild from existing picker objects
    initPickers(tool)
    # add callbacks to scene: rebuild the UI on file new/open, and drop the
    # scene-change monitor before the scene is torn down.
    tool.app = FBApplication()
    #tool.app.OnFileNewCompleted.RemoveAll()
    tool.app.OnFileNewCompleted.Add(_fileChange)
    #tool.app.OnFileOpenCompleted.RemoveAll()
    tool.app.OnFileOpenCompleted.Add(_fileChange)
    tool.app.OnFileExit.Add(_removeSceneCB)
    tool.app.OnFileNew.Add(_removeSceneCB)
    tool.app.OnFileOpen.Add(_removeSceneCB)
    FBSystem().Scene.OnChange.Add(_monitorSet)
if __name__ in ['__builtin__', '__main__']:
awePickerTool = pyui.FBCreateUniqueTool("aweMBPicker")
aweCreateBaseUI(awePickerTool) | removeSceneCB( | identifier_name |
aweMBPicker.py | # coding: UTF-8
# -------------------------
# A proof of concept port of aweControlPicker
# from Maya to Motionbuilder
# -------------------------
from pyfbsdk import *
import pyfbsdk_additions as pyui
from PySide import QtGui
gDeveloperMode = True
def log(*messages):
'''Wrapper around print statement to control script output'''
if gDeveloperMode:
message = ""
for m in range(len(messages)):
bit = str(messages[m])
sep = " " if m else ""
message += sep + bit
print message
class Picker(object):
'''The internal Picker object
This class stores and manages the state of each Picker.
There should only be one Picker instance per Picker, and it should be passed
around to the UI etc.
'''
def __init__(self, name="Picker", objectList=[],pickerObject=None, tab="Pickers"):
self.pickerObject = self.createPickerObject(name, tab, pickerObject, objectList)
@property
def name(self):
if self.pickerObject:
return self.pickerObject.PropertyList.Find('PickerName').Data
else:
return "Unknown"
@name.setter
def name(self, value):
self.pickerObject.PropertyList.Find('PickerName').Data = value
self.pickerObject.Name = value
@property
def tab(self):
return self.pickerObject.PropertyList.Find('Tab').Data
@tab.setter
def tab(self, value):
self.pickerObject.PropertyList.Find('Tab').Data = value
self.pickerObject.Tab = value
@property
def objects(self):
return [o for o in self.pickerObject.PropertyList.Find('Objects')]
@objects.setter
def objects(self, objectList):
self.pickerObject.PropertyList.Find('Objects').removeAll()
for o in objectList:
self.pickerObject.PropertyList.Find('Objects').append(o)
def createPickerObject(self, name, tab, pickerObject, objectList=[]):
'''Creates the Set object used to store the Picker in the Scene
When used during initPickers(), it doesn't create a new set and
returns the existing set instead.
'''
po = pickerObject
if not po:
po = aweCreateSet(name)
# search for master set. If none found, create it.
masterSet = None
for s in FBSystem().Scene.Sets:
if s.LongName == "awe:Pickers":
masterSet = s
if not masterSet:
masterSet = aweCreateSet("awe:Pickers")
# search for the tab set. If none found, create it.
tabSet = None
for s in masterSet.Items:
if s.ClassName() == 'FBSet' and s.LongName == tab:
tabSet = s
if not tabSet:
tabSet = aweCreateSet(tab)
masterSet.ConnectSrc(tabSet)
tabSet.ConnectSrc(po)
po.PropertyCreate('PickerName', FBPropertyType.kFBPT_charptr, 'String', False, False, None)
po.PropertyCreate('Objects', FBPropertyType.kFBPT_object, 'Object', False, False, None)
po.PropertyList.Find("PickerName").Data = name
po.Pickable = po.Transformable = False
for o in objectList:
po.PropertyList.Find('Objects').append(o)
po.picker = self
po.OnUnbind.Add(_pickerObjectDestroyed)
return po
def rename(self, newName):
self.name = newName
return self.name
def select(self):
'''Selects all objects associated with this Picker
'''
if self.pickerObject:
FBBeginChangeAllModels()
ml = FBModelList()
FBGetSelectedModels(ml)
for m in ml:
m.Selected = False
for o in self.objects:
o.Selected = True
FBEndChangeAllModels()
return True
else:
return False
def delete(self):
'''Deletes this Picker's associated pickerObject'''
if self.pickerObject:
self.pickerObject.FBDelete()
def add(self,objectList):
'''Adds a list of objects to this Picker'''
objects = self.objects
objects.extend(objectList)
# remove duplicates
tempSet = set(objects)
self.objects = [o for o in tempSet]
def aweCreateSet(name):
Set = FBSet("")
Set.LongName = name
disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
#for flag in disallowedFlags:
#Set.DisableObjectFlags(flag)
return Set
def _createPicker(control,event):
'''Callback:
Creates Picker and its UI after prompting for a name
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objSet = []
for m in ml:
objSet.append(m)
if not objSet:
FBMessageBox("Picker Error", "Error: No Objects selected","OK")
else:
userInput = FBMessageBoxGetUserValue("Create New Picker", "Name: ", "Picker", FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if userInput[0] == 1:
name = userInput[1]
picker = Picker(name,objSet)
createPickerButton(name,picker)
_toolResize()
def createPickerButton(name,picker):
'''Creates Picker button UI and associates it with given Picker object'''
box = FBLayout()
box.picker = picker
# optionBtn region
x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(20, FBAttachType.kFBAttachNone,"")
h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
box.AddRegion("optionBtnRegion", "optionBtnRegion", x,y,w,h)
box.optionBtn = FBButton()
box.optionBtn.Caption = "»"
#box.optionBtn.Look = FBButtonLook.kFBLookColorChange
#box.optionBtn.Style = FBButtonStyle.kFB2States
box.optionBtn.optionBoxVisible = False
box.optionBtn.picker = picker
box.optionBtn.OnClick.Add(_toggleOptionMenu)
box.SetControl("optionBtnRegion", box.optionBtn)
# picker / optionBox region
x = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"optionBtnRegion")
y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
box.AddRegion("pickerBoxRegion", "pickerBoxRegion", x,y,w,h)
box.pickerBtn = FBButton()
box.pickerBtn.Caption = name
box.pickerBtn.picker = picker
box.pickerBtn.OnClick.Add(_pickerSelect)
box.SetControl("pickerBoxRegion", box.pickerBtn)
box.optionBtn.optionBox = box.optionBox = createOptionBox(box)
box.pickerBtn.box = box.optionBtn.box = box
awePickerTool.pickerLayout.Add(box, 25, space=2)
def createOptionBox(parentBox):
' |
def _addObjects(control,event):
'''Callback:
Adds selected objects to the Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objectList = [o for o in ml]
control.picker.add(objectList)
def _removeObjects(control,event):
'''Callback:
Removes selected objects from Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objects = control.picker.objects
for m in ml:
if m in objects:
objects.remove(m)
control.picker.objects = objects
def _renamePicker(control,event):
'''Callback:
Prompts to rename a Picker associated with the caller
'''
if control.picker.pickerObject:
response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if response == 1:
if value:
control.picker.rename(value)
control.pickerButton.Caption = value
else:
FBMessageBox('Picker Error', "Could not locate Picker Object","OK")
def _deletePicker(control,event):
'''Callback:
Deletes a Picker and UI associated with caller (and the caller itself)
'''
deleteUI = False
if control.picker.pickerObject:
result = FBMessageBox("Delete Picker", "Are you sure you want to delete %s" % control.picker.name,"Yes","Cancel")
if result == 1:
deleteUI = True
else:
deleteUI = True
if deleteUI:
control.picker.delete()
awePickerTool.pickerLayout.Remove(control.box)
_toolResize()
def _toggleOptionMenu2(control,event):
'''Callback:
Shows a Picker's option UI or hides it, depending on current state
'''
region = "pickerBoxRegion"
# hide options
if control.box.optionBtn.optionBoxVisible:
log("hiding optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.pickerBtn)
control.box.optionBtn.optionBoxVisible = False
control.box.Refresh(True)
# show options
else:
log("showing optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.optionBox)
control.box.optionBtn.optionBoxVisible = True
control.box.Refresh(True)
def _toggleOptionMenu(control,event):
#if hasattr(awePickerTool,"mouse") and awePickerTool.mouse:
mouse = QtGui.QCursor.pos()
#x = int(desktop.width() / 100 * awePickerTool.mouse.PropertyList.Find("X").Data)
#y = int(desktop.height() / 100 * (100-awePickerTool.mouse.PropertyList.Find("Y").Data))
x = mouse.x()
y = mouse.y()
menu = FBGenericMenu()
menu.InsertLast("Add Selection",1)
menu.InsertLast("Remove Selection",2)
menu.InsertLast("Rename Picker",3)
menu.InsertLast("Delete Picker",4)
item = menu.Execute(x,y)
print item
if item:
if item.Id == 1:
_addObjects(control,None)
if item.Id == 2:
_removeObjects(control,None)
if item.Id == 3:
_renamePicker(control,None)
if item.Id == 4:
_deletePicker(control,None)
menu.FBDelete()
def _pickerSelect(control,event):
if control.picker:
success = control.picker.select()
if not success:
FBMessageBox("Picker Error", "An error occured: couldn't find Picker object.\nDeleting this Picker","OK")
awePickerTool.pickerLayout.Remove(control.box)
awePickerTool.pickerLayout.HardSelect()
def initPickers(tool):
log("initializing pickers")
log("tool", tool)
tool.pickerLayout.RemoveAll()
sets = FBSystem().Scene.Sets
masterSet = None
for s in sets:
if s.LongName == "awe:Pickers":
masterSet = s
if masterSet:
hideComponent(masterSet,masterSet.Items)
for t in masterSet.Items:
for p in t.Items:
name = p.PropertyList.Find("PickerName").Data
objects = [o for o in p.PropertyList.Find("Objects")]
picker = Picker(name,objects,p)
createPickerButton(name,picker)
#_toolResize()
# create the mouse device
# if hasattr(tool,"mouse") and tool.mouse:
# try:
# tool.mouse.FBDelete()
# except:
# pass
# tool.mouse = FBCreateObject("Browsing/Templates/Devices","Mouse","pickerMouse")
# FBSystem().Scene.Devices.append(tool.mouse)
# tool.mouse.Live = tool.mouse.Online = True
def hideComponent(component=None,componentList=None):
disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
if component:
for flag in disallowedFlags:
component.DisableObjectFlags(flag)
if componentList:
for c in componentList:
hideComponent(component=c)
def _pickerObjectDestroyed(object,event):
object.picker.pickerObject = None
def _toolResize(*args):
if not awePickerTool:
return
log("resizing")
sb = awePickerTool.scrollBox
log(sb)
pl = awePickerTool.pickerLayout
sX = sb.RegionPosMaxX - sb.RegionPosMinX - 15
i = childCount = 0
log("checking children of pickerLayout")
box = pl.GetChild(i)
while box:
log("found picker box %s" % str(i))
i += 1
childCount += 1
box = pl.GetChild(i)
log("found %d picker boxes" % childCount)
sY = 27 * childCount + 10
log("computed size Y: ", sY)
sb.SetContentSize(sX, sY)
def getUIChildren(control, pList=None, tabs=0, firstRun=True):
'''Recursively loops through all child UI components of control
Returns list of items found
'''
pList = [] if firstRun else pList
i = 0
child = control.GetChild(i)
if control.ClassName() == "FBScrollBox":
child = control.Content.GetChild(i)
log("----"*tabs, control.ClassName(), control.RegionName if control.ClassName() == "FBLayout" else "")
while child:
pList.append(child)
getUIChildren(child, pList,tabs + 1,False)
i += 1
child = control.GetChild(i)
if firstRun:
return pList
def restructureAll(control,pList=None,firstRun=True):
'''Recursively loops through all child layouts of control
and calls Restructure() and Refresh() on them
'''
pList = [] if firstRun else pList
i = 0
child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
if hasattr(control, "Restructure"):
pList.append(control)
while child:
restructureAll(child, pList, False)
i += 1
child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
if firstRun:
for c in pList:
c.Restructure(False)
c.Refresh(True)
#log(c)
pList = []
def _fileChange(control,event):
initPickers(awePickerTool)
def _removeSceneCB(control,event):
FBSystem().Scene.OnChange.RemoveAll()
def _monitorSet(control,event):
'''Callback:
Check for manual deletion of a picker object (FBSet).
If it's the master set, prompt for undo. If it's a picker
set, notify the associated Picker object
'''
if event.Type == FBSceneChangeType.kFBSceneChangeDetach:
c = event.ChildComponent
if c.Is(44) and c.IsSDKComponent():
if c.LongName == "awe:Pickers":
FBMessageBox("Picker Error", "Hey! You just deleted the Picker set! Undo that please or I will crash", "OK")
return
for p in c.Parents:
if p.LongName == "awe:Pickers":
if c.picker:
c.picker.pickerObject = None
def aweCreateBaseUI(tool):
# ------------------------------
# Tool Layout Scheme:
#
# -- MainLayout
# -- |-- Edit Layout
# -- |-- |-- Add Button
# -- |-- ScrollBox
# -- |-- |-- Picker Layout
# -- |-- |-- |-- Picker Box
# -- |-- |-- |-- ...
# ------------------------------
startX = 175
startY = 240
tool.StartSizeX = startX
tool.StartSizeY = startY
tool.OnResize.Add(_toolResize)
# ----------------------
# Main Layout
# ----------------------
x = FBAddRegionParam(5,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(5,FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"")
tool.AddRegion("mainRegion", "mainRegion",x,y,w,h)
mainLayout = pyui.FBVBoxLayout()
tool.SetControl("mainRegion", mainLayout)
# ----------------------
# Edit region (top)
# ----------------------
x = FBAddRegionParam(20,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0,FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(35,FBAttachType.kFBAttachNone,"")
mainLayout.AddRegion("editRegion", "editRegion", x,y,w,h)
editLayout = pyui.FBHBoxLayout()
mainLayout.SetControl("editRegion", editLayout)
addBtn = FBButton()
addBtn.Caption = "+"
editLayout.Add(addBtn, 30, space=0, height=30)
addBtn.OnClick.Add(_createPicker)
# ----------------------
# ScrollBox for Picker List
# ---------------------
x = FBAddRegionParam(0,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"editRegion")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(5,FBAttachType.kFBAttachBottom,"")
mainLayout.AddRegion("pickerScrollBox", "pickerScrollBox", x,y,w,h)
tool.scrollBox = FBScrollBox()
tool.scrollBox.SetContentSize(startX,startY)
mainLayout.SetControl("pickerScrollBox", tool.scrollBox)
# ----------------------
# Picker Layout
# (child of ScrollBox)
# ---------------------
x = FBAddRegionParam(0,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0,FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"")
tool.scrollBox.Content.AddRegion("pickerRegion", "pickerRegion", x,y,w,h)
tool.pickerLayout = pyui.FBVBoxLayout()
tool.scrollBox.Content.SetControl("pickerRegion", tool.pickerLayout)
# clear pickers and rebuild from existing picker objects
initPickers(tool)
# add callbacks to scene
tool.app = FBApplication()
#tool.app.OnFileNewCompleted.RemoveAll()
tool.app.OnFileNewCompleted.Add(_fileChange)
#tool.app.OnFileOpenCompleted.RemoveAll()
tool.app.OnFileOpenCompleted.Add(_fileChange)
tool.app.OnFileExit.Add(_removeSceneCB)
tool.app.OnFileNew.Add(_removeSceneCB)
tool.app.OnFileOpen.Add(_removeSceneCB)
FBSystem().Scene.OnChange.Add(_monitorSet)
if __name__ in ['__builtin__', '__main__']:
awePickerTool = pyui.FBCreateUniqueTool("aweMBPicker")
aweCreateBaseUI(awePickerTool) | ''Creates a layout that holds a Picker's option UI'''
optionLayout = pyui.FBHBoxLayout()
addBtn = FBButton()
addBtn.Caption = "+"
addBtn.OnClick.Add(_addObjects)
addBtn.picker = parentBox.picker
addBtn.Look = FBButtonLook.kFBLookColorChange
addBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.5,0.3))
addBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.45,0.25))
optionLayout.AddRelative(addBtn,0.25,height=25, space=4)
removeBtn = FBButton()
removeBtn.Caption = "-"
removeBtn.Look = FBButtonLook.kFBLookColorChange
removeBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.2,0.5))
removeBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.15,0.45))
removeBtn.OnClick.Add(_removeObjects)
removeBtn.picker = parentBox.picker
optionLayout.AddRelative(removeBtn,0.25,height=25, space=2)
renameBtn = FBButton()
renameBtn.Caption = "ab*"
renameBtn.Look = FBButtonLook.kFBLookColorChange
renameBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.3,0.4,0.5))
renameBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.25,0.35,0.45))
renameBtn.OnClick.Add(_renamePicker)
renameBtn.picker = parentBox.picker
renameBtn.pickerButton = parentBox.pickerBtn
optionLayout.AddRelative(renameBtn,0.25,height=25, space=2)
deleteBtn = FBButton()
deleteBtn.Caption = "x"
deleteBtn.Look = FBButtonLook.kFBLookColorChange
deleteBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.7,0.2,0.3))
deleteBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.65,0.15,0.25))
deleteBtn.OnClick.Add(_deletePicker)
deleteBtn.picker = parentBox.picker
deleteBtn.box = parentBox
optionLayout.AddRelative(deleteBtn,0.25,height=25, space=2)
return optionLayout
| identifier_body |
aweMBPicker.py | # coding: UTF-8
# -------------------------
# A proof of concept port of aweControlPicker
# from Maya to Motionbuilder
# -------------------------
from pyfbsdk import *
import pyfbsdk_additions as pyui
from PySide import QtGui
gDeveloperMode = True
def log(*messages):
'''Wrapper around print statement to control script output'''
if gDeveloperMode:
message = ""
for m in range(len(messages)):
bit = str(messages[m])
sep = " " if m else ""
message += sep + bit
print message
class Picker(object):
'''The internal Picker object
This class stores and manages the state of each Picker.
There should only be one Picker instance per Picker, and it should be passed
around to the UI etc.
'''
def __init__(self, name="Picker", objectList=[],pickerObject=None, tab="Pickers"):
self.pickerObject = self.createPickerObject(name, tab, pickerObject, objectList)
@property
def name(self):
if self.pickerObject:
return self.pickerObject.PropertyList.Find('PickerName').Data
else:
return "Unknown"
@name.setter
def name(self, value):
self.pickerObject.PropertyList.Find('PickerName').Data = value
self.pickerObject.Name = value
@property
def tab(self):
return self.pickerObject.PropertyList.Find('Tab').Data
@tab.setter
def tab(self, value):
self.pickerObject.PropertyList.Find('Tab').Data = value
self.pickerObject.Tab = value
@property
def objects(self):
return [o for o in self.pickerObject.PropertyList.Find('Objects')]
@objects.setter
def objects(self, objectList):
self.pickerObject.PropertyList.Find('Objects').removeAll()
for o in objectList:
self.pickerObject.PropertyList.Find('Objects').append(o)
def createPickerObject(self, name, tab, pickerObject, objectList=[]):
'''Creates the Set object used to store the Picker in the Scene
When used during initPickers(), it doesn't create a new set and
returns the existing set instead.
'''
po = pickerObject
if not po:
po = aweCreateSet(name)
# search for master set. If none found, create it.
masterSet = None
for s in FBSystem().Scene.Sets:
if s.LongName == "awe:Pickers":
masterSet = s
if not masterSet:
masterSet = aweCreateSet("awe:Pickers")
# search for the tab set. If none found, create it.
tabSet = None
for s in masterSet.Items:
if s.ClassName() == 'FBSet' and s.LongName == tab:
tabSet = s
if not tabSet:
tabSet = aweCreateSet(tab)
masterSet.ConnectSrc(tabSet)
tabSet.ConnectSrc(po)
po.PropertyCreate('PickerName', FBPropertyType.kFBPT_charptr, 'String', False, False, None)
po.PropertyCreate('Objects', FBPropertyType.kFBPT_object, 'Object', False, False, None)
po.PropertyList.Find("PickerName").Data = name
po.Pickable = po.Transformable = False
for o in objectList:
po.PropertyList.Find('Objects').append(o)
po.picker = self
po.OnUnbind.Add(_pickerObjectDestroyed)
return po
def rename(self, newName):
self.name = newName
return self.name
def select(self):
'''Selects all objects associated with this Picker
'''
if self.pickerObject:
FBBeginChangeAllModels()
ml = FBModelList()
FBGetSelectedModels(ml)
for m in ml:
m.Selected = False
for o in self.objects:
o.Selected = True
FBEndChangeAllModels()
return True
else:
return False
def delete(self):
'''Deletes this Picker's associated pickerObject'''
if self.pickerObject:
self.pickerObject.FBDelete()
def add(self,objectList):
'''Adds a list of objects to this Picker'''
objects = self.objects
objects.extend(objectList)
# remove duplicates
tempSet = set(objects)
self.objects = [o for o in tempSet]
def aweCreateSet(name):
Set = FBSet("")
Set.LongName = name
disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
#for flag in disallowedFlags:
#Set.DisableObjectFlags(flag)
return Set
def _createPicker(control,event):
'''Callback:
Creates Picker and its UI after prompting for a name
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objSet = []
for m in ml:
objSet.append(m)
if not objSet:
FBMessageBox("Picker Error", "Error: No Objects selected","OK")
else:
userInput = FBMessageBoxGetUserValue("Create New Picker", "Name: ", "Picker", FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if userInput[0] == 1:
name = userInput[1]
picker = Picker(name,objSet)
createPickerButton(name,picker)
_toolResize()
def createPickerButton(name,picker):
'''Creates Picker button UI and associates it with given Picker object'''
box = FBLayout()
box.picker = picker
# optionBtn region
x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(20, FBAttachType.kFBAttachNone,"")
h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
box.AddRegion("optionBtnRegion", "optionBtnRegion", x,y,w,h)
box.optionBtn = FBButton()
box.optionBtn.Caption = "»"
#box.optionBtn.Look = FBButtonLook.kFBLookColorChange
#box.optionBtn.Style = FBButtonStyle.kFB2States
box.optionBtn.optionBoxVisible = False
box.optionBtn.picker = picker
box.optionBtn.OnClick.Add(_toggleOptionMenu)
box.SetControl("optionBtnRegion", box.optionBtn)
# picker / optionBox region
x = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"optionBtnRegion")
y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
box.AddRegion("pickerBoxRegion", "pickerBoxRegion", x,y,w,h)
box.pickerBtn = FBButton()
box.pickerBtn.Caption = name
box.pickerBtn.picker = picker
box.pickerBtn.OnClick.Add(_pickerSelect)
box.SetControl("pickerBoxRegion", box.pickerBtn)
box.optionBtn.optionBox = box.optionBox = createOptionBox(box)
box.pickerBtn.box = box.optionBtn.box = box
awePickerTool.pickerLayout.Add(box, 25, space=2)
def createOptionBox(parentBox):
'''Creates a layout that holds a Picker's option UI'''
optionLayout = pyui.FBHBoxLayout()
addBtn = FBButton()
addBtn.Caption = "+"
addBtn.OnClick.Add(_addObjects)
addBtn.picker = parentBox.picker
addBtn.Look = FBButtonLook.kFBLookColorChange
addBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.5,0.3))
addBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.45,0.25))
optionLayout.AddRelative(addBtn,0.25,height=25, space=4)
removeBtn = FBButton()
removeBtn.Caption = "-"
removeBtn.Look = FBButtonLook.kFBLookColorChange
removeBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.2,0.5))
removeBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.15,0.45))
removeBtn.OnClick.Add(_removeObjects)
removeBtn.picker = parentBox.picker
optionLayout.AddRelative(removeBtn,0.25,height=25, space=2)
renameBtn = FBButton()
renameBtn.Caption = "ab*"
renameBtn.Look = FBButtonLook.kFBLookColorChange
renameBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.3,0.4,0.5))
renameBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.25,0.35,0.45))
renameBtn.OnClick.Add(_renamePicker)
renameBtn.picker = parentBox.picker
renameBtn.pickerButton = parentBox.pickerBtn
optionLayout.AddRelative(renameBtn,0.25,height=25, space=2)
deleteBtn = FBButton()
deleteBtn.Caption = "x"
deleteBtn.Look = FBButtonLook.kFBLookColorChange
deleteBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.7,0.2,0.3))
deleteBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.65,0.15,0.25))
deleteBtn.OnClick.Add(_deletePicker)
deleteBtn.picker = parentBox.picker
deleteBtn.box = parentBox
optionLayout.AddRelative(deleteBtn,0.25,height=25, space=2)
return optionLayout
def _addObjects(control,event):
'''Callback:
Adds selected objects to the Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objectList = [o for o in ml]
control.picker.add(objectList)
def _removeObjects(control,event):
'''Callback:
Removes selected objects from Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objects = control.picker.objects
for m in ml:
if m in objects:
objects.remove(m)
control.picker.objects = objects
def _renamePicker(control,event):
'''Callback:
Prompts to rename a Picker associated with the caller
'''
if control.picker.pickerObject:
response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if response == 1:
if value:
control.picker.rename(value)
control.pickerButton.Caption = value
else:
FBMessageBox('Picker Error', "Could not locate Picker Object","OK")
def _deletePicker(control,event):
'''Callback:
Deletes a Picker and UI associated with caller (and the caller itself)
'''
deleteUI = False
if control.picker.pickerObject:
result = FBMessageBox("Delete Picker", "Are you sure you want to delete %s" % control.picker.name,"Yes","Cancel")
if result == 1:
deleteUI = True
else:
deleteUI = True
if deleteUI:
control.picker.delete()
awePickerTool.pickerLayout.Remove(control.box)
_toolResize()
def _toggleOptionMenu2(control,event):
'''Callback:
Shows a Picker's option UI or hides it, depending on current state
'''
region = "pickerBoxRegion"
# hide options
if control.box.optionBtn.optionBoxVisible:
log("hiding optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.pickerBtn)
control.box.optionBtn.optionBoxVisible = False
control.box.Refresh(True)
# show options
else:
log("showing optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.optionBox)
control.box.optionBtn.optionBoxVisible = True
control.box.Refresh(True)
def _toggleOptionMenu(control,event):
#if hasattr(awePickerTool,"mouse") and awePickerTool.mouse:
mouse = QtGui.QCursor.pos()
#x = int(desktop.width() / 100 * awePickerTool.mouse.PropertyList.Find("X").Data)
#y = int(desktop.height() / 100 * (100-awePickerTool.mouse.PropertyList.Find("Y").Data))
x = mouse.x()
y = mouse.y()
menu = FBGenericMenu()
menu.InsertLast("Add Selection",1)
menu.InsertLast("Remove Selection",2)
menu.InsertLast("Rename Picker",3)
menu.InsertLast("Delete Picker",4)
item = menu.Execute(x,y)
print item
if item:
if item.Id == 1:
_addObjects(control,None)
if item.Id == 2:
_removeObjects(control,None)
if item.Id == 3:
_renamePicker(control,None)
if item.Id == 4:
_deletePicker(control,None)
menu.FBDelete()
def _pickerSelect(control,event):
if control.picker:
success = control.picker.select()
if not success:
FBMessageBox("Picker Error", "An error occured: couldn't find Picker object.\nDeleting this Picker","OK")
awePickerTool.pickerLayout.Remove(control.box)
awePickerTool.pickerLayout.HardSelect()
def initPickers(tool):
log("initializing pickers")
log("tool", tool)
tool.pickerLayout.RemoveAll()
sets = FBSystem().Scene.Sets
masterSet = None
for s in sets:
if s.LongName == "awe:Pickers":
masterSet = s
if masterSet:
hideComponent(masterSet,masterSet.Items)
for t in masterSet.Items:
for p in t.Items:
name = p.PropertyList.Find("PickerName").Data
objects = [o for o in p.PropertyList.Find("Objects")]
picker = Picker(name,objects,p)
createPickerButton(name,picker)
#_toolResize()
# create the mouse device
# if hasattr(tool,"mouse") and tool.mouse:
# try:
# tool.mouse.FBDelete()
# except:
# pass
# tool.mouse = FBCreateObject("Browsing/Templates/Devices","Mouse","pickerMouse")
# FBSystem().Scene.Devices.append(tool.mouse)
# tool.mouse.Live = tool.mouse.Online = True
def hideComponent(component=None,componentList=None):
disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
if component:
for flag in disallowedFlags:
component.DisableObjectFlags(flag)
if componentList:
for c in componentList:
hideComponent(component=c)
def _pickerObjectDestroyed(object,event):
object.picker.pickerObject = None
def _toolResize(*args):
if not awePickerTool:
return
log("resizing")
sb = awePickerTool.scrollBox
log(sb)
pl = awePickerTool.pickerLayout
sX = sb.RegionPosMaxX - sb.RegionPosMinX - 15
i = childCount = 0
log("checking children of pickerLayout")
box = pl.GetChild(i)
while box:
log("found picker box %s" % str(i))
i += 1
childCount += 1
box = pl.GetChild(i)
log("found %d picker boxes" % childCount)
sY = 27 * childCount + 10
log("computed size Y: ", sY)
sb.SetContentSize(sX, sY)
def getUIChildren(control, pList=None, tabs=0, firstRun=True):
'''Recursively loops through all child UI components of control
Returns list of items found
'''
pList = [] if firstRun else pList
i = 0
child = control.GetChild(i)
if control.ClassName() == "FBScrollBox":
child = control.Content.GetChild(i)
log("----"*tabs, control.ClassName(), control.RegionName if control.ClassName() == "FBLayout" else "")
while child:
pList.append(child)
getUIChildren(child, pList,tabs + 1,False)
i += 1
child = control.GetChild(i)
if firstRun:
return pList
def restructureAll(control,pList=None,firstRun=True):
'''Recursively loops through all child layouts of control
and calls Restructure() and Refresh() on them
'''
pList = [] if firstRun else pList
i = 0
child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
if hasattr(control, "Restructure"):
pList.append(control)
while child:
restructureAll(child, pList, False)
i += 1
child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
if firstRun:
for c in pList:
c.Restructure(False)
c.Refresh(True)
#log(c)
pList = []
def _fileChange(control,event):
initPickers(awePickerTool)
def _removeSceneCB(control,event):
FBSystem().Scene.OnChange.RemoveAll()
def _monitorSet(control,event):
'''Callback:
Check for manual deletion of a picker object (FBSet).
If it's the master set, prompt for undo. If it's a picker
set, notify the associated Picker object
'''
if event.Type == FBSceneChangeType.kFBSceneChangeDetach:
c = event.ChildComponent
if c.Is(44) and c.IsSDKComponent():
if c.LongName == "awe:Pickers":
FBMessageBox("Picker Error", "Hey! You just deleted the Picker set! Undo that please or I will crash", "OK")
return
for p in c.Parents:
if p.LongName == "awe:Pickers":
if c.picker:
c.picker.pickerObject = None
def aweCreateBaseUI(tool):
# ------------------------------
# Tool Layout Scheme:
#
# -- MainLayout
# -- |-- Edit Layout
# -- |-- |-- Add Button
# -- |-- ScrollBox
# -- |-- |-- Picker Layout
# -- |-- |-- |-- Picker Box
# -- |-- |-- |-- ...
# ------------------------------
startX = 175
startY = 240
| # ----------------------
# Main Layout
# ----------------------
x = FBAddRegionParam(5,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(5,FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"")
tool.AddRegion("mainRegion", "mainRegion",x,y,w,h)
mainLayout = pyui.FBVBoxLayout()
tool.SetControl("mainRegion", mainLayout)
# ----------------------
# Edit region (top)
# ----------------------
x = FBAddRegionParam(20,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0,FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(35,FBAttachType.kFBAttachNone,"")
mainLayout.AddRegion("editRegion", "editRegion", x,y,w,h)
editLayout = pyui.FBHBoxLayout()
mainLayout.SetControl("editRegion", editLayout)
addBtn = FBButton()
addBtn.Caption = "+"
editLayout.Add(addBtn, 30, space=0, height=30)
addBtn.OnClick.Add(_createPicker)
# ----------------------
# ScrollBox for Picker List
# ---------------------
x = FBAddRegionParam(0,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"editRegion")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(5,FBAttachType.kFBAttachBottom,"")
mainLayout.AddRegion("pickerScrollBox", "pickerScrollBox", x,y,w,h)
tool.scrollBox = FBScrollBox()
tool.scrollBox.SetContentSize(startX,startY)
mainLayout.SetControl("pickerScrollBox", tool.scrollBox)
# ----------------------
# Picker Layout
# (child of ScrollBox)
# ---------------------
x = FBAddRegionParam(0,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0,FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(0,FBAttachType.kFBAttachBottom,"")
tool.scrollBox.Content.AddRegion("pickerRegion", "pickerRegion", x,y,w,h)
tool.pickerLayout = pyui.FBVBoxLayout()
tool.scrollBox.Content.SetControl("pickerRegion", tool.pickerLayout)
# clear pickers and rebuild from existing picker objects
initPickers(tool)
# add callbacks to scene
tool.app = FBApplication()
#tool.app.OnFileNewCompleted.RemoveAll()
tool.app.OnFileNewCompleted.Add(_fileChange)
#tool.app.OnFileOpenCompleted.RemoveAll()
tool.app.OnFileOpenCompleted.Add(_fileChange)
tool.app.OnFileExit.Add(_removeSceneCB)
tool.app.OnFileNew.Add(_removeSceneCB)
tool.app.OnFileOpen.Add(_removeSceneCB)
FBSystem().Scene.OnChange.Add(_monitorSet)
if __name__ in ['__builtin__', '__main__']:
awePickerTool = pyui.FBCreateUniqueTool("aweMBPicker")
aweCreateBaseUI(awePickerTool) | tool.StartSizeX = startX
tool.StartSizeY = startY
tool.OnResize.Add(_toolResize)
| random_line_split |
aweMBPicker.py | # coding: UTF-8
# -------------------------
# A proof of concept port of aweControlPicker
# from Maya to Motionbuilder
# -------------------------
from pyfbsdk import *
import pyfbsdk_additions as pyui
from PySide import QtGui
gDeveloperMode = True
def log(*messages):
'''Wrapper around print statement to control script output'''
if gDeveloperMode:
message = ""
for m in range(len(messages)):
bit = str(messages[m])
sep = " " if m else ""
message += sep + bit
print message
class Picker(object):
'''The internal Picker object
This class stores and manages the state of each Picker.
There should only be one Picker instance per Picker, and it should be passed
around to the UI etc.
'''
def __init__(self, name="Picker", objectList=[],pickerObject=None, tab="Pickers"):
self.pickerObject = self.createPickerObject(name, tab, pickerObject, objectList)
@property
def name(self):
if self.pickerObject:
return self.pickerObject.PropertyList.Find('PickerName').Data
else:
return "Unknown"
@name.setter
def name(self, value):
self.pickerObject.PropertyList.Find('PickerName').Data = value
self.pickerObject.Name = value
@property
def tab(self):
return self.pickerObject.PropertyList.Find('Tab').Data
@tab.setter
def tab(self, value):
self.pickerObject.PropertyList.Find('Tab').Data = value
self.pickerObject.Tab = value
@property
def objects(self):
return [o for o in self.pickerObject.PropertyList.Find('Objects')]
@objects.setter
def objects(self, objectList):
self.pickerObject.PropertyList.Find('Objects').removeAll()
for o in objectList:
|
def createPickerObject(self, name, tab, pickerObject, objectList=[]):
'''Creates the Set object used to store the Picker in the Scene
When used during initPickers(), it doesn't create a new set and
returns the existing set instead.
'''
po = pickerObject
if not po:
po = aweCreateSet(name)
# search for master set. If none found, create it.
masterSet = None
for s in FBSystem().Scene.Sets:
if s.LongName == "awe:Pickers":
masterSet = s
if not masterSet:
masterSet = aweCreateSet("awe:Pickers")
# search for the tab set. If none found, create it.
tabSet = None
for s in masterSet.Items:
if s.ClassName() == 'FBSet' and s.LongName == tab:
tabSet = s
if not tabSet:
tabSet = aweCreateSet(tab)
masterSet.ConnectSrc(tabSet)
tabSet.ConnectSrc(po)
po.PropertyCreate('PickerName', FBPropertyType.kFBPT_charptr, 'String', False, False, None)
po.PropertyCreate('Objects', FBPropertyType.kFBPT_object, 'Object', False, False, None)
po.PropertyList.Find("PickerName").Data = name
po.Pickable = po.Transformable = False
for o in objectList:
po.PropertyList.Find('Objects').append(o)
po.picker = self
po.OnUnbind.Add(_pickerObjectDestroyed)
return po
def rename(self, newName):
self.name = newName
return self.name
def select(self):
'''Selects all objects associated with this Picker
'''
if self.pickerObject:
FBBeginChangeAllModels()
ml = FBModelList()
FBGetSelectedModels(ml)
for m in ml:
m.Selected = False
for o in self.objects:
o.Selected = True
FBEndChangeAllModels()
return True
else:
return False
def delete(self):
'''Deletes this Picker's associated pickerObject'''
if self.pickerObject:
self.pickerObject.FBDelete()
def add(self,objectList):
'''Adds a list of objects to this Picker'''
objects = self.objects
objects.extend(objectList)
# remove duplicates
tempSet = set(objects)
self.objects = [o for o in tempSet]
def aweCreateSet(name):
Set = FBSet("")
Set.LongName = name
disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
#for flag in disallowedFlags:
#Set.DisableObjectFlags(flag)
return Set
def _createPicker(control,event):
'''Callback:
Creates Picker and its UI after prompting for a name
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objSet = []
for m in ml:
objSet.append(m)
if not objSet:
FBMessageBox("Picker Error", "Error: No Objects selected","OK")
else:
userInput = FBMessageBoxGetUserValue("Create New Picker", "Name: ", "Picker", FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if userInput[0] == 1:
name = userInput[1]
picker = Picker(name,objSet)
createPickerButton(name,picker)
_toolResize()
def createPickerButton(name, picker):
    '''Creates Picker button UI and associates it with given Picker object.

    The row is an FBLayout with two regions: a narrow option button ("»")
    pinned to the left, and the picker's main select button filling the
    remaining width. The finished row is appended to the tool's
    pickerLayout.
    '''
    box = FBLayout()
    box.picker = picker
    # optionBtn region: fixed 20x25 area anchored to the row's top-left
    x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft, "")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop, "")
    w = FBAddRegionParam(20, FBAttachType.kFBAttachNone, "")
    h = FBAddRegionParam(25, FBAttachType.kFBAttachNone, "")
    box.AddRegion("optionBtnRegion", "optionBtnRegion", x, y, w, h)
    box.optionBtn = FBButton()
    box.optionBtn.Caption = "»"
    #box.optionBtn.Look = FBButtonLook.kFBLookColorChange
    #box.optionBtn.Style = FBButtonStyle.kFB2States
    # state flag read/flipped by _toggleOptionMenu2 to swap button <-> options
    box.optionBtn.optionBoxVisible = False
    box.optionBtn.picker = picker
    box.optionBtn.OnClick.Add(_toggleOptionMenu)
    box.SetControl("optionBtnRegion", box.optionBtn)
    # picker / optionBox region: fills the row to the right of the option button
    x = FBAddRegionParam(0, FBAttachType.kFBAttachRight, "optionBtnRegion")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop, "")
    w = FBAddRegionParam(0, FBAttachType.kFBAttachRight, "")
    h = FBAddRegionParam(25, FBAttachType.kFBAttachNone, "")
    box.AddRegion("pickerBoxRegion", "pickerBoxRegion", x, y, w, h)
    box.pickerBtn = FBButton()
    box.pickerBtn.Caption = name
    box.pickerBtn.picker = picker
    box.pickerBtn.OnClick.Add(_pickerSelect)
    box.SetControl("pickerBoxRegion", box.pickerBtn)
    # cross-link the widgets so every callback can reach its siblings
    box.optionBtn.optionBox = box.optionBox = createOptionBox(box)
    box.pickerBtn.box = box.optionBtn.box = box
    awePickerTool.pickerLayout.Add(box, 25, space=2)
def createOptionBox(parentBox):
    '''Creates a layout that holds a Picker's option UI.

    Four equal-width buttons in a horizontal row, each tagged with the
    parent's Picker so the shared callbacks know which Picker to act on:
    "+" add selection, "-" remove selection, "ab*" rename, "x" delete.
    '''
    optionLayout = pyui.FBHBoxLayout()
    # "+" -- add the current scene selection to the picker (greenish)
    addBtn = FBButton()
    addBtn.Caption = "+"
    addBtn.OnClick.Add(_addObjects)
    addBtn.picker = parentBox.picker
    addBtn.Look = FBButtonLook.kFBLookColorChange
    addBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4, 0.5, 0.3))
    addBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35, 0.45, 0.25))
    optionLayout.AddRelative(addBtn, 0.25, height=25, space=4)
    # "-" -- remove the current scene selection from the picker (purple)
    removeBtn = FBButton()
    removeBtn.Caption = "-"
    removeBtn.Look = FBButtonLook.kFBLookColorChange
    removeBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4, 0.2, 0.5))
    removeBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35, 0.15, 0.45))
    removeBtn.OnClick.Add(_removeObjects)
    removeBtn.picker = parentBox.picker
    optionLayout.AddRelative(removeBtn, 0.25, height=25, space=2)
    # "ab*" -- rename the picker; also needs the button whose caption to update
    renameBtn = FBButton()
    renameBtn.Caption = "ab*"
    renameBtn.Look = FBButtonLook.kFBLookColorChange
    renameBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.3, 0.4, 0.5))
    renameBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.25, 0.35, 0.45))
    renameBtn.OnClick.Add(_renamePicker)
    renameBtn.picker = parentBox.picker
    renameBtn.pickerButton = parentBox.pickerBtn
    optionLayout.AddRelative(renameBtn, 0.25, height=25, space=2)
    # "x" -- delete picker; also needs the row layout so it can remove the UI
    deleteBtn = FBButton()
    deleteBtn.Caption = "x"
    deleteBtn.Look = FBButtonLook.kFBLookColorChange
    deleteBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.7, 0.2, 0.3))
    deleteBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.65, 0.15, 0.25))
    deleteBtn.OnClick.Add(_deletePicker)
    deleteBtn.picker = parentBox.picker
    deleteBtn.box = parentBox
    optionLayout.AddRelative(deleteBtn, 0.25, height=25, space=2)
    return optionLayout
def _addObjects(control, event):
    '''Callback:
    Adds the scene's current selection to the Picker attached to the caller.
    '''
    selection = FBModelList()
    FBGetSelectedModels(selection)
    control.picker.add([model for model in selection])
def _removeObjects(control, event):
    '''Callback:
    Removes the scene's current selection from the caller's Picker.
    '''
    selection = FBModelList()
    FBGetSelectedModels(selection)
    remaining = control.picker.objects
    for model in selection:
        if model in remaining:
            remaining.remove(model)
    control.picker.objects = remaining
def _renamePicker(control, event):
    '''Callback:
    Prompts for and applies a new name for the caller's Picker, updating
    the picker button caption to match.
    '''
    if not control.picker.pickerObject:
        FBMessageBox('Picker Error', "Could not locate Picker Object", "OK")
        return
    response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel", None, 1, True)
    if response == 1 and value:
        control.picker.rename(value)
        control.pickerButton.Caption = value
def _deletePicker(control, event):
    '''Callback:
    Deletes the caller's Picker plus its row UI. Deletion is confirmed
    with the user only when a live pickerObject exists; an orphaned Picker
    is removed without prompting.
    '''
    picker = control.picker
    if picker.pickerObject:
        confirmed = FBMessageBox("Delete Picker", "Are you sure you want to delete %s" % picker.name, "Yes", "Cancel") == 1
    else:
        confirmed = True
    if confirmed:
        picker.delete()
        awePickerTool.pickerLayout.Remove(control.box)
        _toolResize()
def _toggleOptionMenu2(control, event):
    '''Callback:
    Swaps the picker button and its option box in the shared region,
    depending on which one is currently showing.
    '''
    region = "pickerBoxRegion"
    box = control.box
    showing = box.optionBtn.optionBoxVisible
    log("hiding optionbox" if showing else "showing optionbox")
    box.ClearControl(region)
    # put back the plain picker button when hiding, the option row when showing
    box.SetControl(region, box.pickerBtn if showing else box.optionBox)
    box.optionBtn.optionBoxVisible = not showing
    box.Refresh(True)
def _toggleOptionMenu(control, event):
    '''Callback:
    Pops a context menu at the mouse cursor with the per-Picker actions
    (add/remove/rename/delete) and dispatches to the matching callback.
    '''
    #if hasattr(awePickerTool,"mouse") and awePickerTool.mouse:
    # use the Qt cursor position so the menu opens under the pointer
    mouse = QtGui.QCursor.pos()
    #x = int(desktop.width() / 100 * awePickerTool.mouse.PropertyList.Find("X").Data)
    #y = int(desktop.height() / 100 * (100-awePickerTool.mouse.PropertyList.Find("Y").Data))
    x = mouse.x()
    y = mouse.y()
    menu = FBGenericMenu()
    menu.InsertLast("Add Selection", 1)
    menu.InsertLast("Remove Selection", 2)
    menu.InsertLast("Rename Picker", 3)
    menu.InsertLast("Delete Picker", 4)
    # Execute() blocks until the user picks an item (or dismisses the menu)
    item = menu.Execute(x, y)
    print item
    if item:
        # forward to the same callbacks the option-box buttons use; they only
        # read `control.picker` (and `.box`/`.pickerButton`), not `event`
        if item.Id == 1:
            _addObjects(control, None)
        if item.Id == 2:
            _removeObjects(control, None)
        if item.Id == 3:
            _renamePicker(control, None)
        if item.Id == 4:
            _deletePicker(control, None)
    menu.FBDelete()
def _pickerSelect(control, event):
    # Callback: selects the caller's Picker objects. If select() reports
    # failure (its backing pickerObject is gone), the Picker's UI row is
    # removed after warning the user.
    if control.picker:
        success = control.picker.select()
        if not success:
            FBMessageBox("Picker Error", "An error occured: couldn't find Picker object.\nDeleting this Picker", "OK")
            awePickerTool.pickerLayout.Remove(control.box)
            # NOTE(review): HardSelect() right after Remove() looks odd --
            # presumably it forces the layout to rebuild; confirm in the SDK.
            awePickerTool.pickerLayout.HardSelect()
def initPickers(tool):
    '''Clears the tool's picker UI and rebuilds it from the scene.

    Pickers are persisted as FBSets under a master set named "awe:Pickers";
    each child set carries a "PickerName" string property and an "Objects"
    object property listing the picked models.
    '''
    log("initializing pickers")
    log("tool", tool)
    tool.pickerLayout.RemoveAll()
    sets = FBSystem().Scene.Sets
    masterSet = None
    # last set named "awe:Pickers" wins if there are somehow several
    for s in sets:
        if s.LongName == "awe:Pickers":
            masterSet = s
    if masterSet:
        # keep the persistence sets out of the navigator / rename UI
        hideComponent(masterSet, masterSet.Items)
        # masterSet.Items are per-tab sets; their Items are the picker sets
        for t in masterSet.Items:
            for p in t.Items:
                name = p.PropertyList.Find("PickerName").Data
                objects = [o for o in p.PropertyList.Find("Objects")]
                picker = Picker(name, objects, p)
                createPickerButton(name, picker)
    #_toolResize()
    # create the mouse device
    # if hasattr(tool,"mouse") and tool.mouse:
    # try:
    # tool.mouse.FBDelete()
    # except:
    # pass
    # tool.mouse = FBCreateObject("Browsing/Templates/Devices","Mouse","pickerMouse")
    # FBSystem().Scene.Devices.append(tool.mouse)
    # tool.mouse.Live = tool.mouse.Online = True
def hideComponent(component=None, componentList=None):
    '''Strips the browsable/renamable flags from components so they stay
    out of MotionBuilder's navigator UI.

    Accepts a single component, a list of components, or both; list
    entries are handled by recursing on each one individually.
    '''
    flagsToDisable = (FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable)
    if component:
        for flag in flagsToDisable:
            component.DisableObjectFlags(flag)
    if componentList:
        for item in componentList:
            hideComponent(component=item)
def _pickerObjectDestroyed(object, event):
    # Callback (wired via po.OnUnbind): when a picker's backing FBSet is
    # unbound/destroyed, clear the owning Picker's reference so later calls
    # (select/delete) see the object as gone instead of touching a dead set.
    object.picker.pickerObject = None
def _toolResize(*args):
    '''Recomputes the scroll box content size from the picker row count.

    Width tracks the scroll box region (minus scrollbar allowance); height
    is 27px per picker row plus padding.
    '''
    if not awePickerTool:
        return
    log("resizing")
    scrollBox = awePickerTool.scrollBox
    log(scrollBox)
    layout = awePickerTool.pickerLayout
    contentWidth = scrollBox.RegionPosMaxX - scrollBox.RegionPosMinX - 15
    log("checking children of pickerLayout")
    count = 0
    while layout.GetChild(count):
        log("found picker box %s" % str(count))
        count += 1
    log("found %d picker boxes" % count)
    contentHeight = 27 * count + 10
    log("computed size Y: ", contentHeight)
    scrollBox.SetContentSize(contentWidth, contentHeight)
def getUIChildren(control, pList=None, tabs=0, firstRun=True):
    '''Recursively loops through all child UI components of control.
    Returns list of items found (top-level call only).

    Bug fix: children of an FBScrollBox hang off its Content layout. The
    original only fetched child 0 from Content and then iterated children
    1..n from the scroll box itself, so every scroll-box child after the
    first was missed. The lookup parent is now chosen once and used for
    the whole iteration (matching restructureAll below).
    '''
    pList = [] if firstRun else pList
    parent = control.Content if control.ClassName() == "FBScrollBox" else control
    log("----" * tabs, control.ClassName(), control.RegionName if control.ClassName() == "FBLayout" else "")
    i = 0
    child = parent.GetChild(i)
    while child:
        pList.append(child)
        getUIChildren(child, pList, tabs + 1, False)
        i += 1
        child = parent.GetChild(i)
    if firstRun:
        return pList
def restructureAll(control, pList=None, firstRun=True):
    '''Recursively gathers every layout under control, then (from the
    top-level call only) calls Restructure(False) and Refresh(True) on
    each collected layout.
    '''
    pList = [] if firstRun else pList
    # FBScrollBox children live on its Content layout
    parent = control.Content if control.ClassName() == "FBScrollBox" else control
    if hasattr(control, "Restructure"):
        pList.append(control)
    index = 0
    child = parent.GetChild(index)
    while child:
        restructureAll(child, pList, False)
        index += 1
        child = parent.GetChild(index)
    if firstRun:
        for layout in pList:
            layout.Restructure(False)
            layout.Refresh(True)
def _fileChange(control, event):
    # Callback for OnFileNewCompleted/OnFileOpenCompleted: rebuild the picker
    # UI from whatever picker sets exist in the newly loaded scene.
    initPickers(awePickerTool)
def _removeSceneCB(control, event):
    # Callback for OnFileExit/OnFileNew/OnFileOpen: drop all Scene.OnChange
    # handlers (including _monitorSet) before the scene is torn down, so the
    # monitor doesn't fire against a half-destroyed scene.
    FBSystem().Scene.OnChange.RemoveAll()
def _monitorSet(control, event):
    '''Callback:
    Check for manual deletion of a picker object (FBSet).
    If it's the master set, prompt for undo. If it's a picker
    set, notify the associated Picker object.
    '''
    if event.Type != FBSceneChangeType.kFBSceneChangeDetach:
        return
    c = event.ChildComponent
    # 44 is assumed to be the FBSet type id -- TODO confirm against the SDK
    if not (c.Is(44) and c.IsSDKComponent()):
        return
    if c.LongName == "awe:Pickers":
        FBMessageBox("Picker Error", "Hey! You just deleted the Picker set! Undo that please or I will crash", "OK")
        return
    for p in c.Parents:
        if p.LongName == "awe:Pickers":
            # Robustness fix: sets restored from file may never have had a
            # .picker attribute assigned; the original `c.picker` would raise
            # AttributeError inside the scene-change callback.
            picker = getattr(c, 'picker', None)
            if picker:
                picker.pickerObject = None
def aweCreateBaseUI(tool):
    '''Builds the tool's static UI skeleton and wires scene/app callbacks.

    The region parameters are (offset, attach-type, attach-to-region); a 0
    offset with kFBAttachRight/Bottom means "stretch to that edge".
    '''
    # ------------------------------
    # Tool Layout Scheme:
    #
    # -- MainLayout
    # -- |-- Edit Layout
    # -- |-- |-- Add Button
    # -- |-- ScrollBox
    # -- |-- |-- Picker Layout
    # -- |-- |-- |-- Picker Box
    # -- |-- |-- |-- ...
    # ------------------------------
    startX = 175
    startY = 240
    tool.StartSizeX = startX
    tool.StartSizeY = startY
    # keep the scroll box content sized to the picker count on every resize
    tool.OnResize.Add(_toolResize)
    # ----------------------
    # Main Layout
    # ----------------------
    x = FBAddRegionParam(5, FBAttachType.kFBAttachLeft, "")
    y = FBAddRegionParam(5, FBAttachType.kFBAttachTop, "")
    w = FBAddRegionParam(0, FBAttachType.kFBAttachRight, "")
    h = FBAddRegionParam(0, FBAttachType.kFBAttachBottom, "")
    tool.AddRegion("mainRegion", "mainRegion", x, y, w, h)
    mainLayout = pyui.FBVBoxLayout()
    tool.SetControl("mainRegion", mainLayout)
    # ----------------------
    # Edit region (top)
    # ----------------------
    x = FBAddRegionParam(20, FBAttachType.kFBAttachLeft, "")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop, "")
    w = FBAddRegionParam(0, FBAttachType.kFBAttachRight, "")
    h = FBAddRegionParam(35, FBAttachType.kFBAttachNone, "")
    mainLayout.AddRegion("editRegion", "editRegion", x, y, w, h)
    editLayout = pyui.FBHBoxLayout()
    mainLayout.SetControl("editRegion", editLayout)
    # the "+" button prompts for a name and creates a new Picker
    addBtn = FBButton()
    addBtn.Caption = "+"
    editLayout.Add(addBtn, 30, space=0, height=30)
    addBtn.OnClick.Add(_createPicker)
    # ----------------------
    # ScrollBox for Picker List
    # ---------------------
    x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft, "")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachBottom, "editRegion")
    w = FBAddRegionParam(0, FBAttachType.kFBAttachRight, "")
    h = FBAddRegionParam(5, FBAttachType.kFBAttachBottom, "")
    mainLayout.AddRegion("pickerScrollBox", "pickerScrollBox", x, y, w, h)
    tool.scrollBox = FBScrollBox()
    tool.scrollBox.SetContentSize(startX, startY)
    mainLayout.SetControl("pickerScrollBox", tool.scrollBox)
    # ----------------------
    # Picker Layout
    # (child of ScrollBox)
    # ---------------------
    # note: scroll box children are parented to its Content layout
    x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft, "")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop, "")
    w = FBAddRegionParam(0, FBAttachType.kFBAttachRight, "")
    h = FBAddRegionParam(0, FBAttachType.kFBAttachBottom, "")
    tool.scrollBox.Content.AddRegion("pickerRegion", "pickerRegion", x, y, w, h)
    tool.pickerLayout = pyui.FBVBoxLayout()
    tool.scrollBox.Content.SetControl("pickerRegion", tool.pickerLayout)
    # clear pickers and rebuild from existing picker objects
    initPickers(tool)
    # add callbacks to scene
    tool.app = FBApplication()
    #tool.app.OnFileNewCompleted.RemoveAll()
    tool.app.OnFileNewCompleted.Add(_fileChange)
    #tool.app.OnFileOpenCompleted.RemoveAll()
    tool.app.OnFileOpenCompleted.Add(_fileChange)
    # tear down the OnChange monitor before any scene is destroyed/replaced
    tool.app.OnFileExit.Add(_removeSceneCB)
    tool.app.OnFileNew.Add(_removeSceneCB)
    tool.app.OnFileOpen.Add(_removeSceneCB)
    FBSystem().Scene.OnChange.Add(_monitorSet)
if __name__ in ['__builtin__', '__main__']:
    # Runs when executed from the MotionBuilder script editor ('__builtin__')
    # or as a standalone script ('__main__'): create the singleton tool and
    # build its UI. (The original final line carried dataset-extraction
    # residue fused onto the call; it has been stripped here.)
    awePickerTool = pyui.FBCreateUniqueTool("aweMBPicker")
    aweCreateBaseUI(awePickerTool)
package consensus
import (
"bytes"
"math/big"
"time"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/encoding"
)
type (
	// Timestamp is a Unix timestamp (see CurrentTimestamp); BlockHeight is a
	// count of blocks.
	Timestamp   uint64
	BlockHeight uint64
	Siafund     Currency // arbitrary-precision unsigned integer

	// A Specifier is a fixed-length string that serves two purposes. In the
	// wire protocol, they are used to identify a particular encoding
	// algorithm, signature algorithm, etc. This allows nodes to communicate on
	// their own terms; for example, to reduce bandwidth costs, a node might
	// only accept compressed messages.
	//
	// Internally, Specifiers are used to guarantee unique IDs. Various
	// consensus types have an associated ID, calculated by hashing the data
	// contained in the type. By prepending the data with Specifier, we can
	// guarantee that distinct types will never produce the same hash.
	Specifier [16]byte

	// The Signature type is arbitrary-length to enable a variety of signature
	// algorithms.
	Signature string

	// IDs are used to refer to a type without revealing its contents. They
	// are constructed by hashing specific fields of the type, along with a
	// Specifier. While all of these types are hashes, defining type aliases
	// gives us type safety and makes the code more readable.
	BlockID         crypto.Hash
	SiacoinOutputID crypto.Hash
	SiafundOutputID crypto.Hash
	FileContractID  crypto.Hash

	// An UnlockHash is a specially constructed hash of the UnlockConditions
	// type. "Locked" values can be unlocked by providing the UnlockConditions
	// that hash to a given UnlockHash. See SpendConditions.UnlockHash for
	// details on how the UnlockHash is constructed.
	UnlockHash crypto.Hash

	// A Target is a hash that a block's ID must be "less than" in order for
	// the block to be considered valid. Miners vary the block's 'Nonce' field
	// in order to brute-force such an ID. The inverse of a Target is called
	// the "difficulty," because it is proportional to the amount of time
	// required to brute-force the Target.
	Target crypto.Hash
)
// These Specifiers are used internally when calculating a type's ID. See
// Specifier for more details.
//
// Each literal is shorter than Specifier's 16 bytes; the remaining bytes
// are implicitly zero.
var (
	SpecifierSiacoinOutput                 = Specifier{'s', 'i', 'a', 'c', 'o', 'i', 'n', ' ', 'o', 'u', 't', 'p', 'u', 't'}
	SpecifierFileContract                  = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't'}
	SpecifierFileContractTerminationPayout = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't', ' ', 't'}
	SpecifierStorageProofOutput            = Specifier{'s', 't', 'o', 'r', 'a', 'g', 'e', ' ', 'p', 'r', 'o', 'o', 'f'}
	SpecifierSiafundOutput                 = Specifier{'s', 'i', 'a', 'f', 'u', 'n', 'd', ' ', 'o', 'u', 't', 'p', 'u', 't'}
)

// These Specifiers enumerate the types of signatures that are recognized by
// this implementation. If a signature's type is unrecognized, the signature
// is treated as valid. Signatures using the special "entropy" type are always
// treated as invalid; see Consensus.md for more details.
var (
	SignatureEntropy = Specifier{'e', 'n', 't', 'r', 'o', 'p', 'y'}
	SignatureEd25519 = Specifier{'e', 'd', '2', '5', '5', '1', '9'}
)
// A Block is a summary of changes to the state that have occurred since the
// previous block. Blocks reference the ID of the previous block (their
// "parent"), creating the linked-list commonly known as the blockchain. Their
// primary function is to bundle together transactions on the network. Blocks
// are created by "miners," who collect transactions from other nodes, and
// then try to pick a Nonce that results in a block whose BlockID is below a
// given Target.
type Block struct {
	ParentID     BlockID
	Nonce        uint64 // varied by miners to bring ID() below the Target
	Timestamp    Timestamp
	MinerPayouts []SiacoinOutput
	Transactions []Transaction
}

// A Transaction is an atomic component of a block. Transactions can contain
// inputs and outputs, file contracts, storage proofs, and even arbitrary
// data. They can also contain signatures to prove that a given party has
// approved the transaction, or at least a particular subset of it.
//
// Transactions can depend on other previous transactions in the same block,
// but transactions cannot spend outputs that they create or otherwise be
// self-dependent.
//
// NOTE: the ID helpers (SiacoinOutputID, FileContractID, ...) hash these
// fields in declaration order, so field order here is consensus-critical.
type Transaction struct {
	SiacoinInputs            []SiacoinInput
	SiacoinOutputs           []SiacoinOutput
	FileContracts            []FileContract
	FileContractTerminations []FileContractTermination
	StorageProofs            []StorageProof
	SiafundInputs            []SiafundInput
	SiafundOutputs           []SiafundOutput
	MinerFees                []Currency
	ArbitraryData            []string
	Signatures               []TransactionSignature
}
// A SiacoinInput consumes a SiacoinOutput and adds the siacoins to the set of
// siacoins that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiacoinInput struct {
	ParentID         SiacoinOutputID
	UnlockConditions UnlockConditions
}

// A SiacoinOutput holds a volume of siacoins. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of the UnlockConditions that must be fulfilled
// in order to spend the output.
type SiacoinOutput struct {
	Value      Currency
	UnlockHash UnlockHash
}

// A FileContract is a public record of a storage agreement between a "host"
// and a "renter." It mandates that a host must submit a storage proof to the
// network, proving that they still possess the file they have agreed to
// store.
//
// The party must submit the storage proof in a block that is between 'Start'
// and 'Expiration'. Upon submitting the proof, the outputs for
// 'ValidProofOutputs' are created. If the party does not submit a storage
// proof by 'Expiration', then the outputs for 'MissedProofOutputs' are
// created instead. The sum of 'MissedProofOutputs' must equal 'Payout', and
// the sum of 'ValidProofOutputs' must equal 'Payout' plus the siafund fee.
// This fee is sent to the siafund pool, which is a set of siacoins only
// spendable by siafund owners.
//
// Under normal circumstances, the payout will be funded by both the host and
// the renter, which gives the host incentive not to lose the file. The
// 'ValidProofUnlockHash' will typically be spendable by host, and the
// 'MissedProofUnlockHash' will either by spendable by the renter or by
// nobody (the ZeroUnlockHash).
//
// A contract can be terminated early by submitting a FileContractTermination
// whose UnlockConditions hash to 'TerminationHash'.
type FileContract struct {
	FileSize           uint64
	FileMerkleRoot     crypto.Hash
	Start              BlockHeight // first height at which a proof may appear
	Expiration         BlockHeight // proof deadline; missed outputs after this
	Payout             Currency
	ValidProofOutputs  []SiacoinOutput
	MissedProofOutputs []SiacoinOutput
	TerminationHash    UnlockHash
}
// A FileContractTermination terminates a file contract. The ParentID
// specifies the contract being terminated, and the TerminationConditions are
// the conditions under which termination will be treated as valid. The hash
// of the TerminationConditions must match the TerminationHash in the
// contract. 'Payouts' is a set of SiacoinOutputs describing how the payout of
// the contract is redistributed. It follows that the sum of these outputs
// must equal the original payout. The outputs can have any Value and
// UnlockHash, and do not need to match the ValidProofUnlockHash or
// MissedProofUnlockHash of the original FileContract.
type FileContractTermination struct {
	ParentID              FileContractID
	TerminationConditions UnlockConditions
	Payouts               []SiacoinOutput
}

// A StorageProof fulfills a FileContract. The proof contains a specific
// segment of the file, along with a set of hashes from the file's Merkle
// tree. In combination, these can be used to prove that the segment came from
// the file. To prevent abuse, the segment must be chosen randomly, so the ID
// of block 'Start' - 1 is used as a seed value; see StorageProofSegment for
// the exact implementation.
//
// A transaction with a StorageProof cannot have any SiacoinOutputs,
// SiafundOutputs, or FileContracts. This is because a mundane reorg can
// invalidate the proof, and with it the rest of the transaction.
type StorageProof struct {
	ParentID FileContractID
	Segment  [crypto.SegmentSize]byte
	HashSet  []crypto.Hash
}

// A SiafundInput consumes a SiafundOutput and adds the siafunds to the set of
// siafunds that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiafundInput struct {
	ParentID         SiafundOutputID
	UnlockConditions UnlockConditions
}

// A SiafundOutput holds a volume of siafunds. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of a set of UnlockConditions that must be fulfilled
// in order to spend the output.
//
// When the SiafundOutput is spent, a SiacoinOutput is created, where:
//
// SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000
// SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash
//
// When a SiafundOutput is put into a transaction, the ClaimStart must always
// equal zero. While the transaction is being processed, the ClaimStart is set
// to the value of the SiafundPool.
type SiafundOutput struct {
	Value           Currency
	UnlockHash      UnlockHash
	ClaimUnlockHash UnlockHash
	ClaimStart      Currency // zero on the wire; filled in during processing
}
// UnlockConditions are a set of conditions which must be met to execute
// certain actions, such as spending a SiacoinOutput or terminating a
// FileContract.
//
// The simplest requirement is that the block containing the UnlockConditions
// must have a height >= 'Timelock'.
//
// 'PublicKeys' specifies the set of keys that can be used to satisfy the
// UnlockConditions; of these, at least 'NumSignatures' unique keys must sign
// the transaction. The keys that do not need to use the same cryptographic
// algorithm.
//
// If 'NumSignatures' == 0, the UnlockConditions are effectively "anyone can
// unlock." If 'NumSignatures' > len('PublicKeys'), then the UnlockConditions
// cannot be fulfilled under any circumstances.
type UnlockConditions struct {
	Timelock      BlockHeight
	PublicKeys    []SiaPublicKey
	NumSignatures uint64
}

// A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
// indicates the algorithm used for signing and verification. Unrecognized
// algorithms will always verify, which allows new algorithms to be added to
// the protocol via a soft-fork.
type SiaPublicKey struct {
	Algorithm Specifier
	Key       string
}

// A TransactionSignature is a signature that is included in the transaction.
// The signature should correspond to a public key in one of the
// UnlockConditions of the transaction. This key is specified first by
// 'ParentID', which specifies the UnlockConditions, and then
// 'PublicKeyIndex', which indicates the key in the UnlockConditions. There
// are three types that use UnlockConditions: SiacoinInputs, SiafundInputs,
// and FileContractTerminations. Each of these types also references a
// ParentID, and this is the hash that 'ParentID' must match. The 'Timelock'
// prevents the signature from being used until a certain height.
// 'CoveredFields' indicates which parts of the transaction are being signed;
// see CoveredFields.
type TransactionSignature struct {
	ParentID       crypto.Hash
	PublicKeyIndex uint64
	Timelock       BlockHeight
	CoveredFields  CoveredFields
	Signature      Signature
}

// CoveredFields indicates which fields in a transaction have been covered by
// the signature. (Note that the signature does not sign the fields
// themselves, but rather their combined hash; see SigHash.) Each slice
// corresponds to a slice in the Transaction type, indicating which indices of
// the slice have been signed. The indices must be valid, i.e. within the
// bounds of the slice. In addition, they must be sorted and unique.
//
// As a convenience, a signature of the entire transaction can be indicated by
// the 'WholeTransaction' field. If 'WholeTransaction' == true, all other
// fields must be empty (except for the Signatures field, since a signature
// cannot sign itself).
type CoveredFields struct {
	WholeTransaction         bool
	SiacoinInputs            []uint64
	SiacoinOutputs           []uint64
	FileContracts            []uint64
	FileContractTerminations []uint64
	StorageProofs            []uint64
	SiafundInputs            []uint64
	SiafundOutputs           []uint64
	MinerFees                []uint64
	ArbitraryData            []uint64
	Signatures               []uint64
}
// CurrentTimestamp returns the current time as a Timestamp.
//
// time.Now().Unix() returns an int64; it is non-negative for any realistic
// wall clock, so the conversion to the uint64-backed Timestamp is safe in
// practice.
func CurrentTimestamp() Timestamp {
	return Timestamp(time.Now().Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
//
// Bug fix: the subtraction is performed on uint64s, so it must be guarded.
// The original computed InitialCoinbase - uint64(height) unconditionally,
// which wraps around once height exceeds InitialCoinbase, yielding an
// astronomically large coinbase instead of clamping to the minimum.
func CalculateCoinbase(height BlockHeight) (c Currency) {
	base := uint64(MinimumCoinbase)
	if uint64(height) < InitialCoinbase {
		if diff := InitialCoinbase - uint64(height); diff > base {
			base = diff
		}
	}
	return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int by interpreting the hash's bytes as a
// big-endian unsigned integer.
func (t Target) Int() *big.Int {
	return new(big.Int).SetBytes(t[:])
}

// Rat converts a Target to a big.Rat.
func (t Target) Rat() *big.Rat {
	return new(big.Rat).SetInt(t.Int())
}

// Inverse returns the inverse of a Target as a big.Rat.
//
// NOTE(review): big.Rat.Inv panics on a zero value, so this presumably is
// never called with the all-zero target -- confirm at call sites.
func (t Target) Inverse() *big.Rat {
	return new(big.Rat).Inv(t.Rat())
}
// IntToTarget converts a big.Int to a Target, clamping values that exceed
// 256 bits to the maximum target (RootDepth).
func IntToTarget(i *big.Int) (t Target) {
	// i may overflow the maximum target.
	// In the event of overflow, return the maximum.
	if i.BitLen() > 256 {
		return RootDepth
	}
	b := i.Bytes()
	// need to preserve big-endianness: big.Int.Bytes() omits leading zeros,
	// so the value is right-aligned within the 32-byte hash
	offset := len(t[:]) - len(b)
	copy(t[offset:], b)
	return
}

// RatToTarget converts a big.Rat to a Target.
func RatToTarget(r *big.Rat) Target {
	// conversion to big.Int truncates decimal
	i := new(big.Int).Div(r.Num(), r.Denom())
	return IntToTarget(i)
}
// Tax returns the amount of Currency that will be taxed from fc.
//
// The tax is SiafundPortion of the contract's payout; RoundDown presumably
// truncates to a multiple of SiafundCount so the tax divides evenly among
// siafund holders -- see Currency.RoundDown.
func (fc FileContract) Tax() Currency {
	return fc.Payout.MulFloat(SiafundPortion).RoundDown(SiafundCount)
}
// UnlockHash calculates the root hash of a Merkle tree built from the
// UnlockConditions: the timelock leaf first, then one leaf per public key,
// then the number-of-signatures leaf. The low-entropy Timelock and
// NumSignatures fields sit at the edges so the (high-entropy) keys between
// them protect against brute-forcing the tree.
func (uc UnlockConditions) UnlockHash() UnlockHash {
	tree := crypto.NewTree()
	tree.PushObject(uc.Timelock)
	for _, pk := range uc.PublicKeys {
		tree.PushObject(pk)
	}
	tree.PushObject(uc.NumSignatures)
	return UnlockHash(tree.Root())
}
// ID returns the ID of a Block, which is calculated by hashing the
// concatenation of the block's parent ID, nonce, and Merkle root.
func (b Block) ID() BlockID {
	return BlockID(crypto.HashAll(
		b.ParentID,
		b.Nonce,
		b.MerkleRoot(),
	))
}

// CheckTarget returns true if the block's ID meets the given target, i.e.
// the ID, compared byte-wise big-endian, is less than or equal to the target.
func (b Block) CheckTarget(target Target) bool {
	blockHash := b.ID()
	return bytes.Compare(target[:], blockHash[:]) >= 0
}
// MerkleRoot calculates the Merkle root of a Block. Leaves are pushed in a
// fixed order: the Timestamp first, then one leaf per miner payout, then one
// leaf per transaction.
func (b Block) MerkleRoot() crypto.Hash {
	tree := crypto.NewTree()
	tree.PushObject(b.Timestamp)
	for i := range b.MinerPayouts {
		tree.PushObject(b.MinerPayouts[i])
	}
	for i := range b.Transactions {
		tree.PushObject(b.Transactions[i])
	}
	return tree.Root()
}
// MinerPayoutID returns the ID of the miner payout at the given index, which
// is calculated by hashing the concatenation of the BlockID and the payout
// index.
func (b Block) MinerPayoutID(i int) SiacoinOutputID {
	return SiacoinOutputID(crypto.HashAll(
		b.ID(),
		i,
	))
}

// SiacoinOutputID returns the ID of a siacoin output at the given index,
// which is calculated by hashing the concatenation of the SiacoinOutput
// Specifier, all of the fields in the transaction (except the signatures),
// and output index. The field order below is consensus-critical.
func (t Transaction) SiacoinOutputID(i int) SiacoinOutputID {
	return SiacoinOutputID(crypto.HashAll(
		SpecifierSiacoinOutput,
		t.SiacoinInputs,
		t.SiacoinOutputs,
		t.FileContracts,
		t.FileContractTerminations,
		t.StorageProofs,
		t.SiafundInputs,
		t.SiafundOutputs,
		t.MinerFees,
		t.ArbitraryData,
		i,
	))
}
| // contract index.
func (t Transaction) FileContractID(i int) FileContractID {
return FileContractID(crypto.HashAll(
SpecifierFileContract,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// FileContractTerminationPayoutID returns the ID of a file contract
// termination payout, given the index of the payout in the termination. The
// ID is calculated by hashing the concatenation of the
// FileContractTerminationPayout Specifier, the ID of the file contract being
// terminated, and the payout index.
func (fcid FileContractID) FileContractTerminationPayoutID(i int) SiacoinOutputID {
	return SiacoinOutputID(crypto.HashAll(
		SpecifierFileContractTerminationPayout,
		fcid,
		i,
	))
}

// StorageProofOutputID returns the ID of an output created by a file
// contract, given the status of the storage proof. The ID is calculating by
// hashing the concatenation of the StorageProofOutput Specifier, the ID of
// the file contract that the proof is for, a boolean indicating whether the
// proof was valid (true) or missed (false), and the index of the output
// within the file contract.
func (fcid FileContractID) StorageProofOutputID(proofValid bool, i int) SiacoinOutputID {
	return SiacoinOutputID(crypto.HashAll(
		SpecifierStorageProofOutput,
		fcid,
		proofValid,
		i,
	))
}

// SiafundOutputID returns the ID of a SiafundOutput at the given index, which
// is calculated by hashing the concatenation of the SiafundOutput Specifier,
// all of the fields in the transaction (except the signatures), and output
// index. The field order below is consensus-critical.
func (t Transaction) SiafundOutputID(i int) SiafundOutputID {
	return SiafundOutputID(crypto.HashAll(
		SpecifierSiafundOutput,
		t.SiacoinInputs,
		t.SiacoinOutputs,
		t.FileContracts,
		t.FileContractTerminations,
		t.StorageProofs,
		t.SiafundInputs,
		t.SiafundOutputs,
		t.MinerFees,
		t.ArbitraryData,
		i,
	))
}

// SiaClaimOutputID returns the ID of the SiacoinOutput that is created when
// the siafund output is spent. The ID is the hash the SiafundOutputID.
func (id SiafundOutputID) SiaClaimOutputID() SiacoinOutputID {
	return SiacoinOutputID(crypto.HashObject(id))
}
// SigHash returns the hash of the fields in a transaction covered by a given
// signature. See CoveredFields for more details.
//
// When WholeTransaction is set, every field except the signatures is
// marshalled, followed by the identifying fields (ParentID, PublicKeyIndex,
// Timelock) of signature i itself, so a whole-transaction signature cannot
// be transplanted onto a different parent/key/timelock. Otherwise only the
// explicitly listed indices are marshalled, in field order. In both cases
// any signatures listed in cf.Signatures are appended last. The marshalling
// order is consensus-critical.
//
// NOTE(review): the per-index loops index directly into the transaction's
// slices; out-of-bounds indices in CoveredFields would panic here, so they
// are presumably validated before SigHash is called -- confirm.
func (t Transaction) SigHash(i int) crypto.Hash {
	cf := t.Signatures[i].CoveredFields
	var signedData []byte
	if cf.WholeTransaction {
		signedData = encoding.MarshalAll(
			t.SiacoinInputs,
			t.SiacoinOutputs,
			t.FileContracts,
			t.FileContractTerminations,
			t.StorageProofs,
			t.SiafundInputs,
			t.SiafundOutputs,
			t.MinerFees,
			t.ArbitraryData,
			t.Signatures[i].ParentID,
			t.Signatures[i].PublicKeyIndex,
			t.Signatures[i].Timelock,
		)
	} else {
		for _, input := range cf.SiacoinInputs {
			signedData = append(signedData, encoding.Marshal(t.SiacoinInputs[input])...)
		}
		for _, output := range cf.SiacoinOutputs {
			signedData = append(signedData, encoding.Marshal(t.SiacoinOutputs[output])...)
		}
		for _, contract := range cf.FileContracts {
			signedData = append(signedData, encoding.Marshal(t.FileContracts[contract])...)
		}
		for _, termination := range cf.FileContractTerminations {
			signedData = append(signedData, encoding.Marshal(t.FileContractTerminations[termination])...)
		}
		for _, storageProof := range cf.StorageProofs {
			signedData = append(signedData, encoding.Marshal(t.StorageProofs[storageProof])...)
		}
		for _, siafundInput := range cf.SiafundInputs {
			signedData = append(signedData, encoding.Marshal(t.SiafundInputs[siafundInput])...)
		}
		for _, siafundOutput := range cf.SiafundOutputs {
			signedData = append(signedData, encoding.Marshal(t.SiafundOutputs[siafundOutput])...)
		}
		for _, minerFee := range cf.MinerFees {
			signedData = append(signedData, encoding.Marshal(t.MinerFees[minerFee])...)
		}
		for _, arbData := range cf.ArbitraryData {
			signedData = append(signedData, encoding.Marshal(t.ArbitraryData[arbData])...)
		}
	}
	// covered signatures are appended regardless of WholeTransaction
	for _, sig := range cf.Signatures {
		signedData = append(signedData, encoding.Marshal(t.Signatures[sig])...)
	}
	return crypto.HashBytes(signedData)
}
// ID returns the id of a transaction, which is taken by marshalling all of the
// fields except for the signatures and taking the hash of the result.
func (t Transaction) ID() crypto.Hash {
tBytes := encoding.MarshalAll(
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
)
return crypto.HashBytes(tBytes)
} | // FileContractID returns the ID of a file contract at the given index, which
// is calculated by hashing the concatenation of the FileContract Specifier,
// all of the fields in the transaction (except the signatures), and the | random_line_split |
types.go | package consensus
import (
"bytes"
"math/big"
"time"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/encoding"
)
type (
Timestamp uint64
BlockHeight uint64
Siafund Currency // arbitrary-precision unsigned integer
// A Specifier is a fixed-length string that serves two purposes. In the
// wire protocol, they are used to identify a particular encoding
// algorithm, signature algorithm, etc. This allows nodes to communicate on
// their own terms; for example, to reduce bandwidth costs, a node might
// only accept compressed messages.
//
// Internally, Specifiers are used to guarantee unique IDs. Various
// consensus types have an associated ID, calculated by hashing the data
// contained in the type. By prepending the data with Specifier, we can
// guarantee that distinct types will never produce the same hash.
Specifier [16]byte
// The Signature type is arbitrary-length to enable a variety of signature
// algorithms.
Signature string
// IDs are used to refer to a type without revealing its contents. They
// are constructed by hashing specific fields of the type, along with a
// Specifier. While all of these types are hashes, defining type aliases
// gives us type safety and makes the code more readable.
BlockID crypto.Hash
SiacoinOutputID crypto.Hash
SiafundOutputID crypto.Hash
FileContractID crypto.Hash
// An UnlockHash is a specially constructed hash of the UnlockConditions
// type. "Locked" values can be unlocked by providing the UnlockConditions
// that hash to a given UnlockHash. See SpendConditions.UnlockHash for
// details on how the UnlockHash is constructed.
UnlockHash crypto.Hash
// A Target is a hash that a block's ID must be "less than" in order for
// the block to be considered valid. Miners vary the block's 'Nonce' field
// in order to brute-force such an ID. The inverse of a Target is called
// the "difficulty," because it is proportional to the amount of time
// required to brute-force the Target.
Target crypto.Hash
)
// These Specifiers are used internally when calculating a type's ID. See
// Specifier for more details.
//
// Each literal below is shorter than the 16-byte Specifier array; the
// remaining bytes are implicitly zero. What matters is that every value is
// distinct, so IDs of different object types can never collide.
var (
	SpecifierSiacoinOutput                 = Specifier{'s', 'i', 'a', 'c', 'o', 'i', 'n', ' ', 'o', 'u', 't', 'p', 'u', 't'}
	SpecifierFileContract                  = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't'}
	SpecifierFileContractTerminationPayout = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't', ' ', 't'}
	SpecifierStorageProofOutput            = Specifier{'s', 't', 'o', 'r', 'a', 'g', 'e', ' ', 'p', 'r', 'o', 'o', 'f'}
	SpecifierSiafundOutput                 = Specifier{'s', 'i', 'a', 'f', 'u', 'n', 'd', ' ', 'o', 'u', 't', 'p', 'u', 't'}
)

// These Specifiers enumerate the types of signatures that are recognized by
// this implementation. If a signature's type is unrecognized, the signature
// is treated as valid. Signatures using the special "entropy" type are always
// treated as invalid; see Consensus.md for more details.
var (
	SignatureEntropy = Specifier{'e', 'n', 't', 'r', 'o', 'p', 'y'}
	SignatureEd25519 = Specifier{'e', 'd', '2', '5', '5', '1', '9'}
)
// A Block is a summary of changes to the state that have occurred since the
// previous block. Blocks reference the ID of the previous block (their
// "parent"), creating the linked-list commonly known as the blockchain. Their
// primary function is to bundle together transactions on the network. Blocks
// are created by "miners," who collect transactions from other nodes, and
// then try to pick a Nonce that results in a block whose BlockID is below a
// given Target.
type Block struct {
ParentID BlockID
Nonce uint64
Timestamp Timestamp
MinerPayouts []SiacoinOutput
Transactions []Transaction
}
// A Transaction is an atomic component of a block. Transactions can contain
// inputs and outputs, file contracts, storage proofs, and even arbitrary
// data. They can also contain signatures to prove that a given party has
// approved the transaction, or at least a particular subset of it.
//
// Transactions can depend on other previous transactions in the same block,
// but transactions cannot spend outputs that they create or otherwise be
// self-dependent.
type Transaction struct {
SiacoinInputs []SiacoinInput
SiacoinOutputs []SiacoinOutput
FileContracts []FileContract
FileContractTerminations []FileContractTermination
StorageProofs []StorageProof
SiafundInputs []SiafundInput
SiafundOutputs []SiafundOutput
MinerFees []Currency
ArbitraryData []string
Signatures []TransactionSignature
}
// A SiacoinInput consumes a SiacoinOutput and adds the siacoins to the set of
// siacoins that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiacoinInput struct {
ParentID SiacoinOutputID
UnlockConditions UnlockConditions
}
// A SiacoinOutput holds a volume of siacoins. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of the UnlockConditions that must be fulfilled
// in order to spend the output.
type SiacoinOutput struct {
Value Currency
UnlockHash UnlockHash
}
// A FileContract is a public record of a storage agreement between a "host"
// and a "renter." It mandates that a host must submit a storage proof to the
// network, proving that they still possess the file they have agreed to
// store.
//
// The party must submit the storage proof in a block that is between 'Start'
// and 'Expiration'. Upon submitting the proof, the outputs for
// 'ValidProofOutputs' are created. If the party does not submit a storage
// proof by 'Expiration', then the outputs for 'MissedProofOutputs' are
// created instead. The sum of 'MissedProofOutputs' must equal 'Payout', and
// the sum of 'ValidProofOutputs' must equal 'Payout' plus the siafund fee.
// This fee is sent to the siafund pool, which is a set of siacoins only
// spendable by siafund owners.
//
// Under normal circumstances, the payout will be funded by both the host and
// the renter, which gives the host incentive not to lose the file. The
// 'ValidProofUnlockHash' will typically be spendable by host, and the
// 'MissedProofUnlockHash' will either be spendable by the renter or by
// nobody (the ZeroUnlockHash).
//
// A contract can be terminated early by submitting a FileContractTermination
// whose UnlockConditions hash to 'TerminationHash'.
type FileContract struct {
FileSize uint64
FileMerkleRoot crypto.Hash
Start BlockHeight
Expiration BlockHeight
Payout Currency
ValidProofOutputs []SiacoinOutput
MissedProofOutputs []SiacoinOutput
TerminationHash UnlockHash
}
// A FileContractTermination terminates a file contract. The ParentID
// specifies the contract being terminated, and the TerminationConditions are
// the conditions under which termination will be treated as valid. The hash
// of the TerminationConditions must match the TerminationHash in the
// contract. 'Payouts' is a set of SiacoinOutputs describing how the payout of
// the contract is redistributed. It follows that the sum of these outputs
// must equal the original payout. The outputs can have any Value and
// UnlockHash, and do not need to match the ValidProofUnlockHash or
// MissedProofUnlockHash of the original FileContract.
type FileContractTermination struct {
ParentID FileContractID
TerminationConditions UnlockConditions
Payouts []SiacoinOutput
}
// A StorageProof fulfills a FileContract. The proof contains a specific
// segment of the file, along with a set of hashes from the file's Merkle
// tree. In combination, these can be used to prove that the segment came from
// the file. To prevent abuse, the segment must be chosen randomly, so the ID
// of block 'Start' - 1 is used as a seed value; see StorageProofSegment for
// the exact implementation.
//
// A transaction with a StorageProof cannot have any SiacoinOutputs,
// SiafundOutputs, or FileContracts. This is because a mundane reorg can
// invalidate the proof, and with it the rest of the transaction.
type StorageProof struct {
ParentID FileContractID
Segment [crypto.SegmentSize]byte
HashSet []crypto.Hash
}
// A SiafundInput consumes a SiafundOutput and adds the siafunds to the set of
// siafunds that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiafundInput struct {
ParentID SiafundOutputID
UnlockConditions UnlockConditions
}
// A SiafundOutput holds a volume of siafunds. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of a set of UnlockConditions that must be fulfilled
// in order to spend the output.
//
// When the SiafundOutput is spent, a SiacoinOutput is created, where:
//
// SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000
// SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash
//
// When a SiafundOutput is put into a transaction, the ClaimStart must always
// equal zero. While the transaction is being processed, the ClaimStart is set
// to the value of the SiafundPool.
type SiafundOutput struct {
Value Currency
UnlockHash UnlockHash
ClaimUnlockHash UnlockHash
ClaimStart Currency
}
// UnlockConditions are a set of conditions which must be met to execute
// certain actions, such as spending a SiacoinOutput or terminating a
// FileContract.
//
// The simplest requirement is that the block containing the UnlockConditions
// must have a height >= 'Timelock'.
//
// 'PublicKeys' specifies the set of keys that can be used to satisfy the
// UnlockConditions; of these, at least 'NumSignatures' unique keys must sign
// the transaction. The keys do not need to use the same cryptographic
// algorithm.
//
// If 'NumSignatures' == 0, the UnlockConditions are effectively "anyone can
// unlock." If 'NumSignatures' > len('PublicKeys'), then the UnlockConditions
// cannot be fulfilled under any circumstances.
type UnlockConditions struct {
Timelock BlockHeight
PublicKeys []SiaPublicKey
NumSignatures uint64
}
// A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
// indicates the algorithm used for signing and verification. Unrecognized
// algorithms will always verify, which allows new algorithms to be added to
// the protocol via a soft-fork.
type SiaPublicKey struct {
Algorithm Specifier
Key string
}
// A TransactionSignature is a signature that is included in the transaction.
// The signature should correspond to a public key in one of the
// UnlockConditions of the transaction. This key is specified first by
// 'ParentID', which specifies the UnlockConditions, and then
// 'PublicKeyIndex', which indicates the key in the UnlockConditions. There
// are three types that use UnlockConditions: SiacoinInputs, SiafundInputs,
// and FileContractTerminations. Each of these types also references a
// ParentID, and this is the hash that 'ParentID' must match. The 'Timelock'
// prevents the signature from being used until a certain height.
// 'CoveredFields' indicates which parts of the transaction are being signed;
// see CoveredFields.
type TransactionSignature struct {
ParentID crypto.Hash
PublicKeyIndex uint64
Timelock BlockHeight
CoveredFields CoveredFields
Signature Signature
}
// CoveredFields indicates which fields in a transaction have been covered by
// the signature. (Note that the signature does not sign the fields
// themselves, but rather their combined hash; see SigHash.) Each slice
// corresponds to a slice in the Transaction type, indicating which indices of
// the slice have been signed. The indices must be valid, i.e. within the
// bounds of the slice. In addition, they must be sorted and unique.
//
// As a convenience, a signature of the entire transaction can be indicated by
// the 'WholeTransaction' field. If 'WholeTransaction' == true, all other
// fields must be empty (except for the Signatures field, since a signature
// cannot sign itself).
type CoveredFields struct {
WholeTransaction bool
SiacoinInputs []uint64
SiacoinOutputs []uint64
FileContracts []uint64
FileContractTerminations []uint64
StorageProofs []uint64
SiafundInputs []uint64
SiafundOutputs []uint64
MinerFees []uint64
ArbitraryData []uint64
Signatures []uint64
}
// CurrentTimestamp returns the current time as a Timestamp.
//
// Note: time.Now().Unix() yields seconds since the Unix epoch as an int64;
// the conversion to the uint64-backed Timestamp assumes the clock is not set
// before 1970 — TODO confirm.
func CurrentTimestamp() Timestamp {
	return Timestamp(time.Now().Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
func CalculateCoinbase(height BlockHeight) (c Currency) {
	// The subtraction underflows uint64 once the height exceeds
	// InitialCoinbase, which would previously wrap around and produce an
	// astronomically large coinbase instead of the documented max(). The
	// guard below clamps both the underflow case and the normal
	// below-minimum case to MinimumCoinbase.
	base := InitialCoinbase - uint64(height)
	if uint64(height) > InitialCoinbase || base < MinimumCoinbase {
		base = MinimumCoinbase
	}
	return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int.
func (t Target) Int() *big.Int |
// Rat converts a Target to a big.Rat.
func (t Target) Rat() *big.Rat {
	return new(big.Rat).SetInt(t.Int())
}

// Inverse returns the inverse of a Target as a big.Rat. The inverse is
// proportional to the difficulty of the Target (see the Target type docs).
//
// NOTE(review): big.Rat.Inv panics on a zero value, so this presumably is
// never called with a zero Target — confirm at the call sites.
func (t Target) Inverse() *big.Rat {
	return new(big.Rat).Inv(t.Rat())
}
// IntToTarget converts a big.Int to a Target. Values too wide to fit in a
// Target are clamped to RootDepth, the easiest possible target.
func IntToTarget(i *big.Int) (t Target) {
	// A Target holds 256 bits; anything wider cannot be represented, so
	// return the maximum instead of silently truncating.
	if i.BitLen() > 256 {
		return RootDepth
	}
	// big.Int.Bytes is big-endian and omits leading zero bytes, so the raw
	// bytes must be right-aligned within the Target to preserve the value.
	raw := i.Bytes()
	copy(t[len(t)-len(raw):], raw)
	return t
}
// RatToTarget converts a big.Rat to a Target.
func RatToTarget(r *big.Rat) Target {
	// big.Int.Div performs Euclidean division; for the non-negative values
	// used by targets this simply truncates the decimal. (For a negative
	// ratio the result would be floored instead — presumably targets are
	// never negative; confirm.)
	i := new(big.Int).Div(r.Num(), r.Denom())
	return IntToTarget(i)
}
// Tax returns the amount of Currency that will be taxed from fc.
//
// The tax is the SiafundPortion fraction of the contract payout —
// presumably rounded down to a multiple of SiafundCount so it divides
// evenly among all siafunds; confirm RoundDown's exact semantics.
func (fc FileContract) Tax() Currency {
	return fc.Payout.MulFloat(SiafundPortion).RoundDown(SiafundCount)
}
// UnlockHash calculates the root hash of a Merkle tree built from the
// UnlockConditions. The tree's leaves are, in order: the timelock, each
// public key (one leaf apiece), and the number of signatures. Keeping the
// high-entropy keys between the two low-entropy fields protects the latter,
// since random public keys sit next to them in the tree.
func (uc UnlockConditions) UnlockHash() UnlockHash {
	tree := crypto.NewTree()
	tree.PushObject(uc.Timelock)
	for _, key := range uc.PublicKeys {
		tree.PushObject(key)
	}
	tree.PushObject(uc.NumSignatures)
	return UnlockHash(tree.Root())
}
// ID returns the ID of a Block, which is calculated by hashing the
// concatenation of the block's parent ID, nonce, and Merkle root.
func (b Block) ID() BlockID {
	// The Merkle root commits to the timestamp, miner payouts, and
	// transactions (see MerkleRoot), so the ID covers the whole block.
	return BlockID(crypto.HashAll(
		b.ParentID,
		b.Nonce,
		b.MerkleRoot(),
	))
}
// CheckTarget returns true if the block's ID meets the given target.
func (b Block) CheckTarget(target Target) bool {
	blockHash := b.ID()
	// Targets are stored big-endian (see IntToTarget), so a lexicographic
	// byte comparison is equivalent to a numeric one: the block is valid
	// when its ID is numerically <= the target.
	return bytes.Compare(target[:], blockHash[:]) >= 0
}
// MerkleRoot calculates the Merkle root of a Block. The leaves of the Merkle
// tree are composed of the Timestamp, the miner outputs (one leaf per
// payout), and the transactions (one leaf per transaction).
func (b Block) MerkleRoot() crypto.Hash {
	tree := crypto.NewTree()
	// Leaf order is consensus-critical: timestamp first, then payouts, then
	// transactions, each in slice order. Reordering changes every block ID.
	tree.PushObject(b.Timestamp)
	for _, payout := range b.MinerPayouts {
		tree.PushObject(payout)
	}
	for _, txn := range b.Transactions {
		tree.PushObject(txn)
	}
	return tree.Root()
}
// MinerPayoutID returns the ID of the miner payout at the given index, which
// is calculated by hashing the concatenation of the BlockID and the payout
// index.
func (b Block) MinerPayoutID(i int) SiacoinOutputID {
	// The index distinguishes multiple payouts within the same block.
	return SiacoinOutputID(crypto.HashAll(
		b.ID(),
		i,
	))
}
// SiacoinOutputID returns the ID of a siacoin output at the given index,
// which is calculated by hashing the concatenation of the SiacoinOutput
// Specifier, all of the fields in the transaction (except the signatures),
// and output index.
func (t Transaction) SiacoinOutputID(i int) SiacoinOutputID {
	// Field order is consensus-critical and must stay in sync with the
	// other ID derivations (FileContractID, SiafundOutputID, Transaction.ID).
	return SiacoinOutputID(crypto.HashAll(
		SpecifierSiacoinOutput,
		t.SiacoinInputs,
		t.SiacoinOutputs,
		t.FileContracts,
		t.FileContractTerminations,
		t.StorageProofs,
		t.SiafundInputs,
		t.SiafundOutputs,
		t.MinerFees,
		t.ArbitraryData,
		i,
	))
}
// FileContractID returns the ID of a file contract at the given index, which
// is calculated by hashing the concatenation of the FileContract Specifier,
// all of the fields in the transaction (except the signatures), and the
// contract index.
func (t Transaction) FileContractID(i int) FileContractID {
	// Same field order as SiacoinOutputID; only the Specifier and the
	// meaning of the index differ. Do not reorder.
	return FileContractID(crypto.HashAll(
		SpecifierFileContract,
		t.SiacoinInputs,
		t.SiacoinOutputs,
		t.FileContracts,
		t.FileContractTerminations,
		t.StorageProofs,
		t.SiafundInputs,
		t.SiafundOutputs,
		t.MinerFees,
		t.ArbitraryData,
		i,
	))
}
// FileContractTerminationPayoutID returns the ID of a file contract
// termination payout, given the index of the payout in the termination. The
// ID is calculated by hashing the concatenation of the
// FileContractTerminationPayout Specifier, the ID of the file contract being
// terminated, and the payout index.
func (fcid FileContractID) FileContractTerminationPayoutID(i int) SiacoinOutputID {
	// fcid already commits to the originating transaction (see
	// FileContractID), so hashing it with the index is sufficient.
	return SiacoinOutputID(crypto.HashAll(
		SpecifierFileContractTerminationPayout,
		fcid,
		i,
	))
}
// StorageProofOutputID returns the ID of an output created by a file
// contract, given the status of the storage proof. The ID is calculated by
// hashing the concatenation of the StorageProofOutput Specifier, the ID of
// the file contract that the proof is for, a boolean indicating whether the
// proof was valid (true) or missed (false), and the index of the output
// within the file contract.
func (fcid FileContractID) StorageProofOutputID(proofValid bool, i int) SiacoinOutputID {
	// Including proofValid guarantees that the valid-proof and missed-proof
	// output sets can never share an ID.
	return SiacoinOutputID(crypto.HashAll(
		SpecifierStorageProofOutput,
		fcid,
		proofValid,
		i,
	))
}
// SiafundOutputID returns the ID of a SiafundOutput at the given index, which
// is calculated by hashing the concatenation of the SiafundOutput Specifier,
// all of the fields in the transaction (except the signatures), and output
// index.
func (t Transaction) SiafundOutputID(i int) SiafundOutputID {
	// Same consensus-critical field order as SiacoinOutputID and
	// FileContractID; only the Specifier differs.
	return SiafundOutputID(crypto.HashAll(
		SpecifierSiafundOutput,
		t.SiacoinInputs,
		t.SiacoinOutputs,
		t.FileContracts,
		t.FileContractTerminations,
		t.StorageProofs,
		t.SiafundInputs,
		t.SiafundOutputs,
		t.MinerFees,
		t.ArbitraryData,
		i,
	))
}
// SiaClaimOutputID returns the ID of the SiacoinOutput that is created when
// the siafund output is spent. The ID is the hash of the SiafundOutputID.
func (id SiafundOutputID) SiaClaimOutputID() SiacoinOutputID {
	return SiacoinOutputID(crypto.HashObject(id))
}
// SigHash returns the hash of the fields in a transaction covered by a given
// signature. See CoveredFields for more details.
//
// When cf.WholeTransaction is set, every transaction field except the
// signatures is marshalled, followed by the identifying fields of signature
// i itself (ParentID, PublicKeyIndex, Timelock), so the signature commits
// to its own slot. Otherwise only the explicitly listed indices of each
// slice are marshalled, in the order they appear in CoveredFields.
//
// NOTE(review): the indices in cf are assumed to be in-bounds; an
// out-of-range index panics here. Presumably validation elsewhere enforces
// the "valid, sorted, unique" requirement from CoveredFields — confirm.
func (t Transaction) SigHash(i int) crypto.Hash {
	cf := t.Signatures[i].CoveredFields
	var signedData []byte
	if cf.WholeTransaction {
		signedData = encoding.MarshalAll(
			t.SiacoinInputs,
			t.SiacoinOutputs,
			t.FileContracts,
			t.FileContractTerminations,
			t.StorageProofs,
			t.SiafundInputs,
			t.SiafundOutputs,
			t.MinerFees,
			t.ArbitraryData,
			t.Signatures[i].ParentID,
			t.Signatures[i].PublicKeyIndex,
			t.Signatures[i].Timelock,
		)
	} else {
		// Each covered index contributes its marshalled element, slice by
		// slice, in the fixed order below. This order is consensus-critical.
		for _, input := range cf.SiacoinInputs {
			signedData = append(signedData, encoding.Marshal(t.SiacoinInputs[input])...)
		}
		for _, output := range cf.SiacoinOutputs {
			signedData = append(signedData, encoding.Marshal(t.SiacoinOutputs[output])...)
		}
		for _, contract := range cf.FileContracts {
			signedData = append(signedData, encoding.Marshal(t.FileContracts[contract])...)
		}
		for _, termination := range cf.FileContractTerminations {
			signedData = append(signedData, encoding.Marshal(t.FileContractTerminations[termination])...)
		}
		for _, storageProof := range cf.StorageProofs {
			signedData = append(signedData, encoding.Marshal(t.StorageProofs[storageProof])...)
		}
		for _, siafundInput := range cf.SiafundInputs {
			signedData = append(signedData, encoding.Marshal(t.SiafundInputs[siafundInput])...)
		}
		for _, siafundOutput := range cf.SiafundOutputs {
			signedData = append(signedData, encoding.Marshal(t.SiafundOutputs[siafundOutput])...)
		}
		for _, minerFee := range cf.MinerFees {
			signedData = append(signedData, encoding.Marshal(t.MinerFees[minerFee])...)
		}
		for _, arbData := range cf.ArbitraryData {
			signedData = append(signedData, encoding.Marshal(t.ArbitraryData[arbData])...)
		}
	}
	// Signatures listed in cf.Signatures are appended in BOTH modes — this
	// loop is intentionally outside the if/else above.
	for _, sig := range cf.Signatures {
		signedData = append(signedData, encoding.Marshal(t.Signatures[sig])...)
	}
	return crypto.HashBytes(signedData)
}
// ID returns the id of a transaction, which is taken by marshalling all of the
// fields except for the signatures and taking the hash of the result.
func (t Transaction) ID() crypto.Hash {
	// Signatures are excluded — presumably so the ID stays stable while a
	// transaction is gathering signatures; confirm. The field order matches
	// the other ID derivations in this file and is consensus-critical.
	tBytes := encoding.MarshalAll(
		t.SiacoinInputs,
		t.SiacoinOutputs,
		t.FileContracts,
		t.FileContractTerminations,
		t.StorageProofs,
		t.SiafundInputs,
		t.SiafundOutputs,
		t.MinerFees,
		t.ArbitraryData,
	)
	return crypto.HashBytes(tBytes)
}
| {
return new(big.Int).SetBytes(t[:])
} | identifier_body |
types.go | package consensus
import (
"bytes"
"math/big"
"time"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/encoding"
)
type (
Timestamp uint64
BlockHeight uint64
Siafund Currency // arbitrary-precision unsigned integer
// A Specifier is a fixed-length string that serves two purposes. In the
// wire protocol, they are used to identify a particular encoding
// algorithm, signature algorithm, etc. This allows nodes to communicate on
// their own terms; for example, to reduce bandwidth costs, a node might
// only accept compressed messages.
//
// Internally, Specifiers are used to guarantee unique IDs. Various
// consensus types have an associated ID, calculated by hashing the data
// contained in the type. By prepending the data with Specifier, we can
// guarantee that distinct types will never produce the same hash.
Specifier [16]byte
// The Signature type is arbitrary-length to enable a variety of signature
// algorithms.
Signature string
// IDs are used to refer to a type without revealing its contents. They
// are constructed by hashing specific fields of the type, along with a
// Specifier. While all of these types are hashes, defining type aliases
// gives us type safety and makes the code more readable.
BlockID crypto.Hash
SiacoinOutputID crypto.Hash
SiafundOutputID crypto.Hash
FileContractID crypto.Hash
// An UnlockHash is a specially constructed hash of the UnlockConditions
// type. "Locked" values can be unlocked by providing the UnlockConditions
// that hash to a given UnlockHash. See SpendConditions.UnlockHash for
// details on how the UnlockHash is constructed.
UnlockHash crypto.Hash
// A Target is a hash that a block's ID must be "less than" in order for
// the block to be considered valid. Miners vary the block's 'Nonce' field
// in order to brute-force such an ID. The inverse of a Target is called
// the "difficulty," because it is proportional to the amount of time
// required to brute-force the Target.
Target crypto.Hash
)
// These Specifiers are used internally when calculating a type's ID. See
// Specifier for more details.
var (
SpecifierSiacoinOutput = Specifier{'s', 'i', 'a', 'c', 'o', 'i', 'n', ' ', 'o', 'u', 't', 'p', 'u', 't'}
SpecifierFileContract = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't'}
SpecifierFileContractTerminationPayout = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't', ' ', 't'}
SpecifierStorageProofOutput = Specifier{'s', 't', 'o', 'r', 'a', 'g', 'e', ' ', 'p', 'r', 'o', 'o', 'f'}
SpecifierSiafundOutput = Specifier{'s', 'i', 'a', 'f', 'u', 'n', 'd', ' ', 'o', 'u', 't', 'p', 'u', 't'}
)
// These Specifiers enumerate the types of signatures that are recognized by
// this implementation. If a signature's type is unrecognized, the signature
// is treated as valid. Signatures using the special "entropy" type are always
// treated as invalid; see Consensus.md for more details.
var (
SignatureEntropy = Specifier{'e', 'n', 't', 'r', 'o', 'p', 'y'}
SignatureEd25519 = Specifier{'e', 'd', '2', '5', '5', '1', '9'}
)
// A Block is a summary of changes to the state that have occurred since the
// previous block. Blocks reference the ID of the previous block (their
// "parent"), creating the linked-list commonly known as the blockchain. Their
// primary function is to bundle together transactions on the network. Blocks
// are created by "miners," who collect transactions from other nodes, and
// then try to pick a Nonce that results in a block whose BlockID is below a
// given Target.
type Block struct {
ParentID BlockID
Nonce uint64
Timestamp Timestamp
MinerPayouts []SiacoinOutput
Transactions []Transaction
}
// A Transaction is an atomic component of a block. Transactions can contain
// inputs and outputs, file contracts, storage proofs, and even arbitrary
// data. They can also contain signatures to prove that a given party has
// approved the transaction, or at least a particular subset of it.
//
// Transactions can depend on other previous transactions in the same block,
// but transactions cannot spend outputs that they create or otherwise be
// self-dependent.
type Transaction struct {
SiacoinInputs []SiacoinInput
SiacoinOutputs []SiacoinOutput
FileContracts []FileContract
FileContractTerminations []FileContractTermination
StorageProofs []StorageProof
SiafundInputs []SiafundInput
SiafundOutputs []SiafundOutput
MinerFees []Currency
ArbitraryData []string
Signatures []TransactionSignature
}
// A SiacoinInput consumes a SiacoinOutput and adds the siacoins to the set of
// siacoins that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiacoinInput struct {
ParentID SiacoinOutputID
UnlockConditions UnlockConditions
}
// A SiacoinOutput holds a volume of siacoins. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of the UnlockConditions that must be fulfilled
// in order to spend the output.
type SiacoinOutput struct {
Value Currency
UnlockHash UnlockHash
}
// A FileContract is a public record of a storage agreement between a "host"
// and a "renter." It mandates that a host must submit a storage proof to the
// network, proving that they still possess the file they have agreed to
// store.
//
// The party must submit the storage proof in a block that is between 'Start'
// and 'Expiration'. Upon submitting the proof, the outputs for
// 'ValidProofOutputs' are created. If the party does not submit a storage
// proof by 'Expiration', then the outputs for 'MissedProofOutputs' are
// created instead. The sum of 'MissedProofOutputs' must equal 'Payout', and
// the sum of 'ValidProofOutputs' must equal 'Payout' plus the siafund fee.
// This fee is sent to the siafund pool, which is a set of siacoins only
// spendable by siafund owners.
//
// Under normal circumstances, the payout will be funded by both the host and
// the renter, which gives the host incentive not to lose the file. The
// 'ValidProofUnlockHash' will typically be spendable by host, and the
// 'MissedProofUnlockHash' will either be spendable by the renter or by
// nobody (the ZeroUnlockHash).
//
// A contract can be terminated early by submitting a FileContractTermination
// whose UnlockConditions hash to 'TerminationHash'.
type FileContract struct {
FileSize uint64
FileMerkleRoot crypto.Hash
Start BlockHeight
Expiration BlockHeight
Payout Currency
ValidProofOutputs []SiacoinOutput
MissedProofOutputs []SiacoinOutput
TerminationHash UnlockHash
}
// A FileContractTermination terminates a file contract. The ParentID
// specifies the contract being terminated, and the TerminationConditions are
// the conditions under which termination will be treated as valid. The hash
// of the TerminationConditions must match the TerminationHash in the
// contract. 'Payouts' is a set of SiacoinOutputs describing how the payout of
// the contract is redistributed. It follows that the sum of these outputs
// must equal the original payout. The outputs can have any Value and
// UnlockHash, and do not need to match the ValidProofUnlockHash or
// MissedProofUnlockHash of the original FileContract.
type FileContractTermination struct {
ParentID FileContractID
TerminationConditions UnlockConditions
Payouts []SiacoinOutput
}
// A StorageProof fulfills a FileContract. The proof contains a specific
// segment of the file, along with a set of hashes from the file's Merkle
// tree. In combination, these can be used to prove that the segment came from
// the file. To prevent abuse, the segment must be chosen randomly, so the ID
// of block 'Start' - 1 is used as a seed value; see StorageProofSegment for
// the exact implementation.
//
// A transaction with a StorageProof cannot have any SiacoinOutputs,
// SiafundOutputs, or FileContracts. This is because a mundane reorg can
// invalidate the proof, and with it the rest of the transaction.
type StorageProof struct {
ParentID FileContractID
Segment [crypto.SegmentSize]byte
HashSet []crypto.Hash
}
// A SiafundInput consumes a SiafundOutput and adds the siafunds to the set of
// siafunds that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiafundInput struct {
ParentID SiafundOutputID
UnlockConditions UnlockConditions
}
// A SiafundOutput holds a volume of siafunds. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of a set of UnlockConditions that must be fulfilled
// in order to spend the output.
//
// When the SiafundOutput is spent, a SiacoinOutput is created, where:
//
// SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000
// SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash
//
// When a SiafundOutput is put into a transaction, the ClaimStart must always
// equal zero. While the transaction is being processed, the ClaimStart is set
// to the value of the SiafundPool.
type SiafundOutput struct {
Value Currency
UnlockHash UnlockHash
ClaimUnlockHash UnlockHash
ClaimStart Currency
}
// UnlockConditions are a set of conditions which must be met to execute
// certain actions, such as spending a SiacoinOutput or terminating a
// FileContract.
//
// The simplest requirement is that the block containing the UnlockConditions
// must have a height >= 'Timelock'.
//
// 'PublicKeys' specifies the set of keys that can be used to satisfy the
// UnlockConditions; of these, at least 'NumSignatures' unique keys must sign
// the transaction. The keys do not need to use the same cryptographic
// algorithm.
//
// If 'NumSignatures' == 0, the UnlockConditions are effectively "anyone can
// unlock." If 'NumSignatures' > len('PublicKeys'), then the UnlockConditions
// cannot be fulfilled under any circumstances.
type UnlockConditions struct {
Timelock BlockHeight
PublicKeys []SiaPublicKey
NumSignatures uint64
}
// A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
// indicates the algorithm used for signing and verification. Unrecognized
// algorithms will always verify, which allows new algorithms to be added to
// the protocol via a soft-fork.
type SiaPublicKey struct {
Algorithm Specifier
Key string
}
// A TransactionSignature is a signature that is included in the transaction.
// The signature should correspond to a public key in one of the
// UnlockConditions of the transaction. This key is specified first by
// 'ParentID', which specifies the UnlockConditions, and then
// 'PublicKeyIndex', which indicates the key in the UnlockConditions. There
// are three types that use UnlockConditions: SiacoinInputs, SiafundInputs,
// and FileContractTerminations. Each of these types also references a
// ParentID, and this is the hash that 'ParentID' must match. The 'Timelock'
// prevents the signature from being used until a certain height.
// 'CoveredFields' indicates which parts of the transaction are being signed;
// see CoveredFields.
type TransactionSignature struct {
ParentID crypto.Hash
PublicKeyIndex uint64
Timelock BlockHeight
CoveredFields CoveredFields
Signature Signature
}
// CoveredFields indicates which fields in a transaction have been covered by
// the signature. (Note that the signature does not sign the fields
// themselves, but rather their combined hash; see SigHash.) Each slice
// corresponds to a slice in the Transaction type, indicating which indices of
// the slice have been signed. The indices must be valid, i.e. within the
// bounds of the slice. In addition, they must be sorted and unique.
//
// As a convenience, a signature of the entire transaction can be indicated by
// the 'WholeTransaction' field. If 'WholeTransaction' == true, all other
// fields must be empty (except for the Signatures field, since a signature
// cannot sign itself).
type CoveredFields struct {
WholeTransaction bool
SiacoinInputs []uint64
SiacoinOutputs []uint64
FileContracts []uint64
FileContractTerminations []uint64
StorageProofs []uint64
SiafundInputs []uint64
SiafundOutputs []uint64
MinerFees []uint64
ArbitraryData []uint64
Signatures []uint64
}
// CurrentTimestamp returns the current time as a Timestamp.
func CurrentTimestamp() Timestamp {
return Timestamp(time.Now().Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
func CalculateCoinbase(height BlockHeight) (c Currency) {
	// Guard the subtraction: uint64(height) can exceed InitialCoinbase, and
	// an unchecked InitialCoinbase-uint64(height) would wrap around to a
	// huge value instead of falling below MinimumCoinbase. (Assumes
	// InitialCoinbase >= MinimumCoinbase, which the max() equation implies.)
	base := MinimumCoinbase
	if uint64(height) < InitialCoinbase-MinimumCoinbase {
		base = InitialCoinbase - uint64(height)
	}
	return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int.
func (t Target) Int() *big.Int {
return new(big.Int).SetBytes(t[:])
}
// Rat converts a Target to a big.Rat.
func (t Target) Rat() *big.Rat {
return new(big.Rat).SetInt(t.Int())
}
// Inverse returns the inverse of a Target as a big.Rat
func (t Target) Inverse() *big.Rat {
return new(big.Rat).Inv(t.Rat())
}
// IntToTarget converts a big.Int to a Target.
func IntToTarget(i *big.Int) (t Target) {
// i may overflow the maximum target.
// In the event of overflow, return the maximum.
if i.BitLen() > 256 {
return RootDepth
}
b := i.Bytes()
// need to preserve big-endianness
offset := len(t[:]) - len(b)
copy(t[offset:], b)
return
}
// RatToTarget converts a big.Rat to a Target.
func RatToTarget(r *big.Rat) Target {
// conversion to big.Int truncates decimal
i := new(big.Int).Div(r.Num(), r.Denom())
return IntToTarget(i)
}
// Tax returns the amount of Currency that will be taxed from fc.
func (fc FileContract) Tax() Currency {
return fc.Payout.MulFloat(SiafundPortion).RoundDown(SiafundCount)
}
// UnlockHash calculates the root hash of a Merkle tree of the
// UnlockConditions object. The leaves of this tree are formed by taking the
// hash of the timelock, the hash of the public keys (one leaf each), and the
// hash of the number of signatures. The keys are put in the middle because
// Timelock and NumSignatures are both low entropy fields; they can be
// protected by having random public keys next to them.
func (uc UnlockConditions) UnlockHash() UnlockHash {
tree := crypto.NewTree()
tree.PushObject(uc.Timelock)
for i := range uc.PublicKeys {
tree.PushObject(uc.PublicKeys[i])
}
tree.PushObject(uc.NumSignatures)
return UnlockHash(tree.Root())
}
// ID returns the ID of a Block, which is calculated by hashing the
// concatenation of the block's parent ID, nonce, and Merkle root.
func (b Block) ID() BlockID {
return BlockID(crypto.HashAll(
b.ParentID,
b.Nonce,
b.MerkleRoot(),
))
}
// CheckTarget returns true if the block's ID meets the given target.
func (b Block) CheckTarget(target Target) bool {
blockHash := b.ID()
return bytes.Compare(target[:], blockHash[:]) >= 0
}
// MerkleRoot calculates the Merkle root of a Block. The leaves of the Merkle
// tree are composed of the Timestamp, the miner outputs (one leaf per
// payout), and the transactions (one leaf per transaction).
func (b Block) MerkleRoot() crypto.Hash {
tree := crypto.NewTree()
tree.PushObject(b.Timestamp)
for _, payout := range b.MinerPayouts {
tree.PushObject(payout)
}
for _, txn := range b.Transactions {
tree.PushObject(txn)
}
return tree.Root()
}
// MinerPayoutID returns the ID of the miner payout at the given index, which
// is calculated by hashing the concatenation of the BlockID and the payout
// index.
func (b Block) MinerPayoutID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
b.ID(),
i,
))
}
// SiacoinOutputID returns the ID of a siacoin output at the given index,
// which is calculated by hashing the concatenation of the SiacoinOutput
// Specifier, all of the fields in the transaction (except the signatures),
// and output index.
func (t Transaction) SiacoinOutputID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierSiacoinOutput,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// FileContractID returns the ID of a file contract at the given index, which
// is calculated by hashing the concatenation of the FileContract Specifier,
// all of the fields in the transaction (except the signatures), and the
// contract index.
func (t Transaction) FileContractID(i int) FileContractID {
return FileContractID(crypto.HashAll(
SpecifierFileContract,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// FileContractTerminationPayoutID returns the ID of a file contract
// termination payout, given the index of the payout in the termination. The
// ID is calculated by hashing the concatenation of the
// FileContractTerminationPayout Specifier, the ID of the file contract being
// terminated, and the payout index.
func (fcid FileContractID) FileContractTerminationPayoutID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierFileContractTerminationPayout,
fcid,
i,
))
}
// StorageProofOutputID returns the ID of an output created by a file
// contract, given the status of the storage proof. The ID is calculated by
// hashing the concatenation of the StorageProofOutput Specifier, the ID of
// the file contract that the proof is for, a boolean indicating whether the
// proof was valid (true) or missed (false), and the index of the output
// within the file contract.
func (fcid FileContractID) StorageProofOutputID(proofValid bool, i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierStorageProofOutput,
fcid,
proofValid,
i,
))
}
// SiafundOutputID returns the ID of a SiafundOutput at the given index, which
// is calculated by hashing the concatenation of the SiafundOutput Specifier,
// all of the fields in the transaction (except the signatures), and output
// index.
func (t Transaction) SiafundOutputID(i int) SiafundOutputID {
return SiafundOutputID(crypto.HashAll(
SpecifierSiafundOutput,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// SiaClaimOutputID returns the ID of the SiacoinOutput that is created when
// the siafund output is spent. The ID is the hash of the SiafundOutputID.
func (id SiafundOutputID) SiaClaimOutputID() SiacoinOutputID {
return SiacoinOutputID(crypto.HashObject(id))
}
// SigHash returns the hash of the fields in a transaction covered by a given
// signature. See CoveredFields for more details.
func (t Transaction) SigHash(i int) crypto.Hash {
cf := t.Signatures[i].CoveredFields
var signedData []byte
if cf.WholeTransaction {
signedData = encoding.MarshalAll(
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
t.Signatures[i].ParentID,
t.Signatures[i].PublicKeyIndex,
t.Signatures[i].Timelock,
)
} else {
for _, input := range cf.SiacoinInputs {
signedData = append(signedData, encoding.Marshal(t.SiacoinInputs[input])...)
}
for _, output := range cf.SiacoinOutputs {
signedData = append(signedData, encoding.Marshal(t.SiacoinOutputs[output])...)
}
for _, contract := range cf.FileContracts |
for _, termination := range cf.FileContractTerminations {
signedData = append(signedData, encoding.Marshal(t.FileContractTerminations[termination])...)
}
for _, storageProof := range cf.StorageProofs {
signedData = append(signedData, encoding.Marshal(t.StorageProofs[storageProof])...)
}
for _, siafundInput := range cf.SiafundInputs {
signedData = append(signedData, encoding.Marshal(t.SiafundInputs[siafundInput])...)
}
for _, siafundOutput := range cf.SiafundOutputs {
signedData = append(signedData, encoding.Marshal(t.SiafundOutputs[siafundOutput])...)
}
for _, minerFee := range cf.MinerFees {
signedData = append(signedData, encoding.Marshal(t.MinerFees[minerFee])...)
}
for _, arbData := range cf.ArbitraryData {
signedData = append(signedData, encoding.Marshal(t.ArbitraryData[arbData])...)
}
}
for _, sig := range cf.Signatures {
signedData = append(signedData, encoding.Marshal(t.Signatures[sig])...)
}
return crypto.HashBytes(signedData)
}
// ID returns the id of a transaction, which is taken by marshalling all of the
// fields except for the signatures and taking the hash of the result.
func (t Transaction) ID() crypto.Hash {
tBytes := encoding.MarshalAll(
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
)
return crypto.HashBytes(tBytes)
}
| {
signedData = append(signedData, encoding.Marshal(t.FileContracts[contract])...)
} | conditional_block |
package consensus
import (
"bytes"
"math/big"
"time"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/encoding"
)
type (
Timestamp uint64
BlockHeight uint64
Siafund Currency // arbitrary-precision unsigned integer
// A Specifier is a fixed-length string that serves two purposes. In the
// wire protocol, they are used to identify a particular encoding
// algorithm, signature algorithm, etc. This allows nodes to communicate on
// their own terms; for example, to reduce bandwidth costs, a node might
// only accept compressed messages.
//
// Internally, Specifiers are used to guarantee unique IDs. Various
// consensus types have an associated ID, calculated by hashing the data
// contained in the type. By prepending the data with Specifier, we can
// guarantee that distinct types will never produce the same hash.
Specifier [16]byte
// The Signature type is arbitrary-length to enable a variety of signature
// algorithms.
Signature string
// IDs are used to refer to a type without revealing its contents. They
// are constructed by hashing specific fields of the type, along with a
// Specifier. While all of these types are hashes, defining type aliases
// gives us type safety and makes the code more readable.
BlockID crypto.Hash
SiacoinOutputID crypto.Hash
SiafundOutputID crypto.Hash
FileContractID crypto.Hash
// An UnlockHash is a specially constructed hash of the UnlockConditions
// type. "Locked" values can be unlocked by providing the UnlockConditions
// that hash to a given UnlockHash. See SpendConditions.UnlockHash for
// details on how the UnlockHash is constructed.
UnlockHash crypto.Hash
// A Target is a hash that a block's ID must be "less than" in order for
// the block to be considered valid. Miners vary the block's 'Nonce' field
// in order to brute-force such an ID. The inverse of a Target is called
// the "difficulty," because it is proportional to the amount of time
// required to brute-force the Target.
Target crypto.Hash
)
// These Specifiers are used internally when calculating a type's ID. See
// Specifier for more details.
var (
SpecifierSiacoinOutput = Specifier{'s', 'i', 'a', 'c', 'o', 'i', 'n', ' ', 'o', 'u', 't', 'p', 'u', 't'}
SpecifierFileContract = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't'}
SpecifierFileContractTerminationPayout = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't', ' ', 't'}
SpecifierStorageProofOutput = Specifier{'s', 't', 'o', 'r', 'a', 'g', 'e', ' ', 'p', 'r', 'o', 'o', 'f'}
SpecifierSiafundOutput = Specifier{'s', 'i', 'a', 'f', 'u', 'n', 'd', ' ', 'o', 'u', 't', 'p', 'u', 't'}
)
// These Specifiers enumerate the types of signatures that are recognized by
// this implementation. If a signature's type is unrecognized, the signature
// is treated as valid. Signatures using the special "entropy" type are always
// treated as invalid; see Consensus.md for more details.
var (
SignatureEntropy = Specifier{'e', 'n', 't', 'r', 'o', 'p', 'y'}
SignatureEd25519 = Specifier{'e', 'd', '2', '5', '5', '1', '9'}
)
// A Block is a summary of changes to the state that have occurred since the
// previous block. Blocks reference the ID of the previous block (their
// "parent"), creating the linked-list commonly known as the blockchain. Their
// primary function is to bundle together transactions on the network. Blocks
// are created by "miners," who collect transactions from other nodes, and
// then try to pick a Nonce that results in a block whose BlockID is below a
// given Target.
type Block struct {
	ParentID     BlockID         // ID of this block's parent; forms the chain linkage.
	Nonce        uint64          // varied by miners until the ID meets the target (see CheckTarget).
	Timestamp    Timestamp       // block time — presumably Unix seconds (see CurrentTimestamp); set by the miner.
	MinerPayouts []SiacoinOutput // miner payouts; one Merkle leaf each (see MerkleRoot).
	Transactions []Transaction   // transactions bundled into this block; one Merkle leaf each.
}
// A Transaction is an atomic component of a block. Transactions can contain
// inputs and outputs, file contracts, storage proofs, and even arbitrary
// data. They can also contain signatures to prove that a given party has
// approved the transaction, or at least a particular subset of it.
//
// Transactions can depend on other previous transactions in the same block,
// but transactions cannot spend outputs that they create or otherwise be
// self-dependent.
type Transaction struct {
SiacoinInputs []SiacoinInput
SiacoinOutputs []SiacoinOutput
FileContracts []FileContract
FileContractTerminations []FileContractTermination
StorageProofs []StorageProof
SiafundInputs []SiafundInput
SiafundOutputs []SiafundOutput
MinerFees []Currency
ArbitraryData []string
Signatures []TransactionSignature
}
// A SiacoinInput consumes a SiacoinOutput and adds the siacoins to the set of
// siacoins that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiacoinInput struct {
ParentID SiacoinOutputID
UnlockConditions UnlockConditions
}
// A SiacoinOutput holds a volume of siacoins. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of the UnlockConditions that must be fulfilled
// in order to spend the output.
type SiacoinOutput struct {
Value Currency
UnlockHash UnlockHash
}
// A FileContract is a public record of a storage agreement between a "host"
// and a "renter." It mandates that a host must submit a storage proof to the
// network, proving that they still possess the file they have agreed to
// store.
//
// The party must submit the storage proof in a block that is between 'Start'
// and 'Expiration'. Upon submitting the proof, the outputs for
// 'ValidProofOutputs' are created. If the party does not submit a storage
// proof by 'Expiration', then the outputs for 'MissedProofOutputs' are
// created instead. The sum of 'MissedProofOutputs' must equal 'Payout', and
// the sum of 'ValidProofOutputs' must equal 'Payout' plus the siafund fee.
// This fee is sent to the siafund pool, which is a set of siacoins only
// spendable by siafund owners.
//
// Under normal circumstances, the payout will be funded by both the host and
// the renter, which gives the host incentive not to lose the file. The
// 'ValidProofUnlockHash' will typically be spendable by host, and the
// 'MissedProofUnlockHash' will either be spendable by the renter or by
// nobody (the ZeroUnlockHash).
//
// A contract can be terminated early by submitting a FileContractTermination
// whose UnlockConditions hash to 'TerminationHash'.
type FileContract struct {
FileSize uint64
FileMerkleRoot crypto.Hash
Start BlockHeight
Expiration BlockHeight
Payout Currency
ValidProofOutputs []SiacoinOutput
MissedProofOutputs []SiacoinOutput
TerminationHash UnlockHash
}
// A FileContractTermination terminates a file contract. The ParentID
// specifies the contract being terminated, and the TerminationConditions are
// the conditions under which termination will be treated as valid. The hash
// of the TerminationConditions must match the TerminationHash in the
// contract. 'Payouts' is a set of SiacoinOutputs describing how the payout of
// the contract is redistributed. It follows that the sum of these outputs
// must equal the original payout. The outputs can have any Value and
// UnlockHash, and do not need to match the ValidProofUnlockHash or
// MissedProofUnlockHash of the original FileContract.
type FileContractTermination struct {
ParentID FileContractID
TerminationConditions UnlockConditions
Payouts []SiacoinOutput
}
// A StorageProof fulfills a FileContract. The proof contains a specific
// segment of the file, along with a set of hashes from the file's Merkle
// tree. In combination, these can be used to prove that the segment came from
// the file. To prevent abuse, the segment must be chosen randomly, so the ID
// of block 'Start' - 1 is used as a seed value; see StorageProofSegment for
// the exact implementation.
//
// A transaction with a StorageProof cannot have any SiacoinOutputs,
// SiafundOutputs, or FileContracts. This is because a mundane reorg can
// invalidate the proof, and with it the rest of the transaction.
type StorageProof struct {
ParentID FileContractID
Segment [crypto.SegmentSize]byte
HashSet []crypto.Hash
}
// A SiafundInput consumes a SiafundOutput and adds the siafunds to the set of
// siafunds that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiafundInput struct {
ParentID SiafundOutputID
UnlockConditions UnlockConditions
}
// A SiafundOutput holds a volume of siafunds. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of a set of UnlockConditions that must be fulfilled
// in order to spend the output.
//
// When the SiafundOutput is spent, a SiacoinOutput is created, where:
//
// SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000
// SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash
//
// When a SiafundOutput is put into a transaction, the ClaimStart must always
// equal zero. While the transaction is being processed, the ClaimStart is set
// to the value of the SiafundPool.
type SiafundOutput struct {
	Value           Currency   // number of siafunds held by this output.
	UnlockHash      UnlockHash // hash of the UnlockConditions required to spend this output.
	ClaimUnlockHash UnlockHash // UnlockHash given to the SiacoinOutput created when this output is spent.
	ClaimStart      Currency   // SiafundPool value at processing time; must be zero when submitted.
}
// UnlockConditions are a set of conditions which must be met to execute
// certain actions, such as spending a SiacoinOutput or terminating a
// FileContract.
//
// The simplest requirement is that the block containing the UnlockConditions
// must have a height >= 'Timelock'.
//
// 'PublicKeys' specifies the set of keys that can be used to satisfy the
// UnlockConditions; of these, at least 'NumSignatures' unique keys must sign
// the transaction. The keys do not need to use the same cryptographic
// algorithm.
//
// If 'NumSignatures' == 0, the UnlockConditions are effectively "anyone can
// unlock." If 'NumSignatures' > len('PublicKeys'), then the UnlockConditions
// cannot be fulfilled under any circumstances.
type UnlockConditions struct {
Timelock BlockHeight
PublicKeys []SiaPublicKey
NumSignatures uint64
}
// A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
// indicates the algorithm used for signing and verification. Unrecognized
// algorithms will always verify, which allows new algorithms to be added to
// the protocol via a soft-fork.
type SiaPublicKey struct {
Algorithm Specifier
Key string
}
// A TransactionSignature is a signature that is included in the transaction.
// The signature should correspond to a public key in one of the
// UnlockConditions of the transaction. This key is specified first by
// 'ParentID', which specifies the UnlockConditions, and then
// 'PublicKeyIndex', which indicates the key in the UnlockConditions. There
// are three types that use UnlockConditions: SiacoinInputs, SiafundInputs,
// and FileContractTerminations. Each of these types also references a
// ParentID, and this is the hash that 'ParentID' must match. The 'Timelock'
// prevents the signature from being used until a certain height.
// 'CoveredFields' indicates which parts of the transaction are being signed;
// see CoveredFields.
type TransactionSignature struct {
ParentID crypto.Hash
PublicKeyIndex uint64
Timelock BlockHeight
CoveredFields CoveredFields
Signature Signature
}
// CoveredFields indicates which fields in a transaction have been covered by
// the signature. (Note that the signature does not sign the fields
// themselves, but rather their combined hash; see SigHash.) Each slice
// corresponds to a slice in the Transaction type, indicating which indices of
// the slice have been signed. The indices must be valid, i.e. within the
// bounds of the slice. In addition, they must be sorted and unique.
//
// As a convenience, a signature of the entire transaction can be indicated by
// the 'WholeTransaction' field. If 'WholeTransaction' == true, all other
// fields must be empty (except for the Signatures field, since a signature
// cannot sign itself).
type CoveredFields struct {
WholeTransaction bool
SiacoinInputs []uint64
SiacoinOutputs []uint64
FileContracts []uint64
FileContractTerminations []uint64
StorageProofs []uint64
SiafundInputs []uint64
SiafundOutputs []uint64
MinerFees []uint64
ArbitraryData []uint64
Signatures []uint64
}
// CurrentTimestamp reports the present wall-clock time as a Timestamp
// (seconds since the Unix epoch).
func CurrentTimestamp() Timestamp {
	now := time.Now()
	return Timestamp(now.Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
func CalculateCoinbase(height BlockHeight) (c Currency) {
	// Guard the subtraction: uint64(height) can exceed InitialCoinbase, and
	// an unchecked InitialCoinbase-uint64(height) would wrap around to a
	// huge value instead of falling below MinimumCoinbase. (Assumes
	// InitialCoinbase >= MinimumCoinbase, which the max() equation implies.)
	base := MinimumCoinbase
	if uint64(height) < InitialCoinbase-MinimumCoinbase {
		base = InitialCoinbase - uint64(height)
	}
	return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int, interpreting the hash bytes as a
// big-endian unsigned integer.
func (t Target) Int() *big.Int {
	result := new(big.Int)
	result.SetBytes(t[:])
	return result
}
// Rat converts a Target to a big.Rat (an exact ratio with denominator 1).
func (t Target) Rat() *big.Rat {
	asInt := t.Int()
	return new(big.Rat).SetInt(asInt)
}
// Inverse returns the inverse of a Target as a big.Rat — i.e. 1/target,
// which is proportional to the mining difficulty (see the Target type docs).
//
// NOTE(review): big.Rat.Inv panics on a zero value, so an all-zero Target
// would panic here; confirm callers never pass one.
func (t Target) Inverse() *big.Rat {
	return new(big.Rat).Inv(t.Rat())
}
// IntToTarget converts a big.Int to a Target.
//
// NOTE(review): i.Bytes() drops the sign, so a negative input maps to its
// absolute value; confirm callers never pass negatives.
func IntToTarget(i *big.Int) (t Target) {
	// i may overflow the maximum target.
	// In the event of overflow, return the maximum.
	if i.BitLen() > 256 {
		return RootDepth
	}
	b := i.Bytes()
	// need to preserve big-endianness: right-align the bytes so the value
	// is zero-padded on the left (most-significant) side of the Target.
	offset := len(t[:]) - len(b)
	copy(t[offset:], b)
	return
}
// RatToTarget converts a big.Rat to a Target.
func RatToTarget(r *big.Rat) Target {
	// conversion to big.Int truncates decimal
	// (big.Int.Div is Euclidean division; for the expected non-negative
	// ratios this is plain truncation — TODO confirm inputs are >= 0).
	i := new(big.Int).Div(r.Num(), r.Denom())
	return IntToTarget(i)
}
// Tax returns the amount of Currency that will be taxed from fc: the
// SiafundPortion fraction of the contract payout, rounded down via
// RoundDown(SiafundCount) so the tax divides evenly among siafunds.
func (fc FileContract) Tax() Currency {
	return fc.Payout.MulFloat(SiafundPortion).RoundDown(SiafundCount)
}
// UnlockHash calculates the root hash of a Merkle tree of the
// UnlockConditions object. The leaves of this tree are formed by taking the
// hash of the timelock, the hash of the public keys (one leaf each), and the
// hash of the number of signatures. The keys are put in the middle because
// Timelock and NumSignatures are both low entropy fields; they can be
// protected by having random public keys next to them.
func (uc UnlockConditions) UnlockHash() UnlockHash {
	tree := crypto.NewTree()
	// Leaf order is consensus-critical: timelock, each key, NumSignatures.
	tree.PushObject(uc.Timelock)
	for i := range uc.PublicKeys {
		tree.PushObject(uc.PublicKeys[i])
	}
	tree.PushObject(uc.NumSignatures)
	return UnlockHash(tree.Root())
}
// ID returns the ID of a Block, which is calculated by hashing the
// concatenation of the block's parent ID, nonce, and Merkle root.
func (b Block) | () BlockID {
return BlockID(crypto.HashAll(
b.ParentID,
b.Nonce,
b.MerkleRoot(),
))
}
// CheckTarget returns true if the block's ID meets the given target.
func (b Block) CheckTarget(target Target) bool {
	blockHash := b.ID()
	// "Meets" means the ID is numerically <= the target when both are
	// compared as big-endian byte strings (bytes.Compare is lexicographic).
	return bytes.Compare(target[:], blockHash[:]) >= 0
}
// MerkleRoot calculates the Merkle root of a Block. The leaves of the Merkle
// tree are composed of the Timestamp, the miner outputs (one leaf per
// payout), and the transactions (one leaf per transaction).
func (b Block) MerkleRoot() crypto.Hash {
	tree := crypto.NewTree()
	// Leaf order is consensus-critical: timestamp, payouts, transactions.
	tree.PushObject(b.Timestamp)
	for _, payout := range b.MinerPayouts {
		tree.PushObject(payout)
	}
	for _, txn := range b.Transactions {
		tree.PushObject(txn)
	}
	return tree.Root()
}
// MinerPayoutID returns the ID of the miner payout at the given index, which
// is calculated by hashing the concatenation of the BlockID and the payout
// index.
//
// NOTE(review): unlike the transaction-level output IDs, no Specifier is
// prepended here — presumably the BlockID suffices to disambiguate; confirm.
func (b Block) MinerPayoutID(i int) SiacoinOutputID {
	return SiacoinOutputID(crypto.HashAll(
		b.ID(),
		i,
	))
}
// SiacoinOutputID returns the ID of a siacoin output at the given index,
// which is calculated by hashing the concatenation of the SiacoinOutput
// Specifier, all of the fields in the transaction (except the signatures),
// and output index.
func (t Transaction) SiacoinOutputID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierSiacoinOutput,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// FileContractID returns the ID of a file contract at the given index, which
// is calculated by hashing the concatenation of the FileContract Specifier,
// all of the fields in the transaction (except the signatures), and the
// contract index.
func (t Transaction) FileContractID(i int) FileContractID {
return FileContractID(crypto.HashAll(
SpecifierFileContract,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// FileContractTerminationPayoutID returns the ID of a file contract
// termination payout, given the index of the payout in the termination. The
// ID is calculated by hashing the concatenation of the
// FileContractTerminationPayout Specifier, the ID of the file contract being
// terminated, and the payout index.
func (fcid FileContractID) FileContractTerminationPayoutID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierFileContractTerminationPayout,
fcid,
i,
))
}
// StorageProofOutputID returns the ID of an output created by a file
// contract, given the status of the storage proof. The ID is calculated by
// hashing the concatenation of the StorageProofOutput Specifier, the ID of
// the file contract that the proof is for, a boolean indicating whether the
// proof was valid (true) or missed (false), and the index of the output
// within the file contract.
func (fcid FileContractID) StorageProofOutputID(proofValid bool, i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierStorageProofOutput,
fcid,
proofValid,
i,
))
}
// SiafundOutputID returns the ID of a SiafundOutput at the given index, which
// is calculated by hashing the concatenation of the SiafundOutput Specifier,
// all of the fields in the transaction (except the signatures), and output
// index.
func (t Transaction) SiafundOutputID(i int) SiafundOutputID {
return SiafundOutputID(crypto.HashAll(
SpecifierSiafundOutput,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// SiaClaimOutputID returns the ID of the SiacoinOutput that is created when
// the siafund output is spent. The ID is the hash of the SiafundOutputID.
func (id SiafundOutputID) SiaClaimOutputID() SiacoinOutputID {
	// The claim output's ID is simply the hash of the siafund output's ID.
	h := crypto.HashObject(id)
	return SiacoinOutputID(h)
}
// SigHash returns the hash of the fields in a transaction covered by a given
// signature. See CoveredFields for more details.
//
// The byte stream fed to the hash is order-sensitive and consensus-critical;
// the loops below must stay in canonical field order.
func (t Transaction) SigHash(i int) crypto.Hash {
	cf := t.Signatures[i].CoveredFields
	var signedData []byte
	if cf.WholeTransaction {
		// A whole-transaction signature covers every field except the
		// signatures themselves, plus the identifying fields of this
		// signature (so it cannot be transplanted onto a different
		// parent, key index, or timelock).
		signedData = encoding.MarshalAll(
			t.SiacoinInputs,
			t.SiacoinOutputs,
			t.FileContracts,
			t.FileContractTerminations,
			t.StorageProofs,
			t.SiafundInputs,
			t.SiafundOutputs,
			t.MinerFees,
			t.ArbitraryData,
			t.Signatures[i].ParentID,
			t.Signatures[i].PublicKeyIndex,
			t.Signatures[i].Timelock,
		)
	} else {
		// Otherwise, concatenate the encodings of exactly the indices
		// listed in CoveredFields.
		for _, input := range cf.SiacoinInputs {
			signedData = append(signedData, encoding.Marshal(t.SiacoinInputs[input])...)
		}
		for _, output := range cf.SiacoinOutputs {
			signedData = append(signedData, encoding.Marshal(t.SiacoinOutputs[output])...)
		}
		for _, contract := range cf.FileContracts {
			signedData = append(signedData, encoding.Marshal(t.FileContracts[contract])...)
		}
		for _, termination := range cf.FileContractTerminations {
			signedData = append(signedData, encoding.Marshal(t.FileContractTerminations[termination])...)
		}
		for _, storageProof := range cf.StorageProofs {
			signedData = append(signedData, encoding.Marshal(t.StorageProofs[storageProof])...)
		}
		for _, siafundInput := range cf.SiafundInputs {
			signedData = append(signedData, encoding.Marshal(t.SiafundInputs[siafundInput])...)
		}
		for _, siafundOutput := range cf.SiafundOutputs {
			signedData = append(signedData, encoding.Marshal(t.SiafundOutputs[siafundOutput])...)
		}
		for _, minerFee := range cf.MinerFees {
			signedData = append(signedData, encoding.Marshal(t.MinerFees[minerFee])...)
		}
		for _, arbData := range cf.ArbitraryData {
			signedData = append(signedData, encoding.Marshal(t.ArbitraryData[arbData])...)
		}
	}
	// Covered signatures are appended even when WholeTransaction is set
	// (a signature cannot sign itself, so they are listed explicitly).
	for _, sig := range cf.Signatures {
		signedData = append(signedData, encoding.Marshal(t.Signatures[sig])...)
	}
	return crypto.HashBytes(signedData)
}
// ID returns the id of a transaction, which is taken by marshalling all of the
// fields except for the signatures and taking the hash of the result.
func (t Transaction) ID() crypto.Hash {
tBytes := encoding.MarshalAll(
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
)
return crypto.HashBytes(tBytes)
}
| ID | identifier_name |
SurveySimulator.py | #!/usr/bin/python
from random import random
import math
import ephem
# import field
# to be implemented once the field class has been created
class ssobj(ephem.EllipticalBody):
'Class for all Survey Simulator objects.'
def __init__(self, a, e, inc, capom, argperi, H=5, M=0.0):
# ephem.EllipticalBody.__init__()
self.a = a
self.e = e
self.inc = inc # degrees
self.Om = capom # degrees
self.om = argperi # degrees
self.H = H
self.M = M
self._G = -0.12 # Hard coded by JM: "c Hard coded slope for magnitude ! Bad boy !"
#----------- a
@property
def a(self):
"""I'm the a property."""
return self._a
@a.setter
def a(self, value):
if not 0.0 <= value <= 10E6:
|
self._a = value
#----------- e
@property
def e(self):
"""I'm the e property."""
return self._e
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def inc(self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*- Detect *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
class detect(object):
fuzzedvars =[]
@classmethod
def load_survey(cls, path):
# Empty dictionary to contain all of the field objects in the class
cls.fields = {}
# path to pointing.list directory
# create field objects for every pointing which are shared by the class
@classmethod
def hdraw(cls, *args):
pass
@classmethod
def fuzz_objects(cls, *args):
# cls.fuzzed = True # Probably unnecessary
options = ['a', 'e', 'inc', 'Om', 'om']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of a fuzzable variable')
if not item[2] is 'abs':
rais ValueError("The third argument for fuzz_objects MUST be 'abs' if specified")
if len(item) > 3:
raise ValuError("Specify the variable to be fuzzed and the amount e.g. ('inc', 1, 'abs')")
cls.fuzzedvars = args
@classmethod
def load_file(cls, filepath, *args):
cls.filepath = filepath
#
# take in the order of the variables in the file as a tuple
# i.e. ss.loadfile(path, ('inc',1), ('a',2)) counting from 0
options = ['a', 'e', 'inc', 'Om', 'om', 'H', 'M', 'M_epoch']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of the appropriate read-in type')
if len(item) > 2:
raise ValuError("Specify the variable and column of the variable in the form ('a', 0), counting from 0")
cls.elementorder = args
@classmethod
def numdetections(class, numdetections):
cls.numdetections = numdetections
@classmethod
def output(cls, outputfile):
cls.outputfile = outputfile
def __init__(self, external_candidate):
# Take in the cadidate object and
# do all of the actual detection stuff.
pass
# Probably also write out for successful detections
| raise ValueError('Bad a value. Ensure 0.0 < a < 10E6') | conditional_block |
SurveySimulator.py | #!/usr/bin/python
from random import random
import math
import ephem
# import field
# to be implemented once the field class has been created
class ssobj(ephem.EllipticalBody):
'Class for all Survey Simulator objects.'
def __init__(self, a, e, inc, capom, argperi, H=5, M=0.0):
# ephem.EllipticalBody.__init__()
self.a = a
self.e = e
self.inc = inc # degrees
self.Om = capom # degrees
self.om = argperi # degrees
self.H = H
self.M = M
self._G = -0.12 # Hard coded by JM: "c Hard coded slope for magnitude ! Bad boy !"
#----------- a
@property
def a(self):
"""I'm the a property."""
return self._a
@a.setter
def a(self, value): | raise ValueError('Bad a value. Ensure 0.0 < a < 10E6')
self._a = value
#----------- e
@property
def e(self):
"""I'm the e property."""
return self._e
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def inc(self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*- Detect *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
class detect(object):
fuzzedvars =[]
@classmethod
def load_survey(cls, path):
# Empty dictionary to contain all of the field objects in the class
cls.fields = {}
# path to pointing.list directory
# create field objects for every pointing which are shared by the class
@classmethod
def hdraw(cls, *args):
pass
@classmethod
def fuzz_objects(cls, *args):
# cls.fuzzed = True # Probably unnecessary
options = ['a', 'e', 'inc', 'Om', 'om']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of a fuzzable variable')
if not item[2] is 'abs':
rais ValueError("The third argument for fuzz_objects MUST be 'abs' if specified")
if len(item) > 3:
raise ValuError("Specify the variable to be fuzzed and the amount e.g. ('inc', 1, 'abs')")
cls.fuzzedvars = args
@classmethod
def load_file(cls, filepath, *args):
cls.filepath = filepath
#
# take in the order of the variables in the file as a tuple
# i.e. ss.loadfile(path, ('inc',1), ('a',2)) counting from 0
options = ['a', 'e', 'inc', 'Om', 'om', 'H', 'M', 'M_epoch']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of the appropriate read-in type')
if len(item) > 2:
raise ValuError("Specify the variable and column of the variable in the form ('a', 0), counting from 0")
cls.elementorder = args
@classmethod
def numdetections(class, numdetections):
cls.numdetections = numdetections
@classmethod
def output(cls, outputfile):
cls.outputfile = outputfile
def __init__(self, external_candidate):
# Take in the cadidate object and
# do all of the actual detection stuff.
pass
# Probably also write out for successful detections | if not 0.0 <= value <= 10E6: | random_line_split |
SurveySimulator.py | #!/usr/bin/python
from random import random
import math
import ephem
# import field
# to be implemented once the field class has been created
class ssobj(ephem.EllipticalBody):
'Class for all Survey Simulator objects.'
def __init__(self, a, e, inc, capom, argperi, H=5, M=0.0):
# ephem.EllipticalBody.__init__()
self.a = a
self.e = e
self.inc = inc # degrees
self.Om = capom # degrees
self.om = argperi # degrees
self.H = H
self.M = M
self._G = -0.12 # Hard coded by JM: "c Hard coded slope for magnitude ! Bad boy !"
#----------- a
@property
def a(self):
"""I'm the a property."""
return self._a
@a.setter
def a(self, value):
if not 0.0 <= value <= 10E6:
raise ValueError('Bad a value. Ensure 0.0 < a < 10E6')
self._a = value
#----------- e
@property
def e(self):
"""I'm the e property."""
return self._e
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def | (self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*- Detect *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
class detect(object):
fuzzedvars =[]
@classmethod
def load_survey(cls, path):
# Empty dictionary to contain all of the field objects in the class
cls.fields = {}
# path to pointing.list directory
# create field objects for every pointing which are shared by the class
@classmethod
def hdraw(cls, *args):
pass
@classmethod
def fuzz_objects(cls, *args):
# cls.fuzzed = True # Probably unnecessary
options = ['a', 'e', 'inc', 'Om', 'om']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of a fuzzable variable')
if not item[2] is 'abs':
rais ValueError("The third argument for fuzz_objects MUST be 'abs' if specified")
if len(item) > 3:
raise ValuError("Specify the variable to be fuzzed and the amount e.g. ('inc', 1, 'abs')")
cls.fuzzedvars = args
@classmethod
def load_file(cls, filepath, *args):
cls.filepath = filepath
#
# take in the order of the variables in the file as a tuple
# i.e. ss.loadfile(path, ('inc',1), ('a',2)) counting from 0
options = ['a', 'e', 'inc', 'Om', 'om', 'H', 'M', 'M_epoch']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of the appropriate read-in type')
if len(item) > 2:
raise ValuError("Specify the variable and column of the variable in the form ('a', 0), counting from 0")
cls.elementorder = args
@classmethod
def numdetections(class, numdetections):
cls.numdetections = numdetections
@classmethod
def output(cls, outputfile):
cls.outputfile = outputfile
def __init__(self, external_candidate):
# Take in the cadidate object and
# do all of the actual detection stuff.
pass
# Probably also write out for successful detections
| inc | identifier_name |
SurveySimulator.py | #!/usr/bin/python
from random import random
import math
import ephem
# import field
# to be implemented once the field class has been created
class ssobj(ephem.EllipticalBody):
'Class for all Survey Simulator objects.'
def __init__(self, a, e, inc, capom, argperi, H=5, M=0.0):
# ephem.EllipticalBody.__init__()
self.a = a
self.e = e
self.inc = inc # degrees
self.Om = capom # degrees
self.om = argperi # degrees
self.H = H
self.M = M
self._G = -0.12 # Hard coded by JM: "c Hard coded slope for magnitude ! Bad boy !"
#----------- a
@property
def a(self):
"""I'm the a property."""
return self._a
@a.setter
def a(self, value):
if not 0.0 <= value <= 10E6:
raise ValueError('Bad a value. Ensure 0.0 < a < 10E6')
self._a = value
#----------- e
@property
def e(self):
|
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def inc(self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*- Detect *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
class detect(object):
fuzzedvars =[]
@classmethod
def load_survey(cls, path):
# Empty dictionary to contain all of the field objects in the class
cls.fields = {}
# path to pointing.list directory
# create field objects for every pointing which are shared by the class
@classmethod
def hdraw(cls, *args):
pass
@classmethod
def fuzz_objects(cls, *args):
# cls.fuzzed = True # Probably unnecessary
options = ['a', 'e', 'inc', 'Om', 'om']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of a fuzzable variable')
if not item[2] is 'abs':
rais ValueError("The third argument for fuzz_objects MUST be 'abs' if specified")
if len(item) > 3:
raise ValuError("Specify the variable to be fuzzed and the amount e.g. ('inc', 1, 'abs')")
cls.fuzzedvars = args
@classmethod
def load_file(cls, filepath, *args):
cls.filepath = filepath
#
# take in the order of the variables in the file as a tuple
# i.e. ss.loadfile(path, ('inc',1), ('a',2)) counting from 0
options = ['a', 'e', 'inc', 'Om', 'om', 'H', 'M', 'M_epoch']
for item in args:
if not item[0] in options:
raise ValueError('Your given input of ' + item[0] + ' is not of the appropriate read-in type')
if len(item) > 2:
raise ValuError("Specify the variable and column of the variable in the form ('a', 0), counting from 0")
cls.elementorder = args
@classmethod
def numdetections(class, numdetections):
cls.numdetections = numdetections
@classmethod
def output(cls, outputfile):
cls.outputfile = outputfile
def __init__(self, external_candidate):
# Take in the cadidate object and
# do all of the actual detection stuff.
pass
# Probably also write out for successful detections
| """I'm the e property."""
return self._e | identifier_body |
simanalysis.py | import numpy as np
from MDAnalysis.analysis.distances import distance_array
import mybiotools as mbt
def traj_nslice (u,teq,tsample) :
"""
Returns the number of frames in the trajectory in universe u, using teq as
equilibration time and tsample as sampling time
"""
# get the number of frames in the slice (http://stackoverflow.com/a/7223557)
traj_slice = u.trajectory[teq::tsample]
return sum(1 for _ in traj_slice)
def hic_chipseq_r2 (hic, chipseq) :
"""
Calculate the Pearson correlation coefficient between the row sum of the
given Hi-C matrix and the given ChIP-seq profile.
"""
hic_rowsum = np.sum(hic,axis=1)/float(np.sum(hic))
return np.corrcoef(hic_rowsum,chipseq)[0,1]**2
def ps (H) :
"""
Calculate the normalized probability of contact between a monomer and all
others as a function of the linear distance s.
"""
p = np.array ([np.mean (np.diagonal (H, offset=k))
for k in range (H.shape[0])])
return p/np.sum(p)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
"""
Calculate the relative proportion of contacts of the tracers with binding
sites compared with non-binding sites. As usual user should supply
equilibration time, sampling time, and contact threshold value.
"""
# select polymer, tracers, and binding sites
polymer = sim.u.select_atoms (polymer_text)
tracers = sim.u.select_atoms (tracers_text)
bss = sim.u.select_atoms (bindingsites_text)
# select binding site indices
bs_n = bss.n_atoms
bs_idx = bss.indices
# select non-binding site indices
polymer_idx = polymer.indices
nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
nbs_n = nbs_idx.size
# evaluate contacts with binding sites and non-binding sites for each
# independent simulation snapshot
c = []
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
contacts = d<threshold
cB = np.sum (contacts[bs_idx]).astype('float')
cA = np.sum (contacts[nbs_idx]).astype('float')
if cA != 0 :
c.append ((cB/cA) / (float(bs_n)/nbs_n))
return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
"""
Perform a simple fit of the supplied time-dependent MSD, using a linear
regression of the logarithms of the values. User must supply the conversion
factor from time to real time and from length to real length. Also, user
must supply the cutoff value: from there on the values will be considered.
This is because the long-time behaviour is generally what matters really.
"""
# prepare the values to fit: exclude the first value because it is zero
t = np.arange(msd.size)*delta_t
x = np.log(t[cutoff:])
y = np.log(msd[cutoff:]*scale_l**2)
# perform fit to y = ax + b with their errors
b,a,db,da = mbt.linear_regression (x,y,0.99)
# now convert the value of b into a diffusion coefficient
D = np.exp(b)/6.0
dD = np.exp(db)/6.0
return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
"""
Calculate the mean square displacement of the particles defined by
'particles_text' in simulation sim, using sampling tsample and equilibration
time teq. Returns the matrix corresponding to the mean square displacement
of each particle, along with a matrix corresponding to the variance in the
estimate of this quantity.
"""
u = sim.u
particles = u.select_atoms (particles_text)
nparticles = particles.n_atoms
nslice = traj_nslice (u,teq,tsample)
# initialize the matrix containing all the positions
# of the particles at all the sampling frames
particles_pos = np.zeros ((nslice,nparticles,3))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
particles_pos[i,:,:] = particles.positions
# now initialize the Delta matrix, which contains the
# squared differences between the particles' positions
# at different time delays
Nt = int(nslice/2)
Delta = np.zeros((nparticles,Nt,Nt))
for delay in xrange(1,Nt+1) :
for t0 in xrange (Nt) :
t1 = t0 + delay
pos1 = particles_pos[t1,:,:]
pos0 = particles_pos[t0,:,:]
Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)
# return the matrices of MSD and its variance
return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
"""
Calculate the minimum distance between the atoms defined in sel1 and the
atoms defined in sel2, as a function of time. Returns a matrix that contains
the minimum distance for each atom defined in sel1. As usual user should
supply equilibration time, sampling time, and contact threshold value.
"""
# define atom selections
sel1 = sim.u.select_atoms (sel1_text)
sel2 = sim.u.select_atoms (sel2_text)
# get number of atoms in selection 1
natoms = sel1.n_atoms
nslice = traj_nslice (sim.u,teq,tsample)
dmin = np.zeros((natoms,nslice))
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (sel1.positions,sel2.positions,
box=ts.dimensions)
dmin[:,i] = d.min(axis=1)
return dmin
def particle_images (sim,frame_id) :
"""
Get the image index of all particles in simulation, at the frame 'frame_id'
"""
# get positions of all particles: define first the atom selection, then jump to
# the user-requested trajectory frame, get the box dimensions (currently works
# only for orthorhombic boxes, then calculate the image indices
atoms = sim.u.select_atoms ('all')
ts = sim.u.trajectory[frame_id]
L = ts.dimensions[:3]
pos = atoms.positions + L/2.
return pos//L
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
Calculate the matrix that represents the number of times that the tracers
(defined by 'tracer_text') jump from one site to another site of the polymer
(defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
excluding the first 'teq' time frames. Contact between a tracer and the
polymer is defined by the distance being smaller than 'threshold'.
"""
# define polymer and tracers
u = sim.u
polymer = u.select_atoms(polymer_text)
tracers = u.select_atoms(tracer_text)
n_polymer = polymer.n_atoms
n_tracers = tracers.n_atoms
# initialize jumping matrix and first distance matrix d_prev
J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
ts = u.trajectory [teq]
d_prev = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_prev = d_prev<threshold
for ts in u.trajectory [teq::tsample] :
# get distance matrix at current time step
d_next = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_next = d_next<threshold
# get jumps of all tracers and add it to the jumping matrix
for i in xrange (n_tracers) :
t_prev = D_prev [:,i]
t_next = D_next [:,i].reshape ((n_polymer,1))
t = t_prev * t_next
J += t
D_prev = D_next.copy()
return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
For the simulation 'sim', calculate the matrix of binding events of the
polymer and the tracers. Returns a contacts matrix of the shape
(ntracers,nslice,npolymer).
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
tracers = u.select_atoms (tracer_text)
ntracers = tracers.n_atoms
npolymer = polymer.n_atoms
nslice = mbt.traj_nslice(u,teq,tsample)
C = np.zeros((ntracers,nslice,npolymer),dtype=bool)
for i,ts in enumerate(u.trajectory [teq::tsample]) :
d = distance_array (tracers.positions,polymer.positions,
box=ts.dimensions)
c = d<threshold
C[:,i,:] = c
return C
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :
"""
Calculate the matrix of average intra-polymer distances. User must supply
the parameters teq, tsample and threshold.
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
N = polymer.n_atoms
nslice = mbt.traj_nslice (u,teq,tsample)
d = np.zeros((N,N))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
this_d = distance_array(polymer.positions,
polymer.positions,
box=ts.dimensions)
d = mbt.new_average(i,d,this_d)
return d
def | (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
# define DKL(t) vector
nframes = traj_nslice(sim.u,teq,tsample)
DKL_t = np.zeros(nframes)
# define polymer and tracers
polymer = sim.u.select_atoms(polymer_text)
tracers = sim.u.select_atoms(tracer_text)
N = polymer.n_atoms
ntracers = tracers.n_atoms
# init H and C vectors
H = np.zeros((N,N))
C = np.zeros((N,ntracers))
# analyze all simulation frames as decided
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
# calculate Hi-C at this time frame
d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
H += (d<p_threshold)
Rt = H.sum(axis=1)
# calculate ChIP-seq at this time frame
c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
C += (c<t_threshold)
Ct = C.sum(axis=1)
DKL_t[i] = mbt.KL_divergence(Ct,Rt)
return DKL_t
def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
"""
This function does the complete analysis of the tracers in the simulation.
It calculates the virtual Hi-C, virtual ChIP-seq, Kullback-Leibler
divergence between the two profiles as a function of time, and coverage of
the tracers.
"""
# define DKL(t) vector
nframes = traj_nslice(sim.u,teq,tsample)
DKL_t = np.zeros(nframes)
# define polymer and tracers
polymer = sim.u.select_atoms(polymer_text)
tracers = sim.u.select_atoms(tracer_text)
N = polymer.n_atoms
ntracers = tracers.n_atoms
# init H and C vectors
H = np.zeros((N,N),dtype=np.int32)
C = np.zeros((N,ntracers),dtype=np.int32)
# analyze all simulation frames as decided
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
# calculate Hi-C at this time frame
d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
H += (d<p_threshold)
Rt = H.sum(axis=1)
# calculate ChIP-seq at this time frame
c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
C += (c<t_threshold)
Ct = C.sum(axis=1)
DKL_t[i] = mbt.KL_divergence(Ct,Rt)
# coverage analysis
C[C>1] = 1
coverage = C.sum(axis=0).astype('float')/N
return DKL_t,H,Ct.astype(np.int64),coverage
| DKL_t | identifier_name |
simanalysis.py | import numpy as np
from MDAnalysis.analysis.distances import distance_array
import mybiotools as mbt
def traj_nslice (u,teq,tsample) :
"""
Returns the number of frames in the trajectory in universe u, using teq as
equilibration time and tsample as sampling time
"""
# get the number of frames in the slice (http://stackoverflow.com/a/7223557)
traj_slice = u.trajectory[teq::tsample]
return sum(1 for _ in traj_slice)
def hic_chipseq_r2 (hic, chipseq) :
"""
Calculate the Pearson correlation coefficient between the row sum of the
given Hi-C matrix and the given ChIP-seq profile.
"""
hic_rowsum = np.sum(hic,axis=1)/float(np.sum(hic))
return np.corrcoef(hic_rowsum,chipseq)[0,1]**2
def ps (H) :
"""
Calculate the normalized probability of contact between a monomer and all
others as a function of the linear distance s.
"""
p = np.array ([np.mean (np.diagonal (H, offset=k))
for k in range (H.shape[0])])
return p/np.sum(p)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
"""
Calculate the relative proportion of contacts of the tracers with binding
sites compared with non-binding sites. As usual user should supply
equilibration time, sampling time, and contact threshold value.
"""
# select polymer, tracers, and binding sites
polymer = sim.u.select_atoms (polymer_text)
tracers = sim.u.select_atoms (tracers_text)
bss = sim.u.select_atoms (bindingsites_text)
# select binding site indices
bs_n = bss.n_atoms
bs_idx = bss.indices
# select non-binding site indices
polymer_idx = polymer.indices
nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
nbs_n = nbs_idx.size
# evaluate contacts with binding sites and non-binding sites for each
# independent simulation snapshot
c = []
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
contacts = d<threshold
cB = np.sum (contacts[bs_idx]).astype('float')
cA = np.sum (contacts[nbs_idx]).astype('float')
if cA != 0 :
|
return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
"""
Perform a simple fit of the supplied time-dependent MSD, using a linear
regression of the logarithms of the values. User must supply the conversion
factor from time to real time and from length to real length. Also, user
must supply the cutoff value: from there on the values will be considered.
This is because the long-time behaviour is generally what matters really.
"""
# prepare the values to fit: exclude the first value because it is zero
t = np.arange(msd.size)*delta_t
x = np.log(t[cutoff:])
y = np.log(msd[cutoff:]*scale_l**2)
# perform fit to y = ax + b with their errors
b,a,db,da = mbt.linear_regression (x,y,0.99)
# now convert the value of b into a diffusion coefficient
D = np.exp(b)/6.0
dD = np.exp(db)/6.0
return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
"""
Calculate the mean square displacement of the particles defined by
'particles_text' in simulation sim, using sampling tsample and equilibration
time teq. Returns the matrix corresponding to the mean square displacement
of each particle, along with a matrix corresponding to the variance in the
estimate of this quantity.
"""
u = sim.u
particles = u.select_atoms (particles_text)
nparticles = particles.n_atoms
nslice = traj_nslice (u,teq,tsample)
# initialize the matrix containing all the positions
# of the particles at all the sampling frames
particles_pos = np.zeros ((nslice,nparticles,3))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
particles_pos[i,:,:] = particles.positions
# now initialize the Delta matrix, which contains the
# squared differences between the particles' positions
# at different time delays
Nt = int(nslice/2)
Delta = np.zeros((nparticles,Nt,Nt))
for delay in xrange(1,Nt+1) :
for t0 in xrange (Nt) :
t1 = t0 + delay
pos1 = particles_pos[t1,:,:]
pos0 = particles_pos[t0,:,:]
Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)
# return the matrices of MSD and its variance
return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
"""
Calculate the minimum distance between the atoms defined in sel1 and the
atoms defined in sel2, as a function of time. Returns a matrix that contains
the minimum distance for each atom defined in sel1. As usual user should
supply equilibration time, sampling time, and contact threshold value.
"""
# define atom selections
sel1 = sim.u.select_atoms (sel1_text)
sel2 = sim.u.select_atoms (sel2_text)
# get number of atoms in selection 1
natoms = sel1.n_atoms
nslice = traj_nslice (sim.u,teq,tsample)
dmin = np.zeros((natoms,nslice))
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (sel1.positions,sel2.positions,
box=ts.dimensions)
dmin[:,i] = d.min(axis=1)
return dmin
def particle_images (sim,frame_id) :
"""
Get the image index of all particles in simulation, at the frame 'frame_id'
"""
# get positions of all particles: define first the atom selection, then jump to
# the user-requested trajectory frame, get the box dimensions (currently works
# only for orthorhombic boxes, then calculate the image indices
atoms = sim.u.select_atoms ('all')
ts = sim.u.trajectory[frame_id]
L = ts.dimensions[:3]
pos = atoms.positions + L/2.
return pos//L
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
Calculate the matrix that represents the number of times that the tracers
(defined by 'tracer_text') jump from one site to another site of the polymer
(defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
excluding the first 'teq' time frames. Contact between a tracer and the
polymer is defined by the distance being smaller than 'threshold'.
"""
# define polymer and tracers
u = sim.u
polymer = u.select_atoms(polymer_text)
tracers = u.select_atoms(tracer_text)
n_polymer = polymer.n_atoms
n_tracers = tracers.n_atoms
# initialize jumping matrix and first distance matrix d_prev
J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
ts = u.trajectory [teq]
d_prev = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_prev = d_prev<threshold
for ts in u.trajectory [teq::tsample] :
# get distance matrix at current time step
d_next = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_next = d_next<threshold
# get jumps of all tracers and add it to the jumping matrix
for i in xrange (n_tracers) :
t_prev = D_prev [:,i]
t_next = D_next [:,i].reshape ((n_polymer,1))
t = t_prev * t_next
J += t
D_prev = D_next.copy()
return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
For the simulation 'sim', calculate the matrix of binding events of the
polymer and the tracers. Returns a contacts matrix of the shape
(ntracers,nslice,npolymer).
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
tracers = u.select_atoms (tracer_text)
ntracers = tracers.n_atoms
npolymer = polymer.n_atoms
nslice = mbt.traj_nslice(u,teq,tsample)
C = np.zeros((ntracers,nslice,npolymer),dtype=bool)
for i,ts in enumerate(u.trajectory [teq::tsample]) :
d = distance_array (tracers.positions,polymer.positions,
box=ts.dimensions)
c = d<threshold
C[:,i,:] = c
return C
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :
"""
Calculate the matrix of average intra-polymer distances. User must supply
the parameters teq, tsample and threshold.
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
N = polymer.n_atoms
nslice = mbt.traj_nslice (u,teq,tsample)
d = np.zeros((N,N))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
this_d = distance_array(polymer.positions,
polymer.positions,
box=ts.dimensions)
d = mbt.new_average(i,d,this_d)
return d
def DKL_t (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
# define DKL(t) vector
nframes = traj_nslice(sim.u,teq,tsample)
DKL_t = np.zeros(nframes)
# define polymer and tracers
polymer = sim.u.select_atoms(polymer_text)
tracers = sim.u.select_atoms(tracer_text)
N = polymer.n_atoms
ntracers = tracers.n_atoms
# init H and C vectors
H = np.zeros((N,N))
C = np.zeros((N,ntracers))
# analyze all simulation frames as decided
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
# calculate Hi-C at this time frame
d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
H += (d<p_threshold)
Rt = H.sum(axis=1)
# calculate ChIP-seq at this time frame
c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
C += (c<t_threshold)
Ct = C.sum(axis=1)
DKL_t[i] = mbt.KL_divergence(Ct,Rt)
return DKL_t
def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
"""
This function does the complete analysis of the tracers in the simulation.
It calculates the virtual Hi-C, virtual ChIP-seq, Kullback-Leibler
divergence between the two profiles as a function of time, and coverage of
the tracers.
"""
# define DKL(t) vector
nframes = traj_nslice(sim.u,teq,tsample)
DKL_t = np.zeros(nframes)
# define polymer and tracers
polymer = sim.u.select_atoms(polymer_text)
tracers = sim.u.select_atoms(tracer_text)
N = polymer.n_atoms
ntracers = tracers.n_atoms
# init H and C vectors
H = np.zeros((N,N),dtype=np.int32)
C = np.zeros((N,ntracers),dtype=np.int32)
# analyze all simulation frames as decided
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
# calculate Hi-C at this time frame
d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
H += (d<p_threshold)
Rt = H.sum(axis=1)
# calculate ChIP-seq at this time frame
c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
C += (c<t_threshold)
Ct = C.sum(axis=1)
DKL_t[i] = mbt.KL_divergence(Ct,Rt)
# coverage analysis
C[C>1] = 1
coverage = C.sum(axis=0).astype('float')/N
return DKL_t,H,Ct.astype(np.int64),coverage
| c.append ((cB/cA) / (float(bs_n)/nbs_n)) | conditional_block |
simanalysis.py | import numpy as np
from MDAnalysis.analysis.distances import distance_array
import mybiotools as mbt
def traj_nslice (u,teq,tsample) :
"""
Returns the number of frames in the trajectory in universe u, using teq as
equilibration time and tsample as sampling time
"""
# get the number of frames in the slice (http://stackoverflow.com/a/7223557)
traj_slice = u.trajectory[teq::tsample]
return sum(1 for _ in traj_slice)
def hic_chipseq_r2 (hic, chipseq) :
|
def ps (H) :
"""
Calculate the normalized probability of contact between a monomer and all
others as a function of the linear distance s.
"""
p = np.array ([np.mean (np.diagonal (H, offset=k))
for k in range (H.shape[0])])
return p/np.sum(p)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
"""
Calculate the relative proportion of contacts of the tracers with binding
sites compared with non-binding sites. As usual user should supply
equilibration time, sampling time, and contact threshold value.
"""
# select polymer, tracers, and binding sites
polymer = sim.u.select_atoms (polymer_text)
tracers = sim.u.select_atoms (tracers_text)
bss = sim.u.select_atoms (bindingsites_text)
# select binding site indices
bs_n = bss.n_atoms
bs_idx = bss.indices
# select non-binding site indices
polymer_idx = polymer.indices
nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
nbs_n = nbs_idx.size
# evaluate contacts with binding sites and non-binding sites for each
# independent simulation snapshot
c = []
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
contacts = d<threshold
cB = np.sum (contacts[bs_idx]).astype('float')
cA = np.sum (contacts[nbs_idx]).astype('float')
if cA != 0 :
c.append ((cB/cA) / (float(bs_n)/nbs_n))
return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
"""
Perform a simple fit of the supplied time-dependent MSD, using a linear
regression of the logarithms of the values. User must supply the conversion
factor from time to real time and from length to real length. Also, user
must supply the cutoff value: from there on the values will be considered.
This is because the long-time behaviour is generally what matters really.
"""
# prepare the values to fit: exclude the first value because it is zero
t = np.arange(msd.size)*delta_t
x = np.log(t[cutoff:])
y = np.log(msd[cutoff:]*scale_l**2)
# perform fit to y = ax + b with their errors
b,a,db,da = mbt.linear_regression (x,y,0.99)
# now convert the value of b into a diffusion coefficient
D = np.exp(b)/6.0
dD = np.exp(db)/6.0
return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
"""
Calculate the mean square displacement of the particles defined by
'particles_text' in simulation sim, using sampling tsample and equilibration
time teq. Returns the matrix corresponding to the mean square displacement
of each particle, along with a matrix corresponding to the variance in the
estimate of this quantity.
"""
u = sim.u
particles = u.select_atoms (particles_text)
nparticles = particles.n_atoms
nslice = traj_nslice (u,teq,tsample)
# initialize the matrix containing all the positions
# of the particles at all the sampling frames
particles_pos = np.zeros ((nslice,nparticles,3))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
particles_pos[i,:,:] = particles.positions
# now initialize the Delta matrix, which contains the
# squared differences between the particles' positions
# at different time delays
Nt = int(nslice/2)
Delta = np.zeros((nparticles,Nt,Nt))
for delay in xrange(1,Nt+1) :
for t0 in xrange (Nt) :
t1 = t0 + delay
pos1 = particles_pos[t1,:,:]
pos0 = particles_pos[t0,:,:]
Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)
# return the matrices of MSD and its variance
return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
"""
Calculate the minimum distance between the atoms defined in sel1 and the
atoms defined in sel2, as a function of time. Returns a matrix that contains
the minimum distance for each atom defined in sel1. As usual user should
supply equilibration time, sampling time, and contact threshold value.
"""
# define atom selections
sel1 = sim.u.select_atoms (sel1_text)
sel2 = sim.u.select_atoms (sel2_text)
# get number of atoms in selection 1
natoms = sel1.n_atoms
nslice = traj_nslice (sim.u,teq,tsample)
dmin = np.zeros((natoms,nslice))
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (sel1.positions,sel2.positions,
box=ts.dimensions)
dmin[:,i] = d.min(axis=1)
return dmin
def particle_images (sim,frame_id) :
"""
Get the image index of all particles in simulation, at the frame 'frame_id'
"""
# get positions of all particles: define first the atom selection, then jump to
# the user-requested trajectory frame, get the box dimensions (currently works
# only for orthorhombic boxes, then calculate the image indices
atoms = sim.u.select_atoms ('all')
ts = sim.u.trajectory[frame_id]
L = ts.dimensions[:3]
pos = atoms.positions + L/2.
return pos//L
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
Calculate the matrix that represents the number of times that the tracers
(defined by 'tracer_text') jump from one site to another site of the polymer
(defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
excluding the first 'teq' time frames. Contact between a tracer and the
polymer is defined by the distance being smaller than 'threshold'.
"""
# define polymer and tracers
u = sim.u
polymer = u.select_atoms(polymer_text)
tracers = u.select_atoms(tracer_text)
n_polymer = polymer.n_atoms
n_tracers = tracers.n_atoms
# initialize jumping matrix and first distance matrix d_prev
J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
ts = u.trajectory [teq]
d_prev = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_prev = d_prev<threshold
for ts in u.trajectory [teq::tsample] :
# get distance matrix at current time step
d_next = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_next = d_next<threshold
# get jumps of all tracers and add it to the jumping matrix
for i in xrange (n_tracers) :
t_prev = D_prev [:,i]
t_next = D_next [:,i].reshape ((n_polymer,1))
t = t_prev * t_next
J += t
D_prev = D_next.copy()
return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
For the simulation 'sim', calculate the matrix of binding events of the
polymer and the tracers. Returns a contacts matrix of the shape
(ntracers,nslice,npolymer).
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
tracers = u.select_atoms (tracer_text)
ntracers = tracers.n_atoms
npolymer = polymer.n_atoms
nslice = mbt.traj_nslice(u,teq,tsample)
C = np.zeros((ntracers,nslice,npolymer),dtype=bool)
for i,ts in enumerate(u.trajectory [teq::tsample]) :
d = distance_array (tracers.positions,polymer.positions,
box=ts.dimensions)
c = d<threshold
C[:,i,:] = c
return C
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :
"""
Calculate the matrix of average intra-polymer distances. User must supply
the parameters teq, tsample and threshold.
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
N = polymer.n_atoms
nslice = mbt.traj_nslice (u,teq,tsample)
d = np.zeros((N,N))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
this_d = distance_array(polymer.positions,
polymer.positions,
box=ts.dimensions)
d = mbt.new_average(i,d,this_d)
return d
def DKL_t (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
# define DKL(t) vector
nframes = traj_nslice(sim.u,teq,tsample)
DKL_t = np.zeros(nframes)
# define polymer and tracers
polymer = sim.u.select_atoms(polymer_text)
tracers = sim.u.select_atoms(tracer_text)
N = polymer.n_atoms
ntracers = tracers.n_atoms
# init H and C vectors
H = np.zeros((N,N))
C = np.zeros((N,ntracers))
# analyze all simulation frames as decided
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
# calculate Hi-C at this time frame
d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
H += (d<p_threshold)
Rt = H.sum(axis=1)
# calculate ChIP-seq at this time frame
c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
C += (c<t_threshold)
Ct = C.sum(axis=1)
DKL_t[i] = mbt.KL_divergence(Ct,Rt)
return DKL_t
def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
"""
This function does the complete analysis of the tracers in the simulation.
It calculates the virtual Hi-C, virtual ChIP-seq, Kullback-Leibler
divergence between the two profiles as a function of time, and coverage of
the tracers.
"""
# define DKL(t) vector
nframes = traj_nslice(sim.u,teq,tsample)
DKL_t = np.zeros(nframes)
# define polymer and tracers
polymer = sim.u.select_atoms(polymer_text)
tracers = sim.u.select_atoms(tracer_text)
N = polymer.n_atoms
ntracers = tracers.n_atoms
# init H and C vectors
H = np.zeros((N,N),dtype=np.int32)
C = np.zeros((N,ntracers),dtype=np.int32)
# analyze all simulation frames as decided
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
# calculate Hi-C at this time frame
d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
H += (d<p_threshold)
Rt = H.sum(axis=1)
# calculate ChIP-seq at this time frame
c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
C += (c<t_threshold)
Ct = C.sum(axis=1)
DKL_t[i] = mbt.KL_divergence(Ct,Rt)
# coverage analysis
C[C>1] = 1
coverage = C.sum(axis=0).astype('float')/N
return DKL_t,H,Ct.astype(np.int64),coverage
| """
Calculate the Pearson correlation coefficient between the row sum of the
given Hi-C matrix and the given ChIP-seq profile.
"""
hic_rowsum = np.sum(hic,axis=1)/float(np.sum(hic))
return np.corrcoef(hic_rowsum,chipseq)[0,1]**2 | identifier_body |
simanalysis.py | import numpy as np | import mybiotools as mbt
def traj_nslice (u,teq,tsample) :
    """
    Returns the number of frames in the trajectory in universe u, using teq as
    equilibration time and tsample as sampling time
    """
    # count the frames by walking the slice once; trajectory slices are not
    # guaranteed to expose len(), so iterate explicitly
    nframes = 0
    for _frame in u.trajectory[teq::tsample]:
        nframes += 1
    return nframes
def hic_chipseq_r2 (hic, chipseq) :
    """
    Calculate the Pearson correlation coefficient between the row sum of the
    given Hi-C matrix and the given ChIP-seq profile.
    """
    # normalize the Hi-C row sums so they form a probability-like profile,
    # then return the squared Pearson correlation with the ChIP-seq profile
    total = float(np.sum(hic))
    profile = np.sum(hic, axis=1) / total
    r = np.corrcoef(profile, chipseq)[0, 1]
    return r * r
def ps (H) :
    """
    Calculate the normalized probability of contact between a monomer and all
    others as a function of the linear distance s.
    """
    # average each super-diagonal of H: offset k holds all pairs at linear
    # separation s = k along the chain
    n = H.shape[0]
    p = np.empty(n)
    for k in range(n):
        p[k] = np.mean(np.diagonal(H, offset=k))
    # normalize so the profile sums to one
    return p / np.sum(p)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
    """
    Calculate the relative proportion of contacts of the tracers with binding
    sites compared with non-binding sites. As usual user should supply
    equilibration time, sampling time, and contact threshold value.

    Returns the mean, over sampled frames, of (cB/cA)/(bs_n/nbs_n): a value
    above 1 means tracers contact binding sites more than expected by chance.
    """
    # select polymer, tracers, and binding sites
    polymer = sim.u.select_atoms (polymer_text)
    tracers = sim.u.select_atoms (tracers_text)
    bss = sim.u.select_atoms (bindingsites_text)
    # select binding site indices
    bs_n = bss.n_atoms
    bs_idx = bss.indices
    # select non-binding site indices (polymer atoms that are not binding sites)
    polymer_idx = polymer.indices
    nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
    nbs_n = nbs_idx.size
    # evaluate contacts with binding sites and non-binding sites for each
    # independent simulation snapshot
    c = []
    for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
        # rows of d are polymer monomers, columns are tracers
        d = distance_array (polymer.positions,tracers.positions,
                            box=ts.dimensions)
        contacts = d<threshold
        cB = np.sum (contacts[bs_idx]).astype('float')
        cA = np.sum (contacts[nbs_idx]).astype('float')
        # skip frames with zero non-binding-site contacts to avoid 0-division
        if cA != 0 :
            c.append ((cB/cA) / (float(bs_n)/nbs_n))
    # NOTE(review): if no frame had cA != 0, c is empty and np.mean returns
    # nan (with a RuntimeWarning) — confirm callers handle that case
    return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
    """
    Perform a simple fit of the supplied time-dependent MSD, using a linear
    regression of the logarithms of the values. User must supply the conversion
    factor from time to real time and from length to real length. Also, user
    must supply the cutoff value: from there on the values will be considered.
    This is because the long-time behaviour is generally what matters really.

    Returns (a, da, D, dD): the log-log slope with its error, and the
    diffusion-like coefficient with its error.
    """
    # prepare the values to fit: exclude the first value because it is zero
    t = np.arange(msd.size)*delta_t
    x = np.log(t[cutoff:])
    y = np.log(msd[cutoff:]*scale_l**2)
    # perform fit to y = ax + b with their errors
    b,a,db,da = mbt.linear_regression (x,y,0.99)
    # now convert the value of b into a diffusion coefficient
    # (MSD = 6 D t in 3D, so D = exp(b)/6 when the exponent a is ~1)
    D = np.exp(b)/6.0
    # NOTE(review): standard error propagation for D = exp(b)/6 would give
    # dD = D*db, not exp(db)/6 — confirm against mbt.linear_regression's
    # convention before relying on dD
    dD = np.exp(db)/6.0
    return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
    """
    Calculate the mean square displacement of the particles defined by
    'particles_text' in simulation sim, using sampling tsample and equilibration
    time teq. Returns the matrix corresponding to the mean square displacement
    of each particle, along with a matrix corresponding to the variance in the
    estimate of this quantity.

    Both returned matrices have shape (nparticles, Nt) with Nt = nslice//2:
    row p, column k holds the MSD of particle p at time delay k+1, averaged
    over Nt starting times.
    """
    u = sim.u
    particles = u.select_atoms (particles_text)
    nparticles = particles.n_atoms
    nslice = traj_nslice (u,teq,tsample)
    # initialize the matrix containing all the positions
    # of the particles at all the sampling frames
    particles_pos = np.zeros ((nslice,nparticles,3))
    for i,ts in enumerate(u.trajectory[teq::tsample]) :
        particles_pos[i,:,:] = particles.positions
    # now initialize the Delta matrix, which contains the
    # squared differences between the particles' positions
    # at different time delays
    # Nt start times x Nt delays: largest index used is t0+delay <= 2*Nt-1,
    # which stays within the nslice stored frames
    Nt = int(nslice/2)
    Delta = np.zeros((nparticles,Nt,Nt))
    for delay in xrange(1,Nt+1) :
        for t0 in xrange (Nt) :
            t1 = t0 + delay
            pos1 = particles_pos[t1,:,:]
            pos0 = particles_pos[t0,:,:]
            Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)
    # return the matrices of MSD and its variance
    # (mean/variance taken over the Nt overlapping starting times)
    # NOTE(review): positions are taken as stored — if coordinates are wrapped
    # into the periodic box, displacements across the boundary are wrong;
    # confirm the trajectory holds unwrapped coordinates
    return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
    """
    Calculate the minimum distance between the atoms defined in sel1 and the
    atoms defined in sel2, as a function of time. Returns a matrix that contains
    the minimum distance for each atom defined in sel1. As usual user should
    supply equilibration time, sampling time, and contact threshold value.

    Returned shape is (n_atoms_in_sel1, nframes): entry (a, i) is the distance
    from atom a of sel1 to its nearest sel2 atom at sampled frame i.
    """
    # define atom selections
    sel1 = sim.u.select_atoms (sel1_text)
    sel2 = sim.u.select_atoms (sel2_text)
    # get number of atoms in selection 1
    natoms = sel1.n_atoms
    nslice = traj_nslice (sim.u,teq,tsample)
    dmin = np.zeros((natoms,nslice))
    for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
        # periodic-image-aware distances between the two selections
        d = distance_array (sel1.positions,sel2.positions,
                            box=ts.dimensions)
        # nearest sel2 atom for each sel1 atom at this frame
        dmin[:,i] = d.min(axis=1)
    return dmin
def particle_images (sim,frame_id) :
    """
    Get the image index of all particles in simulation, at the frame 'frame_id'

    Returns an (natoms, 3) array of integer-valued floats: the periodic image
    each particle sits in along x, y, z.
    """
    # get positions of all particles: define first the atom selection, then jump to
    # the user-requested trajectory frame, get the box dimensions (currently works
    # only for orthorhombic boxes, then calculate the image indices
    atoms = sim.u.select_atoms ('all')
    ts = sim.u.trajectory[frame_id]
    # first three entries of ts.dimensions are the box edge lengths
    L = ts.dimensions[:3]
    # shift so the box spans [0, L) before taking the floor division
    pos = atoms.positions + L/2.
    return pos//L
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
    """
    Calculate the matrix that represents the number of times that the tracers
    (defined by 'tracer_text') jump from one site to another site of the polymer
    (defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
    excluding the first 'teq' time frames. Contact between a tracer and the
    polymer is defined by the distance being smaller than 'threshold'.

    Returns an (n_polymer, n_polymer) integer matrix J where J[j, k] counts
    transitions of any tracer from contact with monomer k (previous sample)
    to contact with monomer j (current sample).
    """
    # define polymer and tracers
    u = sim.u
    polymer = u.select_atoms(polymer_text)
    tracers = u.select_atoms(tracer_text)
    n_polymer = polymer.n_atoms
    n_tracers = tracers.n_atoms
    # initialize jumping matrix and first distance matrix d_prev
    J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
    ts = u.trajectory [teq]
    d_prev = distance_array (polymer.positions,tracers.positions,
                             box=ts.dimensions)
    D_prev = d_prev<threshold
    for ts in u.trajectory [teq::tsample] :
        # get distance matrix at current time step
        d_next = distance_array (polymer.positions,tracers.positions,
                                 box=ts.dimensions)
        D_next = d_next<threshold
        # get jumps of all tracers and add it to the jumping matrix:
        # outer product of the previous-contact row vector with the
        # current-contact column vector marks all (from, to) monomer pairs
        for i in xrange (n_tracers) :
            t_prev = D_prev [:,i]
            t_next = D_next [:,i].reshape ((n_polymer,1))
            t = t_prev * t_next
            J += t
        D_prev = D_next.copy()
    # NOTE(review): the first sampled frame is compared against itself (the
    # loop starts at teq, which is also the D_prev frame), so self-contacts at
    # teq are counted once — confirm this is intended
    return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
    """
    For the simulation 'sim', calculate the matrix of binding events of the
    polymer and the tracers. Returns a contacts matrix of the shape
    (ntracers,nslice,npolymer).

    Entry (t, i, m) is True when tracer t is within 'threshold' of polymer
    monomer m at sampled frame i.
    """
    u = sim.u
    polymer = u.select_atoms (polymer_text)
    tracers = u.select_atoms (tracer_text)
    ntracers = tracers.n_atoms
    npolymer = polymer.n_atoms
    nslice = mbt.traj_nslice(u,teq,tsample)
    # boolean tensor keeps memory usage low for long trajectories
    C = np.zeros((ntracers,nslice,npolymer),dtype=bool)
    for i,ts in enumerate(u.trajectory [teq::tsample]) :
        d = distance_array (tracers.positions,polymer.positions,
                            box=ts.dimensions)
        c = d<threshold
        C[:,i,:] = c
    return C
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :
    """
    Calculate the matrix of average intra-polymer distances. User must supply
    the parameters teq, tsample and threshold.

    Returns an (N, N) matrix of time-averaged pairwise distances between
    polymer monomers.
    """
    # NOTE(review): the 'threshold' parameter is never used in this function —
    # it is kept for signature compatibility; confirm whether it can be removed
    u = sim.u
    polymer = u.select_atoms (polymer_text)
    N = polymer.n_atoms
    nslice = mbt.traj_nslice (u,teq,tsample)
    d = np.zeros((N,N))
    for i,ts in enumerate(u.trajectory[teq::tsample]) :
        this_d = distance_array(polymer.positions,
                                polymer.positions,
                                box=ts.dimensions)
        # running average over frames (avoids storing all distance matrices)
        d = mbt.new_average(i,d,this_d)
    return d
def DKL_t (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
    """
    Calculate the Kullback-Leibler divergence, per sampled frame, between the
    cumulated virtual ChIP-seq profile of the tracers and the cumulated
    virtual Hi-C row-sum profile of the polymer.

    p_threshold is the polymer-polymer contact cutoff, t_threshold the
    polymer-tracer contact cutoff. Returns a vector of length nframes.
    """
    # define DKL(t) vector
    nframes = traj_nslice(sim.u,teq,tsample)
    DKL_t = np.zeros(nframes)
    # define polymer and tracers
    polymer = sim.u.select_atoms(polymer_text)
    tracers = sim.u.select_atoms(tracer_text)
    N = polymer.n_atoms
    ntracers = tracers.n_atoms
    # init H and C vectors (accumulated over all frames seen so far)
    H = np.zeros((N,N))
    C = np.zeros((N,ntracers))
    # analyze all simulation frames as decided
    for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
        # calculate Hi-C at this time frame (cumulative contact counts)
        d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
        H += (d<p_threshold)
        Rt = H.sum(axis=1)
        # calculate ChIP-seq at this time frame (cumulative contact counts)
        c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
        C += (c<t_threshold)
        Ct = C.sum(axis=1)
        # divergence between the two cumulated profiles up to frame i
        DKL_t[i] = mbt.KL_divergence(Ct,Rt)
    return DKL_t
def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
    """
    This function does the complete analysis of the tracers in the simulation.
    It calculates the virtual Hi-C, virtual ChIP-seq, Kullback-Leibler
    divergence between the two profiles as a function of time, and coverage of
    the tracers.

    Returns (DKL_t, H, Ct, coverage): KL divergence per sampled frame, the
    cumulated integer contact-count matrix H, the final cumulated ChIP-seq
    profile Ct (int64), and the per-tracer coverage fraction (how many polymer
    monomers each tracer touched at least once, divided by N).
    """
    # define DKL(t) vector
    nframes = traj_nslice(sim.u,teq,tsample)
    DKL_t = np.zeros(nframes)
    # define polymer and tracers
    polymer = sim.u.select_atoms(polymer_text)
    tracers = sim.u.select_atoms(tracer_text)
    N = polymer.n_atoms
    ntracers = tracers.n_atoms
    # init H and C vectors (integer counters, accumulated over all frames)
    H = np.zeros((N,N),dtype=np.int32)
    C = np.zeros((N,ntracers),dtype=np.int32)
    # analyze all simulation frames as decided
    for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
        # calculate Hi-C at this time frame (cumulative contact counts)
        d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)
        H += (d<p_threshold)
        Rt = H.sum(axis=1)
        # calculate ChIP-seq at this time frame (cumulative contact counts)
        c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)
        C += (c<t_threshold)
        Ct = C.sum(axis=1)
        # divergence between the two cumulated profiles up to frame i
        DKL_t[i] = mbt.KL_divergence(Ct,Rt)
    # coverage analysis: binarize C so each (monomer, tracer) pair counts once
    C[C>1] = 1
    coverage = C.sum(axis=0).astype('float')/N
    return DKL_t,H,Ct.astype(np.int64),coverage
wikibrief.go | package wikibrief
import (
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"os"
"sync"
"time"
"github.com/RoaringBitmap/roaring"
"github.com/remeh/sizedwaitgroup"
"github.com/negapedia/wikiassignment"
"github.com/negapedia/wikibots"
"github.com/negapedia/wikidump"
"github.com/negapedia/wikipage"
errorsOnSteroids "github.com/pkg/errors"
)
//New digest the latest wikipedia dump of the specified language into the output channel.
//The revision channel of each page must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
//The ctx and fail together should behave in the same manner as if created with WithFail - https://godoc.org/github.com/ebonetti/ctxutils#WithFail
//The condition restrict restricts the digest to just one dump file, used for testing purposes.
//New digest the latest wikipedia dump of the specified language into the output channel.
//The revision channel of each page must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
//The ctx and fail together should behave in the same manner as if created with WithFail - https://godoc.org/github.com/ebonetti/ctxutils#WithFail
//The condition restrict restricts the digest to just one dump file, used for testing purposes.
func New(ctx context.Context, fail func(err error) error, tmpDir, lang string, restrict bool) <-chan EvolvingPage {
	//Default value to a closed channel, so early-error returns are safe to range over
	dummyPagesChan := make(chan EvolvingPage)
	close(dummyPagesChan)
	//Resolve bot accounts for this language edition
	ID2Bot, err := wikibots.New(ctx, lang)
	if err != nil {
		fail(err)
		return dummyPagesChan
	}
	//Locate the latest dump files needed for history digestion and topic assignment
	latestDump, err := wikidump.Latest(tmpDir, lang, "metahistory7zdump",
		"pagetable", "redirecttable", "categorylinkstable", "pagelinkstable")
	if err != nil {
		fail(err)
		return dummyPagesChan
	}
	//Build the article -> topic mapping used to filter and tag pages
	article2TopicID, err := getArticle2TopicID(ctx, tmpDir, lang)
	if err != nil {
		fail(err)
		return dummyPagesChan
	}
	simplePages := make(chan EvolvingPage, pageBufferSize)
	go func() {
		defer close(simplePages)
		//limit the number of workers to prevent system from killing 7zip instances
		wg := sizedwaitgroup.New(pageBufferSize)
		it := latestDump.Open("metahistory7zdump")
		r, err := it(ctx)
		if restrict { //Use just one dump file for testing purposes
			it = func(_ context.Context) (io.ReadCloser, error) {
				return nil, io.EOF
			}
		}
		//One goroutine per dump file: each parses its XML stream into simplePages
		for ; err == nil; r, err = it(ctx) {
			if err = wg.AddWithContext(ctx); err != nil { //AddWithContext fails only if ctx is Done
				r.Close()
				break
			}
			go func(r io.ReadCloser) {
				defer wg.Done()
				defer r.Close()
				err := run(ctx, bBase{xml.NewDecoder(r), article2TopicID, ID2Bot, simplePages, &errorContext{"", filename(r)}})
				if err != nil {
					fail(err)
				}
			}(r)
		}
		//io.EOF marks normal exhaustion of the dump-file iterator
		if err != io.EOF {
			fail(err)
		}
		wg.Wait()
	}()
	//Fill in abstracts (and filter redirects) before handing pages to the caller
	return completeInfo(ctx, fail, lang, simplePages)
}
//EvolvingPage represents a wikipedia page that is being edited. Revisions is closed when there are no more revisions.
//Revision channel must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
type EvolvingPage struct {
	PageID          uint32
	Title, Abstract string //Abstract starts empty and is filled by completeInfo
	TopicID         uint32
	Revisions       <-chan Revision //closed when there are no more revisions; must be drained
}
//Revision represents a revision of a page.
type Revision struct {
	ID, UserID uint32 //UserID is AnonimousUserID (0) for anonymous edits
	IsBot      bool   //true when the contributing user is a known bot account
	Text, SHA1 string //revision text and its SHA1 fingerprint (used for revert detection)
	IsRevert   uint32 //number of revisions undone when this revision reverts to a previous SHA1, 0 otherwise
	Timestamp  time.Time
}
//There are 4 buffers in various forms: 4*pageBufferSize is the maximum number of wikipedia pages in memory.
//Each page has a buffer of revisionBufferSize revisions: this means that at each moment there is
//a maximum of 4*pageBufferSize*revisionBufferSize page texts in memory.
const (
pageBufferSize = 40
revisionBufferSize = 300
)
//run drives the XML event loop over one dump stream, feeding pages and
//revisions into base.OutStream via the builder state machine.
func run(ctx context.Context, base bBase) (err error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	b := base.New()
	defer b.ClosePage() //Close eventually open revision channel
	var t xml.Token
	//Dispatch each XML start/end event to the current builder state; the
	//builder returns the next state or an error
	for t, err = base.Decoder.Token(); err == nil; t, err = base.Decoder.Token() {
		switch xmlEvent(t) {
		case "page start":
			b, err = b.NewPage()
		case "title start":
			b, err = b.SetPageTitle(ctx, t.(xml.StartElement))
		case "id start":
			b, err = b.SetPageID(ctx, t.(xml.StartElement))
		case "revision start":
			b, err = b.NewRevision(ctx, t.(xml.StartElement))
		case "page end":
			b, err = b.ClosePage()
		}
		if err != nil {
			break
		}
	}
	//Normalize the exit error: EOF is a clean end of stream; errors that
	//already carry a cause were wrapped by the builder and need no re-wrap
	causer, errHasCause := err.(interface{ Cause() error })
	switch {
	case err == io.EOF:
		err = nil
	case errHasCause && causer.Cause() != nil:
		//do nothing
	default:
		err = b.Wrapf(err, "Unexpected error in outer XML Decoder event loop")
	}
	return
}
//AnonimousUserID is the UserID value assumed by revisions done by an anonimous user
const AnonimousUserID uint32 = 0
var errInvalidXML = errors.New("Invalid XML")
//builder is the state-machine interface driven by run: each method consumes
//one XML event and returns the next state, or an error for invalid XML.
type builder interface {
	NewPage() (be builder, err error)                                          //<page> start
	SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) //<title>
	SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error)    //<id>
	NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error)  //<revision>
	ClosePage() (be builder, err error)                                        //</page>
	Wrapf(err error, format string, args ...interface{}) error                 //wrap err with parse context
}
/////////////////////////////////////////////////////////////////////////////////////
//bBase is the base state builder
//bBase is the base state builder: no <page> element is currently open.
//All other states embed it (directly or transitively) and share its fields.
type bBase struct {
	Decoder         *xml.Decoder
	Article2TopicID func(articleID uint32) (topicID uint32, ok bool) //article filter and topic tagger
	ID2Bot          func(userID uint32) (username string, ok bool)   //bot lookup by user ID
	OutStream       chan<- EvolvingPage
	ErrorContext    *errorContext //shared across states, used only for error reporting
}

//New returns a fresh base-state builder sharing bs's dependencies.
func (bs *bBase) New() builder {
	be := bBase(*bs)
	return &be
}

//NewPage transitions to the "page opened" state.
func (bs *bBase) NewPage() (be builder, err error) {
	be = &bStarted{*bs}
	return
}

//SetPageTitle: invalid here — <title> may only appear inside <page>.
func (bs *bBase) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"title\")")
	return
}

//SetPageID: invalid here — <id> may only appear inside <page>.
func (bs *bBase) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"id\")")
	return
}

//NewRevision: invalid here — <revision> may only appear inside <page>.
func (bs *bBase) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"revision\")")
	return
}

//ClosePage: invalid here — </page> without a matching start.
func (bs *bBase) ClosePage() (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" start before end)")
	return
}

//Wrapf annotates err with the format message and the current parse context.
func (bs *bBase) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bStarted is the state of the builder in which a new page start has been found
//bStarted is the state of the builder in which a new page start has been found
//but no <title> yet.
type bStarted struct {
	bBase
}

//NewPage: invalid — <page> elements do not nest.
func (bs *bStarted) NewPage() (be builder, err error) { //no page nesting
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

//SetPageTitle decodes the <title> element and transitions to the titled state.
func (bs *bStarted) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	var title string
	if err = bs.Decoder.DecodeElement(&title, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding the title of a page")
		return
	}
	bs.ErrorContext.LastTitle = title //used for error reporting purposes
	be = &bTitled{
		bStarted: *bs,
		Title:    title,
	}
	return
}

//SetPageID: invalid — <title> must come before <id>.
func (bs *bStarted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

//AddRevision: NOTE(review) — this method is not part of the builder interface
//(which declares NewRevision), so it is never called through the state machine;
//a <revision> in this state falls back to bBase.NewRevision instead. Confirm
//whether this was meant to be NewRevision.
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

//ClosePage: invalid — the page ended before its obligatory <title>.
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

//Wrapf annotates err with the format message and the current parse context.
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
//bTitled is the state of the builder in which has been set a title for the page
//but no page ID yet.
type bTitled struct {
	bStarted
	Title string
}

//Start: NOTE(review) — dead code: the builder interface declares NewPage, so
//this method is never reached through the state machine (bStarted.NewPage is
//used instead). Confirm whether it can be removed.
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

//SetPageTitle: invalid — a page may carry only one <title>.
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
	return
}

//SetPageID decodes the page <id>. If the page is a known article it emits an
//EvolvingPage on OutStream and transitions to the "set" state; otherwise the
//whole page element is skipped and parsing restarts from the base state.
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	var pageID uint32
	if err = bs.Decoder.DecodeElement(&pageID, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding page ID")
		return
	}
	if topicID, ok := bs.Article2TopicID(pageID); ok {
		revisions := make(chan Revision, revisionBufferSize)
		select {
		case <-ctx.Done():
			err = bs.Wrapf(ctx.Err(), "Context cancelled")
			return
		case bs.OutStream <- EvolvingPage{pageID, bs.Title, "", topicID, revisions}: //Use empty abstract, later filled by completeInfo
			be = &bSetted{
				bTitled:       *bs,
				Revisions:     revisions,
				SHA12SerialID: map[string]uint32{},
			}
			return
		}
	}
	//Not an article of interest: skip the rest of this <page> element
	if err = bs.Decoder.Skip(); err != nil {
		err = bs.Wrapf(err, "Error while skipping page")
		return
	}
	be = bs.New()
	return
}

//NewRevision: invalid — the page <id> must precede any <revision>.
func (bs *bTitled) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "id"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page revision without finding previous page ID)")
	return
}

//ClosePage: invalid — the page ended before its obligatory <id>.
func (bs *bTitled) ClosePage() (be builder, err error) { //no obligatory element "id"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page end without finding previous page ID)")
	return
}

//Wrapf annotates err with the format message and the current parse context.
func (bs *bTitled) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bSetted is the state of the builder in which has been set a page ID for the page
//bSetted is the state of the builder in which has been set a page ID for the
//page: revisions are now decoded and streamed to Revisions.
type bSetted struct {
	bTitled
	Revisions     chan Revision
	RevisionCount uint32
	SHA12SerialID map[string]uint32 //maps a revision SHA1 to its serial position, for revert detection
}

//NewPage: invalid — <page> elements do not nest; close the revision stream first.
func (bs *bSetted) NewPage() (be builder, err error) { //no page nesting
	close(bs.Revisions)
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

//SetPageID: invalid — a page may carry only one <id>.
func (bs *bSetted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	close(bs.Revisions)
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two ids)")
	return
}

//NewRevision decodes one <revision> element, computes revert information and
//bot status, and sends the Revision on the page's channel.
func (bs *bSetted) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
	defer func() {
		if err != nil {
			close(bs.Revisions)
		}
	}()
	//parse revision
	var r revision
	if err = bs.Decoder.DecodeElement(&r, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding the %vth revision", bs.RevisionCount+1)
		return
	}
	//Calculate reverts: a repeated SHA1 means this revision restores an older
	//text; IsRevert counts the revisions undone in between. Note the
	//fallthrough: a recognized revert always refreshes the SHA1's position,
	//while new SHA1s are recorded only when they look valid (31 chars).
	serialID, IsRevert := bs.RevisionCount, uint32(0)
	oldSerialID, isRevert := bs.SHA12SerialID[r.SHA1]
	switch {
	case isRevert:
		IsRevert = serialID - (oldSerialID + 1)
		fallthrough
	case len(r.SHA1) == 31:
		bs.SHA12SerialID[r.SHA1] = serialID
	}
	//convert time
	const layout = "2006-01-02T15:04:05Z"
	timestamp, err := time.Parse(layout, r.Timestamp)
	if err != nil {
		err = bs.Wrapf(err, "Error while decoding the timestamp %s of %vth revision", r.Timestamp, bs.RevisionCount+1)
		return
	}
	r.Timestamp = "" //presumably dropped to release the raw string early — confirm
	//Check if userID represents bot
	_, isBot := bs.ID2Bot(r.UserID)
	bs.RevisionCount++
	select {
	case <-ctx.Done():
		err = bs.Wrapf(ctx.Err(), "Context cancelled")
	case bs.Revisions <- Revision{r.ID, r.UserID, isBot, r.Text, r.SHA1, IsRevert, timestamp}:
		be = bs
	}
	return
}

//ClosePage ends the revision stream and returns to the base state.
func (bs *bSetted) ClosePage() (be builder, err error) {
	close(bs.Revisions)
	be = bs.New()
	return
}
// A page revision.
type revision struct {
ID uint32 `xml:"id"`
Timestamp string `xml:"timestamp"`
UserID uint32 `xml:"contributor>id"`
Text string `xml:"text"`
SHA1 string `xml:"sha1"`
//converted data
timestamp time.Time
}
func xmlEvent(t xml.Token) string {
switch elem := t.(type) {
case xml.StartElement:
return elem.Name.Local + " start"
case xml.EndElement:
return elem.Name.Local + " end"
default:
return ""
}
}
//errorContext carries the parse position used to annotate errors.
type errorContext struct {
	LastTitle string //used for error reporting purposes
	Filename  string //used for error reporting purposes
}

//String renders the context for error messages; it also stats the dump file
//to flag the (surprising) case where it has disappeared.
func (ec errorContext) String() string {
	report := fmt.Sprintf("last title %v in \"%s\"", ec.LastTitle, ec.Filename)
	if _, err := os.Stat(ec.Filename); os.IsNotExist(err) {
		report += " - WARNING: file not found!"
	}
	return report
}
func filename(r io.Reader) (filename string) {
if namer, ok := r.(interface{ Name() string }); ok {
filename = namer.Name()
}
return
}
//getArticle2TopicID builds a lookup from article page ID to topic ID, using
//the wikiassignment data for the given language. Pages outside the article
//namespace are filtered out of the mapping.
func getArticle2TopicID(ctx context.Context, tmpDir, lang string) (article2TopicID func(uint32) (uint32, bool), err error) {
	article2Topic, namespaces, err := wikiassignment.From(ctx, tmpDir, lang)
	if err != nil {
		return
	}
	//Filter out non articles
	articlesIDS := roaring.BitmapOf(namespaces.Articles...)
	for pageID := range article2Topic {
		if !articlesIDS.Contains(pageID) {
			delete(article2Topic, pageID)
		}
	}
	//The closure captures the filtered map; ok is false for unknown pages
	return func(articleID uint32) (topicID uint32, ok bool) {
		topicID, ok = article2Topic[articleID]
		return
	}, nil
}
//completeInfo enriches each incoming page with its abstract fetched from the
//wikipedia API, filtering out redirects and pages whose summary query fails.
//Filtered pages have their revision channels drained so producers don't block.
func completeInfo(ctx context.Context, fail func(err error) error, lang string, pages <-chan EvolvingPage) <-chan EvolvingPage {
	results := make(chan EvolvingPage, pageBufferSize)
	go func() {
		defer close(results)
		wikiPage := wikipage.New(lang)
		wg := sync.WaitGroup{}
		//pageBufferSize concurrent workers share the input channel
		for i := 0; i < pageBufferSize; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
			loop:
				for p := range pages {
					//generous 6h timeout: the API query is the bottleneck
					timeoutCtx, cancel := context.WithTimeout(ctx, 6*time.Hour)
					wp, err := wikiPage.From(timeoutCtx, p.Title) //bottle neck: query to wikipedia api for each page
					cancel()
					switch {
					case err != nil: //Querying the summary returns an error, so the article should be filtered
						fallthrough
					case p.PageID != wp.ID: //It's a redirect, so it should be filtered
						emptyRevisions(p.Revisions, &wg)
						continue loop
					}
					p.Abstract = wp.Abstract
					select {
					case results <- p:
						//proceed
					case <-ctx.Done():
						return
					}
				}
			}()
		}
		wg.Wait()
	}()
	return results
}
//Empty concurrently revision channel: wait goroutine so that if some error arises is caught by fail
func emptyRevisions(revisions <-chan Revision, wg *sync.WaitGroup) | {
wg.Add(1)
go func() {
defer wg.Done()
for range revisions {
//skip
}
}()
} | identifier_body | |
wikibrief.go | package wikibrief
import (
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"os"
"sync"
"time"
"github.com/RoaringBitmap/roaring"
"github.com/remeh/sizedwaitgroup"
"github.com/negapedia/wikiassignment"
"github.com/negapedia/wikibots"
"github.com/negapedia/wikidump"
"github.com/negapedia/wikipage"
errorsOnSteroids "github.com/pkg/errors"
)
//New digest the latest wikipedia dump of the specified language into the output channel.
//The revision channel of each page must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
//The ctx and fail together should behave in the same manner as if created with WithFail - https://godoc.org/github.com/ebonetti/ctxutils#WithFail
//The condition restrict restricts the digest to just one dump file, used for testing purposes.
func New(ctx context.Context, fail func(err error) error, tmpDir, lang string, restrict bool) <-chan EvolvingPage {
	//Default value to a closed channel, so early-error returns are safe to range over
	dummyPagesChan := make(chan EvolvingPage)
	close(dummyPagesChan)
	//Resolve bot accounts for this language edition
	ID2Bot, err := wikibots.New(ctx, lang)
	if err != nil {
		fail(err)
		return dummyPagesChan
	}
	//Locate the latest dump files needed for history digestion and topic assignment
	latestDump, err := wikidump.Latest(tmpDir, lang, "metahistory7zdump",
		"pagetable", "redirecttable", "categorylinkstable", "pagelinkstable")
	if err != nil {
		fail(err)
		return dummyPagesChan
	}
	//Build the article -> topic mapping used to filter and tag pages
	article2TopicID, err := getArticle2TopicID(ctx, tmpDir, lang)
	if err != nil {
		fail(err)
		return dummyPagesChan
	}
	simplePages := make(chan EvolvingPage, pageBufferSize)
	go func() {
		defer close(simplePages)
		//limit the number of workers to prevent system from killing 7zip instances
		wg := sizedwaitgroup.New(pageBufferSize)
		it := latestDump.Open("metahistory7zdump")
		r, err := it(ctx)
		if restrict { //Use just one dump file for testing purposes
			it = func(_ context.Context) (io.ReadCloser, error) {
				return nil, io.EOF
			}
		}
		//One goroutine per dump file: each parses its XML stream into simplePages
		for ; err == nil; r, err = it(ctx) {
			if err = wg.AddWithContext(ctx); err != nil { //AddWithContext fails only if ctx is Done
				r.Close()
				break
			}
			go func(r io.ReadCloser) {
				defer wg.Done()
				defer r.Close()
				err := run(ctx, bBase{xml.NewDecoder(r), article2TopicID, ID2Bot, simplePages, &errorContext{"", filename(r)}})
				if err != nil {
					fail(err)
				}
			}(r)
		}
		//io.EOF marks normal exhaustion of the dump-file iterator
		if err != io.EOF {
			fail(err)
		}
		wg.Wait()
	}()
	//Fill in abstracts (and filter redirects) before handing pages to the caller
	return completeInfo(ctx, fail, lang, simplePages)
}
//EvolvingPage represents a wikipedia page that is being edited. Revisions is closed when there are no more revisions.
//Revision channel must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
type EvolvingPage struct {
	PageID          uint32
	Title, Abstract string //Abstract starts empty and is filled by completeInfo
	TopicID         uint32
	Revisions       <-chan Revision //closed when there are no more revisions; must be drained
}
//Revision represents a revision of a page.
//Revision represents a revision of a page.
type Revision struct {
	ID, UserID uint32 //UserID is AnonimousUserID (0) for anonymous edits
	IsBot      bool   //true when the contributing user is a known bot account
	Text, SHA1 string //revision text and its SHA1 fingerprint (used for revert detection)
	IsRevert   uint32 //number of revisions undone when this revision reverts to a previous SHA1, 0 otherwise
	Timestamp  time.Time
}
//There are 4 buffers in various forms: 4*pageBufferSize is the maximum number of wikipedia pages in memory.
//Each page has a buffer of revisionBufferSize revisions: this means that at each moment there is
//a maximum of 4*pageBufferSize*revisionBufferSize page texts in memory.
const (
pageBufferSize = 40
revisionBufferSize = 300
)
//run drives the XML event loop over one dump stream, feeding pages and
//revisions into base.OutStream via the builder state machine.
func run(ctx context.Context, base bBase) (err error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	b := base.New()
	defer b.ClosePage() //Close eventually open revision channel
	var t xml.Token
	//Dispatch each XML start/end event to the current builder state
	for t, err = base.Decoder.Token(); err == nil; t, err = base.Decoder.Token() {
		switch xmlEvent(t) {
		case "page start":
			b, err = b.NewPage()
		case "title start":
			b, err = b.SetPageTitle(ctx, t.(xml.StartElement))
		case "id start":
			b, err = b.SetPageID(ctx, t.(xml.StartElement))
		case "revision start":
			b, err = b.NewRevision(ctx, t.(xml.StartElement))
		case "page end":
			b, err = b.ClosePage()
		}
		if err != nil {
			break
		}
	}
	//Normalize the exit error: EOF is a clean end of stream; errors that
	//already carry a cause were wrapped by the builder and need no re-wrap
	causer, errHasCause := err.(interface{ Cause() error })
	switch {
	case err == io.EOF:
		err = nil
	case errHasCause && causer.Cause() != nil:
		//do nothing
	default:
		err = b.Wrapf(err, "Unexpected error in outer XML Decoder event loop")
	}
	return
}
//AnonimousUserID is the UserID value assumed by revisions done by an anonimous user
const AnonimousUserID uint32 = 0
var errInvalidXML = errors.New("Invalid XML")
//builder is the state-machine interface driven by run: each method consumes
//one XML event and returns the next state, or an error for invalid XML.
type builder interface {
	NewPage() (be builder, err error)                                          //<page> start
	SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) //<title>
	SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error)    //<id>
	NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error)  //<revision>
	ClosePage() (be builder, err error)                                        //</page>
	Wrapf(err error, format string, args ...interface{}) error                 //wrap err with parse context
}
/////////////////////////////////////////////////////////////////////////////////////
//bBase is the base state builder
//bBase is the base state builder: no <page> element is currently open.
//All other states embed it (directly or transitively) and share its fields.
type bBase struct {
	Decoder         *xml.Decoder
	Article2TopicID func(articleID uint32) (topicID uint32, ok bool) //article filter and topic tagger
	ID2Bot          func(userID uint32) (username string, ok bool)   //bot lookup by user ID
	OutStream       chan<- EvolvingPage
	ErrorContext    *errorContext //shared across states, used only for error reporting
}

//New returns a fresh base-state builder sharing bs's dependencies.
func (bs *bBase) New() builder {
	be := bBase(*bs)
	return &be
}

//NewPage transitions to the "page opened" state.
func (bs *bBase) NewPage() (be builder, err error) {
	be = &bStarted{*bs}
	return
}

//SetPageTitle: invalid here — <title> may only appear inside <page>.
func (bs *bBase) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"title\")")
	return
}

//SetPageID: invalid here — <id> may only appear inside <page>.
func (bs *bBase) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"id\")")
	return
}

//NewRevision: invalid here — <revision> may only appear inside <page>.
func (bs *bBase) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"revision\")")
	return
}

//ClosePage: invalid here — </page> without a matching start.
func (bs *bBase) ClosePage() (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" start before end)")
	return
}

//Wrapf annotates err with the format message and the current parse context.
func (bs *bBase) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bStarted is the state of the builder in which a new page start has been found
//bStarted is the state of the builder in which a new page start has been found
//but no <title> yet.
type bStarted struct {
	bBase
}

//NewPage: invalid — <page> elements do not nest.
func (bs *bStarted) NewPage() (be builder, err error) { //no page nesting
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

//SetPageTitle decodes the <title> element and transitions to the titled state.
func (bs *bStarted) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	var title string
	if err = bs.Decoder.DecodeElement(&title, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding the title of a page")
		return
	}
	bs.ErrorContext.LastTitle = title //used for error reporting purposes
	be = &bTitled{
		bStarted: *bs,
		Title:    title,
	}
	return
}

//SetPageID: invalid — <title> must come before <id>.
func (bs *bStarted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

//AddRevision: NOTE(review) — not part of the builder interface (which declares
//NewRevision), so this method is never called through the state machine.
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

//ClosePage: invalid — the page ended before its obligatory <title>.
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

//Wrapf annotates err with the format message and the current parse context.
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
//bTitled is the state of the builder in which has been set a title for the page
//but no page ID yet.
type bTitled struct {
	bStarted
	Title string
}

//Start: NOTE(review) — dead code: the builder interface declares NewPage, so
//this method is never reached through the state machine.
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

//SetPageTitle: invalid — a page may carry only one <title>.
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
	return
}

//SetPageID decodes the page <id>. If the page is a known article it emits an
//EvolvingPage on OutStream and transitions to the "set" state; otherwise the
//whole page element is skipped and parsing restarts from the base state.
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	var pageID uint32
	if err = bs.Decoder.DecodeElement(&pageID, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding page ID")
		return
	}
	if topicID, ok := bs.Article2TopicID(pageID); ok {
		revisions := make(chan Revision, revisionBufferSize)
		select {
		case <-ctx.Done():
			err = bs.Wrapf(ctx.Err(), "Context cancelled")
			return
		case bs.OutStream <- EvolvingPage{pageID, bs.Title, "", topicID, revisions}: //Use empty abstract, later filled by completeInfo
			be = &bSetted{
				bTitled:       *bs,
				Revisions:     revisions,
				SHA12SerialID: map[string]uint32{},
			}
			return
		}
	}
	//Not an article of interest: skip the rest of this <page> element
	if err = bs.Decoder.Skip(); err != nil {
		err = bs.Wrapf(err, "Error while skipping page")
		return
	}
	be = bs.New()
	return
}

//NewRevision: invalid — the page <id> must precede any <revision>.
func (bs *bTitled) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "id"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page revision without finding previous page ID)")
	return
}

//ClosePage: invalid — the page ended before its obligatory <id>.
func (bs *bTitled) ClosePage() (be builder, err error) { //no obligatory element "id"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page end without finding previous page ID)")
	return
}

//Wrapf annotates err with the format message and the current parse context.
func (bs *bTitled) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bSetted is the state of the builder in which has been set a page ID for the page
type bSetted struct {
bTitled
Revisions chan Revision
RevisionCount uint32
SHA12SerialID map[string]uint32
}
func (bs *bSetted) NewPage() (be builder, err error) { //no page nesting
close(bs.Revisions)
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bSetted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
close(bs.Revisions)
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two ids)")
return
}
func (bs *bSetted) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
defer func() {
if err != nil {
close(bs.Revisions)
}
}()
//parse revision
var r revision
if err = bs.Decoder.DecodeElement(&r, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding the %vth revision", bs.RevisionCount+1)
return
}
//Calculate reverts
serialID, IsRevert := bs.RevisionCount, uint32(0)
oldSerialID, isRevert := bs.SHA12SerialID[r.SHA1]
switch {
case isRevert:
IsRevert = serialID - (oldSerialID + 1)
fallthrough
case len(r.SHA1) == 31:
bs.SHA12SerialID[r.SHA1] = serialID
}
//convert time
const layout = "2006-01-02T15:04:05Z"
timestamp, err := time.Parse(layout, r.Timestamp)
if err != nil {
err = bs.Wrapf(err, "Error while decoding the timestamp %s of %vth revision", r.Timestamp, bs.RevisionCount+1)
return
}
r.Timestamp = ""
//Check if userID represents bot
_, isBot := bs.ID2Bot(r.UserID)
bs.RevisionCount++
select {
case <-ctx.Done():
err = bs.Wrapf(ctx.Err(), "Context cancelled")
case bs.Revisions <- Revision{r.ID, r.UserID, isBot, r.Text, r.SHA1, IsRevert, timestamp}:
be = bs
}
return
}
func (bs *bSetted) ClosePage() (be builder, err error) {
close(bs.Revisions)
be = bs.New()
return
}
// A page revision.
type revision struct {
ID uint32 `xml:"id"`
Timestamp string `xml:"timestamp"`
UserID uint32 `xml:"contributor>id"`
Text string `xml:"text"`
SHA1 string `xml:"sha1"`
//converted data
timestamp time.Time
}
func xmlEvent(t xml.Token) string {
switch elem := t.(type) {
case xml.StartElement:
return elem.Name.Local + " start"
case xml.EndElement:
return elem.Name.Local + " end"
default:
return ""
}
}
type errorContext struct {
LastTitle string //used for error reporting purposes
Filename string //used for error reporting purposes
}
func (ec errorContext) String() string {
report := fmt.Sprintf("last title %v in \"%s\"", ec.LastTitle, ec.Filename)
if _, err := os.Stat(ec.Filename); os.IsNotExist(err) {
report += " - WARNING: file not found!"
}
return report
}
func filename(r io.Reader) (filename string) {
if namer, ok := r.(interface{ Name() string }); ok {
filename = namer.Name()
}
return
}
func getArticle2TopicID(ctx context.Context, tmpDir, lang string) (article2TopicID func(uint32) (uint32, bool), err error) {
article2Topic, namespaces, err := wikiassignment.From(ctx, tmpDir, lang)
if err != nil {
return
}
//Filter out non articles
articlesIDS := roaring.BitmapOf(namespaces.Articles...)
for pageID := range article2Topic {
if !articlesIDS.Contains(pageID) {
delete(article2Topic, pageID)
}
}
return func(articleID uint32) (topicID uint32, ok bool) {
topicID, ok = article2Topic[articleID]
return
}, nil
}
func completeInfo(ctx context.Context, fail func(err error) error, lang string, pages <-chan EvolvingPage) <-chan EvolvingPage {
results := make(chan EvolvingPage, pageBufferSize)
go func() {
defer close(results)
wikiPage := wikipage.New(lang)
wg := sync.WaitGroup{}
for i := 0; i < pageBufferSize; i++ {
wg.Add(1)
go func() {
defer wg.Done()
loop:
for p := range pages {
timeoutCtx, cancel := context.WithTimeout(ctx, 6*time.Hour)
wp, err := wikiPage.From(timeoutCtx, p.Title) //bottle neck: query to wikipedia api for each page
cancel()
switch {
case err != nil: //Querying the summary returns an error, so the article should be filtered
fallthrough
case p.PageID != wp.ID: //It's a redirect, so it should be filtered
emptyRevisions(p.Revisions, &wg)
continue loop
}
p.Abstract = wp.Abstract
select {
case results <- p:
//proceed
case <-ctx.Done():
return
}
}
}()
}
wg.Wait()
}()
return results
}
//Empty concurrently revision channel: wait goroutine so that if some error arises is caught by fail
func emptyRevisions(revisions <-chan Revision, wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
for range revisions {
//skip
}
}()
} | Text, SHA1 string
IsRevert uint32
Timestamp time.Time
} | random_line_split |
wikibrief.go | package wikibrief
import (
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"os"
"sync"
"time"
"github.com/RoaringBitmap/roaring"
"github.com/remeh/sizedwaitgroup"
"github.com/negapedia/wikiassignment"
"github.com/negapedia/wikibots"
"github.com/negapedia/wikidump"
"github.com/negapedia/wikipage"
errorsOnSteroids "github.com/pkg/errors"
)
//New digest the latest wikipedia dump of the specified language into the output channel.
//The revision channel of each page must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
//The ctx and fail together should behave in the same manner as if created with WithFail - https://godoc.org/github.com/ebonetti/ctxutils#WithFail
//The condition restrict restricts the digest to just one dump file, used for testing purposes.
func New(ctx context.Context, fail func(err error) error, tmpDir, lang string, restrict bool) <-chan EvolvingPage {
//Default value to a closed channel
dummyPagesChan := make(chan EvolvingPage)
close(dummyPagesChan)
ID2Bot, err := wikibots.New(ctx, lang)
if err != nil {
fail(err)
return dummyPagesChan
}
latestDump, err := wikidump.Latest(tmpDir, lang, "metahistory7zdump",
"pagetable", "redirecttable", "categorylinkstable", "pagelinkstable")
if err != nil {
fail(err)
return dummyPagesChan
}
article2TopicID, err := getArticle2TopicID(ctx, tmpDir, lang)
if err != nil {
fail(err)
return dummyPagesChan
}
simplePages := make(chan EvolvingPage, pageBufferSize)
go func() {
defer close(simplePages)
//limit the number of workers to prevent system from killing 7zip instances
wg := sizedwaitgroup.New(pageBufferSize)
it := latestDump.Open("metahistory7zdump")
r, err := it(ctx)
if restrict { //Use just one dump file for testing purposes
it = func(_ context.Context) (io.ReadCloser, error) {
return nil, io.EOF
}
}
for ; err == nil; r, err = it(ctx) {
if err = wg.AddWithContext(ctx); err != nil { //AddWithContext fails only if ctx is Done
r.Close()
break
}
go func(r io.ReadCloser) {
defer wg.Done()
defer r.Close()
err := run(ctx, bBase{xml.NewDecoder(r), article2TopicID, ID2Bot, simplePages, &errorContext{"", filename(r)}})
if err != nil {
fail(err)
}
}(r)
}
if err != io.EOF {
fail(err)
}
wg.Wait()
}()
return completeInfo(ctx, fail, lang, simplePages)
}
//EvolvingPage represents a wikipedia page that is being edited. Revisions is closed when there are no more revisions.
//Revision channel must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
type EvolvingPage struct {
PageID uint32
Title, Abstract string
TopicID uint32
Revisions <-chan Revision
}
//Revision represents a revision of a page.
type Revision struct {
ID, UserID uint32
IsBot bool
Text, SHA1 string
IsRevert uint32
Timestamp time.Time
}
//There are 4 buffers in various forms: 4*pageBufferSize is the maximum number of wikipedia pages in memory.
//Each page has a buffer of revisionBufferSize revisions: this means that at each moment there is
//a maximum of 4*pageBufferSize*revisionBufferSize page texts in memory.
const (
pageBufferSize = 40
revisionBufferSize = 300
)
func run(ctx context.Context, base bBase) (err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
b := base.New()
defer b.ClosePage() //Close eventually open revision channel
var t xml.Token
for t, err = base.Decoder.Token(); err == nil; t, err = base.Decoder.Token() {
switch xmlEvent(t) {
case "page start":
b, err = b.NewPage()
case "title start":
b, err = b.SetPageTitle(ctx, t.(xml.StartElement))
case "id start":
b, err = b.SetPageID(ctx, t.(xml.StartElement))
case "revision start":
b, err = b.NewRevision(ctx, t.(xml.StartElement))
case "page end":
b, err = b.ClosePage()
}
if err != nil {
break
}
}
causer, errHasCause := err.(interface{ Cause() error })
switch {
case err == io.EOF:
err = nil
case errHasCause && causer.Cause() != nil:
//do nothing
default:
err = b.Wrapf(err, "Unexpected error in outer XML Decoder event loop")
}
return
}
//AnonimousUserID is the UserID value assumed by revisions done by an anonimous user
const AnonimousUserID uint32 = 0
var errInvalidXML = errors.New("Invalid XML")
type builder interface {
NewPage() (be builder, err error)
SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error)
SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error)
NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error)
ClosePage() (be builder, err error)
Wrapf(err error, format string, args ...interface{}) error
}
/////////////////////////////////////////////////////////////////////////////////////
//bBase is the base state builder
type bBase struct {
Decoder *xml.Decoder
Article2TopicID func(articleID uint32) (topicID uint32, ok bool)
ID2Bot func(userID uint32) (username string, ok bool)
OutStream chan<- EvolvingPage
ErrorContext *errorContext
}
func (bs *bBase) New() builder {
be := bBase(*bs)
return &be
}
func (bs *bBase) NewPage() (be builder, err error) {
be = &bStarted{*bs}
return
}
func (bs *bBase) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"title\")")
return
}
func (bs *bBase) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"id\")")
return
}
func (bs *bBase) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"revision\")")
return
}
func (bs *bBase) ClosePage() (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" start before end)")
return
}
func (bs *bBase) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bStarted is the state of the builder in which a new page start has been found
type bStarted struct {
bBase
}
func (bs *bStarted) NewPage() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bStarted) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
var title string
if err = bs.Decoder.DecodeElement(&title, &t); err != nil |
bs.ErrorContext.LastTitle = title //used for error reporting purposes
be = &bTitled{
bStarted: *bs,
Title: title,
}
return
}
func (bs *bStarted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
type bTitled struct {
bStarted
Title string
}
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
return
}
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
var pageID uint32
if err = bs.Decoder.DecodeElement(&pageID, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding page ID")
return
}
if topicID, ok := bs.Article2TopicID(pageID); ok {
revisions := make(chan Revision, revisionBufferSize)
select {
case <-ctx.Done():
err = bs.Wrapf(ctx.Err(), "Context cancelled")
return
case bs.OutStream <- EvolvingPage{pageID, bs.Title, "", topicID, revisions}: //Use empty abstract, later filled by completeInfo
be = &bSetted{
bTitled: *bs,
Revisions: revisions,
SHA12SerialID: map[string]uint32{},
}
return
}
}
if err = bs.Decoder.Skip(); err != nil {
err = bs.Wrapf(err, "Error while skipping page")
return
}
be = bs.New()
return
}
func (bs *bTitled) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page revision without finding previous page ID)")
return
}
func (bs *bTitled) ClosePage() (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page end without finding previous page ID)")
return
}
func (bs *bTitled) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bSetted is the state of the builder in which has been set a page ID for the page
type bSetted struct {
bTitled
Revisions chan Revision
RevisionCount uint32
SHA12SerialID map[string]uint32
}
func (bs *bSetted) NewPage() (be builder, err error) { //no page nesting
close(bs.Revisions)
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bSetted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
close(bs.Revisions)
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two ids)")
return
}
func (bs *bSetted) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
defer func() {
if err != nil {
close(bs.Revisions)
}
}()
//parse revision
var r revision
if err = bs.Decoder.DecodeElement(&r, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding the %vth revision", bs.RevisionCount+1)
return
}
//Calculate reverts
serialID, IsRevert := bs.RevisionCount, uint32(0)
oldSerialID, isRevert := bs.SHA12SerialID[r.SHA1]
switch {
case isRevert:
IsRevert = serialID - (oldSerialID + 1)
fallthrough
case len(r.SHA1) == 31:
bs.SHA12SerialID[r.SHA1] = serialID
}
//convert time
const layout = "2006-01-02T15:04:05Z"
timestamp, err := time.Parse(layout, r.Timestamp)
if err != nil {
err = bs.Wrapf(err, "Error while decoding the timestamp %s of %vth revision", r.Timestamp, bs.RevisionCount+1)
return
}
r.Timestamp = ""
//Check if userID represents bot
_, isBot := bs.ID2Bot(r.UserID)
bs.RevisionCount++
select {
case <-ctx.Done():
err = bs.Wrapf(ctx.Err(), "Context cancelled")
case bs.Revisions <- Revision{r.ID, r.UserID, isBot, r.Text, r.SHA1, IsRevert, timestamp}:
be = bs
}
return
}
func (bs *bSetted) ClosePage() (be builder, err error) {
close(bs.Revisions)
be = bs.New()
return
}
// A page revision.
type revision struct {
ID uint32 `xml:"id"`
Timestamp string `xml:"timestamp"`
UserID uint32 `xml:"contributor>id"`
Text string `xml:"text"`
SHA1 string `xml:"sha1"`
//converted data
timestamp time.Time
}
func xmlEvent(t xml.Token) string {
switch elem := t.(type) {
case xml.StartElement:
return elem.Name.Local + " start"
case xml.EndElement:
return elem.Name.Local + " end"
default:
return ""
}
}
type errorContext struct {
LastTitle string //used for error reporting purposes
Filename string //used for error reporting purposes
}
func (ec errorContext) String() string {
report := fmt.Sprintf("last title %v in \"%s\"", ec.LastTitle, ec.Filename)
if _, err := os.Stat(ec.Filename); os.IsNotExist(err) {
report += " - WARNING: file not found!"
}
return report
}
func filename(r io.Reader) (filename string) {
if namer, ok := r.(interface{ Name() string }); ok {
filename = namer.Name()
}
return
}
func getArticle2TopicID(ctx context.Context, tmpDir, lang string) (article2TopicID func(uint32) (uint32, bool), err error) {
article2Topic, namespaces, err := wikiassignment.From(ctx, tmpDir, lang)
if err != nil {
return
}
//Filter out non articles
articlesIDS := roaring.BitmapOf(namespaces.Articles...)
for pageID := range article2Topic {
if !articlesIDS.Contains(pageID) {
delete(article2Topic, pageID)
}
}
return func(articleID uint32) (topicID uint32, ok bool) {
topicID, ok = article2Topic[articleID]
return
}, nil
}
func completeInfo(ctx context.Context, fail func(err error) error, lang string, pages <-chan EvolvingPage) <-chan EvolvingPage {
results := make(chan EvolvingPage, pageBufferSize)
go func() {
defer close(results)
wikiPage := wikipage.New(lang)
wg := sync.WaitGroup{}
for i := 0; i < pageBufferSize; i++ {
wg.Add(1)
go func() {
defer wg.Done()
loop:
for p := range pages {
timeoutCtx, cancel := context.WithTimeout(ctx, 6*time.Hour)
wp, err := wikiPage.From(timeoutCtx, p.Title) //bottle neck: query to wikipedia api for each page
cancel()
switch {
case err != nil: //Querying the summary returns an error, so the article should be filtered
fallthrough
case p.PageID != wp.ID: //It's a redirect, so it should be filtered
emptyRevisions(p.Revisions, &wg)
continue loop
}
p.Abstract = wp.Abstract
select {
case results <- p:
//proceed
case <-ctx.Done():
return
}
}
}()
}
wg.Wait()
}()
return results
}
//Empty concurrently revision channel: wait goroutine so that if some error arises is caught by fail
func emptyRevisions(revisions <-chan Revision, wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
for range revisions {
//skip
}
}()
}
| {
err = bs.Wrapf(err, "Error while decoding the title of a page")
return
} | conditional_block |
wikibrief.go | package wikibrief
import (
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"os"
"sync"
"time"
"github.com/RoaringBitmap/roaring"
"github.com/remeh/sizedwaitgroup"
"github.com/negapedia/wikiassignment"
"github.com/negapedia/wikibots"
"github.com/negapedia/wikidump"
"github.com/negapedia/wikipage"
errorsOnSteroids "github.com/pkg/errors"
)
//New digest the latest wikipedia dump of the specified language into the output channel.
//The revision channel of each page must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
//The ctx and fail together should behave in the same manner as if created with WithFail - https://godoc.org/github.com/ebonetti/ctxutils#WithFail
//The condition restrict restricts the digest to just one dump file, used for testing purposes.
func | (ctx context.Context, fail func(err error) error, tmpDir, lang string, restrict bool) <-chan EvolvingPage {
//Default value to a closed channel
dummyPagesChan := make(chan EvolvingPage)
close(dummyPagesChan)
ID2Bot, err := wikibots.New(ctx, lang)
if err != nil {
fail(err)
return dummyPagesChan
}
latestDump, err := wikidump.Latest(tmpDir, lang, "metahistory7zdump",
"pagetable", "redirecttable", "categorylinkstable", "pagelinkstable")
if err != nil {
fail(err)
return dummyPagesChan
}
article2TopicID, err := getArticle2TopicID(ctx, tmpDir, lang)
if err != nil {
fail(err)
return dummyPagesChan
}
simplePages := make(chan EvolvingPage, pageBufferSize)
go func() {
defer close(simplePages)
//limit the number of workers to prevent system from killing 7zip instances
wg := sizedwaitgroup.New(pageBufferSize)
it := latestDump.Open("metahistory7zdump")
r, err := it(ctx)
if restrict { //Use just one dump file for testing purposes
it = func(_ context.Context) (io.ReadCloser, error) {
return nil, io.EOF
}
}
for ; err == nil; r, err = it(ctx) {
if err = wg.AddWithContext(ctx); err != nil { //AddWithContext fails only if ctx is Done
r.Close()
break
}
go func(r io.ReadCloser) {
defer wg.Done()
defer r.Close()
err := run(ctx, bBase{xml.NewDecoder(r), article2TopicID, ID2Bot, simplePages, &errorContext{"", filename(r)}})
if err != nil {
fail(err)
}
}(r)
}
if err != io.EOF {
fail(err)
}
wg.Wait()
}()
return completeInfo(ctx, fail, lang, simplePages)
}
//EvolvingPage represents a wikipedia page that is being edited. Revisions is closed when there are no more revisions.
//Revision channel must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
type EvolvingPage struct {
PageID uint32
Title, Abstract string
TopicID uint32
Revisions <-chan Revision
}
//Revision represents a revision of a page.
type Revision struct {
ID, UserID uint32
IsBot bool
Text, SHA1 string
IsRevert uint32
Timestamp time.Time
}
//There are 4 buffers in various forms: 4*pageBufferSize is the maximum number of wikipedia pages in memory.
//Each page has a buffer of revisionBufferSize revisions: this means that at each moment there is
//a maximum of 4*pageBufferSize*revisionBufferSize page texts in memory.
const (
pageBufferSize = 40
revisionBufferSize = 300
)
func run(ctx context.Context, base bBase) (err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
b := base.New()
defer b.ClosePage() //Close eventually open revision channel
var t xml.Token
for t, err = base.Decoder.Token(); err == nil; t, err = base.Decoder.Token() {
switch xmlEvent(t) {
case "page start":
b, err = b.NewPage()
case "title start":
b, err = b.SetPageTitle(ctx, t.(xml.StartElement))
case "id start":
b, err = b.SetPageID(ctx, t.(xml.StartElement))
case "revision start":
b, err = b.NewRevision(ctx, t.(xml.StartElement))
case "page end":
b, err = b.ClosePage()
}
if err != nil {
break
}
}
causer, errHasCause := err.(interface{ Cause() error })
switch {
case err == io.EOF:
err = nil
case errHasCause && causer.Cause() != nil:
//do nothing
default:
err = b.Wrapf(err, "Unexpected error in outer XML Decoder event loop")
}
return
}
//AnonimousUserID is the UserID value assumed by revisions done by an anonimous user
const AnonimousUserID uint32 = 0
var errInvalidXML = errors.New("Invalid XML")
type builder interface {
NewPage() (be builder, err error)
SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error)
SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error)
NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error)
ClosePage() (be builder, err error)
Wrapf(err error, format string, args ...interface{}) error
}
/////////////////////////////////////////////////////////////////////////////////////
//bBase is the base state builder
type bBase struct {
Decoder *xml.Decoder
Article2TopicID func(articleID uint32) (topicID uint32, ok bool)
ID2Bot func(userID uint32) (username string, ok bool)
OutStream chan<- EvolvingPage
ErrorContext *errorContext
}
func (bs *bBase) New() builder {
be := bBase(*bs)
return &be
}
func (bs *bBase) NewPage() (be builder, err error) {
be = &bStarted{*bs}
return
}
func (bs *bBase) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"title\")")
return
}
func (bs *bBase) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"id\")")
return
}
func (bs *bBase) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"revision\")")
return
}
func (bs *bBase) ClosePage() (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" start before end)")
return
}
func (bs *bBase) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bStarted is the state of the builder in which a new page start has been found
type bStarted struct {
bBase
}
func (bs *bStarted) NewPage() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bStarted) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
var title string
if err = bs.Decoder.DecodeElement(&title, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding the title of a page")
return
}
bs.ErrorContext.LastTitle = title //used for error reporting purposes
be = &bTitled{
bStarted: *bs,
Title: title,
}
return
}
func (bs *bStarted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
type bTitled struct {
bStarted
Title string
}
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
return
}
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
var pageID uint32
if err = bs.Decoder.DecodeElement(&pageID, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding page ID")
return
}
if topicID, ok := bs.Article2TopicID(pageID); ok {
revisions := make(chan Revision, revisionBufferSize)
select {
case <-ctx.Done():
err = bs.Wrapf(ctx.Err(), "Context cancelled")
return
case bs.OutStream <- EvolvingPage{pageID, bs.Title, "", topicID, revisions}: //Use empty abstract, later filled by completeInfo
be = &bSetted{
bTitled: *bs,
Revisions: revisions,
SHA12SerialID: map[string]uint32{},
}
return
}
}
if err = bs.Decoder.Skip(); err != nil {
err = bs.Wrapf(err, "Error while skipping page")
return
}
be = bs.New()
return
}
func (bs *bTitled) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page revision without finding previous page ID)")
return
}
func (bs *bTitled) ClosePage() (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page end without finding previous page ID)")
return
}
func (bs *bTitled) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bSetted is the state of the builder in which has been set a page ID for the page
type bSetted struct {
bTitled
Revisions chan Revision
RevisionCount uint32
SHA12SerialID map[string]uint32
}
func (bs *bSetted) NewPage() (be builder, err error) { //no page nesting
close(bs.Revisions)
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bSetted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
close(bs.Revisions)
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two ids)")
return
}
func (bs *bSetted) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
defer func() {
if err != nil {
close(bs.Revisions)
}
}()
//parse revision
var r revision
if err = bs.Decoder.DecodeElement(&r, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding the %vth revision", bs.RevisionCount+1)
return
}
//Calculate reverts
serialID, IsRevert := bs.RevisionCount, uint32(0)
oldSerialID, isRevert := bs.SHA12SerialID[r.SHA1]
switch {
case isRevert:
IsRevert = serialID - (oldSerialID + 1)
fallthrough
case len(r.SHA1) == 31:
bs.SHA12SerialID[r.SHA1] = serialID
}
//convert time
const layout = "2006-01-02T15:04:05Z"
timestamp, err := time.Parse(layout, r.Timestamp)
if err != nil {
err = bs.Wrapf(err, "Error while decoding the timestamp %s of %vth revision", r.Timestamp, bs.RevisionCount+1)
return
}
r.Timestamp = ""
//Check if userID represents bot
_, isBot := bs.ID2Bot(r.UserID)
bs.RevisionCount++
select {
case <-ctx.Done():
err = bs.Wrapf(ctx.Err(), "Context cancelled")
case bs.Revisions <- Revision{r.ID, r.UserID, isBot, r.Text, r.SHA1, IsRevert, timestamp}:
be = bs
}
return
}
func (bs *bSetted) ClosePage() (be builder, err error) {
close(bs.Revisions)
be = bs.New()
return
}
// A page revision.
type revision struct {
ID uint32 `xml:"id"`
Timestamp string `xml:"timestamp"`
UserID uint32 `xml:"contributor>id"`
Text string `xml:"text"`
SHA1 string `xml:"sha1"`
//converted data
timestamp time.Time
}
func xmlEvent(t xml.Token) string {
switch elem := t.(type) {
case xml.StartElement:
return elem.Name.Local + " start"
case xml.EndElement:
return elem.Name.Local + " end"
default:
return ""
}
}
type errorContext struct {
LastTitle string //used for error reporting purposes
Filename string //used for error reporting purposes
}
func (ec errorContext) String() string {
report := fmt.Sprintf("last title %v in \"%s\"", ec.LastTitle, ec.Filename)
if _, err := os.Stat(ec.Filename); os.IsNotExist(err) {
report += " - WARNING: file not found!"
}
return report
}
func filename(r io.Reader) (filename string) {
if namer, ok := r.(interface{ Name() string }); ok {
filename = namer.Name()
}
return
}
// getArticle2TopicID builds a lookup from article page ID to topic ID for
// the given language, dropping any page outside the article namespace.
func getArticle2TopicID(ctx context.Context, tmpDir, lang string) (article2TopicID func(uint32) (uint32, bool), err error) {
	page2Topic, namespaces, err := wikiassignment.From(ctx, tmpDir, lang)
	if err != nil {
		return
	}
	// Keep only pages that belong to the article namespace.
	articles := roaring.BitmapOf(namespaces.Articles...)
	for pageID := range page2Topic {
		if !articles.Contains(pageID) {
			delete(page2Topic, pageID)
		}
	}
	article2TopicID = func(articleID uint32) (uint32, bool) {
		topicID, ok := page2Topic[articleID]
		return topicID, ok
	}
	return
}
// completeInfo enriches each incoming page with its abstract, fetched from
// the wikipedia API, and forwards the result on the returned channel.
// Pages whose summary query fails, or whose returned ID differs from the
// page ID (a redirect), are dropped; their revision channels are drained so
// upstream producers never block.
// NOTE(review): the fail callback is never invoked here — confirm whether
// query errors were meant to be reported through it.
func completeInfo(ctx context.Context, fail func(err error) error, lang string, pages <-chan EvolvingPage) <-chan EvolvingPage {
	results := make(chan EvolvingPage, pageBufferSize)
	go func() {
		defer close(results)
		wikiPage := wikipage.New(lang)
		wg := sync.WaitGroup{}
		// Worker pool: API latency dominates, so queries run concurrently.
		for i := 0; i < pageBufferSize; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
			loop:
				for p := range pages {
					// Bound each API call with a generous timeout.
					timeoutCtx, cancel := context.WithTimeout(ctx, 6*time.Hour)
					wp, err := wikiPage.From(timeoutCtx, p.Title) //bottle neck: query to wikipedia api for each page
					cancel()
					switch {
					case err != nil: //Querying the summary returns an error, so the article should be filtered
						fallthrough
					case p.PageID != wp.ID: //It's a redirect, so it should be filtered
						// Drain the dropped page's revisions so its producer can proceed.
						emptyRevisions(p.Revisions, &wg)
						continue loop
					}
					p.Abstract = wp.Abstract
					select {
					case results <- p:
						//proceed
					case <-ctx.Done():
						return
					}
				}
			}()
		}
		wg.Wait()
	}()
	return results
}
// emptyRevisions discards everything sent on revisions from a dedicated
// goroutine, so the sender never blocks; the goroutine is registered on wg
// so callers can wait for the drain to finish.
func emptyRevisions(revisions <-chan Revision, wg *sync.WaitGroup) {
	wg.Add(1)
	drain := func() {
		defer wg.Done()
		for range revisions {
			// discard
		}
	}
	go drain()
}
| New | identifier_name |
keys.rs | // DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
// Version 2, December 2004
//
// Copyleft (ↄ) meh. <meh@schizofreni.co> | http://meh.schizofreni.co
//
// Everyone is permitted to copy and distribute verbatim or modified
// copies of this license document, and changing it is allowed as long
// as the name is changed.
//
// DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
// TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
//
// 0. You just DO WHAT THE FUCK YOU WANT TO.
use std::collections::{BTreeMap, HashMap};
use std::hash::BuildHasherDefault;
use fnv::FnvHasher;
use std::str;
use info::{self, capability as cap};
/// Byte-sequence to key binding table.
///
/// Bindings are bucketed by sequence length; each bucket maps the raw byte
/// sequence (FNV-hashed) to the bound `Key`.
#[derive(Debug)]
pub struct Keys(BTreeMap<usize, HashMap<Vec<u8>, Key, BuildHasherDefault<FnvHasher>>>);
/// A decoded key press: the base value plus any modifiers held with it.
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
pub struct Key {
	pub modifier: Modifier,
	pub value: Value,
}
bitflags! {
	/// Modifier keys held together with a key press.
	pub struct Modifier: u8 {
		const NONE = 0;
		const ALT = 1 << 0;
		const CTRL = 1 << 1;
		const LOGO = 1 << 2;
		const SHIFT = 1 << 3;
	}
}
impl Default for Modifier {
fn default() -> Self {
Modifier::empty()
}
}
/// The base value of a key press, without modifiers.
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
pub enum Value {
	Escape,
	Enter,
	Down,
	Up,
	Left,
	Right,
	PageUp,
	PageDown,
	BackSpace,
	BackTab,
	Tab,
	Delete,
	Insert,
	Home,
	End,
	Begin,
	/// A function key, `F(1)` through `F(63)`.
	F(u8),
	/// A plain character key.
	Char(char),
}
pub use self::Value::*;
impl Keys {
/// Builds the key binding table for the given terminal description.
///
/// Terminfo capabilities are loaded first and hard-coded escape-sequence
/// defaults second; every insertion goes through `or_insert`, so for a
/// given byte sequence the binding loaded first wins (terminfo takes
/// precedence over the built-in fallbacks).
pub fn new(info: &info::Database) -> Self {
	let mut map = BTreeMap::default();

	// Load terminfo bindings.
	{
		// Bind the byte sequence of capability `$name` to `Value::$key`
		// with no modifiers, unless that sequence is already bound.
		macro_rules! insert {
			($name:ident => $($key:tt)*) => (
				if let Some(cap) = info.get::<cap::$name>() {
					let value: &[u8] = cap.as_ref();

					map.entry(value.len()).or_insert(HashMap::default())
						.entry(value.into()).or_insert(Key {
							modifier: Modifier::empty(),
							value: Value::$($key)*
						});
				}
			)
		}

		insert!(KeyEnter => Enter);
		insert!(CarriageReturn => Enter);
		insert!(KeyDown => Down);
		insert!(KeyUp => Up);
		insert!(KeyLeft => Left);
		insert!(KeyRight => Right);
		insert!(KeyNPage => PageDown);
		insert!(KeyPPage => PageUp);
		insert!(KeyBackspace => BackSpace);
		insert!(KeyBTab => BackTab);
		insert!(Tab => Tab);

		insert!(KeyF1 => F(1));
		insert!(KeyF2 => F(2));
		insert!(KeyF3 => F(3));
		insert!(KeyF4 => F(4));
		insert!(KeyF5 => F(5));
		insert!(KeyF6 => F(6));
		insert!(KeyF7 => F(7));
		insert!(KeyF8 => F(8));
		insert!(KeyF9 => F(9));
		insert!(KeyF10 => F(10));
		insert!(KeyF11 => F(11));
		insert!(KeyF12 => F(12));
		insert!(KeyF13 => F(13));
		insert!(KeyF14 => F(14));
		insert!(KeyF15 => F(15));
		insert!(KeyF16 => F(16));
		insert!(KeyF17 => F(17));
		insert!(KeyF18 => F(18));
		insert!(KeyF19 => F(19));
		insert!(KeyF20 => F(20));
		insert!(KeyF21 => F(21));
		insert!(KeyF22 => F(22));
		insert!(KeyF23 => F(23));
		insert!(KeyF24 => F(24));
		insert!(KeyF25 => F(25));
		insert!(KeyF26 => F(26));
		insert!(KeyF27 => F(27));
		insert!(KeyF28 => F(28));
		insert!(KeyF29 => F(29));
		insert!(KeyF30 => F(30));
		insert!(KeyF31 => F(31));
		insert!(KeyF32 => F(32));
		insert!(KeyF33 => F(33));
		insert!(KeyF34 => F(34));
		insert!(KeyF35 => F(35));
		insert!(KeyF36 => F(36));
		insert!(KeyF37 => F(37));
		insert!(KeyF38 => F(38));
		insert!(KeyF39 => F(39));
		insert!(KeyF40 => F(40));
		insert!(KeyF41 => F(41));
		insert!(KeyF42 => F(42));
		insert!(KeyF43 => F(43));
		insert!(KeyF44 => F(44));
		insert!(KeyF45 => F(45));
		insert!(KeyF46 => F(46));
		insert!(KeyF47 => F(47));
		insert!(KeyF48 => F(48));
		insert!(KeyF49 => F(49));
		insert!(KeyF50 => F(50));
		insert!(KeyF51 => F(51));
		insert!(KeyF52 => F(52));
		insert!(KeyF53 => F(53));
		insert!(KeyF54 => F(54));
		insert!(KeyF55 => F(55));
		insert!(KeyF56 => F(56));
		insert!(KeyF57 => F(57));
		insert!(KeyF58 => F(58));
		insert!(KeyF59 => F(59));
		insert!(KeyF60 => F(60));
		insert!(KeyF61 => F(61));
		insert!(KeyF62 => F(62));
		insert!(KeyF63 => F(63));
	}

	// Load default bindings (xterm-style CSI/SS3 sequences), used when the
	// terminfo entry did not already claim the sequence.
	{
		macro_rules! insert {
			($string:expr => $value:expr) => (
				insert!($string => $value; NONE);
			);

			($string:expr => $value:expr; $($mods:ident)|+) => (
				map.entry($string.len()).or_insert(HashMap::default())
					.entry($string.to_vec()).or_insert(Key {
						modifier: $(Modifier::$mods)|+,
						value: $value,
					});
			);
		}

		insert!(b"\x1B[Z" => Tab; SHIFT);

		insert!(b"\x1B\x7F" => BackSpace; ALT);
		insert!(b"\x7F" => BackSpace);

		insert!(b"\x1B\r\n" => Enter; ALT);
		insert!(b"\x1B\r" => Enter; ALT);
		insert!(b"\x1B\n" => Enter; ALT);
		insert!(b"\r\n" => Enter);
		insert!(b"\r" => Enter);
		insert!(b"\n" => Enter);

		insert!(b"\x1B[3;5~" => Delete; CTRL);
		insert!(b"\x1B[3;2~" => Delete; SHIFT);
		insert!(b"\x1B[3~" => Delete);

		insert!(b"\x1B[2;5~" => Insert; CTRL);
		insert!(b"\x1B[2;2~" => Insert; SHIFT);
		insert!(b"\x1B[2~" => Insert);

		insert!(b"\x1B[1;2H" => Home; SHIFT);
		insert!(b"\x1B[H" => Home);

		insert!(b"\x1B[1;5F" => End; CTRL);
		insert!(b"\x1B[1;2F" => End; SHIFT);
		insert!(b"\x1B[8~" => End);

		insert!(b"\x1B[E" => Begin);

		insert!(b"\x1B[5;5~" => PageUp; CTRL);
		insert!(b"\x1B[5;2~" => PageUp; SHIFT);
		insert!(b"\x1B[5~" => PageUp);

		insert!(b"\x1B[6;5~" => PageDown; CTRL);
		insert!(b"\x1B[6;2~" => PageDown; SHIFT);
		insert!(b"\x1B[6~" => PageDown);

		// FIX: the plain cursor-key fallbacks previously used b"\x1BBOA"
		// (ESC B O A) and friends — sequences no terminal emits. The SS3
		// application-cursor-mode sequences are ESC O A/B/C/D.
		insert!(b"\x1B[1;5A" => Up; CTRL);
		insert!(b"\x1B[1;3A" => Up; ALT);
		insert!(b"\x1B[1;2A" => Up; SHIFT);
		insert!(b"\x1BOA" => Up);

		insert!(b"\x1B[1;5B" => Down; CTRL);
		insert!(b"\x1B[1;3B" => Down; ALT);
		insert!(b"\x1B[1;2B" => Down; SHIFT);
		insert!(b"\x1BOB" => Down);

		insert!(b"\x1B[1;5C" => Right; CTRL);
		insert!(b"\x1B[1;3C" => Right; ALT);
		insert!(b"\x1B[1;2C" => Right; SHIFT);
		insert!(b"\x1BOC" => Right);

		insert!(b"\x1B[1;5D" => Left; CTRL);
		insert!(b"\x1B[1;3D" => Left; ALT);
		insert!(b"\x1B[1;2D" => Left; SHIFT);
		insert!(b"\x1BOD" => Left);

		insert!(b"\x1B[1;5P" => F(1); CTRL);
		insert!(b"\x1B[1;3P" => F(1); ALT);
		insert!(b"\x1B[1;6P" => F(1); LOGO);
		insert!(b"\x1B[1;2P" => F(1); SHIFT);
		insert!(b"\x1BOP" => F(1));

		insert!(b"\x1B[1;5Q" => F(2); CTRL);
		insert!(b"\x1B[1;3Q" => F(2); ALT);
		insert!(b"\x1B[1;6Q" => F(2); LOGO);
		insert!(b"\x1B[1;2Q" => F(2); SHIFT);
		insert!(b"\x1BOQ" => F(2));

		insert!(b"\x1B[1;5R" => F(3); CTRL);
		insert!(b"\x1B[1;3R" => F(3); ALT);
		insert!(b"\x1B[1;6R" => F(3); LOGO);
		insert!(b"\x1B[1;2R" => F(3); SHIFT);
		insert!(b"\x1BOR" => F(3));

		insert!(b"\x1B[1;5S" => F(4); CTRL);
		insert!(b"\x1B[1;3S" => F(4); ALT);
		insert!(b"\x1B[1;6S" => F(4); LOGO);
		insert!(b"\x1B[1;2S" => F(4); SHIFT);
		insert!(b"\x1BOS" => F(4));

		insert!(b"\x1B[15;5~" => F(5); CTRL);
		insert!(b"\x1B[15;3~" => F(5); ALT);
		insert!(b"\x1B[15;6~" => F(5); LOGO);
		insert!(b"\x1B[15;2~" => F(5); SHIFT);
		insert!(b"\x1B[15~" => F(5));

		insert!(b"\x1B[17;5~" => F(6); CTRL);
		insert!(b"\x1B[17;3~" => F(6); ALT);
		insert!(b"\x1B[17;6~" => F(6); LOGO);
		insert!(b"\x1B[17;2~" => F(6); SHIFT);
		insert!(b"\x1B[17~" => F(6));

		insert!(b"\x1B[18;5~" => F(7); CTRL);
		insert!(b"\x1B[18;3~" => F(7); ALT);
		insert!(b"\x1B[18;6~" => F(7); LOGO);
		insert!(b"\x1B[18;2~" => F(7); SHIFT);
		insert!(b"\x1B[18~" => F(7));

		insert!(b"\x1B[19;5~" => F(8); CTRL);
		insert!(b"\x1B[19;3~" => F(8); ALT);
		insert!(b"\x1B[19;6~" => F(8); LOGO);
		insert!(b"\x1B[19;2~" => F(8); SHIFT);
		insert!(b"\x1B[19~" => F(8));

		insert!(b"\x1B[20;5~" => F(9); CTRL);
		insert!(b"\x1B[20;3~" => F(9); ALT);
		insert!(b"\x1B[20;6~" => F(9); LOGO);
		insert!(b"\x1B[20;2~" => F(9); SHIFT);
		insert!(b"\x1B[20~" => F(9));

		insert!(b"\x1B[21;5~" => F(10); CTRL);
		insert!(b"\x1B[21;3~" => F(10); ALT);
		insert!(b"\x1B[21;6~" => F(10); LOGO);
		insert!(b"\x1B[21;2~" => F(10); SHIFT);
		insert!(b"\x1B[21~" => F(10));

		insert!(b"\x1B[23;5~" => F(11); CTRL);
		insert!(b"\x1B[23;3~" => F(11); ALT);
		insert!(b"\x1B[23;6~" => F(11); LOGO);
		insert!(b"\x1B[23;2~" => F(11); SHIFT);
		insert!(b"\x1B[23~" => F(11));

		insert!(b"\x1B[24;5~" => F(12); CTRL);
		insert!(b"\x1B[24;3~" => F(12); ALT);
		insert!(b"\x1B[24;6~" => F(12); LOGO);
		insert!(b"\x1B[24;2~" => F(12); SHIFT);
		insert!(b"\x1B[24~" => F(12));

		// Alternate numbering used by terminals that report shifted/ctrl'd
		// F1-F12 as F13-F35. Because of `or_insert`, these only take effect
		// for sequences not already claimed by the modifier bindings above
		// or by terminfo.
		insert!(b"\x1B[1;2P" => F(13));
		insert!(b"\x1B[1;2Q" => F(14));
		insert!(b"\x1B[1;2R" => F(15));
		insert!(b"\x1B[1;2S" => F(16));
		insert!(b"\x1B[15;2~" => F(17));
		insert!(b"\x1B[17;2~" => F(18));
		insert!(b"\x1B[18;2~" => F(19));
		insert!(b"\x1B[19;2~" => F(20));
		insert!(b"\x1B[20;2~" => F(21));
		insert!(b"\x1B[21;2~" => F(22));
		insert!(b"\x1B[23;2~" => F(23));
		insert!(b"\x1B[24;2~" => F(24));
		insert!(b"\x1B[1;5P" => F(25));
		insert!(b"\x1B[1;5Q" => F(26));
		insert!(b"\x1B[1;5R" => F(27));
		insert!(b"\x1B[1;5S" => F(28));
		insert!(b"\x1B[15;5~" => F(29));
		insert!(b"\x1B[17;5~" => F(30));
		insert!(b"\x1B[18;5~" => F(31));
		insert!(b"\x1B[19;5~" => F(32));
		insert!(b"\x1B[20;5~" => F(33));
		insert!(b"\x1B[21;5~" => F(34));
		insert!(b"\x1B[23;5~" => F(35));
	}

	Keys(map)
}
/// Binds a byte sequence to `key`, replacing any existing binding for the
/// same sequence. Empty sequences are ignored.
pub fn bind<T: Into<Vec<u8>>>(&mut self, value: T, key: Key) -> &mut Self {
	let sequence = value.into();
	if sequence.is_empty() {
		return self;
	}
	self.0.entry(sequence.len())
		.or_insert_with(HashMap::default)
		.insert(sequence, key);
	self
}
/// Removes the binding for the given byte sequence, if any.
pub fn unbind<T: AsRef<[u8]>>(&mut self, value: T) -> &mut Self {
	let sequence = value.as_ref();
	if let Some(bucket) = self.0.get_mut(&sequence.len()) {
		bucket.remove(sequence);
	}
	self
}
pub fn find<'a>(&self, mut input: &'a [u8]) -> (&'a [u8], Option<Key>) {
// Check if it's a defined key.
for (&length, map) in self.0.iter().rev() {
if length > input.len() {
continue;
}
if let Some(key) = map.get(&input[..length]) {
return (&input[length..], Some(*key));
}
}
// Check if it's a single escape press.
if input == &[0x1B] {
return (&input[1..], Some(Key {
modifier: Modifier::empty(),
value: Escape,
}));
}
let mut mods = Modifier::empty();
if input[0] == 0x1B {
mods.insert(Modifier::ALT);
input = &input[1..];
}
// Check if it's a control character.
if input[0] & 0b011_00000 == 0 {
return (&input[1..], Some(Key {
modifier: mods | Modifier::CTRL,
value: Char((input[0] | 0b010_00000) as char),
}));
}
// Check if it's a unicode character.
const WIDTH: [u8; 256] = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x1F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x3F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x5F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x7F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x9F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xBF
0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xDF
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xEF
4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xFF
];
let length = WIDTH[input[0] as usize] as usize;
if length >= input.len() {
if let Ok(string) = str::from_utf8(&input[..length]) {
| }
(&input[1..], None)
}
}
| return (&input[length..], Some(Key {
modifier: mods,
value: Char(string.chars().next().unwrap())
}));
}
| conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.