file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
common.js | type}&page={page}&uid={uid}",//)获取下面指定层数人员(如 一 级好友 二级好友)
changpwd: "/game/g_user.ashx?action=changpwd&pwd0={pwd0}&pwd1={pwd1}&pwd2={pwd2}&pwd3={pwd3}&pwd4={pwd4}&pwd5={pwd5}&usercode={usercode}",//修改密码
backpwd: "/game/g_user.ashx?action=backpwd&pwd={pwd}&usercode={usercode}",//找回密码
changname: "/game/g_user.ashx?action=changname&name={name}&uid={uid}",//修改姓名
jhuo: "/game/g_user.ashx?action=jhuo&uid={uid}",//激活
photo: "/game/g_user.ashx?action=photo&uid={uid}&extion={extion}&img={img}",//设置头像
//------------明细接口 -------------------------
account: "/game/g_accoun.ashx?action=account&page={page}&uid={uid}&accountid={accountid}",//获取指定会员账户明细
account_zx: "/game/g_accoun.ashx?action=account_zx&page={page}&uid={uid}",////获取指定会员转账明细
account_dh: "/game/g_accoun.ashx?action=account_dh&page={page}&uid={uid}",//获取指定会员兑换明细
//-----------游戏主界面接口--------------
treelst: "/game/g_game.ashx?action=gettreelst&grade={grade}&uid={uid}", //获取指定场景 所有树信息(电话,回调函数(返回 json 值))
caozuo: "/game/g_game.ashx?action=caozuo&grade={grade}&uid={uid}&no={no}&actype={actype}",//游戏操作(1 种树 2浇水,3施肥 4采摘 11一键施肥 22一键采摘 可以不传No参数) (grade 等级,no 编号,actype 操作类型)-
flnum: "/game/g_game.ashx?action=getnum&grade={grade}&uid={uid}", //获取指定等级的肥料数量
shouhuo: "/game/g_game.ashx?action=shouhuo&uid={uid}&grade={grade}", //收获租借收益
zhujie: "/game/g_game.ashx?action=zhujie&uid={uid}&grade={grade}", //获取租借信息 操作编号(租橡胶树 :20 租风力发电 :30)
buygamegood: "/game/g_shop.ashx?action=buygamegood&grade={grade}&type={type}&uid={uid}",//游戏商城购买 (肥料,树苗 ,地)grade等级,type大类1肥料2树苗3土地
getwareh: "/game/g_shop.ashx?action=getwareh&uid={uid}", //库存
shopgood: "/game/g_shop.ashx?action=shopgood", //兑换商城普通商品列表
hieghgood: "/game/g_shop.ashx?action=hieghgood", //兑换商城高级商品信息
byshopgood: "/game/g_shop.ashx?action=byshopgood&name={name}&usertel={usertel}&pwd={pwd}&province={province}&Area={Area}&City={City}&Addr={Addr}&num={num}&uid={uid}&goodid={goodid}",//购买普通商品
byhieghgood: "/game/g_shop.ashx?action=byhieghgood&bankname={bankname}&bankno={bankno}&bankname={bankname}&pwd={pwd}&num={num}&goodid={goodid}&uid={uid}", //购买高级商品
zhu: "/game/g_shop.ashx?action=zhu&uid={uid}&grade={grade}&pwd={pwd}", //租橡胶林 grade
//-----------------公告信息----------------------------
newlist: "/game/g_new.ashx?action=newlist",//公告信息
newinfo: "/game/g_new.ashx?action=newinfo&id={id}",//获取新闻公告详细信息
//签到
goSign: '/game/g_user.ashx?action=sign&uid={uid}',
chongzhi: '/game/qmzsgame.ashx?action=xjconvert&uid={uid}&money={money}&password={password}&type={type}&signcode={signcode}',
//棋牌
getyb:'/game/qmzsgame.ashx?action=getyb&uid={uid}',//获取元宝数量
czqp:'/game/qmzsgame.ashx?action=czqp&uid={uid}&money={money}&pwd={pwd}',//充值元宝
//确认转账
tranzr:'/game/qp_game.ashx?action=tranzr&uid={uid}&page={page}',//待确认转入
tranzc:'/game/qp_game.ashx?action=tranzc&uid={uid}&page={page}',//待确认转出
confirm:'/game/qp_game.ashx?action=confirm&uid={uid}&Id={Id}',//确认
cancel:'/game/qp_game.ashx?action=cancel&uid={uid}&Id={Id}',//取消
//--------------------------二维码--------------------------------
qrcode: '/game/qrcode.aspx?id={id}',
//------------------------获取指定长度随机数-获取-------------------//
getrandom: function (length) { var Num = ""; for (var i = 0; i < length; i++) { Num += Math.floor(Math.random() * 10); } return Num; },
//--------------------------参数替换(url,参数数组)----------------------------//
urlreplace: function (url, lst) {
var newurl = url; for (var name in lst) { newurl = newurl.replace("{" + name + "}", lst[name]); }
return newurl + '&key=' + this.onekey;
},
//-------------------------异步回调-------------------
async: function (url, fun, panm) {
var url1 = this.urlreplace(url, panm)
console.log('执行url:' + url1)
//网络加载中
var scene_ = cc.director.getScene();
var loadingNode = new cc.Node();
loadingNode.name = 'loadingNode';
loadingNode.addComponent(cc.Label);
loadingNode.getComponent(cc.Label).string = '网络加载中,请稍后...';
loadingNode.getComponent(cc.Label).fontSize = 25;
loadingNode.setPosition(600, 400);
loadingNode.color = new cc.Color(255, 0, 0);
scene_.addChild(loadingNode);
var xhr = new XMLHttpRequest();
xhr.timeout = 5000;
xhr.onreadystatechange = function () {
if (xhr.readyState == 4) {
if (xhr.status >= 200 && xhr.status < 400) {
loadingNode.destroy();
var response = xhr.responseText;
// console.log(response);
response = JSON.parse(response);
if (response.msg2 == '非法操作') {
window.url = response.msg3;
cc.director.loadScene('update');
} else {
fun(response);
}
} else {
loadingNode.destroy();
var response = { 'msg1': 'error', 'msg2': '网络连接错误', 'msg3': '' };
fun(response)
}
}
loadingNode.destroy();
};
xhr.ontimeout = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '连接超时,网络繁忙,请稍后再试', 'msg3': '' };
fun(response)
}
xhr.onerror = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '网路错误,请检查网络设置', 'msg3': '' };
fun(response)
}
xhr.open("GET", url1, true);
xhr.send();
},
//-------------------------调取数据--------------------------------------------
geturldata: function (url, lst, fun) {
var url2 = this.yuming + url;
var user = cc.sys.localStorage.getItem('current_user');
lst["uid"] = user.ID;
com.async(url2, fun, lst);
},
//--------------------------------------------会员信息 ---------------------------------------
getUser: function () {
if (!cc.sys.localStorage.getItem('current_user')) {
cc.director.loadScene('index');
} else {
return JSON.parse(cc.sys.localStorage.getItem("current_user"));
}
},
updateUser: function (fun) {
var userinfo = this.getUser();
var uid = userinfo.ID;
var url = this.yuming + this.geturl;
this.async(url, function (resp) {
if (resp.msg1 == 'success') {
cc.sys.localStorage.setItem('current_user', JSON.stringify(resp.msg3));
if (fun) {
fun(resp);
}
}
}, { 'uid': uid });
},
logout: function () {
cc.sys.localStorage.setItem('current_user', null);
},
//-----------------------------------------------原有接口函数----------------------------------------------------------------------------------//
//-------------- | -----------注册短信(电话,回调函数(返回 json 值))--------------------------------------------
sen | conditional_block | |
common.js | .ashx?action=regcheck&tel={tel}&yzm={yzm}",
backcode: "/game/g_sendsms.ashx?action=backcheck&tel={tel}&yzm={yzm}",
ruser: "/game/g_user.ashx?action=ruser&uid={uid}",//获取会员余额信息
geturl: '/game/g_user.ashx?action=getuser&uid={uid}', //获得会员信息
hzhuan: "/game/g_user.ashx?action=hzhuan&usercode={usercode}&pwd={pwd}&uid={uid}&accounttype={accounttype}&price={price}",//转账 1树呗 4地呗
donwuser: "/game/g_user.ashx?action=donwuser&type={type}&page={page}&uid={uid}",//)获取下面指定层数人员(如 一 级好友 二级好友)
changpwd: "/game/g_user.ashx?action=changpwd&pwd0={pwd0}&pwd1={pwd1}&pwd2={pwd2}&pwd3={pwd3}&pwd4={pwd4}&pwd5={pwd5}&usercode={usercode}",//修改密码
backpwd: "/game/g_user.ashx?action=backpwd&pwd={pwd}&usercode={usercode}",//找回密码
changname: "/game/g_user.ashx?action=changname&name={name}&uid={uid}",//修改姓名
jhuo: "/game/g_user.ashx?action=jhuo&uid={uid}",//激活
photo: "/game/g_user.ashx?action=photo&uid={uid}&extion={extion}&img={img}",//设置头像
//------------明细接口 -------------------------
account: "/game/g_accoun.ashx?action=account&page={page}&uid={uid}&accountid={accountid}",//获取指定会员账户明细
account_zx: "/game/g_accoun.ashx?action=account_zx&page={page}&uid={uid}",////获取指定会员转账明细
account_dh: "/game/g_accoun.ashx?action=account_dh&page={page}&uid={uid}",//获取指定会员兑换明细
//-----------游戏主界面接口--------------
treelst: "/game/g_game.ashx?action=gettreelst&grade={grade}&uid={uid}", //获取指定场景 所有树信息(电话,回调函数(返回 json 值))
caozuo: "/game/g_game.ashx?action=caozuo&grade={grade}&uid={uid}&no={no}&actype={actype}",//游戏操作(1 种树 2浇水,3施肥 4采摘 11一键施肥 22一键采摘 可以不传No参数) (grade 等级,no 编号,actype 操作类型)-
flnum: "/game/g_game.ashx?action=getnum&grade={grade}&uid={uid}", //获取指定等级的肥料数量
shouhuo: "/game/g_game.ashx?action=shouhuo&uid={uid}&grade={grade}", //收获租借收益
zhujie: "/game/g_game.ashx?action=zhujie&uid={uid}&grade={grade}", //获取租借信息 操作编号(租橡胶树 :20 租风力发电 :30)
buygamegood: "/game/g_shop.ashx?action=buygamegood&grade={grade}&type={type}&uid={uid}",//游戏商城购买 (肥料,树苗 ,地)grade等级,type大类1肥料2树苗3土地
getwareh: "/game/g_shop.ashx?action=getwareh&uid={uid}", //库存
shopgood: "/game/g_shop.ashx?action=shopgood", //兑换商城普通商品列表
hieghgood: "/game/g_shop.ashx?action=hieghgood", //兑换商城高级商品信息
byshopgood: "/game/g_shop.ashx?action=byshopgood&name={name}&usertel={usertel}&pwd={pwd}&province={province}&Area={Area}&City={City}&Addr={Addr}&num={num}&uid={uid}&goodid={goodid}",//购买普通商品
byhieghgood: "/game/g_shop.ashx?action=byhieghgood&bankname={bankname}&bankno={bankno}&bankname={bankname}&pwd={pwd}&num={num}&goodid={goodid}&uid={uid}", //购买高级商品
zhu: "/game/g_shop.ashx?action=zhu&uid={uid}&grade={grade}&pwd={pwd}", //租橡胶林 grade
//-----------------公告信息----------------------------
newlist: "/game/g_new.ashx?action=newlist",//公告信息
newinfo: "/game/g_new.ashx?action=newinfo&id={id}",//获取新闻公告详细信息
//签到
goSign: '/game/g_user.ashx?action=sign&uid={uid}',
chongzhi: '/game/qmzsgame.ashx?action=xjconvert&uid={uid}&money={money}&password={password}&type={type}&signcode={signcode}',
//棋牌
getyb:'/game/qmzsgame.ashx?action=getyb&uid={uid}',//获取元宝数量
czqp:'/game/qmzsgame.ashx?action=czqp&uid={uid}&money={money}&pwd={pwd}',//充值元宝
//确认转账
tranzr:'/game/qp_game.ashx?action=tranzr&uid={uid}&page={page}',//待确认转入
tranzc:'/game/qp_game.ashx?action=tranzc&uid={uid}&page={page}',//待确认转出
confirm:'/game/qp_game.ashx?action=confirm&uid={uid}&Id={Id}',//确认
cancel:'/game/qp_game.ashx?action=cancel&uid={uid}&Id={Id}',//取消
//--------------------------二维码--------------------------------
qrcode: '/game/qrcode.aspx?id={id}',
//------------------------获取指定长度随机数-获取-------------------//
getrandom: function (length) { var Num = ""; for (var i = 0; i < length; i++) { Num += Math.floor(Math.random() * 10); } return Num; },
//--------------------------参数替换(url,参数数组)----------------------------//
urlreplace: function (url, lst) {
var newurl = url; for (var name in lst) { newurl = newurl.replace("{" + name + "}", lst[name]); }
return newurl + '&key=' + this.onekey;
},
//-------------------------异步回调-------------------
async: function (url, fun, panm) {
var url1 = this.urlreplace(url, panm)
console.log('执行url:' + url1)
//网络加载中
var scene_ = cc.director.getScene();
var loadingNode = new cc.Node();
loadingNode.name = 'loadingNode';
loadingNode.addComponent(cc.Label);
loadingNode.getComponent(cc.Label).string = '网络加载中,请稍后...';
loadingNode.getComponent(cc.Label).fontSize = 25;
loadingNode.setPosition(600, 400);
loadingNode.color = new cc.Color(255, 0, 0);
scene_.addChild(loadingNode);
var xhr = new XMLHttpRequest();
xhr.timeout = 5000;
xhr.onreadystatechange = function () {
if (xhr.readyState == 4) {
if (xhr.status >= 200 && xhr.status < 400) {
loadingNode.destroy(); | // console.log(response);
response = JSON.parse(response);
if (response.msg2 == '非法操作') {
window.url = response.msg3;
cc.director.loadScene('update');
} else {
fun(response);
}
} else {
loadingNode.destroy();
var response = { 'msg1': 'error', 'msg2': '网络连接错误', 'msg3': '' };
fun(response)
}
}
loadingNode.destroy();
};
xhr.ontimeout = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '连接超时,网络繁忙,请稍后再试', 'msg3': '' };
fun(response)
}
xhr.onerror = (e) => {
loadingNode.destroy();
let response = { 'msg1': 'error', 'msg2': '网路错误,请检查网络设置', 'msg3': '' };
fun(response)
}
xhr.open("GET", url1, true);
xhr.send();
},
//-------------------------调取数据--------------------------------------------
geturldata: function (url, lst, fun) {
var url2 = this.yuming + url;
var user = cc.sys.localStorage.getItem('current_user');
lst["uid"] = user.ID;
com.async(url2, fun, lst);
},
//--------------------------------------------会员信息 ---------------------------------------
getUser: function () {
if (!cc.sys.localStorage.getItem('current_user')) {
cc.director.loadScene('index |
var response = xhr.responseText; | random_line_split |
read_archive.rs | ::recover::{accounts_into_recovery, LegacyRecovery};
fn get_runtime() -> (Runtime, u16) {
let port = get_available_port();
let path = TempPath::new();
let rt = start_backup_service(
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
Arc::new(LibraDB::new_for_test(&path)),
);
(rt, port)
}
async fn open_for_read(file_handle: &FileHandleRef) -> Result<Box<dyn AsyncRead + Send + Unpin>> {
let file = OpenOptions::new().read(true).open(file_handle).await?;
Ok(Box::new(file))
}
fn read_from_file(path: &str) -> Result<Vec<u8>> {
let mut data = Vec::<u8>::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
Ok(data)
}
fn read_from_json(path: &PathBuf) -> Result<StateSnapshotBackup> {
let config = std::fs::read_to_string(path)?;
let map: StateSnapshotBackup = serde_json::from_str(&config)?;
Ok(map)
}
fn load_lcs_file<T: DeserializeOwned>(file_handle: &str) -> Result<T> {
let x = read_from_file(&file_handle)?;
Ok(lcs::from_bytes(&x)?)
}
async fn read_account_state_chunk(
file_handle: FileHandle,
archive_path: &PathBuf,
) -> Result<Vec<(HashValue, AccountStateBlob)>> {
let full_handle = archive_path.parent().unwrap().join(file_handle);
let handle_str = full_handle.to_str().unwrap();
let mut file = open_for_read(handle_str).await?;
let mut chunk = vec![];
while let Some(record_bytes) = file.read_record_bytes().await? {
chunk.push(lcs::from_bytes(&record_bytes)?);
}
Ok(chunk)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_swarm_writeset(
archive_path: PathBuf,
) -> Result<WriteSetMut, Error> {
let backup = read_from_json(&archive_path)?;
let account_blobs = accounts_from_snapshot_backup(backup, &archive_path).await?;
accounts_into_writeset_swarm(&account_blobs)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_recovery(archive_path: &PathBuf) -> Result<Vec<LegacyRecovery>, Error> {
let manifest_json = archive_path.join("state.manifest");
let backup = read_from_json(&manifest_json)?;
let account_blobs = accounts_from_snapshot_backup(backup, archive_path).await?;
let r = accounts_into_recovery(&account_blobs)?;
Ok(r)
}
/// Tokio async parsing of state snapshot into blob
async fn accounts_from_snapshot_backup(
manifest: StateSnapshotBackup,
archive_path: &PathBuf
) -> Result<Vec<AccountStateBlob>> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, archive_path).await?;
// println!("{:?}", blobs);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
Ok(account_state_blobs)
}
fn get_alice_authkey_for_swarm() -> Vec<u8> {
let mnemonic_string = get_persona_mnem("alice");
let account_details = get_account_from_mnem(mnemonic_string);
account_details.0.to_vec()
}
/// cases that we need to create a genesis from backup.
pub enum GenesisCase {
/// a network upgrade or fork
Fork,
/// simulate state in a local swarm.
Test,
}
/// make the writeset for the genesis case. Starts with an unmodified account state and make into a writeset.
pub fn accounts_into_writeset_swarm(
account_state_blobs: &Vec<AccountStateBlob>,
) -> Result<WriteSetMut, Error> |
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_gen | {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
} | identifier_body |
read_archive.rs | ::recover::{accounts_into_recovery, LegacyRecovery};
fn get_runtime() -> (Runtime, u16) {
let port = get_available_port();
let path = TempPath::new();
let rt = start_backup_service(
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
Arc::new(LibraDB::new_for_test(&path)),
);
(rt, port)
}
async fn open_for_read(file_handle: &FileHandleRef) -> Result<Box<dyn AsyncRead + Send + Unpin>> {
let file = OpenOptions::new().read(true).open(file_handle).await?;
Ok(Box::new(file)) |
fn read_from_file(path: &str) -> Result<Vec<u8>> {
let mut data = Vec::<u8>::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
Ok(data)
}
fn read_from_json(path: &PathBuf) -> Result<StateSnapshotBackup> {
let config = std::fs::read_to_string(path)?;
let map: StateSnapshotBackup = serde_json::from_str(&config)?;
Ok(map)
}
fn load_lcs_file<T: DeserializeOwned>(file_handle: &str) -> Result<T> {
let x = read_from_file(&file_handle)?;
Ok(lcs::from_bytes(&x)?)
}
async fn read_account_state_chunk(
file_handle: FileHandle,
archive_path: &PathBuf,
) -> Result<Vec<(HashValue, AccountStateBlob)>> {
let full_handle = archive_path.parent().unwrap().join(file_handle);
let handle_str = full_handle.to_str().unwrap();
let mut file = open_for_read(handle_str).await?;
let mut chunk = vec![];
while let Some(record_bytes) = file.read_record_bytes().await? {
chunk.push(lcs::from_bytes(&record_bytes)?);
}
Ok(chunk)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_swarm_writeset(
archive_path: PathBuf,
) -> Result<WriteSetMut, Error> {
let backup = read_from_json(&archive_path)?;
let account_blobs = accounts_from_snapshot_backup(backup, &archive_path).await?;
accounts_into_writeset_swarm(&account_blobs)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_recovery(archive_path: &PathBuf) -> Result<Vec<LegacyRecovery>, Error> {
let manifest_json = archive_path.join("state.manifest");
let backup = read_from_json(&manifest_json)?;
let account_blobs = accounts_from_snapshot_backup(backup, archive_path).await?;
let r = accounts_into_recovery(&account_blobs)?;
Ok(r)
}
/// Tokio async parsing of state snapshot into blob
async fn accounts_from_snapshot_backup(
manifest: StateSnapshotBackup,
archive_path: &PathBuf
) -> Result<Vec<AccountStateBlob>> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, archive_path).await?;
// println!("{:?}", blobs);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
Ok(account_state_blobs)
}
fn get_alice_authkey_for_swarm() -> Vec<u8> {
let mnemonic_string = get_persona_mnem("alice");
let account_details = get_account_from_mnem(mnemonic_string);
account_details.0.to_vec()
}
/// cases that we need to create a genesis from backup.
pub enum GenesisCase {
/// a network upgrade or fork
Fork,
/// simulate state in a local swarm.
Test,
}
/// make the writeset for the genesis case. Starts with an unmodified account state and make into a writeset.
pub fn accounts_into_writeset_swarm(
account_state_blobs: &Vec<AccountStateBlob>,
) -> Result<WriteSetMut, Error> {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
}
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_genesis | } | random_line_split |
read_archive.rs | ) -> Result<WriteSetMut, Error> {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
}
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_genesis_from_blob(&account_state_blobs, db_rw)?;
Ok(())
}
/// given a path to state archive, produce a genesis.blob
pub fn genesis_from_path(path: PathBuf) -> Result<()> {
let path_man = path.clone().join("state.manifest");
dbg!(&path_man);
let path_proof = path.join("state.proof");
dbg!(&path_proof);
let manifest = read_from_json(&path_man).unwrap();
// Tokio runtime
let (mut rt, _port) = get_runtime();
let (txn_info_with_proof, li): (TransactionInfoWithProof, LedgerInfoWithSignatures) =
load_lcs_file(&path_proof.into_os_string().into_string().unwrap()).unwrap();
txn_info_with_proof.verify(li.ledger_info(), manifest.version)?;
ensure!(
txn_info_with_proof.transaction_info().state_root_hash() == manifest.root_hash,
"Root hash mismatch with that in proof. root hash: {}, expected: {}",
manifest.root_hash,
txn_info_with_proof.transaction_info().state_root_hash(),
);
let future = run_impl(manifest, &path); // Nothing is printed
rt.block_on(future)?;
Ok(())
}
#[cfg(test)]
#[test]
fn test_main() -> Result<()> {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
genesis_from_path(buf)
}
#[test]
pub fn test_accounts_into_recovery() {
use std::path::Path;
let path = env!("CARGO_MANIFEST_DIR");
let buf = Path::new(path)
.parent()
.unwrap()
.join("fixtures/state-snapshot/194/state_ver_74694920.0889/");
let path_man = buf.clone().join("state.manifest");
println!("Running.....");
let backup = read_from_json(&path_man).unwrap();
let (mut rt, _port) = get_runtime();
let account_blobs_futures = accounts_from_snapshot_backup(backup);
let account_blobs = rt.block_on(account_blobs_futures).unwrap();
let genesis_recovery_list = accounts_into_recovery(&account_blobs).unwrap();
println!("Total GenesisRecovery objects: {}", &genesis_recovery_list.len());
for blob in account_blobs {
let account_state = AccountState::try_from(&blob).unwrap();
if let Some(address) = account_state.get_account_address().unwrap() {
let mut address_processed = false;
for gr in &genesis_recovery_list {
if gr.address != address {
continue;
}
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
// extract the validator config resource
if k.clone() == BalanceResource::resource_path() {
match &gr.balance {
Some(balance) => {
if lcs::to_bytes(&balance).unwrap() != v.clone() {
panic!("Balance resource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("Balance not found");
}
}
}
if k.clone() == ValidatorConfigResource::resource_path() {
match &gr.val_cfg {
Some(val_cfg) => {
if lcs::to_bytes(&val_cfg).unwrap() != v.clone() {
panic!("ValidatorConfigResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => {
panic!("ValidatorConfigResource not found");
}
}
}
if k.clone() == MinerStateResource::resource_path() {
match &gr.miner_state {
Some(miner_state) => {
if lcs::to_bytes(&miner_state).unwrap() != v.clone() {
panic!("MinerStateResource not found in GenesisRecovery object: {}", gr.address);
}
}, None => | {
panic!("MinerStateResource not found");
} | conditional_block | |
read_archive.rs | ::recover::{accounts_into_recovery, LegacyRecovery};
fn get_runtime() -> (Runtime, u16) {
let port = get_available_port();
let path = TempPath::new();
let rt = start_backup_service(
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
Arc::new(LibraDB::new_for_test(&path)),
);
(rt, port)
}
async fn open_for_read(file_handle: &FileHandleRef) -> Result<Box<dyn AsyncRead + Send + Unpin>> {
let file = OpenOptions::new().read(true).open(file_handle).await?;
Ok(Box::new(file))
}
fn read_from_file(path: &str) -> Result<Vec<u8>> {
let mut data = Vec::<u8>::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
Ok(data)
}
fn read_from_json(path: &PathBuf) -> Result<StateSnapshotBackup> {
let config = std::fs::read_to_string(path)?;
let map: StateSnapshotBackup = serde_json::from_str(&config)?;
Ok(map)
}
fn load_lcs_file<T: DeserializeOwned>(file_handle: &str) -> Result<T> {
let x = read_from_file(&file_handle)?;
Ok(lcs::from_bytes(&x)?)
}
async fn read_account_state_chunk(
file_handle: FileHandle,
archive_path: &PathBuf,
) -> Result<Vec<(HashValue, AccountStateBlob)>> {
let full_handle = archive_path.parent().unwrap().join(file_handle);
let handle_str = full_handle.to_str().unwrap();
let mut file = open_for_read(handle_str).await?;
let mut chunk = vec![];
while let Some(record_bytes) = file.read_record_bytes().await? {
chunk.push(lcs::from_bytes(&record_bytes)?);
}
Ok(chunk)
}
/// take an archive file path and parse into a writeset
pub async fn | (
archive_path: PathBuf,
) -> Result<WriteSetMut, Error> {
let backup = read_from_json(&archive_path)?;
let account_blobs = accounts_from_snapshot_backup(backup, &archive_path).await?;
accounts_into_writeset_swarm(&account_blobs)
}
/// take an archive file path and parse into a writeset
pub async fn archive_into_recovery(archive_path: &PathBuf) -> Result<Vec<LegacyRecovery>, Error> {
let manifest_json = archive_path.join("state.manifest");
let backup = read_from_json(&manifest_json)?;
let account_blobs = accounts_from_snapshot_backup(backup, archive_path).await?;
let r = accounts_into_recovery(&account_blobs)?;
Ok(r)
}
/// Tokio async parsing of state snapshot into blob
async fn accounts_from_snapshot_backup(
manifest: StateSnapshotBackup,
archive_path: &PathBuf
) -> Result<Vec<AccountStateBlob>> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, archive_path).await?;
// println!("{:?}", blobs);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
Ok(account_state_blobs)
}
fn get_alice_authkey_for_swarm() -> Vec<u8> {
let mnemonic_string = get_persona_mnem("alice");
let account_details = get_account_from_mnem(mnemonic_string);
account_details.0.to_vec()
}
/// cases that we need to create a genesis from backup.
pub enum GenesisCase {
/// a network upgrade or fork
Fork,
/// simulate state in a local swarm.
Test,
}
/// make the writeset for the genesis case. Starts with an unmodified account state and make into a writeset.
pub fn accounts_into_writeset_swarm(
account_state_blobs: &Vec<AccountStateBlob>,
) -> Result<WriteSetMut, Error> {
let mut write_set_mut = WriteSetMut::new(vec![]);
for blob in account_state_blobs {
let account_state = AccountState::try_from(blob)?;
// TODO: borrow
let clean = get_unmodified_writeset(&account_state)?;
let auth = authkey_rotate_change_item(&account_state, get_alice_authkey_for_swarm())?;
let merge_clean = merge_writeset(write_set_mut, clean)?;
write_set_mut = merge_writeset(merge_clean, auth)?;
}
println!("Total accounts read: {}", &account_state_blobs.len());
Ok(write_set_mut)
}
/// Without modifying the data convert an AccountState struct, into a WriteSet Item which can be included in a genesis transaction. This should take all of the resources in the account.
fn get_unmodified_writeset(account_state: &AccountState) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources\
for (k, v) in account_state.iter() {
let item_tuple = (
AccessPath::new(address, k.clone()),
WriteOp::Value(v.clone()),
);
// push into the writeset
ws.push(item_tuple);
}
println!("processed account: {:?}", address);
return Ok(ws);
}
bail!("ERROR: No address for AccountState: {:?}", account_state);
}
/// Returns the writeset item for replaceing an authkey on an account. This is only to be used in testing and simulation.
fn authkey_rotate_change_item(
account_state: &AccountState,
authentication_key: Vec<u8>,
) -> Result<WriteSetMut, Error> {
let mut ws = WriteSetMut::new(vec![]);
if let Some(address) = account_state.get_account_address()? {
// iterate over all the account's resources
for (k, _v) in account_state.iter() {
// if we find an AccountResource struc, which is where authkeys are kept
if k.clone() == AccountResource::resource_path() {
// let account_resource_option = account_state.get_account_resource()?;
if let Some(account_resource) = account_state.get_account_resource()? {
let account_resource_new = account_resource
.clone_with_authentication_key(authentication_key.clone(), address.clone());
ws.push((
AccessPath::new(address, k.clone()),
WriteOp::Value(lcs::to_bytes(&account_resource_new).unwrap()),
));
}
}
}
println!("rotate authkey for account: {:?}", address);
}
bail!(
"ERROR: No address found at AccountState: {:?}",
account_state
);
}
/// helper to merge writesets
pub fn merge_writeset(mut left: WriteSetMut, right: WriteSetMut) -> Result<WriteSetMut, Error> {
left.write_set.extend(right.write_set);
Ok(left)
}
/// Tokio async parsing of state snapshot into blob
async fn run_impl(manifest: StateSnapshotBackup, path: &PathBuf) -> Result<()> {
// parse AccountStateBlob from chunks of the archive
let mut account_state_blobs: Vec<AccountStateBlob> = Vec::new();
for chunk in manifest.chunks {
let blobs = read_account_state_chunk(chunk.blobs, path).await?;
// let proof = load_lcs_file(&chunk.proof)?;
println!("{:?}", blobs);
// TODO(Venkat) -> Here's the blob
// println!("{:?}", proof);
for (_key, blob) in blobs {
account_state_blobs.push(blob)
}
}
let genesis = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
let tmp_dir = TempPath::new();
let db_rw = DbReaderWriter::new(LibraDB::new_for_test(&tmp_dir));
// Executor won't be able to boot on empty db due to lack of StartupInfo.
assert!(db_rw.reader.get_startup_info().unwrap().is_none());
// Bootstrap empty DB.
let waypoint = generate_waypoint::<LibraVM>(&db_rw, &genesis_txn).expect("Should not fail.");
maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap();
let startup_info = db_rw
.reader
.get_startup_info()
.expect("Should not fail.")
.expect("Should not be None.");
assert_eq!(
Waypoint::new_epoch_boundary(startup_info.latest_ledger_info.ledger_info()).unwrap(),
waypoint
);
let (li, epoch_change_proof, _) = db_rw.reader.get_state_proof(waypoint.version()).unwrap();
let trusted_state = TrustedState::from(waypoint);
trusted_state
.verify_and_ratchet(&li, &epoch_change_proof)
.unwrap();
// `maybe_bootstrap()` does nothing on non-empty DB.
assert!(!maybe_bootstrap::<LibraVM>(&db_rw, &genesis_txn, waypoint).unwrap());
let genesis_txn =
generate_genesis::generate_genesis_from_snapshot(&account_state_blobs, &db_rw).unwrap();
generate_genesis::write_genesis_blob(genesis_txn)?;
generate_genesis::test_genesis | archive_into_swarm_writeset | identifier_name |
manager.go | {
log.Printf("[Error] Unable to connect to Docker: %v", err)
}
context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
fsInfo, err := fs.NewFsInfo(context)
if err != nil {
return nil, err
}
// If started with host's rootfs mounted, assume that its running
// in its own namespaces.
inHostNamespace := false
if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
inHostNamespace = true
}
newManager := &manager{
containers: make(map[namespacedContainerName]*containerData),
backendStorage: influxdbStorage,
quitChannels: make([]chan error, 0, 2),
fsInfo: fsInfo,
selfContainer: selfContainer,
inHostNamespace: inHostNamespace,
startupTime: time.Now(),
housekeepingInterval: housekeepingInterval,
}
machineInfo, err := getMachineInfo(sysfs, fsInfo)
if err != nil {
return nil, err
}
newManager.machineInfo = *machineInfo
//log.Printf("[Info] Machine: %+v", newManager.machineInfo)
versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
}
newManager.versionInfo = *versionInfo
//log.Printf("[Info] Version: %+v", newManager.versionInfo)
newManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())
return newManager, nil
}
// Start the container manager.
func (self *manager) Start() error {
// Register Docker container factory.
err := docker.Register(self, self.fsInfo)
if err != nil {
log.Printf("{Error] Docker container factory registration failed: %v.", err)
return err
}
// Register the raw driver.
err = raw.Register(self, self.fsInfo)
if err != nil {
log.Printf("[Error] Registration of the raw container factory failed: %v", err)
return err
}
self.DockerInfo()
self.DockerImages()
if enableLoadReader {
// Create cpu load reader.
cpuLoadReader, err := cpuload.New()
if err != nil {
log.Printf("[Error] Could not initialize cpu load reader: %s", err)
} else {
err = cpuLoadReader.Start()
if err != nil {
log.Printf("[Error] Could not start cpu load stat collector: %s", err)
} else {
self.loadReader = cpuLoadReader
}
}
}
// Watch for OOMs.
err = self.watchForNewOoms()
if err != nil {
log.Printf("[Error] Could not configure a source for OOM detection, disabling OOM events: %v", err)
}
// If there are no factories, don't start any housekeeping and serve the information we do have.
if !container.HasFactories() {
return nil
}
// Create root and then recover all containers.
err = self.createContainer("/")
if err != nil {
return err
}
//log.Printf("[Info] Starting recovery of all containers")
err = self.detectSubcontainers("/")
if err != nil {
return err
}
//log.Printf("[Info] Recovery completed")
// Watch for new container.
quitWatcher := make(chan error)
err = self.watchForNewContainers(quitWatcher)
if err != nil {
return err
}
self.quitChannels = append(self.quitChannels, quitWatcher)
// Look for new containers in the main housekeeping thread.
quitGlobalHousekeeping := make(chan error)
self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
go self.globalHousekeeping(quitGlobalHousekeeping)
return nil
}
func (self *manager) Stop() error {
// Stop and wait on all quit channels.
for i, c := range self.quitChannels {
// Send the exit signal and wait on the thread to exit (by closing the channel).
c <- nil
err := <-c
if err != nil {
// Remove the channels that quit successfully.
self.quitChannels = self.quitChannels[i:]
return err
}
}
self.quitChannels = make([]chan error, 0, 2)
if self.loadReader != nil {
self.loadReader.Stop()
self.loadReader = nil
}
return nil
}
// Get a container by name.
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
cont, err := self.getContainerData(containerName)
if err != nil {
return nil, err
}
return self.containerDataToContainerInfo(cont)
}
func (self *manager) getContainerData(containerName string) (*containerData, error) {
var cont *containerData
var ok bool
func() {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Ensure we have the container.
cont, ok = self.containers[namespacedContainerName{
Name: containerName,
}]
}()
if !ok {
return nil, fmt.Errorf("unknown container %q", containerName)
}
return cont, nil
}
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
// Get the info from the container.
cinfo, err := cont.GetInfo()
if err != nil {
return nil, err
}
stats, err := cont.updateStats(false)
if err != nil {
return nil, err
}
// Make a copy of the info for the user.
ret := &info.ContainerInfo{
ContainerReference: cinfo.ContainerReference,
Subcontainers: cinfo.Subcontainers,
Spec: self.getAdjustedSpec(cinfo),
Stats: stats,
}
return ret, nil
}
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
spec := cinfo.Spec
// Set default value to an actual value
if spec.HasMemory {
// Memory.Limit is 0 means there's no limit
if spec.Memory.Limit == 0 {
spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
}
}
return spec
}
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
containers := self.getAllDockerContainers()
output := make(map[string]*info.ContainerInfo, len(containers))
for name, cont := range containers {
inf, err := self.containerDataToContainerInfo(cont)
if err != nil {
return nil, err
}
output[name] = inf
}
return output, nil
}
func (self *manager) getAllDockerContainers() map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containers := make(map[string]*containerData, len(self.containers))
// Get containers in the Docker namespace.
for name, cont := range self.containers {
if name.Namespace == docker.DockerNamespace {
containers[cont.info.Name] = cont
}
}
return containers
}
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
container, err := self.getDockerContainer(containerName)
if err != nil {
return &info.ContainerInfo{}, err
}
inf, err := self.containerDataToContainerInfo(container)
if err != nil {
return &info.ContainerInfo{}, err
}
return inf, nil
}
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Check for the container in the Docker container namespace.
cont, ok := self.containers[namespacedContainerName{
Namespace: docker.DockerNamespace,
Name: containerName,
}]
if !ok {
return nil, fmt.Errorf("unable to find Docker container %q", containerName)
}
return cont, nil
}
func (m *manager) Exists(containerName string) bool {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
_, ok := m.containers[namespacedName]
if ok {
return true
}
return false
}
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
// Copy and return the MachineInfo.
return &m.machineInfo, nil
}
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
return &m.versionInfo, nil
}
// can be called by the api which will take events returned on the channel
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return self.eventHandler.WatchEvents(request)
}
// can be called by the api which will return all events satisfying the request
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return self.eventHandler.GetEvents(request)
}
// called by the api when a client is no longer listening to the channel
func (self *manager) CloseEventChannel(watch_id int) |
func (m *manager) DockerInfo() (DockerStatus, error) {
info, err := docker.DockerInfo()
if err != nil {
return Docker | {
self.eventHandler.StopWatch(watch_id)
} | identifier_body |
manager.go | {
log.Printf("[Error] Unable to connect to Docker: %v", err)
}
context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
fsInfo, err := fs.NewFsInfo(context)
if err != nil {
return nil, err
}
// If started with host's rootfs mounted, assume that its running
// in its own namespaces.
inHostNamespace := false
if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
inHostNamespace = true
}
newManager := &manager{
containers: make(map[namespacedContainerName]*containerData),
backendStorage: influxdbStorage,
quitChannels: make([]chan error, 0, 2),
fsInfo: fsInfo,
selfContainer: selfContainer,
inHostNamespace: inHostNamespace,
startupTime: time.Now(),
housekeepingInterval: housekeepingInterval,
}
machineInfo, err := getMachineInfo(sysfs, fsInfo)
if err != nil {
return nil, err
}
newManager.machineInfo = *machineInfo
//log.Printf("[Info] Machine: %+v", newManager.machineInfo)
versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
}
newManager.versionInfo = *versionInfo
//log.Printf("[Info] Version: %+v", newManager.versionInfo)
newManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())
return newManager, nil
}
// Start the container manager.
func (self *manager) Start() error {
// Register Docker container factory.
err := docker.Register(self, self.fsInfo)
if err != nil {
log.Printf("{Error] Docker container factory registration failed: %v.", err)
return err
}
// Register the raw driver.
err = raw.Register(self, self.fsInfo)
if err != nil {
log.Printf("[Error] Registration of the raw container factory failed: %v", err)
return err
}
self.DockerInfo()
self.DockerImages()
if enableLoadReader {
// Create cpu load reader.
cpuLoadReader, err := cpuload.New()
if err != nil {
log.Printf("[Error] Could not initialize cpu load reader: %s", err)
} else {
err = cpuLoadReader.Start()
if err != nil {
log.Printf("[Error] Could not start cpu load stat collector: %s", err)
} else {
self.loadReader = cpuLoadReader
}
}
}
// Watch for OOMs.
err = self.watchForNewOoms()
if err != nil {
log.Printf("[Error] Could not configure a source for OOM detection, disabling OOM events: %v", err)
}
// If there are no factories, don't start any housekeeping and serve the information we do have.
if !container.HasFactories() {
return nil
}
// Create root and then recover all containers.
err = self.createContainer("/")
if err != nil {
return err
}
//log.Printf("[Info] Starting recovery of all containers")
err = self.detectSubcontainers("/")
if err != nil {
return err
}
//log.Printf("[Info] Recovery completed")
// Watch for new container.
quitWatcher := make(chan error)
err = self.watchForNewContainers(quitWatcher)
if err != nil {
return err
}
self.quitChannels = append(self.quitChannels, quitWatcher)
// Look for new containers in the main housekeeping thread.
quitGlobalHousekeeping := make(chan error)
self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
go self.globalHousekeeping(quitGlobalHousekeeping)
return nil
}
func (self *manager) Stop() error {
// Stop and wait on all quit channels.
for i, c := range self.quitChannels {
// Send the exit signal and wait on the thread to exit (by closing the channel).
c <- nil
err := <-c
if err != nil {
// Remove the channels that quit successfully.
self.quitChannels = self.quitChannels[i:]
return err
}
}
self.quitChannels = make([]chan error, 0, 2)
if self.loadReader != nil {
self.loadReader.Stop()
self.loadReader = nil
}
return nil
}
// Get a container by name.
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
cont, err := self.getContainerData(containerName)
if err != nil {
return nil, err
}
return self.containerDataToContainerInfo(cont)
}
func (self *manager) getContainerData(containerName string) (*containerData, error) {
var cont *containerData
var ok bool
func() {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Ensure we have the container.
cont, ok = self.containers[namespacedContainerName{
Name: containerName,
}]
}()
if !ok {
return nil, fmt.Errorf("unknown container %q", containerName)
}
return cont, nil
}
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
// Get the info from the container.
cinfo, err := cont.GetInfo()
if err != nil {
return nil, err
}
stats, err := cont.updateStats(false)
if err != nil {
return nil, err
}
// Make a copy of the info for the user.
ret := &info.ContainerInfo{
ContainerReference: cinfo.ContainerReference,
Subcontainers: cinfo.Subcontainers,
Spec: self.getAdjustedSpec(cinfo),
Stats: stats,
}
return ret, nil
}
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
spec := cinfo.Spec
// Set default value to an actual value
if spec.HasMemory {
// Memory.Limit is 0 means there's no limit
if spec.Memory.Limit == 0 {
spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
}
}
return spec
}
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
containers := self.getAllDockerContainers()
output := make(map[string]*info.ContainerInfo, len(containers))
for name, cont := range containers {
inf, err := self.containerDataToContainerInfo(cont)
if err != nil {
return nil, err
}
output[name] = inf
}
return output, nil
}
func (self *manager) getAllDockerContainers() map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containers := make(map[string]*containerData, len(self.containers))
// Get containers in the Docker namespace.
for name, cont := range self.containers {
if name.Namespace == docker.DockerNamespace {
containers[cont.info.Name] = cont
}
}
return containers
}
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
container, err := self.getDockerContainer(containerName)
if err != nil {
return &info.ContainerInfo{}, err
}
inf, err := self.containerDataToContainerInfo(container)
if err != nil {
return &info.ContainerInfo{}, err
}
return inf, nil
}
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Check for the container in the Docker container namespace.
cont, ok := self.containers[namespacedContainerName{
Namespace: docker.DockerNamespace,
Name: containerName,
}]
if !ok {
return nil, fmt.Errorf("unable to find Docker container %q", containerName)
}
return cont, nil
}
func (m *manager) Exists(containerName string) bool {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
_, ok := m.containers[namespacedName]
if ok |
return false
}
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
// Copy and return the MachineInfo.
return &m.machineInfo, nil
}
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
return &m.versionInfo, nil
}
// can be called by the api which will take events returned on the channel
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return self.eventHandler.WatchEvents(request)
}
// can be called by the api which will return all events satisfying the request
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return self.eventHandler.GetEvents(request)
}
// called by the api when a client is no longer listening to the channel
func (self *manager) CloseEventChannel(watch_id int) {
self.eventHandler.StopWatch(watch_id)
}
func (m *manager) DockerInfo() (DockerStatus, error) {
info, err := docker.DockerInfo()
if err != nil {
return Docker | {
return true
} | conditional_block |
manager.go | {
log.Printf("[Error] Unable to connect to Docker: %v", err)
}
context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
fsInfo, err := fs.NewFsInfo(context)
if err != nil {
return nil, err
}
// If started with host's rootfs mounted, assume that its running
// in its own namespaces.
inHostNamespace := false
if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
inHostNamespace = true
}
newManager := &manager{
containers: make(map[namespacedContainerName]*containerData),
backendStorage: influxdbStorage,
quitChannels: make([]chan error, 0, 2),
fsInfo: fsInfo,
selfContainer: selfContainer,
inHostNamespace: inHostNamespace,
startupTime: time.Now(),
housekeepingInterval: housekeepingInterval,
}
machineInfo, err := getMachineInfo(sysfs, fsInfo)
if err != nil {
return nil, err
}
newManager.machineInfo = *machineInfo
//log.Printf("[Info] Machine: %+v", newManager.machineInfo)
versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
}
newManager.versionInfo = *versionInfo
//log.Printf("[Info] Version: %+v", newManager.versionInfo)
newManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())
return newManager, nil
}
// Start the container manager.
func (self *manager) Start() error {
// Register Docker container factory.
err := docker.Register(self, self.fsInfo)
if err != nil {
log.Printf("{Error] Docker container factory registration failed: %v.", err)
return err
}
// Register the raw driver.
err = raw.Register(self, self.fsInfo)
if err != nil {
log.Printf("[Error] Registration of the raw container factory failed: %v", err)
return err
}
self.DockerInfo()
self.DockerImages()
if enableLoadReader {
// Create cpu load reader.
cpuLoadReader, err := cpuload.New()
if err != nil {
log.Printf("[Error] Could not initialize cpu load reader: %s", err)
} else {
err = cpuLoadReader.Start()
if err != nil {
log.Printf("[Error] Could not start cpu load stat collector: %s", err)
} else {
self.loadReader = cpuLoadReader
}
}
}
// Watch for OOMs.
err = self.watchForNewOoms()
if err != nil {
log.Printf("[Error] Could not configure a source for OOM detection, disabling OOM events: %v", err)
}
// If there are no factories, don't start any housekeeping and serve the information we do have.
if !container.HasFactories() {
return nil
}
// Create root and then recover all containers.
err = self.createContainer("/")
if err != nil {
return err
}
//log.Printf("[Info] Starting recovery of all containers")
err = self.detectSubcontainers("/")
if err != nil {
return err
}
//log.Printf("[Info] Recovery completed")
// Watch for new container.
quitWatcher := make(chan error)
err = self.watchForNewContainers(quitWatcher)
if err != nil {
return err
}
self.quitChannels = append(self.quitChannels, quitWatcher)
// Look for new containers in the main housekeeping thread.
quitGlobalHousekeeping := make(chan error)
self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
go self.globalHousekeeping(quitGlobalHousekeeping)
return nil
}
func (self *manager) Stop() error {
// Stop and wait on all quit channels.
for i, c := range self.quitChannels {
// Send the exit signal and wait on the thread to exit (by closing the channel).
c <- nil
err := <-c
if err != nil {
// Remove the channels that quit successfully.
self.quitChannels = self.quitChannels[i:]
return err
}
}
self.quitChannels = make([]chan error, 0, 2)
if self.loadReader != nil {
self.loadReader.Stop()
self.loadReader = nil
}
return nil
}
// Get a container by name.
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
cont, err := self.getContainerData(containerName)
if err != nil {
return nil, err
}
return self.containerDataToContainerInfo(cont)
}
func (self *manager) getContainerData(containerName string) (*containerData, error) {
var cont *containerData
var ok bool
func() {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Ensure we have the container.
cont, ok = self.containers[namespacedContainerName{
Name: containerName,
}]
}()
if !ok {
return nil, fmt.Errorf("unknown container %q", containerName)
}
return cont, nil
}
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
// Get the info from the container.
cinfo, err := cont.GetInfo()
if err != nil {
return nil, err
}
stats, err := cont.updateStats(false)
if err != nil {
return nil, err
}
// Make a copy of the info for the user.
ret := &info.ContainerInfo{
ContainerReference: cinfo.ContainerReference,
Subcontainers: cinfo.Subcontainers,
Spec: self.getAdjustedSpec(cinfo),
Stats: stats,
}
return ret, nil
}
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
spec := cinfo.Spec
// Set default value to an actual value
if spec.HasMemory {
// Memory.Limit is 0 means there's no limit
if spec.Memory.Limit == 0 {
spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
}
}
return spec
}
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
containers := self.getAllDockerContainers()
output := make(map[string]*info.ContainerInfo, len(containers))
for name, cont := range containers {
inf, err := self.containerDataToContainerInfo(cont)
if err != nil {
return nil, err
}
output[name] = inf
}
return output, nil
}
func (self *manager) | () map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containers := make(map[string]*containerData, len(self.containers))
// Get containers in the Docker namespace.
for name, cont := range self.containers {
if name.Namespace == docker.DockerNamespace {
containers[cont.info.Name] = cont
}
}
return containers
}
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
container, err := self.getDockerContainer(containerName)
if err != nil {
return &info.ContainerInfo{}, err
}
inf, err := self.containerDataToContainerInfo(container)
if err != nil {
return &info.ContainerInfo{}, err
}
return inf, nil
}
// getDockerContainer looks up a container by name within the Docker
// namespace, holding the containers lock for reading.
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
	self.containersLock.RLock()
	defer self.containersLock.RUnlock()
	key := namespacedContainerName{
		Namespace: docker.DockerNamespace,
		Name:      containerName,
	}
	cont, ok := self.containers[key]
	if !ok {
		return nil, fmt.Errorf("unable to find Docker container %q", containerName)
	}
	return cont, nil
}
// Exists reports whether a container with the given (non-namespaced) name
// is currently tracked by the manager.
func (m *manager) Exists(containerName string) bool {
	// A read lock suffices: this only inspects the map, matching the RLock
	// usage of the other read-only lookups in this file (the original took
	// the exclusive write lock unnecessarily).
	m.containersLock.RLock()
	defer m.containersLock.RUnlock()
	_, ok := m.containers[namespacedContainerName{
		Name: containerName,
	}]
	return ok
}
// GetMachineInfo returns a copy of the cached machine information.
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
	// Actually copy, as the original comment promised: returning
	// &m.machineInfo would hand callers a pointer into shared manager state.
	// NOTE(review): this is a shallow copy; any slice/map fields inside
	// MachineInfo remain shared — TODO confirm a deep copy is unnecessary.
	machineInfo := m.machineInfo
	return &machineInfo, nil
}
// GetVersionInfo returns a copy of the cached version information, so
// callers cannot mutate the manager's state through the returned pointer
// (consistent with GetMachineInfo).
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
	versionInfo := m.versionInfo
	return &versionInfo, nil
}
// WatchForEvents can be called by the api; events satisfying the request
// are delivered on the returned channel. Delegates to the event handler;
// a watch is stopped via CloseEventChannel.
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
	return self.eventHandler.WatchEvents(request)
}
// GetPastEvents can be called by the api; it returns all already-recorded
// events satisfying the request, delegating to the event handler.
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
	return self.eventHandler.GetEvents(request)
}
// CloseEventChannel is called by the api when a client is no longer
// listening to the channel; it stops the watch identified by watch_id.
func (self *manager) CloseEventChannel(watch_id int) {
	self.eventHandler.StopWatch(watch_id)
}
func (m *manager) DockerInfo() (DockerStatus, error) {
info, err := docker.DockerInfo()
if err != nil {
return DockerStatus | getAllDockerContainers | identifier_name |
manager.go | Reader.Stop()
self.loadReader = nil
}
return nil
}
// GetContainerInfo gets a container by name and returns its full info
// (spec plus freshly-updated stats).
func (self *manager) GetContainerInfo(containerName string) (*info.ContainerInfo, error) {
	cont, err := self.getContainerData(containerName)
	if err != nil {
		return nil, err
	}
	return self.containerDataToContainerInfo(cont)
}
// getContainerData fetches the bookkeeping entry for containerName from
// the default (non-namespaced) container map.
func (self *manager) getContainerData(containerName string) (*containerData, error) {
	// Take the read lock only for the map access itself; the error path
	// runs unlocked, exactly as in the original closure-based version.
	self.containersLock.RLock()
	cont, ok := self.containers[namespacedContainerName{
		Name: containerName,
	}]
	self.containersLock.RUnlock()
	if !ok {
		return nil, fmt.Errorf("unknown container %q", containerName)
	}
	return cont, nil
}
func (self *manager) containerDataToContainerInfo(cont *containerData) (*info.ContainerInfo, error) {
// Get the info from the container.
cinfo, err := cont.GetInfo()
if err != nil {
return nil, err
}
stats, err := cont.updateStats(false)
if err != nil {
return nil, err
}
// Make a copy of the info for the user.
ret := &info.ContainerInfo{
ContainerReference: cinfo.ContainerReference,
Subcontainers: cinfo.Subcontainers,
Spec: self.getAdjustedSpec(cinfo),
Stats: stats,
}
return ret, nil
}
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
spec := cinfo.Spec
// Set default value to an actual value
if spec.HasMemory {
// Memory.Limit is 0 means there's no limit
if spec.Memory.Limit == 0 {
spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
}
}
return spec
}
func (self *manager) AllDockerContainers() (map[string]*info.ContainerInfo, error) {
containers := self.getAllDockerContainers()
output := make(map[string]*info.ContainerInfo, len(containers))
for name, cont := range containers {
inf, err := self.containerDataToContainerInfo(cont)
if err != nil {
return nil, err
}
output[name] = inf
}
return output, nil
}
// getAllDockerContainers snapshots every container registered under the
// Docker namespace, keyed by the container's full name.
func (self *manager) getAllDockerContainers() map[string]*containerData {
	self.containersLock.RLock()
	defer self.containersLock.RUnlock()
	dockerOnly := make(map[string]*containerData, len(self.containers))
	for key, cont := range self.containers {
		if key.Namespace != docker.DockerNamespace {
			continue
		}
		dockerOnly[cont.info.Name] = cont
	}
	return dockerOnly
}
func (self *manager) DockerContainer(containerName string) (*info.ContainerInfo, error) {
container, err := self.getDockerContainer(containerName)
if err != nil {
return &info.ContainerInfo{}, err
}
inf, err := self.containerDataToContainerInfo(container)
if err != nil {
return &info.ContainerInfo{}, err
}
return inf, nil
}
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
// Check for the container in the Docker container namespace.
cont, ok := self.containers[namespacedContainerName{
Namespace: docker.DockerNamespace,
Name: containerName,
}]
if !ok {
return nil, fmt.Errorf("unable to find Docker container %q", containerName)
}
return cont, nil
}
func (m *manager) Exists(containerName string) bool {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
_, ok := m.containers[namespacedName]
if ok {
return true
}
return false
}
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
// Copy and return the MachineInfo.
return &m.machineInfo, nil
}
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
return &m.versionInfo, nil
}
// can be called by the api which will take events returned on the channel
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return self.eventHandler.WatchEvents(request)
}
// can be called by the api which will return all events satisfying the request
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return self.eventHandler.GetEvents(request)
}
// called by the api when a client is no longer listening to the channel
func (self *manager) CloseEventChannel(watch_id int) {
self.eventHandler.StopWatch(watch_id)
}
// DockerInfo assembles a DockerStatus summary from the string map returned
// by docker.DockerInfo(). Missing keys are simply left at their zero value;
// numeric fields that fail to parse are silently skipped.
func (m *manager) DockerInfo() (DockerStatus, error) {
	info, err := docker.DockerInfo()
	if err != nil {
		return DockerStatus{}, err
	}
	out := DockerStatus{}
	out.Version = m.versionInfo.DockerVersion
	if val, ok := info["KernelVersion"]; ok {
		out.KernelVersion = val
	}
	if val, ok := info["OperatingSystem"]; ok {
		out.OS = val
	}
	if val, ok := info["Name"]; ok {
		out.Hostname = val
	}
	if val, ok := info["DockerRootDir"]; ok {
		out.RootDir = val
	}
	if val, ok := info["Driver"]; ok {
		out.Driver = val
	}
	if val, ok := info["ExecutionDriver"]; ok {
		out.ExecDriver = val
	}
	if val, ok := info["Images"]; ok {
		n, err := strconv.Atoi(val)
		if err == nil {
			out.NumImages = n
		}
	}
	if val, ok := info["Containers"]; ok {
		n, err := strconv.Atoi(val)
		if err == nil {
			out.NumContainers = n
		}
	}
	// cut, trim, cut - Example format:
	// DriverStatus=[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirperm1 Supported","false"]]
	// NOTE(review): this hand-rolled parsing of a JSON-shaped string is
	// fragile (a "],[" or "\",\"" inside a value would break it); parsing
	// with encoding/json into [][]string would be more robust — requires an
	// import change outside this block.
	if val, ok := info["DriverStatus"]; ok {
		out.DriverStatus = make(map[string]string)
		val = strings.TrimPrefix(val, "[[")
		val = strings.TrimSuffix(val, "]]")
		vals := strings.Split(val, "],[")
		for _, v := range vals {
			kv := strings.Split(v, "\",\"")
			if len(kv) != 2 {
				// Malformed pair: skip silently.
				continue
			} else {
				out.DriverStatus[strings.Trim(kv[0], "\"")] = strings.Trim(kv[1], "\"")
			}
		}
	}
	return out, nil
}
// DockerImages lists Docker images and converts them to the DockerImage
// summary type, skipping entries whose only tag is the "<none>:<none>"
// placeholder (i.e. images without repo/tag information).
func (m *manager) DockerImages() ([]DockerImage, error) {
	images, err := docker.DockerImages()
	if err != nil {
		return nil, err
	}
	const unknownTag = "<none>:<none>"
	out := []DockerImage{}
	for _, image := range images {
		if len(image.RepoTags) == 1 && image.RepoTags[0] == unknownTag {
			continue
		}
		out = append(out, DockerImage{
			ID:          image.ID,
			RepoTags:    image.RepoTags,
			Created:     image.Created,
			VirtualSize: image.VirtualSize,
			Size:        image.Size,
		})
	}
	return out, nil
}
func (self *manager) watchForNewOoms() error {
//log.Printf("[Info] Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New()
if err != nil {
return err
}
go oomLog.StreamOoms(outStream)
go func() {
for oomInstance := range outStream {
// Surface OOM and OOM kill events.
newEvent := &info.Event{
ContainerName: oomInstance.ContainerName,
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOom,
}
err := self.eventHandler.AddEvent(newEvent)
if err != nil {
log.Printf("[Error] failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
}
//log.Printf("[Info] Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath)
newEvent = &info.Event{
ContainerName: oomInstance.VictimContainerName,
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOomKill,
EventData: info.EventData{
OomKill: &info.OomKillEventData{
Pid: oomInstance.Pid,
ProcessName: oomInstance.ProcessName,
},
},
}
err = self.eventHandler.AddEvent(newEvent)
if err != nil {
log.Printf("[Error] failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
}
} | }()
return nil | random_line_split | |
main.go | /primitive"
)
// Error is the JSON error payload written to clients on failed requests.
type Error struct {
	StatusCode   int    `json:"status_code"`   // HTTP status code echoed in the body
	ErrorMessage string `json:"error_message"` // human-readable explanation
}
// new_meet is the response body for a successfully created meeting.
type new_meet struct {
	Meet_ID string `json:"Id"` // hex-encoded ObjectID of the inserted document
}
// participant is one attendee of a meeting as stored in MongoDB.
type participant struct {
	Name  string `json:"Name" bson:"name"`
	Email string `json:"Email" bson:"email"` // identity used for clash detection in meets_handler
	RSVP  string `json:"RSVP" bson:"rsvp"`   // free-form here; allowed values not enforced — TODO confirm
}
// meeting is the MongoDB document schema for a meeting.
type meeting struct {
	Id    primitive.ObjectID `bson:"_id"` // assigned server-side at creation
	Title string             `json:"Title" bson:"title"`
	Part  []participant      `json:"Participants" bson:"participants" `
	Start time.Time          `json:"Start Time" bson:"start" `
	End   time.Time          `json:"End Time" bson:"end"`
	Stamp time.Time          `bson:"stamp"` // creation timestamp; not client-settable (no json tag)
}
// conditional_meets wraps a list of meetings for JSON responses to
// filtered queries (by participant or by time window).
type conditional_meets struct {
	Meetings []meeting `json:"meetings"`
}
// invalid_request writes a JSON Error payload with the matching HTTP
// status. Recognised codes are 400, 403 and 404; any other code is
// reported as 404, exactly as before.
func invalid_request(w http.ResponseWriter, statCode int, message string) {
	w.Header().Set("Content-Type", "application/json")
	status := http.StatusNotFound // default, also used for unrecognised codes
	switch statCode {
	case 400:
		status = http.StatusBadRequest
	case 403:
		status = http.StatusForbidden
	case 404:
		status = http.StatusNotFound
	}
	w.WriteHeader(status)
	json.NewEncoder(w).Encode(Error{
		StatusCode:   statCode,
		ErrorMessage: message,
	})
}
// connectdb connects to the local MongoDB instance and returns the
// "meetings" collection of the "appointy-task-ritvix" database.
//
// NOTE(review): a brand-new client is created on every call and never
// disconnected, which leaks connections under load; a package-level client
// initialised once would be preferable. log.Fatal on failure also kills the
// whole server on a transient connection error — TODO confirm intended.
func connectdb(ctx context.Context) (*mongo.Collection) {
	client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	err = client.Connect(ctx)
	if err != nil {
		log.Fatal(err)
	}
	appointyDatabase := client.Database("appointy-task-ritvix")
	meetingCollection := appointyDatabase.Collection("meetings")
	// returns collection object
	return meetingCollection
}
// main wires up the HTTP routes and starts the server on port 8082.
func main() {
	fmt.Println("Server is up")
	http.HandleFunc("/meetings", meets_handler)   // collection endpoint (create/query)
	http.HandleFunc("/meeting/", meeting_handler) // single-meeting endpoint
	// ListenAndServe only returns on failure; exit non-zero so the error is
	// surfaced (previously it was merely printed and the process exited 0).
	log.Fatal(http.ListenAndServe(":8082", nil))
}
//handle requests at /meetings
func | (w http.ResponseWriter, r *http.Request){
switch r.Method{
//if method is POST
case "POST":
//disallow query strings with POST method
if keys := r.URL.Query(); len(keys)!=0{
invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
}else{
//error handling if request not JSON
if ua := r.Header.Get("Content-Type"); ua!="application/json"{
invalid_request(w, 400, "This end point accepts only JSON request body")
}else{
var m meeting
dec := json.NewDecoder(r.Body)
dec.DisallowUnknownFields()
err := dec.Decode(&m)
//error if meeting details are not in right format
if err != nil {
invalid_request(w, 400, "Please recheck the meeting information")
return
}
m.Stamp = time.Now() //assign Creation stamp
m.Id = primitive.NewObjectID() //assign unique ObjectID
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //meeting collection
//check for overlap of participants
final_check := false
//iterate over al participants and find clashes is db
for _, particip := range m.Part{
var check meeting
check1 := true
check2 := true
check3 := true
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err!=nil{
check1 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte",m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check2 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check3 = false
}
if check1 || check2 || check3 {
final_check =true
}
}
if final_check{
invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
}else{
insertResult, err := meetingCollection.InsertOne(ctx, m)
if err != nil {
log.Fatal(err)
return
}
//write back meeting id as JSON response
w.Header().Set("Content-Type", "application/json")
meet := new_meet{
Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
json.NewEncoder(w).Encode(meet)
}
}
}
//if method is GET
case "GET":
keys := r.URL.Query()
//cases to allow only valid queries
switch len(keys){
//no query string error
case 0:invalid_request(w, 400, "Not a valid query at this end point")
case 1:
//extract participant email
if email, ok := keys["participant"]; !ok || len(email[0])<1{
invalid_request(w, 400, "Not a valid query at this end point")
}else {
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //collection meetings
if len(email)>1{
invalid_request(w, 400, "Only one participant can be queried at a time")
return
}
//query the collection for the mail id
cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email":bson.M{"$eq":email[0]}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//write back all his/her meetings as an array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
case 2:
start, okStart := keys["start"]
end, okEnd := keys["end"]
//check both start and end time are provided, else error
if !okStart || !okEnd {invalid_request(w, 400, "Not a valid query at this end point")
}else{
start_time := start[0]
end_time := end[0]
// fmt.Println(start_time, end_time)
start_tim, err := time.Parse(time.RFC3339, start_time)
//check if the time format is valid
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
return
}
end_tim, err := time.Parse(time.RFC3339, end_time)
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
return
}
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
//query the DB for the time window
cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//return all such meetings as array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
default:invalid_request(w, 400, "Not a valid query at this end point")
}
//disallow any other method
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, | meets_handler | identifier_name |
main.go | }
//meeting schema
type meeting struct{
Id primitive.ObjectID `bson:"_id"`
Title string `json:"Title" bson:"title"`
Part []participant `json:"Participants" bson:"participants" `
Start time.Time `json:"Start Time" bson:"start" `
End time.Time `json:"End Time" bson:"end"`
Stamp time.Time `bson:"stamp"`
}
//schema for results of conditional meetings
type conditional_meets struct{
Meetings []meeting `json:"meetings"`
}
//invalid request response writer function
func invalid_request(w http.ResponseWriter, statCode int, message string){
w.Header().Set("Content-Type", "application/json")
switch statCode {
case 400: w.WriteHeader(http.StatusBadRequest)
case 403: w.WriteHeader(http.StatusForbidden)
case 404: w.WriteHeader(http.StatusNotFound)
default: w.WriteHeader(http.StatusNotFound)
}
err := Error {
StatusCode: statCode,
ErrorMessage: message}
json.NewEncoder(w).Encode(err)
}
//helper function to coneect to DB
func connectdb(ctx context.Context) (*mongo.Collection){
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
if err != nil {
log.Fatal(err)
}
err = client.Connect(ctx)
if err != nil {
log.Fatal(err)
}
appointyDatabase := client.Database("appointy-task-ritvix")
meetingCollection := appointyDatabase.Collection("meetings")
//returns collection object
return meetingCollection
}
func main(){
fmt.Println("Server is up")
http.HandleFunc("/meetings" , meets_handler) // handler for /meetings end point
http.HandleFunc("/meeting/" , meeting_handler) // handler for rooted /meeting/
fmt.Println(http.ListenAndServe(":8082", nil)); // listen to port 8082
}
// meets_handler handles requests at /meetings.
//
// POST (JSON body, no query string): create a meeting, rejecting it when
// any participant already has an overlapping meeting; responds with the
// new meeting's ID.
// GET ?participant=<email>: list all meetings of one participant.
// GET ?start=<RFC3339>&end=<RFC3339>: list meetings inside a time window.
// Any other method or query shape yields a JSON error.
func meets_handler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	// if method is POST
	case "POST":
		// disallow query strings with POST method
		if keys := r.URL.Query(); len(keys) != 0 {
			invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
		} else {
			// error handling if request not JSON
			if ua := r.Header.Get("Content-Type"); ua != "application/json" {
				invalid_request(w, 400, "This end point accepts only JSON request body")
			} else {
				var m meeting
				dec := json.NewDecoder(r.Body)
				dec.DisallowUnknownFields()
				err := dec.Decode(&m)
				// error if meeting details are not in right format
				if err != nil {
					invalid_request(w, 400, "Please recheck the meeting information")
					return
				}
				m.Stamp = time.Now()           // assign Creation stamp
				m.Id = primitive.NewObjectID() // assign unique ObjectID
				// NOTE(review): the cancel func from WithTimeout is
				// discarded, leaking the timer until it fires (go vet flags
				// this) — same pattern repeats in the GET branches below.
				ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) // timeout
				meetingCollection := connectdb(ctx)                                 // meeting collection
				// check for overlap of participants
				final_check := false
				// iterate over al participants and find clashes is db
				// NOTE(review): three FindOne queries per participant and no
				// transaction — a clashing meeting could be inserted between
				// this check and InsertOne below. TODO confirm acceptable.
				for _, particip := range m.Part {
					var check meeting
					check1 := true
					check2 := true
					check3 := true
					// an existing meeting straddles the new start time
					if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err != nil {
						check1 = false
					}
					// an existing meeting straddles the new end time
					if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte", m.End}}, "participants.email": particip.Email}).Decode(&check); err != nil {
						check2 = false
					}
					// an existing meeting lies fully inside the new window
					if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email": particip.Email}).Decode(&check); err != nil {
						check3 = false
					}
					if check1 || check2 || check3 {
						final_check = true
					}
				}
				if final_check {
					invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
				} else {
					insertResult, err := meetingCollection.InsertOne(ctx, m)
					if err != nil {
						// NOTE(review): log.Fatal terminates the entire
						// server on a single failed insert — TODO confirm
						// this is intended rather than a 5xx response.
						log.Fatal(err)
						return
					}
					// write back meeting id as JSON response
					w.Header().Set("Content-Type", "application/json")
					meet := new_meet{
						Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
					json.NewEncoder(w).Encode(meet)
				}
			}
		}
	// if method is GET
	case "GET":
		keys := r.URL.Query()
		// cases to allow only valid queries
		switch len(keys) {
		// no query string error
		case 0:
			invalid_request(w, 400, "Not a valid query at this end point")
		case 1:
			// extract participant email
			if email, ok := keys["participant"]; !ok || len(email[0]) < 1 {
				invalid_request(w, 400, "Not a valid query at this end point")
			} else {
				var meets []meeting
				ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) // timeout
				meetingCollection := connectdb(ctx)                                 // collection meetings
				if len(email) > 1 {
					invalid_request(w, 400, "Only one participant can be queried at a time")
					return
				}
				// query the collection for the mail id
				cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email": bson.M{"$eq": email[0]}})
				if err != nil {
					log.Fatal(err)
					return
				}
				if err = cursor.All(ctx, &meets); err != nil {
					log.Fatal(err)
					return
				}
				// write back all his/her meetings as an array
				my_meets := conditional_meets{
					Meetings: meets}
				w.Header().Set("Content-Type", "application/json")
				json.NewEncoder(w).Encode(my_meets)
			}
		case 2:
			start, okStart := keys["start"]
			end, okEnd := keys["end"]
			// check both start and end time are provided, else error
			if !okStart || !okEnd {
				invalid_request(w, 400, "Not a valid query at this end point")
			} else {
				start_time := start[0]
				end_time := end[0]
				// fmt.Println(start_time, end_time)
				start_tim, err := time.Parse(time.RFC3339, start_time)
				// check if the time format is valid
				if err != nil {
					invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
					return
				}
				end_tim, err := time.Parse(time.RFC3339, end_time)
				if err != nil {
					invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
					return
				}
				var meets []meeting
				ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
				meetingCollection := connectdb(ctx)
				// query the DB for the time window
				cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
				if err != nil {
					log.Fatal(err)
					return
				}
				if err = cursor.All(ctx, &meets); err != nil {
					log.Fatal(err)
					return
				}
				// return all such meetings as array
				my_meets := conditional_meets{
					Meetings: meets}
				w.Header().Set("Content-Type", "application/json")
				json.NewEncoder(w).Encode(my_meets)
			}
		default:
			invalid_request(w, 400, "Not a valid query at this end point")
		}
	// disallow any other method
	default:
		invalid_request(w, 403, "Not a valid method at this end point")
	}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request) | {
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, err := primitive.ObjectIDFromHex(meet_id)
if err!=nil{
invalid_request(w, 400, "Not a valid Meeting ID")
return
}
var meet meeting
filter := bson.M{"_id": id}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
err = meetingCollection.FindOne(ctx, filter).Decode(&meet)
if err != nil { | identifier_body | |
main.go | /primitive"
)
//error class
type Error struct{
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
}
//new meeting
type new_meet struct{
Meet_ID string `json:"Id"`
}
//participant shema
type participant struct{
Name string `json:"Name" bson:"name"`
Email string `json:"Email" bson:"email"`
RSVP string `json:"RSVP" bson:"rsvp"`
}
//meeting schema
type meeting struct{
Id primitive.ObjectID `bson:"_id"`
Title string `json:"Title" bson:"title"`
Part []participant `json:"Participants" bson:"participants" `
Start time.Time `json:"Start Time" bson:"start" `
End time.Time `json:"End Time" bson:"end"`
Stamp time.Time `bson:"stamp"`
}
//schema for results of conditional meetings
type conditional_meets struct{
Meetings []meeting `json:"meetings"`
}
//invalid request response writer function
func invalid_request(w http.ResponseWriter, statCode int, message string){
w.Header().Set("Content-Type", "application/json")
switch statCode {
case 400: w.WriteHeader(http.StatusBadRequest)
case 403: w.WriteHeader(http.StatusForbidden)
case 404: w.WriteHeader(http.StatusNotFound)
default: w.WriteHeader(http.StatusNotFound)
}
err := Error {
StatusCode: statCode,
ErrorMessage: message}
json.NewEncoder(w).Encode(err)
}
//helper function to coneect to DB
func connectdb(ctx context.Context) (*mongo.Collection){
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
if err != nil {
log.Fatal(err)
}
err = client.Connect(ctx)
if err != nil {
log.Fatal(err)
}
appointyDatabase := client.Database("appointy-task-ritvix")
meetingCollection := appointyDatabase.Collection("meetings")
//returns collection object
return meetingCollection
}
func main(){
fmt.Println("Server is up")
http.HandleFunc("/meetings" , meets_handler) // handler for /meetings end point
http.HandleFunc("/meeting/" , meeting_handler) // handler for rooted /meeting/
fmt.Println(http.ListenAndServe(":8082", nil)); // listen to port 8082
}
//handle requests at /meetings
func meets_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
//if method is POST
case "POST":
//disallow query strings with POST method
if keys := r.URL.Query(); len(keys)!=0{
invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
}else{
//error handling if request not JSON
if ua := r.Header.Get("Content-Type"); ua!="application/json"{
invalid_request(w, 400, "This end point accepts only JSON request body")
}else{
var m meeting
dec := json.NewDecoder(r.Body)
dec.DisallowUnknownFields()
err := dec.Decode(&m)
//error if meeting details are not in right format
if err != nil {
invalid_request(w, 400, "Please recheck the meeting information")
return
}
m.Stamp = time.Now() //assign Creation stamp
m.Id = primitive.NewObjectID() //assign unique ObjectID
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //meeting collection
//check for overlap of participants
final_check := false
//iterate over al participants and find clashes is db
for _, particip := range m.Part{
var check meeting
check1 := true
check2 := true
check3 := true
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err!=nil{
check1 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte",m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check2 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil |
if check1 || check2 || check3 {
final_check =true
}
}
if final_check{
invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
}else{
insertResult, err := meetingCollection.InsertOne(ctx, m)
if err != nil {
log.Fatal(err)
return
}
//write back meeting id as JSON response
w.Header().Set("Content-Type", "application/json")
meet := new_meet{
Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
json.NewEncoder(w).Encode(meet)
}
}
}
//if method is GET
case "GET":
keys := r.URL.Query()
//cases to allow only valid queries
switch len(keys){
//no query string error
case 0:invalid_request(w, 400, "Not a valid query at this end point")
case 1:
//extract participant email
if email, ok := keys["participant"]; !ok || len(email[0])<1{
invalid_request(w, 400, "Not a valid query at this end point")
}else {
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //collection meetings
if len(email)>1{
invalid_request(w, 400, "Only one participant can be queried at a time")
return
}
//query the collection for the mail id
cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email":bson.M{"$eq":email[0]}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//write back all his/her meetings as an array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
case 2:
start, okStart := keys["start"]
end, okEnd := keys["end"]
//check both start and end time are provided, else error
if !okStart || !okEnd {invalid_request(w, 400, "Not a valid query at this end point")
}else{
start_time := start[0]
end_time := end[0]
// fmt.Println(start_time, end_time)
start_tim, err := time.Parse(time.RFC3339, start_time)
//check if the time format is valid
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
return
}
end_tim, err := time.Parse(time.RFC3339, end_time)
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
return
}
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
//query the DB for the time window
cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//return all such meetings as array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
default:invalid_request(w, 400, "Not a valid query at this end point")
}
//disallow any other method
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, err | {
check3 = false
} | conditional_block |
main.go | /primitive"
)
//error class
type Error struct{
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
}
//new meeting
type new_meet struct{
Meet_ID string `json:"Id"`
}
//participant shema
type participant struct{
Name string `json:"Name" bson:"name"`
Email string `json:"Email" bson:"email"`
RSVP string `json:"RSVP" bson:"rsvp"`
}
//meeting schema
type meeting struct{
Id primitive.ObjectID `bson:"_id"`
Title string `json:"Title" bson:"title"`
Part []participant `json:"Participants" bson:"participants" `
Start time.Time `json:"Start Time" bson:"start" `
End time.Time `json:"End Time" bson:"end"`
Stamp time.Time `bson:"stamp"`
}
//schema for results of conditional meetings
type conditional_meets struct{
Meetings []meeting `json:"meetings"`
}
//invalid request response writer function
func invalid_request(w http.ResponseWriter, statCode int, message string){
w.Header().Set("Content-Type", "application/json")
switch statCode {
case 400: w.WriteHeader(http.StatusBadRequest)
case 403: w.WriteHeader(http.StatusForbidden)
case 404: w.WriteHeader(http.StatusNotFound)
default: w.WriteHeader(http.StatusNotFound)
}
err := Error {
StatusCode: statCode,
ErrorMessage: message}
json.NewEncoder(w).Encode(err)
}
//helper function to coneect to DB
func connectdb(ctx context.Context) (*mongo.Collection){
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
if err != nil {
log.Fatal(err)
}
err = client.Connect(ctx)
if err != nil {
log.Fatal(err)
}
appointyDatabase := client.Database("appointy-task-ritvix")
meetingCollection := appointyDatabase.Collection("meetings")
//returns collection object
return meetingCollection
}
func main(){
fmt.Println("Server is up")
http.HandleFunc("/meetings" , meets_handler) // handler for /meetings end point
http.HandleFunc("/meeting/" , meeting_handler) // handler for rooted /meeting/
fmt.Println(http.ListenAndServe(":8082", nil)); // listen to port 8082
}
//handle requests at /meetings
func meets_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
//if method is POST
case "POST":
//disallow query strings with POST method
if keys := r.URL.Query(); len(keys)!=0{
invalid_request(w, 400, "Queries not allowed at this endpoint with this method")
}else{
//error handling if request not JSON
if ua := r.Header.Get("Content-Type"); ua!="application/json"{
invalid_request(w, 400, "This end point accepts only JSON request body")
}else{
var m meeting
dec := json.NewDecoder(r.Body)
dec.DisallowUnknownFields()
err := dec.Decode(&m)
//error if meeting details are not in right format
if err != nil {
invalid_request(w, 400, "Please recheck the meeting information")
return
}
m.Stamp = time.Now() //assign Creation stamp
m.Id = primitive.NewObjectID() //assign unique ObjectID
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //meeting collection
//check for overlap of participants
final_check := false
//iterate over al participants and find clashes is db
for _, particip := range m.Part{
var check meeting
check1 := true
check2 := true
check3 := true
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lte", m.Start}}, "end": bson.D{{"$gt", m.Start}}, "participants.email": particip.Email}).Decode(&check); err!=nil{
check1 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$lt", m.End}}, "end": bson.D{{"$gte",m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check2 = false
}
if err = meetingCollection.FindOne(ctx, bson.M{"start": bson.D{{"$gte", m.Start}}, "end": bson.D{{"$lte", m.End}}, "participants.email":particip.Email}).Decode(&check); err!=nil{
check3 = false
}
if check1 || check2 || check3 {
final_check =true
}
}
if final_check{
invalid_request(w, 400, "Meeting clashes with other meeting/s with some common participant/s")
}else{
insertResult, err := meetingCollection.InsertOne(ctx, m)
if err != nil {
log.Fatal(err)
return
}
//write back meeting id as JSON response
w.Header().Set("Content-Type", "application/json")
meet := new_meet{
Meet_ID: insertResult.InsertedID.(primitive.ObjectID).Hex()}
json.NewEncoder(w).Encode(meet)
}
}
}
//if method is GET
case "GET":
keys := r.URL.Query()
//cases to allow only valid queries
switch len(keys){
//no query string error
case 0:invalid_request(w, 400, "Not a valid query at this end point")
case 1:
//extract participant email
if email, ok := keys["participant"]; !ok || len(email[0])<1{
invalid_request(w, 400, "Not a valid query at this end point")
}else {
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) //timeout
meetingCollection := connectdb(ctx) //collection meetings
if len(email)>1{
invalid_request(w, 400, "Only one participant can be queried at a time")
return
}
//query the collection for the mail id
cursor, err := meetingCollection.Find(ctx, bson.M{"participants.email":bson.M{"$eq":email[0]}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//write back all his/her meetings as an array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json") | case 2:
start, okStart := keys["start"]
end, okEnd := keys["end"]
//check both start and end time are provided, else error
if !okStart || !okEnd {invalid_request(w, 400, "Not a valid query at this end point")
}else{
start_time := start[0]
end_time := end[0]
// fmt.Println(start_time, end_time)
start_tim, err := time.Parse(time.RFC3339, start_time)
//check if the time format is valid
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format- YY-MM-DDTHH-MM-SSZ")
return
}
end_tim, err := time.Parse(time.RFC3339, end_time)
if err!=nil{
invalid_request(w, 400, "Please enter date and time in RFC3339 format - YY-MM-DDTHH-MM-SSZ")
return
}
var meets []meeting
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
meetingCollection := connectdb(ctx)
//query the DB for the time window
cursor, err := meetingCollection.Find(ctx, bson.M{"start": bson.D{{"$gt", start_tim}}, "end": bson.D{{"$lt", end_tim}}})
if err != nil {
log.Fatal(err)
return
}
if err = cursor.All(ctx, &meets); err != nil {
log.Fatal(err)
return
}
//return all such meetings as array
my_meets := conditional_meets{
Meetings: meets}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(my_meets)
}
default:invalid_request(w, 400, "Not a valid query at this end point")
}
//disallow any other method
default:invalid_request(w, 403, "Not a valid method at this end point")
}
}
//handler for meeting/ root
func meeting_handler(w http.ResponseWriter, r *http.Request){
switch r.Method{
case "GET":
//extract meeting id from url
if meet_id := r.URL.Path[len("/meeting/"):]; len(meet_id)==0{
invalid_request(w, 400, "Not a valid Meeting ID")
}else{
//check forvalid id
id, err := | json.NewEncoder(w).Encode(my_meets)
} | random_line_split |
column_chunk.go | information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
column *format.ColumnChunk
columnMeta *format.ColumnMetaData
decryptedMeta format.ColumnMetaData
descr *schema.Column
writerVersion *AppVersion
encodings []parquet.Encoding
encodingStats []format.PageEncodingStats
possibleStats TypedStatistics
mem memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil | else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
}
// CryptoMetadata returns the cryptographic metadata for how this column was
// encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
return c.column.GetCryptoMetadata()
}
// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }
// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }
// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }
// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }
// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) PathInSchema() parquet.ColumnPath {
return c.columnMeta.GetPathInSchema()
}
// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
return compress.Compression(c.columnMeta.Codec)
}
// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats connects the order of encodings based on the list of pages and types
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
ret := make([]PageEncodingStats, len(c.encodingStats))
for idx := range ret {
ret[idx].Encoding = parquet.Encoding(c.encodingStats[idx].Encoding)
ret[idx].PageType = c.encodingStats[idx].PageType
}
return ret
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
return c.columnMeta.IsSetDictionaryPageOffset()
}
// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
return c.columnMeta.GetDictionaryPageOffset()
}
// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }
// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }
// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }
// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
return c.columnMeta.GetTotalCompressedSize()
}
// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
return c.columnMeta.GetTotalUncompressedSize()
}
// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
return false, nil
}
if c.possibleStats == nil {
c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
}
encoded, err := c.possibleStats.Encode()
if err != nil {
return false, err
}
return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics can return nil if there are no stats in this metadata
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
ok, err := c.StatsSet()
if err != nil {
return nil, err
}
if ok {
return c.possibleStats, nil
}
return nil, nil
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
chunk *format.ColumnChunk
props *parquet.WriterProperties
column *schema.Column
compressedSize int64
}
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
return NewColumnChunkMetaDataBuilderWithContents(props, column, format.NewColumnChunk())
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
b := &ColumnChunkMetaDataBuilder{
props: props,
column: column,
chunk: chunk,
}
b.init(chunk)
return b
}
// Contents returns the underlying thrift ColumnChunk object so that it can be used
// for constructing or duplicating column metadata
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk { return c.chunk }
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
c.chunk = chunk
if !c.chunk.IsSetMetaData() {
c.chunk.MetaData = format.NewColumnMetaData()
}
c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
c.chunk.FilePath = &val
}
// Descr returns the associated column descriptor for this column chunk
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column { return c.column }
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
// if this column is encrypted, after Finish is called, the MetaData
// field is set to nil and we store the compressed size so return that | {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta
} | conditional_block |
column_chunk.go | information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
column *format.ColumnChunk
columnMeta *format.ColumnMetaData
decryptedMeta format.ColumnMetaData
descr *schema.Column
writerVersion *AppVersion
encodings []parquet.Encoding
encodingStats []format.PageEncodingStats
possibleStats TypedStatistics
mem memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta
} else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
}
// CryptoMetadata returns the cryptographic metadata for how this column was
// encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
return c.column.GetCryptoMetadata()
}
// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }
// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }
// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }
// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }
// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) | () parquet.ColumnPath {
return c.columnMeta.GetPathInSchema()
}
// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
return compress.Compression(c.columnMeta.Codec)
}
// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats connects the order of encodings based on the list of pages and types
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
ret := make([]PageEncodingStats, len(c.encodingStats))
for idx := range ret {
ret[idx].Encoding = parquet.Encoding(c.encodingStats[idx].Encoding)
ret[idx].PageType = c.encodingStats[idx].PageType
}
return ret
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
return c.columnMeta.IsSetDictionaryPageOffset()
}
// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
return c.columnMeta.GetDictionaryPageOffset()
}
// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }
// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }
// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }
// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
return c.columnMeta.GetTotalCompressedSize()
}
// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
return c.columnMeta.GetTotalUncompressedSize()
}
// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
return false, nil
}
if c.possibleStats == nil {
c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
}
encoded, err := c.possibleStats.Encode()
if err != nil {
return false, err
}
return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics can return nil if there are no stats in this metadata
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
ok, err := c.StatsSet()
if err != nil {
return nil, err
}
if ok {
return c.possibleStats, nil
}
return nil, nil
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
chunk *format.ColumnChunk
props *parquet.WriterProperties
column *schema.Column
compressedSize int64
}
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
return NewColumnChunkMetaDataBuilderWithContents(props, column, format.NewColumnChunk())
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
b := &ColumnChunkMetaDataBuilder{
props: props,
column: column,
chunk: chunk,
}
b.init(chunk)
return b
}
// Contents returns the underlying thrift ColumnChunk object so that it can be used
// for constructing or duplicating column metadata
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk { return c.chunk }
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
c.chunk = chunk
if !c.chunk.IsSetMetaData() {
c.chunk.MetaData = format.NewColumnMetaData()
}
c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
c.chunk.FilePath = &val
}
// Descr returns the associated column descriptor for this column chunk
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column { return c.column }
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
// if this column is encrypted, after Finish is called, the MetaData
// field is set to nil and we store the compressed size so return that | PathInSchema | identifier_name |
column_chunk.go | information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
column *format.ColumnChunk
columnMeta *format.ColumnMetaData
decryptedMeta format.ColumnMetaData
descr *schema.Column
writerVersion *AppVersion
encodings []parquet.Encoding
encodingStats []format.PageEncodingStats
possibleStats TypedStatistics
mem memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta
} else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
} | // encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
return c.column.GetCryptoMetadata()
}
// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }
// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }
// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }
// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }
// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) PathInSchema() parquet.ColumnPath {
return c.columnMeta.GetPathInSchema()
}
// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
return compress.Compression(c.columnMeta.Codec)
}
// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats connects the order of encodings based on the list of pages and types
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
ret := make([]PageEncodingStats, len(c.encodingStats))
for idx := range ret {
ret[idx].Encoding = parquet.Encoding(c.encodingStats[idx].Encoding)
ret[idx].PageType = c.encodingStats[idx].PageType
}
return ret
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
return c.columnMeta.IsSetDictionaryPageOffset()
}
// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
return c.columnMeta.GetDictionaryPageOffset()
}
// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }
// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }
// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }
// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
return c.columnMeta.GetTotalCompressedSize()
}
// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
return c.columnMeta.GetTotalUncompressedSize()
}
// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
return false, nil
}
if c.possibleStats == nil {
c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
}
encoded, err := c.possibleStats.Encode()
if err != nil {
return false, err
}
return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics can return nil if there are no stats in this metadata
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
ok, err := c.StatsSet()
if err != nil {
return nil, err
}
if ok {
return c.possibleStats, nil
}
return nil, nil
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
chunk *format.ColumnChunk
props *parquet.WriterProperties
column *schema.Column
compressedSize int64
}
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
return NewColumnChunkMetaDataBuilderWithContents(props, column, format.NewColumnChunk())
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
b := &ColumnChunkMetaDataBuilder{
props: props,
column: column,
chunk: chunk,
}
b.init(chunk)
return b
}
// Contents returns the underlying thrift ColumnChunk object so that it can be used
// for constructing or duplicating column metadata
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk { return c.chunk }
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
c.chunk = chunk
if !c.chunk.IsSetMetaData() {
c.chunk.MetaData = format.NewColumnMetaData()
}
c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
c.chunk.FilePath = &val
}
// Descr returns the associated column descriptor for this column chunk
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column { return c.column }
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
// if this column is encrypted, after Finish is called, the MetaData
// field is set to nil and we store the compressed size so return that
|
// CryptoMetadata returns the cryptographic metadata for how this column was | random_line_split |
column_chunk.go | information and metadata for a given column chunk
// and it's associated Column
type ColumnChunkMetaData struct {
column *format.ColumnChunk
columnMeta *format.ColumnMetaData
decryptedMeta format.ColumnMetaData
descr *schema.Column
writerVersion *AppVersion
encodings []parquet.Encoding
encodingStats []format.PageEncodingStats
possibleStats TypedStatistics
mem memory.Allocator
}
// NewColumnChunkMetaData creates an instance of the metadata from a column chunk and descriptor
//
// this is primarily used internally or between the subpackages. ColumnChunkMetaDataBuilder should
// be used by consumers instead of using this directly.
func NewColumnChunkMetaData(column *format.ColumnChunk, descr *schema.Column, writerVersion *AppVersion, rowGroupOrdinal, columnOrdinal int16, fileDecryptor encryption.FileDecryptor) (*ColumnChunkMetaData, error) | } else {
return nil, xerrors.New("cannot decrypt column metadata. file decryption not setup correctly")
}
}
}
for _, enc := range c.columnMeta.Encodings {
c.encodings = append(c.encodings, parquet.Encoding(enc))
}
for _, enc := range c.columnMeta.EncodingStats {
c.encodingStats = append(c.encodingStats, *enc)
}
return c, nil
}
// CryptoMetadata returns the cryptographic metadata for how this column was
// encrypted and how to decrypt it.
func (c *ColumnChunkMetaData) CryptoMetadata() *format.ColumnCryptoMetaData {
return c.column.GetCryptoMetadata()
}
// FileOffset is the location in the file where the column data begins
func (c *ColumnChunkMetaData) FileOffset() int64 { return c.column.FileOffset }
// FilePath gives the name of the parquet file if provided in the metadata
func (c *ColumnChunkMetaData) FilePath() string { return c.column.GetFilePath() }
// Type is the physical storage type used in the parquet file for this column chunk.
func (c *ColumnChunkMetaData) Type() parquet.Type { return parquet.Type(c.columnMeta.Type) }
// NumValues is the number of values stored in just this chunk including nulls.
func (c *ColumnChunkMetaData) NumValues() int64 { return c.columnMeta.NumValues }
// PathInSchema is the full path to this column from the root of the schema including
// any nested columns
func (c *ColumnChunkMetaData) PathInSchema() parquet.ColumnPath {
return c.columnMeta.GetPathInSchema()
}
// Compression provides the type of compression used for this particular chunk.
func (c *ColumnChunkMetaData) Compression() compress.Compression {
return compress.Compression(c.columnMeta.Codec)
}
// Encodings returns the list of different encodings used in this chunk
func (c *ColumnChunkMetaData) Encodings() []parquet.Encoding { return c.encodings }
// EncodingStats connects the order of encodings based on the list of pages and types
func (c *ColumnChunkMetaData) EncodingStats() []PageEncodingStats {
ret := make([]PageEncodingStats, len(c.encodingStats))
for idx := range ret {
ret[idx].Encoding = parquet.Encoding(c.encodingStats[idx].Encoding)
ret[idx].PageType = c.encodingStats[idx].PageType
}
return ret
}
// HasDictionaryPage returns true if there is a dictionary page offset set in
// this metadata.
func (c *ColumnChunkMetaData) HasDictionaryPage() bool {
return c.columnMeta.IsSetDictionaryPageOffset()
}
// DictionaryPageOffset returns the location in the file where the dictionary page starts
func (c *ColumnChunkMetaData) DictionaryPageOffset() int64 {
return c.columnMeta.GetDictionaryPageOffset()
}
// DataPageOffset returns the location in the file where the data pages begin for this column
func (c *ColumnChunkMetaData) DataPageOffset() int64 { return c.columnMeta.GetDataPageOffset() }
// HasIndexPage returns true if the offset for the index page is set in the metadata
func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }
// IndexPageOffset is the location in the file where the index page starts.
func (c *ColumnChunkMetaData) IndexPageOffset() int64 { return c.columnMeta.GetIndexPageOffset() }
// TotalCompressedSize will be equal to TotalUncompressedSize if the data is not compressed.
// Otherwise this will be the size of the actual data in the file.
func (c *ColumnChunkMetaData) TotalCompressedSize() int64 {
return c.columnMeta.GetTotalCompressedSize()
}
// TotalUncompressedSize is the total size of the raw data after uncompressing the chunk
func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {
return c.columnMeta.GetTotalUncompressedSize()
}
// BloomFilterOffset is the byte offset from the beginning of the file to the bloom
// filter data.
func (c *ColumnChunkMetaData) BloomFilterOffset() int64 {
return c.columnMeta.GetBloomFilterOffset()
}
// StatsSet returns true only if there are statistics set in the metadata and the column
// descriptor has a sort order that is not SortUnknown
//
// It also checks the writer version to ensure that it was not written by a version
// of parquet which is known to have incorrect stat computations.
func (c *ColumnChunkMetaData) StatsSet() (bool, error) {
if !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {
return false, nil
}
if c.possibleStats == nil {
c.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)
}
encoded, err := c.possibleStats.Encode()
if err != nil {
return false, err
}
return c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil
}
func (c *ColumnChunkMetaData) Equals(other *ColumnChunkMetaData) bool {
return reflect.DeepEqual(c.columnMeta, other.columnMeta)
}
// Statistics can return nil if there are no stats in this metadata
func (c *ColumnChunkMetaData) Statistics() (TypedStatistics, error) {
ok, err := c.StatsSet()
if err != nil {
return nil, err
}
if ok {
return c.possibleStats, nil
}
return nil, nil
}
// ColumnChunkMetaDataBuilder is used during writing to construct metadata
// for a given column chunk while writing, providing a proxy around constructing
// the actual thrift object.
type ColumnChunkMetaDataBuilder struct {
chunk *format.ColumnChunk
props *parquet.WriterProperties
column *schema.Column
compressedSize int64
}
func NewColumnChunkMetaDataBuilder(props *parquet.WriterProperties, column *schema.Column) *ColumnChunkMetaDataBuilder {
return NewColumnChunkMetaDataBuilderWithContents(props, column, format.NewColumnChunk())
}
// NewColumnChunkMetaDataBuilderWithContents will construct a builder and start it with the provided
// column chunk information rather than with an empty column chunk.
func NewColumnChunkMetaDataBuilderWithContents(props *parquet.WriterProperties, column *schema.Column, chunk *format.ColumnChunk) *ColumnChunkMetaDataBuilder {
b := &ColumnChunkMetaDataBuilder{
props: props,
column: column,
chunk: chunk,
}
b.init(chunk)
return b
}
// Contents returns the underlying thrift ColumnChunk object so that it can be used
// for constructing or duplicating column metadata
func (c *ColumnChunkMetaDataBuilder) Contents() *format.ColumnChunk { return c.chunk }
func (c *ColumnChunkMetaDataBuilder) init(chunk *format.ColumnChunk) {
c.chunk = chunk
if !c.chunk.IsSetMetaData() {
c.chunk.MetaData = format.NewColumnMetaData()
}
c.chunk.MetaData.Type = format.Type(c.column.PhysicalType())
c.chunk.MetaData.PathInSchema = schema.ColumnPathFromNode(c.column.SchemaNode())
c.chunk.MetaData.Codec = format.CompressionCodec(c.props.CompressionFor(c.column.Path()))
}
func (c *ColumnChunkMetaDataBuilder) SetFilePath(val string) {
c.chunk.FilePath = &val
}
// Descr returns the associated column descriptor for this column chunk
func (c *ColumnChunkMetaDataBuilder) Descr() *schema.Column { return c.column }
func (c *ColumnChunkMetaDataBuilder) TotalCompressedSize() int64 {
// if this column is encrypted, after Finish is called, the MetaData
// field is set to nil and we store the compressed size so return that
| {
c := &ColumnChunkMetaData{
column: column,
columnMeta: column.GetMetaData(),
descr: descr,
writerVersion: writerVersion,
mem: memory.DefaultAllocator,
}
if column.IsSetCryptoMetadata() {
ccmd := column.CryptoMetadata
if ccmd.IsSetENCRYPTION_WITH_COLUMN_KEY() {
if fileDecryptor != nil && fileDecryptor.Properties() != nil {
// should decrypt metadata
path := parquet.ColumnPath(ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetPathInSchema())
keyMetadata := ccmd.ENCRYPTION_WITH_COLUMN_KEY.GetKeyMetadata()
aadColumnMetadata := encryption.CreateModuleAad(fileDecryptor.FileAad(), encryption.ColumnMetaModule, rowGroupOrdinal, columnOrdinal, -1)
decryptor := fileDecryptor.GetColumnMetaDecryptor(path.String(), string(keyMetadata), aadColumnMetadata)
thrift.DeserializeThrift(&c.decryptedMeta, decryptor.Decrypt(column.GetEncryptedColumnMetadata()))
c.columnMeta = &c.decryptedMeta | identifier_body |
store.go | {
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
leases, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
if diff := cmp.Diff(0, len(leases)); diff != "" {
t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
}
}
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
t.Helper()
l, ok, err := s.Lease(1)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("found a lease when none was expected")
}
if l != nil {
t.Fatal("returned non-nil lease when not found")
}
}
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
func testPurgeOK(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
)
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil {
t.Fatalf("failed to get allocated IPv4s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
}
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
subnets, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
if diff := cmp.Diff(0, len(subnets)); diff != "" {
t.Fatalf("unexpected number of subnets (-want +got):\n%s", diff)
}
}
func testSubnetsOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic subnets to be fetched again later.
want := []*net.IPNet{okSubnet4, okSubnet6}
for _, sub := range want {
if err := s.SaveSubnet(sub); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
}
got, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
sort.SliceStable(want, func(i, j int) bool {
return want[i].String() < want[j].String()
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].String() < got[j].String()
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Subnets (-want +got):\n%s", diff)
}
}
func testAllocatedIPsNoSubnet(t *testing.T, s wgipam.Store) {
t.Helper()
if _, err := s.AllocatedIPs(okSubnet4); err == nil {
t.Fatal("expected no such subnet error, but none occurred")
}
}
func | testAllocateIPMismatchedSubnet | identifier_name | |
store.go | string
fn func(t *testing.T, s wgipam.Store)
}{
{
name: "leases empty",
fn: testLeasesEmpty,
},
{
name: "leases OK",
fn: testLeasesOK,
},
{
name: "lease not exist",
fn: testLeaseNotExist,
},
{
name: "save lease OK",
fn: testSaveLeaseOK,
},
{
name: "delete lease OK",
fn: testDeleteLeaseOK,
},
{
name: "purge OK",
fn: testPurgeOK,
},
{
name: "subnets empty",
fn: testSubnetsEmpty,
},
{
name: "subnets OK",
fn: testSubnetsOK,
},
{
name: "allocated IPs no subnet",
fn: testAllocatedIPsNoSubnet,
},
{
name: "allocate IP mismatched subnet",
fn: testAllocateIPMismatchedSubnet,
},
{
name: "allocate IP no subnet",
fn: testAllocateIPNoSubnet,
},
{
name: "allocate IP already allocated",
fn: testAllocateIPAlreadyAllocated,
},
{
name: "free IP mismatched subnet",
fn: testFreeIPMismatchedSubnet,
},
{
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
leases, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
if diff := cmp.Diff(0, len(leases)); diff != "" {
t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
}
}
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
t.Helper()
l, ok, err := s.Lease(1)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("found a lease when none was expected")
}
if l != nil {
t.Fatal("returned non-nil lease when not found")
}
}
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
func testPurgeOK(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
)
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil |
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
}
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
subnets, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: % | {
t.Fatalf("failed to get allocated IPv4s: %v", err)
} | conditional_block |
store.go | string
fn func(t *testing.T, s wgipam.Store)
}{
{
name: "leases empty",
fn: testLeasesEmpty,
},
{
name: "leases OK",
fn: testLeasesOK,
},
{
name: "lease not exist",
fn: testLeaseNotExist,
},
{
name: "save lease OK",
fn: testSaveLeaseOK,
},
{
name: "delete lease OK",
fn: testDeleteLeaseOK,
},
{
name: "purge OK",
fn: testPurgeOK,
},
{
name: "subnets empty",
fn: testSubnetsEmpty,
},
{
name: "subnets OK",
fn: testSubnetsOK,
},
{
name: "allocated IPs no subnet",
fn: testAllocatedIPsNoSubnet,
},
{
name: "allocate IP mismatched subnet",
fn: testAllocateIPMismatchedSubnet,
},
{
name: "allocate IP no subnet",
fn: testAllocateIPNoSubnet,
},
{
name: "allocate IP already allocated",
fn: testAllocateIPAlreadyAllocated,
},
{
name: "free IP mismatched subnet",
fn: testFreeIPMismatchedSubnet,
},
{
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
leases, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
if diff := cmp.Diff(0, len(leases)); diff != "" {
t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
}
}
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
t.Helper()
l, ok, err := s.Lease(1)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("found a lease when none was expected")
}
if l != nil {
t.Fatal("returned non-nil lease when not found")
}
}
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
func testPurgeOK(t *testing.T, s wgipam.Store) |
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil {
t.Fatalf("failed to get allocated IPv4s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
}
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
subnets, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: % | {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
) | identifier_body |
store.go | string
fn func(t *testing.T, s wgipam.Store)
}{
{
name: "leases empty",
fn: testLeasesEmpty,
},
{
name: "leases OK",
fn: testLeasesOK,
},
{
name: "lease not exist",
fn: testLeaseNotExist,
},
{
name: "save lease OK",
fn: testSaveLeaseOK,
},
{
name: "delete lease OK",
fn: testDeleteLeaseOK,
},
{
name: "purge OK", | },
{
name: "subnets OK",
fn: testSubnetsOK,
},
{
name: "allocated IPs no subnet",
fn: testAllocatedIPsNoSubnet,
},
{
name: "allocate IP mismatched subnet",
fn: testAllocateIPMismatchedSubnet,
},
{
name: "allocate IP no subnet",
fn: testAllocateIPNoSubnet,
},
{
name: "allocate IP already allocated",
fn: testAllocateIPAlreadyAllocated,
},
{
name: "free IP mismatched subnet",
fn: testFreeIPMismatchedSubnet,
},
{
name: "free IP no subnet",
fn: testFreeIPNoSubnet,
},
{
name: "free IP OK",
fn: testFreeIPOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ms(t)
defer s.Close()
tt.fn(t, s)
})
}
}
func testLeasesEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
leases, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
if diff := cmp.Diff(0, len(leases)); diff != "" {
t.Fatalf("unexpected number of leases (-want +got):\n%s", diff)
}
}
func testLeasesOK(t *testing.T, s wgipam.Store) {
t.Helper()
// Save some synthetic leases to be fetched again later.
for i := 0; i < 3; i++ {
if err := s.SaveLease(uint64(i), okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get leases: %v", err)
}
// No ordering guarantees are made, so sort both slices for comparison.
want := []*wgipam.Lease{
okLease, okLease, okLease,
}
sort.SliceStable(want, func(i, j int) bool {
return want[i].Start.Before(want[j].Start)
})
sort.SliceStable(got, func(i, j int) bool {
return got[i].Start.Before(got[j].Start)
})
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
}
func testLeaseNotExist(t *testing.T, s wgipam.Store) {
t.Helper()
l, ok, err := s.Lease(1)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("found a lease when none was expected")
}
if l != nil {
t.Fatal("returned non-nil lease when not found")
}
}
func testSaveLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
l, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if !ok {
t.Fatal("expected a lease but one was not found")
}
if diff := cmp.Diff(okLease, l); diff != "" {
t.Fatalf("unexpected Lease (-want +got):\n%s", diff)
}
}
func testDeleteLeaseOK(t *testing.T, s wgipam.Store) {
t.Helper()
const key = 1
if err := s.SaveLease(key, okLease); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
// Repeated deletions should be idempotent.
for i := 0; i < 3; i++ {
if err := s.DeleteLease(key); err != nil {
t.Fatalf("failed to delete lease: %v", err)
}
}
_, ok, err := s.Lease(key)
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if ok {
t.Fatal("expected no lease but one was found")
}
}
func testPurgeOK(t *testing.T, s wgipam.Store) {
t.Helper()
if err := s.SaveSubnet(okSubnet4); err != nil {
t.Fatalf("failed to save subnet: %v", err)
}
ipa, err := wgipam.DualStackIPAllocator(s, []wgipam.Subnet{
{Subnet: *okSubnet4},
{Subnet: *okSubnet6},
})
if err != nil {
t.Fatalf("failed to create IP allocator: %v", err)
}
// Leases start every 100 seconds and last 10 seconds.
const (
start = 100
length = 10
)
var want *wgipam.Lease
for i := 0; i < 3; i++ {
ips, ok, err := ipa.Allocate(wgipam.DualStack)
if err != nil {
t.Fatalf("failed to allocate IPs: %v", err)
}
if !ok {
t.Fatal("ran out of IP addresses")
}
// Create leases which start at regular intervals.
l := &wgipam.Lease{
IPs: ips,
Start: time.Unix((int64(i)+1)*start, 0),
Length: length * time.Second,
}
if i == 2 {
// Track final lease for later comparison.
want = l
}
if err := s.SaveLease(uint64(i), l); err != nil {
t.Fatalf("failed to save lease: %v", err)
}
}
// Purge only some of the leases by selecting a time that matches the
// expiration time of the second lease.
purge := time.Unix(2*start+length, 0)
// Repeated purges with the same time should be idempotent.
for i := 0; i < 3; i++ {
stats, err := s.Purge(purge)
if err != nil {
t.Fatalf("failed to purge leases: %v", err)
}
// Expect addresses to be freed on the first iteration only.
var wantFreed int
if i == 0 {
wantFreed = 2
}
for k, v := range stats.FreedIPs {
if diff := cmp.Diff(wantFreed, v); diff != "" {
t.Fatalf("unexpected number of freed IPs for subnet %s (-want +got):\n%s", k, diff)
}
}
}
// Expect only one lease to remain.
got, err := s.Leases()
if err != nil {
t.Fatalf("failed to get lease: %v", err)
}
if diff := cmp.Diff([]*wgipam.Lease{want}, got); diff != "" {
t.Fatalf("unexpected Leases (-want +got):\n%s", diff)
}
ip4s, err := s.AllocatedIPs(okSubnet4)
if err != nil {
t.Fatalf("failed to get allocated IPv4s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[0]}, ip4s); diff != "" {
t.Fatalf("unexpected remaining IPv4 allocation (-want +got):\n%s", diff)
}
ip6s, err := s.AllocatedIPs(okSubnet6)
if err != nil {
t.Fatalf("failed to get allocated IPv6s: %v", err)
}
if diff := cmp.Diff([]*net.IPNet{want.IPs[1]}, ip6s); diff != "" {
t.Fatalf("unexpected remaining IPv6 allocation (-want +got):\n%s", diff)
}
}
func testSubnetsEmpty(t *testing.T, s wgipam.Store) {
t.Helper()
subnets, err := s.Subnets()
if err != nil {
t.Fatalf("failed to get subnets: %v", | fn: testPurgeOK,
},
{
name: "subnets empty",
fn: testSubnetsEmpty, | random_line_split |
chat_template.js | $/g;
// can't do a username.tmi.twitch.tv since the latter part of the host could change at any point
// Of course, this is just a relatively standard IRC parser anyway,
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
// IRC prefix of the form nick!user@host; capture groups: 1 = nick, 2 = user, 3 = host.
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
// Drop the dead socket reference and dial a brand-new connection.
_reconnect() {
    this.ws = null;
    this.connect();
}
_onError() {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
}
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) |
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The | {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
} | conditional_block |
chat_template.js | anyway.
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
_reconnect() {
this.ws = null;
this.connect();
}
_onError() {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
}
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
}
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The server you are connected to is restarted
// you should restart the bot and reconnect
| // close the socket and let the close handler grab it
this.ws.close();
break; | random_line_split | |
chat_template.js | 1$/g;
// can't do a username.tmi.twitch.tv since the latter part of the host could change at any point
// course this is just a relately standard IRC parser anyway.
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
_reconnect() {
this.ws = null;
this.connect();
}
| () {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
}
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
}
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The server you | _onError | identifier_name |
chat_template.js | $/g;
// can't do a username.tmi.twitch.tv since the latter part of the host could change at any point
// course this is just a relately standard IRC parser anyway.
// but this will trip a ReDoS scanner since >= 10
// A Twitch username is up to 25 letters, we'll leave some wiggle room
const hostRegex = /([a-z_0-9]{1,30})!([a-z_0-9]{1,30})@([a-z._0-9]{1,60})/;
class ChatBot extends EventEmitter {
constructor(opts) {
super();
this.reconnect = true;
this.ws = null;
this.pinger = {
clock: false,
start: () => {
if (this.pinger.clock) {
clearInterval(this.pinger.clock);
}
this.pinger.sendPing();
this.pinger.clock = setInterval(() => {
setTimeout(() => {
this.pinger.sendPing();
//jitter
}, Math.floor((Math.random() * 1000) + 1));
}, (4 * 60 * 1000));
// at least ever 5 minutes
},
sendPing: () => {
try {
this.ws.send('PING');
this.pinger.awaitPong();
} catch (e) {
console.log(e);
this.ws.close();
}
},
pingtimeout: false,
awaitPong: () => {
this.pinger.pingtimeout = setTimeout(() => {
//console.log('WS Pong Timeout');
this.ws.close();
}, 10000)
},
gotPong: () => {
clearTimeout(this.pinger.pingtimeout);
}
}
}
connect() {
console.log('init');
this.ws = new WebSocket('wss://irc-ws.chat.twitch.tv');
this.ws.onmessage = this._onMessage.bind(this);
this.ws.onerror = this._onError.bind(this);
this.ws.onclose = this._onClose.bind(this);
this.ws.onopen = this._onOpen.bind(this);
}
_reconnect() {
this.ws = null;
this.connect();
}
_onError() {
console.log('Got Error');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onClose() {
console.log('Got Close');
// reconnect
this.emit('close');
if (this.reconnect) {
console.log('Reconnecting');
this._reconnect();
}
}
_onOpen() |
_onMessage(event) {
let message = event.data.toString().trim().split(/\r?\n/);
// uncomment this line to log all inbounc messages
//console.log(message);
for (var x=0;x<message.length;x++) {
// the last line is empty
if (message[x].length == 0) {
return;
}
let payload = {
tags: {},
command: false,
message: '',
raw: message[x]
}
const data = ircRegex.exec(message[x].trim());
if (data === null) {
console.error(`Couldnt parse message '${message[x]}'`);
return;
}
// items
// 0 is unparsed message
// 1 ircV3 tags
// 2 tmi.twitch.tv
// 3 COMMAND
// 4 Room
// 5 rest/message
// 0 ignore
// 1 tags
let tagdata = data[1] ? data[1] : false;
if (tagdata) {
let m;
do {
m = tagsRegex.exec(tagdata);
if (m) {
// unparsed, a, b
const [, key, val] = m;
// interrupts
switch (key) {
case 'badges':
case 'badge-info':
payload.tags[key] = {};
let b;
do {
b = badgesRegex.exec(val);
if (b) {
const [, badge, tier] = b;
payload.tags[key][badge] = tier;
}
} while (b);
break;
case 'emotes':
payload.tags[key] = {};
let e;
do {
e = emotesRegex.exec(val);
if (e) {
const [, emoteID, indices] = e;
// and split again
let em;
do {
em = emoteIndexRegex.exec(indices);
if (em) {
const [, startIndex, endIndex] = em;
// arrays!
if (!payload.tags[key][emoteID]) {
payload.tags[key][emoteID] = new Array();
}
payload.tags[key][emoteID].push({
startIndex,
endIndex
});
}
} while (em);
}
} while (e);
break;
default:
payload.tags[key] = val.replace(/\\s/g, ' ').trim();// for \s (space)
}
}
} while (m);
// Javascript magic helper
for (let key in payload.tags) {
let new_key = key.replace(/-/g, '_');
payload.tags[new_key] = payload.tags[key];
// optionally nailed the bad keys with `-` in the name
if (new_key != key)
delete payload.tags[key];
}
}
// 2 host
let host = hostRegex.exec(data[2]);
payload.user = false;
if (host != null) {
payload.user = host[1];
}
// 3 command
payload.command = data[3];
// 4 room
payload.room = data[4];
// 5 message
payload.message = data[5];
payload.action = false;
// check for action
const actionCheck = actionRegex.exec(payload.message);
if (actionCheck != null) {
// it's an action
payload.action = true;
payload.message = actionCheck[1];
}
// https://tools.ietf.org/html/rfc1459
// commands the template needs to reply
switch (payload.command) {
case 'PING':
// Twitch sent a "R U STILL THERE?"
this.ws.send('PONG :' + payload.message);
case 'PONG':
this.pinger.gotPong();
break;
}
switch (payload.command) {
case '001':
case '002':
case '003':
case '004':
// do nothing
break;
case 'CAP':
this.emit('CAP ACK', payload.raw);
break;
case '372':
case '375':
case '376':
// motd
this.emit('MOTD', payload.raw);
break;
case '353':
case '366':
// names
break;
case 'PING':
case 'PONG':
case 'JOIN':
// You joined a room
case 'PART':
// as the result of a PART command
// you left a room
case 'GLOBALUSERSTATE':
// You connected to the server
// here is some info about the user
case 'USERSTATE':
// Often sent when you send a PRIVMSG to a room
case 'ROOMSTATE':
// You joined a room here is the intial state (followers only etc)
// The Room state was changed, on change only sends what changed, not the whole settings blob
case 'WHISPER':
// you received a whisper, good luck replying!
case 'PRIVMSG':
// heres where the magic happens
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('bits')) {
// it's a cheer message
// but it's also a privmsg
this.emit(
'cheer',
payload
);
}
}
case 'USERNOTICE':
// see https://dev.twitch.tv/docs/irc/tags#usernotice-twitch-tags
// An "Twitch event" occured, like a subscription or raid
if (payload.hasOwnProperty('tags')) {
if (payload.tags.hasOwnProperty('msg-id')) {
this.emit(
`usernotice_${payload.tags['msg-id']}`,
payload
);
}
}
case 'NOTICE':
// General notices about Twitch/rooms you are in
// https://dev.twitch.tv/docs/irc/commands#notice-twitch-commands
// moderationy stuff
case 'CLEARCHAT':
// A users message is to be removed
// as the result of a ban or timeout
case 'CLEARMSG':
// a single users message was deleted
case 'HOSTTARGET':
// the room you are in, is now hosting someone or has ended the host
this.emit(
payload.command,
payload
);
this.emit(
payload.command.toLowerCase(),
payload
);
break;
case 'RECONNECT':
// The | {
// pinger
this.pinger.start();
this.ws.send('CAP REQ :twitch.tv/commands');
this.ws.send('CAP REQ :twitch.tv/tags');
this.emit('open');
} | identifier_body |
download-ganglia-metrics.py | =Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
},
'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
},
'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def _get_db_coordinator_nodes(cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
|
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster | formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates) | conditional_block |
download-ganglia-metrics.py | =Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
},
'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
},
'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
|
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def _get_db_coordinator_nodes(cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates)
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster}_ | match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4 | identifier_body |
download-ganglia-metrics.py | =Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
}, | 'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def _get_db_coordinator_nodes(cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates)
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster}_ | 'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
}, | random_line_split |
download-ganglia-metrics.py | =Bytes%20Sent',
'unit': 'bytes per second',
'name': 'send data',
},
'bytes_in': {
'uri': 'm=bytes_in&vl=bytes%2Fsec&ti=Bytes%20Received',
'unit': 'bytes per second',
'name': 'received data',
},
'mem_free': {
'uri': 'm=mem_free&vl=KB&ti=Free%20Memory',
'unit': 'kilobytes',
'name': 'free memory'
},
'mem_total': {
'uri': 'm=mem_total&vl=KB&ti=Total%20Memory',
'unit': 'kilobytes',
'name': 'total memory'
},
'cpu_user': {
'uri': 'm=cpu_user&vl=%25&ti=CPU%20User',
'unit': 'percent',
'name': 'CPU user'
},
'cpu_system': {
'uri': 'm=cpu_system&vl=%25&ti=CPU%20System',
'unit': 'percent',
'name': 'CPU system'
},
'cpu_num': {
'uri': 'm=cpu_num&vl=%25&ti=CPU%20Number',
'unit': '',
'name': 'number of processors'
},
'load_one': {
'uri': 'm=load_one&vl=%20&ti=One%20Minute%20Load%20Average',
'unit': '',
'name': 'one minute load average'
}
}
FORMATTED_METRICS = {
'memory': {
'func': lambda m: (m['mem_total'] - m['mem_free']) / float(m['mem_total']) * 100,
'unit': 'percent',
'name': 'memory usage'
},
'cpu': {
'func': lambda m: m['cpu_user'] + m['cpu_system'],
'unit': 'percent',
'name': 'CPU usage'
},
'input': {
'func': lambda m: m['bytes_in'],
'unit': 'bytes per second',
'name': 'data input'
},
'output': {
'func': lambda m: m['bytes_out'],
'unit': 'bytes per second',
'name': 'data output'
}
}
def _get_number_of_processors(cluster):
match = re.search(r'^cluster-([0-9]+)$', cluster)
n = match.group(1)
# All cluster architectuers use nodes with 4 processors
return int(n)/4
def _get_compute_nodes(cluster):
n = _get_number_of_processors(cluster)
return tuple([
'{0}-slurm-worker-{1:03d}'.format(cluster, i+1) for i in range(n)
])
def _get_fs_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-glusterfs-server-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
def | (cluster):
return tuple(['{0}-postgresql-master-001'.format(cluster)])
def _get_db_worker_nodes(cluster):
n = _get_number_of_processors(cluster)
# Ratio of compute to storage nodes is 1:4
return tuple([
'{0}-postgresql-worker-{1:03d}'.format(cluster, i+1)
for i in range(n/4)
])
HOST_GROUPS = {
'compute': {
'name': 'compute',
'hosts': lambda cluster: _get_compute_nodes(cluster)
},
'fs': {
'name': 'filesystem',
'hosts': lambda cluster: _get_fs_nodes(cluster)
},
'db_coordinator': {
'name': 'database coordinator',
'hosts': lambda cluster: _get_db_coordinator_nodes(cluster)
},
'db_worker': {
'name': 'database worker',
'hosts': lambda cluster: _get_db_worker_nodes(cluster)
}
}
def download_raw_metrics(host, cluster, workflow_statistics):
base_uri = 'http://{address}/ganglia/graph.php?r=4hr&c={cluster}'.format(
address=host, cluster=cluster
)
ganglia_dt_format = '%m%%2F%d%%2F%Y+%H%%3A%M'
workflow_dt_format = '%Y-%m-%d %H:%M:%S'
data = dict()
for step in WORKFLOW_STEPS:
current_index = np.where(workflow_statistics['name'] == step)[0][0]
current_step_stats = workflow_statistics.loc[current_index, :]
first_task_index = np.where(
workflow_statistics['name'] == '{}_init'.format(step)
)[0][0]
first_task_stats = workflow_statistics.loc[first_task_index, :]
start = first_task_stats['updated_at'].split('.')[0]
start = datetime.strptime(start, workflow_dt_format)
end = current_step_stats['updated_at'].split('.')[0]
end = datetime.strptime(end, workflow_dt_format)
start_uri = 'cs={}'.format(start.strftime(ganglia_dt_format))
end_uri = 'ce={}'.format(end.strftime(ganglia_dt_format))
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
metric_uri = RAW_METRICS[metric]['uri']
tmp_data = list()
for node in HOST_GROUPS[group]['hosts'](cluster):
node_uri = 'h={}'.format(node)
url = '&'.join([
base_uri, node_uri, start_uri, end_uri, metric_uri,
'csv=1'
])
response = requests.get(url)
f = StringIO(response.content)
stats = pd.read_csv(f, header=0, index_col=0, names=[node])
tmp_data.append(stats)
data[step][group][metric] = pd.concat(tmp_data, axis=1)
return data
def format_raw_metrics(data, workflow_statistics):
formatted_data = dict()
for step in WORKFLOW_STEPS:
formatted_data[step] = pd.DataFrame(
index=HOST_GROUPS.keys(), columns=FORMATTED_METRICS.keys()
)
for group in HOST_GROUPS:
aggregates = dict()
for metric in RAW_METRICS:
values = data[step][group][metric]
# TODO: cutoff?
# Some steps may not execute jobs on all nodes, which may
# introduce a bias upon summary statistics.
index = values > 0
if index.any().any():
values = values[index]
aggregates[metric] = np.nanmean(values)
for metric in FORMATTED_METRICS:
func = FORMATTED_METRICS[metric]['func']
formatted_data[step].loc[group, metric] = func(aggregates)
return formatted_data
def save_formatted_metrics(data, directory):
for step in WORKFLOW_STEPS:
for metric in FORMATTED_METRICS:
subdirectory = os.path.join(directory, step)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, 'metrics.csv')
with open(filepath, 'w') as f:
data[step].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
filepath = os.path.join(directory, step, 'metrics.csv')
with open(filepath, 'r') as f:
data[step] = pd.read_csv(f, header=0, index_col=0)
return data
def save_raw_metrics(data, directory):
for step in WORKFLOW_STEPS:
for group in HOST_GROUPS:
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
subdirectory = os.path.join(directory, step, group)
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
filepath = os.path.join(subdirectory, filename)
with open(filepath, 'w') as f:
data[step][group][metric].to_csv(f)
def load_raw_metrics(directory):
data = dict()
for step in WORKFLOW_STEPS:
data[step] = dict()
for group in HOST_GROUPS:
data[step][group] = dict()
for metric in RAW_METRICS:
filename = '{}.csv'.format(metric)
filepath = os.path.join(directory, step, group, filename)
with open(filepath, 'r') as f:
df = pd.read_csv(f, header=0, index_col=0)
data[step][group][metric] = df
return data
def load_workflow_statistics(filename):
return pd.read_csv(filename, header=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='''
Download Ganglia metrics obtained as part of a TissueMAPS benchmark
test. Raw metrics for each host are downloaded and persisted on disk
in CSV format. In addition, summary statistics are computed for
groups of hosts (compute, filesystem, database coordinator and
database worker) and persisted on disk in CSV format as well.
The program expects a file named ``{cluster}_ | _get_db_coordinator_nodes | identifier_name |
mfg_event_converter.py | fgEvent proto."""
# TODO(openhtf-team):
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
mfg_event.test_status = (
test_runs_pb2.MARGINAL_PASS
if record.marginal else test_runs_converter.OUTCOME_MAP[record.outcome])
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8
def _convert_object_to_json(obj): # pylint: disable=missing-function-docstring
# Since there will be parts of this that may have unicode, either as
# measurement or in the logs, we have to be careful and convert everything
# to unicode, merge, then encode to UTF-8 to put it into the proto.
def unsupported_type_handler(o):
# For bytes, JSONEncoder will fallback to this function to convert to str.
if isinstance(o, bytes):
return o.decode(encoding='utf-8', errors='replace')
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
else:
raise TypeError(repr(o) + ' is not JSON serializable')
json_encoder = json.JSONEncoder(
sort_keys=True,
indent=2,
ensure_ascii=False,
default=unsupported_type_handler)
return json_encoder.encode(obj).encode('utf-8', errors='replace')
def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
attachment = mfg_event.attachment.add()
attachment.name = 'argv'
argv = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
attachment.value_binary = _convert_object_to_json(argv)
attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
"""Makes unique names for phases, attachments, etc with duplicate names."""
def __init__(self, all_names):
self._counts = collections.Counter(all_names)
self._seen = collections.Counter()
def make_unique(self, name): # pylint: disable=missing-function-docstring
count = self._counts[name]
assert count >= 1, 'Seeing a new name that was not given to the constructor'
if count == 1:
# It's unique, so let's skip extra calculations.
return name
# Count the number of times we've seen this and return this one's index.
self._seen[name] += 1
main, ext = os.path.splitext(name)
return '%s_%d%s' % (main, self._seen[name] - 1, ext)
def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
else:
suffix = d.suffix
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (
sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON) # pytype: disable=wrong-arg-types # gen-stub-imports
return attachment
def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases
class PhaseCopier(object):
"""Copies measurements and attachments to an MfgEvent."""
def | __init__ | identifier_name | |
mfg_event_converter.py | attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
attachment = mfg_event.attachment.add()
attachment.name = 'argv'
argv = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
attachment.value_binary = _convert_object_to_json(argv)
attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
"""Makes unique names for phases, attachments, etc with duplicate names."""
def __init__(self, all_names):
self._counts = collections.Counter(all_names)
self._seen = collections.Counter()
def make_unique(self, name): # pylint: disable=missing-function-docstring
count = self._counts[name]
assert count >= 1, 'Seeing a new name that was not given to the constructor'
if count == 1:
# It's unique, so let's skip extra calculations.
return name
# Count the number of times we've seen this and return this one's index.
self._seen[name] += 1
main, ext = os.path.splitext(name)
return '%s_%d%s' % (main, self._seen[name] - 1, ext)
def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
else:
suffix = d.suffix
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (
sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON) # pytype: disable=wrong-arg-types # gen-stub-imports
return attachment
def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases
class PhaseCopier(object):
"""Copies measurements and attachments to an MfgEvent."""
def __init__(self,
all_phases,
attachment_cache: Optional[AttachmentCacheT] = None):
self._phases = all_phases
self._using_partial_uploads = attachment_cache is not None
self._attachment_cache = (
attachment_cache if self._using_partial_uploads else {})
def copy_measurements(self, mfg_event):
for phase in self._phases:
for name, measurement in sorted(phase.measurements.items()):
# Multi-dim measurements should already have been removed.
assert measurement.dimensions is None
self._copy_unidimensional_measurement(phase, name, measurement,
mfg_event)
def _copy_unidimensional_measurement(self, phase, name, measurement,
mfg_event):
"""Copy uni-dimensional measurements to the MfgEvent."""
mfg_measurement = mfg_event.measurement.add()
# Copy basic measurement fields.
mfg_measurement.name = name
if measurement.docstring:
mfg_measurement.description = measurement.docstring
mfg_measurement.parameter_tag.append(phase.name)
if (measurement.units and
measurement.units.code in test_runs_converter.UOM_CODE_MAP):
mfg_measurement.unit_code = (
test_runs_converter.UOM_CODE_MAP[measurement.units.code])
# Copy failed measurements as failure_codes. This happens early to include
# unset measurements.
if (measurement.outcome != measurements.Outcome.PASS and
phase.outcome != htf_test_record.PhaseOutcome.SKIP):
failure_code = mfg_event.failure_codes.add()
failure_code.code = name
failure_code.details = '\n'.join(str(v) for v in measurement.validators)
# Copy measurement value.
measured_value = measurement.measured_value
status_str = _measurement_outcome_to_test_run_status_name(
measurement.outcome, measurement.marginal)
mfg_measurement.status = test_runs_pb2.Status.Value(status_str)
if not measured_value.is_value_set:
return
value = measured_value.value
if isinstance(value, numbers.Number):
mfg_measurement.numeric_value = float(value)
elif isinstance(value, bytes):
mfg_measurement.text_value = value.decode(errors='replace')
else:
# Coercing to string.
mfg_measurement.text_value = str(value)
# Copy measurement validators.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
mfg_measurement.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
mfg_measurement.numeric_maximum = float(validator.maximum)
if validator.marginal_minimum is not None:
mfg_measurement.numeric_marginal_minimum = float(
validator.marginal_minimum)
if validator.marginal_maximum is not None:
mfg_measurement.numeric_marginal_maximum = float(
validator.marginal_maximum)
elif isinstance(validator, validators.RegexMatcher):
mfg_measurement.expected_text = validator.regex
else:
mfg_measurement.description += '\nValidator: ' + str(validator)
def copy_attachments(self, mfg_event: mfg_event_pb2.MfgEvent) -> bool:
"""Copies attachments into the MfgEvent from the configured phases.
If partial uploads are in use (indicated by configuring this class instance
with an Attachments cache), this function will exit early if the total
attachment data size exceeds a reasonable threshold to avoid the 2 GB
serialized proto limit.
Args:
mfg_event: The MfgEvent to copy into.
Returns:
True if all attachments are copied and False if only some attachments
were copied (only possible when partial uploads are being used).
"""
value_copied_attachment_sizes = []
skipped_attachment_names = []
for phase in self._phases:
for name, attachment in sorted(phase.attachments.items()):
size = attachment.size
attachment_cache_key = AttachmentCacheKey(name, size)
if attachment_cache_key in self._attachment_cache:
mfg_event.attachment.append(
self._attachment_cache[attachment_cache_key])
else:
at_least_one_attachment_for_partial_uploads = (
self._using_partial_uploads and value_copied_attachment_sizes)
if at_least_one_attachment_for_partial_uploads and ( | sum(value_copied_attachment_sizes) + size >
MAX_TOTAL_ATTACHMENT_BYTES):
skipped_attachment_names.append(name)
else:
value_copied_attachment_sizes.append(size) | random_line_split | |
mfg_event_converter.py |
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit
def mfg_event_from_test_record(
    record: htf_test_record.TestRecord,
    attachment_cache: Optional[AttachmentCacheT] = None,
) -> mfg_event_pb2.MfgEvent:
  """Convert an OpenHTF TestRecord to an MfgEvent proto.

  Most fields are copied over directly and some are pulled out of metadata
  (listed below). Multi-dimensional measurements are stored only in the JSON
  dump of the record.

  Important Note: This function mutates the test_record (phase measurement and
  attachment names are uniquified, multi-dim measurements are converted), so
  any output callbacks called after this callback will operate on the mutated
  record.

  Metadata fields:
    test_name: The name field from the test's TestOptions.
    config: The OpenHTF config, as a dictionary.
    assembly_events: List of AssemblyEvent protos.
      (see proto/assembly_event.proto).
    operator_name: Name of the test operator.

  Args:
    record: An OpenHTF TestRecord.
    attachment_cache: Provides a lookup to get EventAttachment protos for
      already uploaded (or converted) attachments.

  Returns:
    An MfgEvent proto representing the given test record.
  """
  mfg_event = mfg_event_pb2.MfgEvent()
  # Scalar run fields, phase summaries, failure codes and log messages.
  _populate_basic_data(mfg_event, record)
  # Attach a pristine JSON copy of the record plus process/config context.
  _attach_record_as_json(mfg_event, record)
  _attach_argv(mfg_event)
  _attach_config(mfg_event, record)
  # Only include assembly events if the test passed.
  if ('assembly_events' in record.metadata and
      mfg_event.test_status == test_runs_pb2.PASS):
    for assembly_event in record.metadata['assembly_events']:
      mfg_event.assembly_events.add().CopyFrom(assembly_event)
  # Mutates record.phases: multi-dim measurements become attachments, and
  # duplicate measurement/attachment names get unique suffixes.
  convert_multidim_measurements(record.phases)
  phase_copier = PhaseCopier(phase_uniquizer(record.phases), attachment_cache)
  phase_copier.copy_measurements(mfg_event)
  # copy_attachments returns False when partial uploads skipped some data.
  if not phase_copier.copy_attachments(mfg_event):
    mfg_event.test_run_type = mfg_event_pb2.TEST_RUN_PARTIAL
  return mfg_event
def _populate_basic_data(mfg_event: mfg_event_pb2.MfgEvent,
                         record: htf_test_record.TestRecord) -> None:
  """Copies data from the OpenHTF TestRecord to the MfgEvent proto.

  Fills in the scalar run fields (DUT serial, timing, station/test names,
  operator, version, status), part tags, per-phase summaries, failure codes
  from the record's outcome details, and the test log messages.

  Args:
    mfg_event: Output MfgEvent proto to populate.
    record: Source OpenHTF TestRecord.
  """
  # TODO(openhtf-team):
  # * Missing in proto: set run name from metadata.
  # * `part_tags` field on proto is unused
  # * `timings` field on proto is unused.
  # * Handle arbitrary units as uom_code/uom_suffix.
  # Populate non-repeated fields.
  mfg_event.dut_serial = record.dut_id
  mfg_event.start_time_ms = record.start_time_millis
  mfg_event.end_time_ms = record.end_time_millis
  mfg_event.tester_name = record.station_id
  mfg_event.test_name = record.metadata.get('test_name') or record.station_id
  mfg_event.operator_name = record.metadata.get('operator_name', '')
  mfg_event.test_version = str(record.metadata.get('test_version', ''))
  mfg_event.test_description = record.metadata.get('test_description', '')
  mfg_event.test_status = (
      test_runs_pb2.MARGINAL_PASS
      if record.marginal else test_runs_converter.OUTCOME_MAP[record.outcome])
  # Populate part_tags.
  mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
  # Populate phases.
  for phase in record.phases:
    mfg_phase = mfg_event.phases.add()
    mfg_phase.name = phase.name
    mfg_phase.description = phase.codeinfo.sourcecode
    mfg_phase.timing.start_time_millis = phase.start_time_millis
    mfg_phase.timing.end_time_millis = phase.end_time_millis
  # Populate failure codes.
  for details in record.outcome_details:
    failure_code = mfg_event.failure_codes.add()
    failure_code.code = details.code
    failure_code.details = details.description
  # Populate test logs.
  for log_record in record.log_records:
    test_log = mfg_event.test_logs.add()
    test_log.timestamp_millis = log_record.timestamp_millis
    test_log.log_message = log_record.message
    test_log.logger_name = log_record.logger_name
    test_log.levelno = log_record.level
    # Map the numeric Python logging level onto the proto enum. The final
    # branch is an `else` (not `elif <= CRITICAL`) so that custom levels
    # above CRITICAL clamp to CRITICAL instead of leaving `level` unset,
    # which the previous chain silently did.
    if log_record.level <= logging.DEBUG:
      test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
    elif log_record.level <= logging.INFO:
      test_log.level = test_runs_pb2.TestRunLogMessage.INFO
    elif log_record.level <= logging.WARNING:
      test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
    elif log_record.level <= logging.ERROR:
      test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
    else:
      test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
    test_log.log_source = log_record.source
    test_log.lineno = log_record.lineno
def _attach_record_as_json(mfg_event, record):
  """Attach a copy of the record as JSON so we have an un-mangled copy."""
  record_attachment = mfg_event.attachment.add()
  record_attachment.name = TEST_RECORD_ATTACHMENT_NAME
  # Reduce the record to plain Python types before serializing.
  record_attachment.value_binary = _convert_object_to_json(
      htf_data.convert_to_base_types(record))
  record_attachment.type = test_runs_pb2.TEXT_UTF8
def _convert_object_to_json(obj): # pylint: disable=missing-function-docstring
# Since there will be parts of this that may have unicode, either as
# measurement or in the logs, we have to be careful and convert everything
# to unicode, merge, then encode to UTF-8 to put it into the proto.
def unsupported_type_handler(o):
# For bytes, JSONEncoder will fallback to this function to convert to str.
if isinstance(o, bytes):
return o.decode(encoding='utf-8', errors='replace')
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
else:
raise TypeError(repr(o) + ' is not JSON serializable')
json_encoder = json.JSONEncoder(
sort_keys=True,
indent=2,
ensure_ascii=False,
default=unsupported_type_handler)
return json_encoder.encode(obj).encode('utf-8', errors='replace')
def _attach_config(mfg_event, record):
  """Attaches the OpenHTF config file as JSON."""
  # Nothing to attach when the record carries no config metadata.
  if 'config' not in record.metadata:
    return
  config_attachment = mfg_event.attachment.add()
  config_attachment.name = 'config'
  config_attachment.value_binary = _convert_object_to_json(
      record.metadata['config'])
  config_attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
  """Attaches this process's command line (argv) as a JSON attachment."""
  # Resolve the script path to an absolute, symlink-free location.
  command_line = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
  argv_attachment = mfg_event.attachment.add()
  argv_attachment.name = 'argv'
  argv_attachment.value_binary = _convert_object_to_json(command_line)
  argv_attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
  """Makes unique names for phases, attachments, etc with duplicate names."""

  def __init__(self, all_names):
    # Total occurrences of each name vs. how many requests for that name we
    # have already answered.
    self._counts = collections.Counter(all_names)
    self._seen = collections.Counter()

  def make_unique(self, name):
    """Returns ``name`` unchanged when globally unique, else an indexed variant."""
    total = self._counts[name]
    assert total >= 1, 'Seeing a new name that was not given to the constructor'
    if total == 1:
      # Only one occurrence overall, so no suffix is needed.
      return name
    # Index this occurrence (0-based) and splice the index in ahead of any
    # file extension, e.g. 'a.txt' -> 'a_0.txt'.
    index = self._seen[name]
    self._seen[name] = index + 1
    stem, extension = os.path.splitext(name)
    return '{}_{}{}'.format(stem, index, extension)
def phase_uniquizer(all_phases):
  """Makes the names of phase measurement and attachments unique.

  This function will make the names of measurements and attachments unique.
  It modifies the input all_phases in place; duplicates get an ``_<i>``
  suffix spliced in before any file extension (see UniqueNameMaker).

  Args:
    all_phases: the phases to make unique

  Returns:
    the phases now modified.
  """
  # One shared namespace for measurement names across all phases.
  measurement_name_maker = UniqueNameMaker(
      itertools.chain.from_iterable(
          phase.measurements.keys() for phase in all_phases
          if phase.measurements))
  # Attachment names share a namespace with the 'multidim_<name>' attachments
  # derived from multi-dimensional measurements, so reserve those too.
  attachment_names = list(itertools.chain.from_iterable(
      phase.attachments.keys() for phase in all_phases))
  attachment_names.extend(itertools.chain.from_iterable([
      'multidim_' + name for name, meas in phase.measurements.items()
      if meas.dimensions is not None
  ] for phase in all_phases if phase.measurements))
  attachment_name_maker = UniqueNameMaker(attachment_names)
  for phase in all_phases:
    # Make measurements unique.
    for name, _ in sorted(phase.measurements.items()):
      old_name = name
      name = measurement_name_maker.make_unique(name)
      # Update both the measurement's own name field and its dict key.
      phase.measurements[old_name].name = name
      phase.measurements[name] = phase.measurements.pop(old_name)
    # Make attachments unique.
    for name, _ in sorted(phase.attachments.items()):
      old_name = name
      name = attachment_name_maker.make_unique(name)
      phase.attachments[name] = phase.attachments.pop(old_name)
  return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment` | return | conditional_block | |
mfg_event_converter.py | the mutated record.
Metadata fields:
test_name: The name field from the test's TestOptions.
config: The OpenHTF config, as a dictionary.
assembly_events: List of AssemblyEvent protos.
(see proto/assembly_event.proto).
operator_name: Name of the test operator.
Args:
record: An OpenHTF TestRecord.
attachment_cache: Provides a lookup to get EventAttachment protos for
already uploaded (or converted) attachments.
Returns:
An MfgEvent proto representing the given test record.
"""
mfg_event = mfg_event_pb2.MfgEvent()
_populate_basic_data(mfg_event, record)
_attach_record_as_json(mfg_event, record)
_attach_argv(mfg_event)
_attach_config(mfg_event, record)
# Only include assembly events if the test passed.
if ('assembly_events' in record.metadata and
mfg_event.test_status == test_runs_pb2.PASS):
for assembly_event in record.metadata['assembly_events']:
mfg_event.assembly_events.add().CopyFrom(assembly_event)
convert_multidim_measurements(record.phases)
phase_copier = PhaseCopier(phase_uniquizer(record.phases), attachment_cache)
phase_copier.copy_measurements(mfg_event)
if not phase_copier.copy_attachments(mfg_event):
mfg_event.test_run_type = mfg_event_pb2.TEST_RUN_PARTIAL
return mfg_event
def _populate_basic_data(mfg_event: mfg_event_pb2.MfgEvent,
record: htf_test_record.TestRecord) -> None:
"""Copies data from the OpenHTF TestRecord to the MfgEvent proto."""
# TODO(openhtf-team):
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
mfg_event.test_status = (
test_runs_pb2.MARGINAL_PASS
if record.marginal else test_runs_converter.OUTCOME_MAP[record.outcome])
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8
def _convert_object_to_json(obj): # pylint: disable=missing-function-docstring
# Since there will be parts of this that may have unicode, either as
# measurement or in the logs, we have to be careful and convert everything
# to unicode, merge, then encode to UTF-8 to put it into the proto.
def unsupported_type_handler(o):
# For bytes, JSONEncoder will fallback to this function to convert to str.
|
json_encoder = json.JSONEncoder(
sort_keys=True,
indent=2,
ensure_ascii=False,
default=unsupported_type_handler)
return json_encoder.encode(obj).encode('utf-8', errors='replace')
def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8
def _attach_argv(mfg_event):
attachment = mfg_event.attachment.add()
attachment.name = 'argv'
argv = [os.path.realpath(sys.argv[0])] + sys.argv[1:]
attachment.value_binary = _convert_object_to_json(argv)
attachment.type = test_runs_pb2.TEXT_UTF8
class UniqueNameMaker(object):
"""Makes unique names for phases, attachments, etc with duplicate names."""
def __init__(self, all_names):
self._counts = collections.Counter(all_names)
self._seen = collections.Counter()
def make_unique(self, name): # pylint: disable=missing-function-docstring
count = self._counts[name]
assert count >= 1, 'Seeing a new name that was not given to the constructor'
if count == 1:
# It's unique, so let's skip extra calculations.
return name
# Count the number of times we've seen this and return this one's index.
self._seen[name] += 1
main, ext = os.path.splitext(name)
return '%s_%d%s' % (main, self._seen[name] - 1, ext)
def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases
def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
else:
suffix = d.suffix
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (
sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned | if isinstance(o, bytes):
return o.decode(encoding='utf-8', errors='replace')
elif isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
else:
raise TypeError(repr(o) + ' is not JSON serializable') | identifier_body |
fit_nixing.py | (w-1, int(float(box[2])))
y2 = min(h-1, int(float(box[3])))
B, G, R = color
img[y1:y2, x1:x2, 0] = img[y1:y2, x1:x2, 0] * alphaReserve + B * (1 - alphaReserve)
img[y1:y2, x1:x2, 1] = img[y1:y2, x1:x2, 1] * alphaReserve + G * (1 - alphaReserve)
img[y1:y2, x1:x2, 2] = img[y1:y2, x1:x2, 2] * alphaReserve + R * (1 - alphaReserve)
cv2.line(img, (x1, y1), (x1+7, y1), (255,255,255), thickness=1)
cv2.line(img, (x1, y1), (x1, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2-7, y1), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1+7, y2), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1, y2-7), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2-7, y2), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2, y2-7), (255,255,255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(font_pa | = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
for i, plate in enumerate(history):
ph, pw = plate.shape[:2]
if 70+50*i+ph >= blend_img.shape[0]:
continue
blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
text = '违章记录:第%d帧' %history_record[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
if history_platenum[i] != ' ':
text = '车牌识别:'+ history_platenum[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
iw = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
if iw > 0:
ih = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
if ih > 0:
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
all_area = float(box1_area + box2_area - iw * ih)
return iw * ih / all_area
return 0
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
if x1 == x2:
if (xc-x1) * (xr-x1) < 0:
return True
else:
return False
return ((y2-y1)/(x2-x1)*(xc-x1)+y1-yc) * \
((y2-y1)/(x2-x1)*(xr-x1)+y1-yr) < 0
def filter_area(boxes, area=50):
if len(boxes) > 0:
return np.where((boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0]) > area**2)[0]
else:
return np.array([], dtype=np.int)
def indicator(x):
x_square_sum, x_sum = np.sum(x**2), np.sum(x)
det = len(x) * x_square_sum - x_sum**2
return x_square_sum, x_sum, det
def solve_k_b(x, y):
x_square_sum, x_sum, det = indicator(x)
while det == 0:
x = x[:-1]
y = y[:-1]
x_square_sum, x_sum, det = indicator(x)
N_ = len(x)
k_ = np.sum(y * (N_*x-x_sum)) / det
b_ = np.sum(y * (x_square_sum-x*x_sum)) / det
return N_, k_, b_
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,225,0]
lane_img[np.where(img == 2)] = [0,225,255]
blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
# parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
plate_data = boxes_results[cnt]['plate_box']
if plate_data != []:
plate_boxes = [_['data'] for _ in plate_data]
plate_nums = [_['attrs']['plate_num']]
for i in range(len(plate_nums)):
if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
plate_nums.append( | th, textSize, encoding="utf-8")
draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
history_record | identifier_body |
fit_nixing.py |
def draw_box_v2(img, box, alphaReserve=0.8, color=None):
    """Shades ``box`` onto ``img`` in place and marks its corners with ticks.

    The boxed region is alpha-blended towards ``color`` (random BGR when
    None), and each corner gets two short white 7px tick lines.
    """
    # NOTE(review): relies on a module-level rand() helper defined elsewhere.
    color = (rand() * 255, rand() * 255, rand() * 255) if color is None else color
    h, w, _ = img.shape
    # Clamp the (possibly float/string) box coordinates to the image bounds.
    x1 = max(0, int(float(box[0])))
    y1 = max(0, int(float(box[1])))
    x2 = min(w - 1, int(float(box[2])))
    y2 = min(h - 1, int(float(box[3])))
    # Blend each BGR channel of the covered region towards the target color.
    for channel, channel_value in enumerate(color):
        img[y1:y2, x1:x2, channel] = (
            img[y1:y2, x1:x2, channel] * alphaReserve
            + channel_value * (1 - alphaReserve))
    # Draw a horizontal and a vertical white tick pointing inward from each
    # of the four corners (dx/dy flip sign on the far edges).
    for corner_x, dx in ((x1, 7), (x2, -7)):
        for corner_y, dy in ((y1, 7), (y2, -7)):
            cv2.line(img, (corner_x, corner_y), (corner_x + dx, corner_y),
                     (255, 255, 255), thickness=1)
            cv2.line(img, (corner_x, corner_y), (corner_x, corner_y + dy),
                     (255, 255, 255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
    # Draws UTF-8 (e.g. CJK) text onto a BGR image via PIL, since
    # cv2.putText cannot render non-ASCII glyphs. Returns a new BGR ndarray.
    # NOTE(review): Python 2 only -- uses ``unicode`` and str.decode('utf-8').
    if (isinstance(img, np.ndarray)):
        # Convert the OpenCV BGR array to a PIL RGB image for text drawing.
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)
    fontText = ImageFont.truetype(font_path, textSize, encoding="utf-8")
    draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
    # Convert back to the BGR channel order that OpenCV expects.
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
    # Renders the rolling violation-history panel onto the frame and ages the
    # per-entry counters; returns the frame plus the pruned history lists.
    # NOTE(review): reads module-level global ``w`` (frame width) which is set
    # by the main script -- confirm it is assigned before this is called.
    # Keep only entries whose counter is still positive.
    history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
    history_record = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
    history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
    # Decrement each surviving entry's counter by one frame.
    history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
    for i, plate in enumerate(history):
        ph, pw = plate.shape[:2]
        # Skip entries that would be drawn past the bottom of the frame.
        if 70+50*i+ph >= blend_img.shape[0]:
            continue
        # Paste the plate crop into the right-hand panel, one 50px row apiece.
        blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
        # Label: "violation record: frame N" (rendered text, keep as-is).
        text = '违章记录:第%d帧' %history_record[i]
        blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
            textSize=20, font_path="./LiHeiPro.ttf")
        if history_platenum[i] != ' ':
            # Label: "plate recognition: <plate number>".
            text = '车牌识别:'+ history_platenum[i]
            blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
                textSize=20, font_path="./LiHeiPro.ttf")
    return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
    """Intersection-over-union of two inclusive-pixel boxes [x1, y1, x2, y2]."""
    # Guard clauses: zero overlap on either axis means IoU is 0.
    inter_w = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
    if inter_w <= 0:
        return 0
    inter_h = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
    if inter_h <= 0:
        return 0
    area1 = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
    area2 = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
    union = float(area1 + area2 - inter_w * inter_h)
    return inter_w * inter_h / union
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc, yc, xr, yr, x1, y1, x2, y2):
    """True if segment (xc,yc)->(xr,yr) crosses the infinite line (x1,y1)->(x2,y2).

    The segment crosses when its two endpoints lie on strictly opposite sides
    of the line (touching the line counts as not crossing).
    """
    if x1 == x2:
        # Vertical line: endpoints straddle it iff their x-offsets differ in sign.
        return (xc - x1) * (xr - x1) < 0
    slope = (y2 - y1) / (x2 - x1)
    # Signed vertical distance of each endpoint from the line.
    side_c = slope * (xc - x1) + y1 - yc
    side_r = slope * (xr - x1) + y1 - yr
    return side_c * side_r < 0
def filter_area(boxes, area=50):
    """Returns indices of boxes whose pixel area exceeds ``area`` squared.

    Args:
        boxes: (N, 4+) array of [x1, y1, x2, y2, ...] rows; may be empty.
        area: side-length threshold; a box qualifies when its width * height
            is strictly greater than area**2.

    Returns:
        1-D integer index array into ``boxes`` (empty when none qualify).
    """
    if len(boxes) == 0:
        # The deprecated ``np.int`` alias (removed in NumPy 1.24) crashed
        # here on modern NumPy; the builtin int is the documented replacement.
        return np.array([], dtype=int)
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return np.where(widths * heights > area ** 2)[0]
def indicator(x):
    """Sufficient statistics for a 1-D least-squares line fit over samples x."""
    squared_total = np.sum(np.square(x))
    total = np.sum(x)
    # Determinant of the 2x2 normal-equation matrix [[n, sum x], [sum x, sum x^2]].
    determinant = len(x) * squared_total - total ** 2
    return squared_total, total, determinant
def solve_k_b(x, y):
    """Least-squares fit of y = k*x + b over sample arrays x and y.

    Preserves the original behavior of dropping trailing samples while the
    normal-equation determinant is zero, but raises instead of spinning
    forever when no sub-sample is solvable: the determinant is n^2 * Var(x),
    so it is zero exactly when all x values are equal, and shrinking from
    the end can never fix that -- the original loop never terminated.

    Args:
        x: 1-D numpy array of sample x coordinates.
        y: 1-D numpy array of sample y coordinates (same leading length).

    Returns:
        Tuple (n, k, b): number of samples used, fitted slope and intercept.

    Raises:
        ValueError: if no prefix of the samples yields a solvable system.
    """

    def _stats(values):
        # Inlined copy of the indicator() statistics so this block is
        # self-contained: (sum v^2, sum v, normal-equation determinant).
        sq = np.sum(values ** 2)
        s = np.sum(values)
        return sq, s, len(values) * sq - s ** 2

    x_square_sum, x_sum, det = _stats(x)
    while det == 0:
        if len(x) <= 1:
            raise ValueError('cannot fit a line: degenerate x samples')
        x = x[:-1]
        y = y[:-1]
        x_square_sum, x_sum, det = _stats(x)
    n = len(x)
    k = np.sum(y * (n * x - x_sum)) / det
    b = np.sum(y * (x_square_sum - x * x_sum)) / det
    return n, k, b
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,22 | type_map = {'BigTruck': '货车', 'Bus': '公交车', 'Lorry': '货车', 'MPV': '轿车', 'MiniVan': '轿车', 'MiniBus': '公交车',
'SUV': '轿车', 'Scooter': '轿车', 'Sedan_Car': '轿车', 'Special_vehicle': '其他', 'Three_Wheeled_Truck':'其他', 'other': '其他', 'Minibus': '公交车'}
| random_line_split | |
fit_nixing.py | (w-1, int(float(box[2])))
y2 = min(h-1, int(float(box[3])))
B, G, R = color
img[y1:y2, x1:x2, 0] = img[y1:y2, x1:x2, 0] * alphaReserve + B * (1 - alphaReserve)
img[y1:y2, x1:x2, 1] = img[y1:y2, x1:x2, 1] * alphaReserve + G * (1 - alphaReserve)
img[y1:y2, x1:x2, 2] = img[y1:y2, x1:x2, 2] * alphaReserve + R * (1 - alphaReserve)
cv2.line(img, (x1, y1), (x1+7, y1), (255,255,255), thickness=1)
cv2.line(img, (x1, y1), (x1, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2-7, y1), (255,255,255), thickness=1)
cv2.line(img, (x2, y1), (x2, y1+7), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1+7, y2), (255,255,255), thickness=1)
cv2.line(img, (x1, y2), (x1, y2-7), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2-7, y2), (255,255,255), thickness=1)
cv2.line(img, (x2, y2), (x2, y2-7), (255,255,255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(font_path, textSize, encoding="utf-8")
draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
history_record = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
for i, plate in enumerate(history):
ph, pw = plate.shape[:2]
if 70+50*i+ph >= blend_img.shape[0]:
continue
blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
text = '违章记录:第%d帧' %history_record[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
if history_platenum[i] != ' ':
text = '车牌识别:'+ history_platenum[i]
blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
iw = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
if iw > 0:
ih = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
if ih > 0:
box1_ | (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
all_area = float(box1_area + box2_area - iw * ih)
return iw * ih / all_area
return 0
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
if x1 == x2:
if (xc-x1) * (xr-x1) < 0:
return True
else:
return False
return ((y2-y1)/(x2-x1)*(xc-x1)+y1-yc) * \
((y2-y1)/(x2-x1)*(xr-x1)+y1-yr) < 0
def filter_area(boxes, area=50):
if len(boxes) > 0:
return np.where((boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0]) > area**2)[0]
else:
return np.array([], dtype=np.int)
def indicator(x):
x_square_sum, x_sum = np.sum(x**2), np.sum(x)
det = len(x) * x_square_sum - x_sum**2
return x_square_sum, x_sum, det
def solve_k_b(x, y):
x_square_sum, x_sum, det = indicator(x)
while det == 0:
x = x[:-1]
y = y[:-1]
x_square_sum, x_sum, det = indicator(x)
N_ = len(x)
k_ = np.sum(y * (N_*x-x_sum)) / det
b_ = np.sum(y * (x_square_sum-x*x_sum)) / det
return N_, k_, b_
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,225,0]
lane_img[np.where(img == 2)] = [0,225,255]
blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
# parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
plate_data = boxes_results[cnt]['plate_box']
if plate_data != []:
plate_boxes = [_['data'] for _ in plate_data]
plate_nums = [_['attrs']['plate_num']]
for i in range(len(plate_nums)):
if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
plate_nums.append( | area = | identifier_name |
fit_nixing.py | 0, 0),\
textSize=20, font_path="./LiHeiPro.ttf")
return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
iw = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
if iw > 0:
ih = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
if ih > 0:
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
all_area = float(box1_area + box2_area - iw * ih)
return iw * ih / all_area
return 0
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
if x1 == x2:
if (xc-x1) * (xr-x1) < 0:
return True
else:
return False
return ((y2-y1)/(x2-x1)*(xc-x1)+y1-yc) * \
((y2-y1)/(x2-x1)*(xr-x1)+y1-yr) < 0
def filter_area(boxes, area=50):
if len(boxes) > 0:
return np.where((boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0]) > area**2)[0]
else:
return np.array([], dtype=np.int)
def indicator(x):
x_square_sum, x_sum = np.sum(x**2), np.sum(x)
det = len(x) * x_square_sum - x_sum**2
return x_square_sum, x_sum, det
def solve_k_b(x, y):
x_square_sum, x_sum, det = indicator(x)
while det == 0:
x = x[:-1]
y = y[:-1]
x_square_sum, x_sum, det = indicator(x)
N_ = len(x)
k_ = np.sum(y * (N_*x-x_sum)) / det
b_ = np.sum(y * (x_square_sum-x*x_sum)) / det
return N_, k_, b_
if __name__ == "__main__":
json_path = 'nixing/nixingattrs.json'
boxes_results = []
with open(json_path, 'r') as f:
line = f.readline()
while line:
this_img = json.loads(line.strip())
boxes_results.append(this_img)
line = f.readline()
save_dir = 'nixing_v3'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
with open('nixing/nixing_mask_res.pkl', 'rb') as f:
img_list = cPickle.load(f)['all_seg_results']
img_list = [_['seg_results'] for _ in img_list]
img_dir = './nixing/frames'
num_img = len(os.listdir(img_dir))
history = []
history_cnt = []
history_record = []
history_platenum = []
for cnt in range(num_img):
print('%d/%d' %(cnt,num_img))
# if cnt < 110:
# continue
img = img_list[cnt]
im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
raw_img = cv2.imread(im_path)
lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
lane_img[np.where(img == 1)] = [0,225,0]
lane_img[np.where(img == 2)] = [0,225,255]
blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
# parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
plate_data = boxes_results[cnt]['plate_box']
if plate_data != []:
plate_boxes = [_['data'] for _ in plate_data]
plate_nums = [_['attrs']['plate_num']]
for i in range(len(plate_nums)):
if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
plate_nums.append(plate_nums[i])
else:
plate_nums[i] = ' '
print(plate_nums[-1])
else:
plate_boxes, plate_nums = [], []
head_box, tail_box = [], []
for item in boxes_results[cnt]['common_box']:
if item['attrs']['head'] == 'tail':
tail_box.append(item['data'])
elif item['attrs']['head'] == 'head':
head_box.append(item['data'])
else:
raise ValueError('unsupported attr!')
# draw the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
for box, attrs in zip(vehicle_boxes, vehicle_attrs):
draw_box_v2(blend_img, box, color=(255,0,0), alphaReserve=0.9)
text = color_map[attrs['color']]
text += type_map[attrs['type']]
cv2.rectangle(blend_img, (int(box[0]), int(box[1])-20), (int(box[0])+70, int(box[1])), (128, 128, 128), thickness=-1)
blend_img = cv2ImgAddText(blend_img, text, int(box[0]), int(box[1]-20), textColor=(255, 255, 255),\
textSize=15, font_path="./LiHeiPro.ttf")
for box in plate_boxes:
draw_box_v2(blend_img, box, color=(0,0,255), alphaReserve=0.7)
for box in head_box:
draw_box_v2(blend_img, box, color=(0,0,128), alphaReserve=0.7)
for box in tail_box:
draw_box_v2(blend_img, box, color=(0,0,128))
# cluster the lane points
neighbor = list(range(1, config.max_neighbor_distance+1))
neighbor.extend([-i for i in neighbor])
neighbor.append(0)
dsize = (int(img.shape[1]*config.resize_factor), int(img.shape[0]*config.resize_factor))
resized_img = cv2.resize(img, dsize, fx=config.resize_factor,fy=config.resize_factor)
group_res = bfs_clustering(resized_img, neighbor, ig_cls=0, show=False)
h, w = img.shape[:2]
resized_h, resized_w = resized_img.shape[:2]
# title = '基于X2的"去中心化"违章记录仪'
# blend_img = cv2ImgAddText(blend_img, title, 20,20, textColor=(0, 0, 0),\
# textSize=45, font_path="./LiHeiPro.ttf")
title = '逆行车辆:'
blend_img = cv2ImgAddText(blend_img, title, w-200,20, textColor=(255, 0, 0),\
textSize=25, font_path="./LiHeiPro.ttf")
lanes = []
b = []
for cls in group_res:
print('----cls %d----' %cls)
for g in group_res[cls]:
if len(g) < config.minimum_points:
continue
print('group length: %d' %(len(g)))
x, y = [], []
for i, j in g:
x.append(j)
y.append(resized_h-1-i)
x = np.array(x, dtype='float32') / config.resize_factor
y = np.array(y, dtype='float32') / config.resize_factor
N_, k_, b_ = solve_k_b(x, y)
print(N_, k_, b_)
x1, x2 = np.min(x), np.max(x)
y1, y2 = k_ * x1 + b_, k_ * x2 + b_
y1, y2 = h-1-y1, h-1-y2
if cls == 1:
color = (0,225,0)
else:
color = (0,225,225)
if k_ > 0.1:
lanes.append([x1,y1,x2,y2])
b.append(b_)
| # cv2.line(blend_ | conditional_block | |
main.rs | : usize, limit: &mut usize) -> TlsResult<()> {
*limit = limit.checked_sub(length).ok_or(TlsError::DecodeError)?;
Ok(())
}
impl<R: AsyncReadExt> TlsHandshakeReader<R> {
fn new(source: R) -> Self {
TlsHandshakeReader {
source: source,
buffer: Vec::with_capacity(4096),
offset: 0,
limit: 0,
}
}
fn seek(&mut self, offset: usize, limit: &mut usize) -> TlsResult<()> {
self.offset += offset;
check_length(offset, limit)
}
async fn fill_to(&mut self, target: usize) -> TlsResult<()> {
while self.buffer.len() < target {
if self.source.read_buf(&mut self.buffer).await? == 0 {
return Err(TlsError::DecodeError);
}
}
Ok(())
}
async fn read(&mut self) -> TlsResult<u8> | // section 5.1: "The record layer fragments information blocks into TLSPlaintext
// records carrying data in chunks of 2^14 bytes or less."
if length > (1 << 14) {
return Err(TlsError::RecordOverflow);
}
self.offset += 5;
self.limit += 5 + length;
}
self.fill_to(self.offset + 1).await?;
let v = self.buffer[self.offset];
self.offset += 1;
Ok(v)
}
async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
debug_assert!(length > 0 && length <= 4);
let mut result = 0;
for _ in 0..length {
result <<= 8;
result |= self.read().await? as usize;
}
Ok(result)
}
async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
dest.write_all(&self.buffer[..]).await?;
Ok(self.source)
}
}
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
// section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
// ClientHello as its first TLS message."
if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
return Err(TlsError::UnexpectedMessage);
}
let mut hello_length = source.read_length(3).await?;
// skip legacy_version (2) and random (32)
source.seek(34, &mut hello_length)?;
// skip legacy_session_id
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// skip cipher_suites
check_length(2, &mut hello_length)?;
let length = source.read_length(2).await?;
source.seek(length, &mut hello_length)?;
// skip legacy_compression_methods
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
// field from prior versions of TLS. The presence of extensions can be detected by determining
// whether there are bytes following the compression_methods field at the end of the
// ClientHello. Note that this method of detecting optional data differs from the normal TLS
// method of having a variable-length field, but it is used for compatibility with TLS before
// extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
// check that the message either contains no data after legacy_compression_methods or that it
// contains a valid extensions block with no data following. If not, then it MUST abort the
// handshake with a "decode_error" alert."
//
// If there is no extensions block, treat it like a server name extension was present but with
// an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
if hello_length == 0 {
return Err(TlsError::UnrecognizedName);
}
// ClientHello ends immediately after the extensions
check_length(2, &mut hello_length)?;
if hello_length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while hello_length > 0 {
check_length(4, &mut hello_length)?;
let extension = source.read_length(2).await?;
let mut length = source.read_length(2).await?;
if extension != TLS_EXTENSION_SNI {
source.seek(length, &mut hello_length)?;
continue;
}
check_length(length, &mut hello_length)?;
// This extension ends immediately after server_name_list
check_length(2, &mut length)?;
if length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while length > 0 {
check_length(3, &mut length)?;
let name_type = source.read().await?;
let name_length = source.read_length(2).await?;
if name_type != TLS_SNI_HOST_NAME_TYPE {
source.seek(name_length, &mut length)?;
continue;
}
check_length(name_length, &mut length)?;
// RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
// same name_type." So we can just extract the first one we find.
// Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
// trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
// attacker can't make us heap-allocate 64kB for a hostname we'll never match.
if name_length > 254 {
return Err(TlsError::UnrecognizedName);
}
// The following validation rules ensure that we won't return a hostname which could
// lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
// equivalent hostnames are only returned in a canonical form. This does not validate
// anything else about the hostname, such as length limits on individual labels.
let mut name = Vec::with_capacity(name_length);
let mut start_of_label = true;
for _ in 0..name_length {
let b = source.read().await?.to_ascii_lowercase();
if start_of_label && (b == b'-' || b == b'.') {
// a hostname label can't start with dot or dash
return Err(TlsError::UnrecognizedName);
}
// the next byte is the start of a label iff this one was a dot
start_of_label = b'.' == b;
match b {
b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
_ => return Err(TlsError::UnrecognizedName),
}
}
// If we're expecting a new label after reading the whole hostname, then either the
// name was empty or it ended with a dot; neither is allowed.
if start_of_label {
return Err(TlsError::UnrecognizedName);
}
// safety: every byte was already checked for being a valid subset of UTF-8
let name = unsafe { String::from_utf8_unchecked(name) };
return Ok(name);
}
// None of the names were of the right type, and section 4.2 says "There MUST NOT be more
// than one extension of the same type in a given extension block", so there definitely
// isn't a server name in this ClientHello.
break;
}
// Like when the extensions block is absent, pretend as if a server name was present but not
// recognized.
Err(TlsError::UnrecognizedName)
}
fn hash_hostname(hostname: String) -> PathBuf {
#[cfg(feature = "hashed")]
let hostname = {
use blake2::{Blake2s, Digest};
let hash = Blake2s::digest(hostname.as_bytes());
base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
};
hostname.into | {
while self.offset >= self.limit {
self.fill_to(self.limit + 5).await?;
// section 5.1: "Handshake messages MUST NOT be interleaved with other record types.
// That is, if a handshake message is split over two or more records, there MUST NOT be
// any other records between them."
if self.buffer[self.limit] != TLS_HANDSHAKE_CONTENT_TYPE {
return Err(TlsError::UnexpectedMessage);
}
let length = (self.buffer[self.limit + 3] as usize) << 8
| (self.buffer[self.limit + 4] as usize);
// section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
// types, even if those fragments contain padding."
if length == 0 {
return Err(TlsError::DecodeError);
}
| identifier_body |
main.rs | self.buffer[self.limit + 3] as usize) << 8
| (self.buffer[self.limit + 4] as usize);
// section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
// types, even if those fragments contain padding."
if length == 0 {
return Err(TlsError::DecodeError);
}
// section 5.1: "The record layer fragments information blocks into TLSPlaintext
// records carrying data in chunks of 2^14 bytes or less."
if length > (1 << 14) {
return Err(TlsError::RecordOverflow);
}
self.offset += 5;
self.limit += 5 + length;
}
self.fill_to(self.offset + 1).await?;
let v = self.buffer[self.offset];
self.offset += 1;
Ok(v)
}
async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
debug_assert!(length > 0 && length <= 4);
let mut result = 0;
for _ in 0..length {
result <<= 8;
result |= self.read().await? as usize;
}
Ok(result)
}
async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
dest.write_all(&self.buffer[..]).await?;
Ok(self.source)
}
}
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
// section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
// ClientHello as its first TLS message."
if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
return Err(TlsError::UnexpectedMessage);
}
let mut hello_length = source.read_length(3).await?;
// skip legacy_version (2) and random (32)
source.seek(34, &mut hello_length)?;
// skip legacy_session_id
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// skip cipher_suites
check_length(2, &mut hello_length)?;
let length = source.read_length(2).await?;
source.seek(length, &mut hello_length)?;
// skip legacy_compression_methods
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
// field from prior versions of TLS. The presence of extensions can be detected by determining
// whether there are bytes following the compression_methods field at the end of the
// ClientHello. Note that this method of detecting optional data differs from the normal TLS
// method of having a variable-length field, but it is used for compatibility with TLS before
// extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
// check that the message either contains no data after legacy_compression_methods or that it
// contains a valid extensions block with no data following. If not, then it MUST abort the
// handshake with a "decode_error" alert."
//
// If there is no extensions block, treat it like a server name extension was present but with
// an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
if hello_length == 0 {
return Err(TlsError::UnrecognizedName);
}
// ClientHello ends immediately after the extensions
check_length(2, &mut hello_length)?;
if hello_length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while hello_length > 0 {
check_length(4, &mut hello_length)?;
let extension = source.read_length(2).await?;
let mut length = source.read_length(2).await?;
if extension != TLS_EXTENSION_SNI {
source.seek(length, &mut hello_length)?;
continue;
}
check_length(length, &mut hello_length)?;
// This extension ends immediately after server_name_list
check_length(2, &mut length)?;
if length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while length > 0 {
check_length(3, &mut length)?;
let name_type = source.read().await?;
let name_length = source.read_length(2).await?;
if name_type != TLS_SNI_HOST_NAME_TYPE {
source.seek(name_length, &mut length)?;
continue;
}
check_length(name_length, &mut length)?;
// RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
// same name_type." So we can just extract the first one we find.
// Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
// trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
// attacker can't make us heap-allocate 64kB for a hostname we'll never match.
if name_length > 254 {
return Err(TlsError::UnrecognizedName);
}
// The following validation rules ensure that we won't return a hostname which could
// lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
// equivalent hostnames are only returned in a canonical form. This does not validate
// anything else about the hostname, such as length limits on individual labels.
let mut name = Vec::with_capacity(name_length);
let mut start_of_label = true;
for _ in 0..name_length {
let b = source.read().await?.to_ascii_lowercase();
if start_of_label && (b == b'-' || b == b'.') {
// a hostname label can't start with dot or dash
return Err(TlsError::UnrecognizedName);
}
// the next byte is the start of a label iff this one was a dot
start_of_label = b'.' == b;
match b {
b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
_ => return Err(TlsError::UnrecognizedName),
}
}
// If we're expecting a new label after reading the whole hostname, then either the
// name was empty or it ended with a dot; neither is allowed.
if start_of_label {
return Err(TlsError::UnrecognizedName);
}
// safety: every byte was already checked for being a valid subset of UTF-8
let name = unsafe { String::from_utf8_unchecked(name) };
return Ok(name);
}
// None of the names were of the right type, and section 4.2 says "There MUST NOT be more
// than one extension of the same type in a given extension block", so there definitely
// isn't a server name in this ClientHello.
break;
}
// Like when the extensions block is absent, pretend as if a server name was present but not
// recognized.
Err(TlsError::UnrecognizedName)
}
fn hash_hostname(hostname: String) -> PathBuf {
#[cfg(feature = "hashed")]
let hostname = {
use blake2::{Blake2s, Digest};
let hash = Blake2s::digest(hostname.as_bytes());
base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
};
hostname.into()
}
async fn connect_backend<R: AsyncReadExt>(
source: R,
local: SocketAddr,
remote: SocketAddr,
) -> TlsResult<(R, net::UnixStream)> {
let mut source = TlsHandshakeReader::new(source);
// timeout can return a "Elapsed" error, or else return the result from get_server_name, which
// might be a TlsError. So there are two "?" here to unwrap both.
let name = timeout(Duration::from_secs(10), get_server_name(&mut source)).await??;
let path = hash_hostname(name);
// The client sent a name and it's been validated to be safe to use as a path. Consider it a
// valid server name if connecting to the path doesn't return any of these errors:
// - is a directory (NotFound after joining a relative path)
// - which contains an entry named "tls-socket" (NotFound)
// - which is accessible to this proxy (PermissionDenied)
// - and is a listening socket (ConnectionRefused)
// If it isn't a valid server name, then that's the error to report. Anything else is not the
// client's fault.
let mut backend = net::UnixStream::connect(path.join("tls-socket"))
.await
.map_err(|e| match e.kind() {
ErrorKind::NotFound | ErrorKind::PermissionDenied | ErrorKind::ConnectionRefused => | {
TlsError::UnrecognizedName
} | conditional_block | |
main.rs | : usize, limit: &mut usize) -> TlsResult<()> {
*limit = limit.checked_sub(length).ok_or(TlsError::DecodeError)?;
Ok(())
}
impl<R: AsyncReadExt> TlsHandshakeReader<R> {
fn new(source: R) -> Self {
TlsHandshakeReader {
source: source,
buffer: Vec::with_capacity(4096),
offset: 0,
limit: 0,
}
}
fn seek(&mut self, offset: usize, limit: &mut usize) -> TlsResult<()> {
self.offset += offset;
check_length(offset, limit)
}
async fn fill_to(&mut self, target: usize) -> TlsResult<()> {
while self.buffer.len() < target {
if self.source.read_buf(&mut self.buffer).await? == 0 {
return Err(TlsError::DecodeError);
}
}
Ok(())
}
async fn read(&mut self) -> TlsResult<u8> {
while self.offset >= self.limit {
self.fill_to(self.limit + 5).await?;
// section 5.1: "Handshake messages MUST NOT be interleaved with other record types.
// That is, if a handshake message is split over two or more records, there MUST NOT be
// any other records between them."
if self.buffer[self.limit] != TLS_HANDSHAKE_CONTENT_TYPE {
return Err(TlsError::UnexpectedMessage);
}
let length = (self.buffer[self.limit + 3] as usize) << 8
| (self.buffer[self.limit + 4] as usize);
// section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
// types, even if those fragments contain padding."
if length == 0 {
return Err(TlsError::DecodeError);
}
// section 5.1: "The record layer fragments information blocks into TLSPlaintext
// records carrying data in chunks of 2^14 bytes or less."
if length > (1 << 14) {
return Err(TlsError::RecordOverflow);
}
self.offset += 5;
self.limit += 5 + length; | Ok(v)
}
async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
debug_assert!(length > 0 && length <= 4);
let mut result = 0;
for _ in 0..length {
result <<= 8;
result |= self.read().await? as usize;
}
Ok(result)
}
async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
dest.write_all(&self.buffer[..]).await?;
Ok(self.source)
}
}
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
// section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
// ClientHello as its first TLS message."
if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
return Err(TlsError::UnexpectedMessage);
}
let mut hello_length = source.read_length(3).await?;
// skip legacy_version (2) and random (32)
source.seek(34, &mut hello_length)?;
// skip legacy_session_id
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// skip cipher_suites
check_length(2, &mut hello_length)?;
let length = source.read_length(2).await?;
source.seek(length, &mut hello_length)?;
// skip legacy_compression_methods
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
// field from prior versions of TLS. The presence of extensions can be detected by determining
// whether there are bytes following the compression_methods field at the end of the
// ClientHello. Note that this method of detecting optional data differs from the normal TLS
// method of having a variable-length field, but it is used for compatibility with TLS before
// extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
// check that the message either contains no data after legacy_compression_methods or that it
// contains a valid extensions block with no data following. If not, then it MUST abort the
// handshake with a "decode_error" alert."
//
// If there is no extensions block, treat it like a server name extension was present but with
// an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
if hello_length == 0 {
return Err(TlsError::UnrecognizedName);
}
// ClientHello ends immediately after the extensions
check_length(2, &mut hello_length)?;
if hello_length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while hello_length > 0 {
check_length(4, &mut hello_length)?;
let extension = source.read_length(2).await?;
let mut length = source.read_length(2).await?;
if extension != TLS_EXTENSION_SNI {
source.seek(length, &mut hello_length)?;
continue;
}
check_length(length, &mut hello_length)?;
// This extension ends immediately after server_name_list
check_length(2, &mut length)?;
if length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while length > 0 {
check_length(3, &mut length)?;
let name_type = source.read().await?;
let name_length = source.read_length(2).await?;
if name_type != TLS_SNI_HOST_NAME_TYPE {
source.seek(name_length, &mut length)?;
continue;
}
check_length(name_length, &mut length)?;
// RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
// same name_type." So we can just extract the first one we find.
// Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
// trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
// attacker can't make us heap-allocate 64kB for a hostname we'll never match.
if name_length > 254 {
return Err(TlsError::UnrecognizedName);
}
// The following validation rules ensure that we won't return a hostname which could
// lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
// equivalent hostnames are only returned in a canonical form. This does not validate
// anything else about the hostname, such as length limits on individual labels.
let mut name = Vec::with_capacity(name_length);
let mut start_of_label = true;
for _ in 0..name_length {
let b = source.read().await?.to_ascii_lowercase();
if start_of_label && (b == b'-' || b == b'.') {
// a hostname label can't start with dot or dash
return Err(TlsError::UnrecognizedName);
}
// the next byte is the start of a label iff this one was a dot
start_of_label = b'.' == b;
match b {
b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
_ => return Err(TlsError::UnrecognizedName),
}
}
// If we're expecting a new label after reading the whole hostname, then either the
// name was empty or it ended with a dot; neither is allowed.
if start_of_label {
return Err(TlsError::UnrecognizedName);
}
// safety: every byte was already checked for being a valid subset of UTF-8
let name = unsafe { String::from_utf8_unchecked(name) };
return Ok(name);
}
// None of the names were of the right type, and section 4.2 says "There MUST NOT be more
// than one extension of the same type in a given extension block", so there definitely
// isn't a server name in this ClientHello.
break;
}
// Like when the extensions block is absent, pretend as if a server name was present but not
// recognized.
Err(TlsError::UnrecognizedName)
}
fn hash_hostname(hostname: String) -> PathBuf {
#[cfg(feature = "hashed")]
let hostname = {
use blake2::{Blake2s, Digest};
let hash = Blake2s::digest(hostname.as_bytes());
base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
};
hostname.into()
| }
self.fill_to(self.offset + 1).await?;
let v = self.buffer[self.offset];
self.offset += 1; | random_line_split |
main.rs | (length: usize, limit: &mut usize) -> TlsResult<()> {
*limit = limit.checked_sub(length).ok_or(TlsError::DecodeError)?;
Ok(())
}
impl<R: AsyncReadExt> TlsHandshakeReader<R> {
fn new(source: R) -> Self {
TlsHandshakeReader {
source: source,
buffer: Vec::with_capacity(4096),
offset: 0,
limit: 0,
}
}
fn seek(&mut self, offset: usize, limit: &mut usize) -> TlsResult<()> {
self.offset += offset;
check_length(offset, limit)
}
async fn fill_to(&mut self, target: usize) -> TlsResult<()> {
while self.buffer.len() < target {
if self.source.read_buf(&mut self.buffer).await? == 0 {
return Err(TlsError::DecodeError);
}
}
Ok(())
}
async fn read(&mut self) -> TlsResult<u8> {
while self.offset >= self.limit {
self.fill_to(self.limit + 5).await?;
// section 5.1: "Handshake messages MUST NOT be interleaved with other record types.
// That is, if a handshake message is split over two or more records, there MUST NOT be
// any other records between them."
if self.buffer[self.limit] != TLS_HANDSHAKE_CONTENT_TYPE {
return Err(TlsError::UnexpectedMessage);
}
let length = (self.buffer[self.limit + 3] as usize) << 8
| (self.buffer[self.limit + 4] as usize);
// section 5.1: "Implementations MUST NOT send zero-length fragments of Handshake
// types, even if those fragments contain padding."
if length == 0 {
return Err(TlsError::DecodeError);
}
// section 5.1: "The record layer fragments information blocks into TLSPlaintext
// records carrying data in chunks of 2^14 bytes or less."
if length > (1 << 14) {
return Err(TlsError::RecordOverflow);
}
self.offset += 5;
self.limit += 5 + length;
}
self.fill_to(self.offset + 1).await?;
let v = self.buffer[self.offset];
self.offset += 1;
Ok(v)
}
async fn read_length(&mut self, length: u8) -> TlsResult<usize> {
debug_assert!(length > 0 && length <= 4);
let mut result = 0;
for _ in 0..length {
result <<= 8;
result |= self.read().await? as usize;
}
Ok(result)
}
async fn into_source<W: AsyncWriteExt + Unpin>(self, dest: &mut W) -> io::Result<R> {
dest.write_all(&self.buffer[..]).await?;
Ok(self.source)
}
}
async fn get_server_name<R: AsyncReadExt>(source: &mut TlsHandshakeReader<R>) -> TlsResult<String> {
// section 4.1.2: "When a client first connects to a server, it is REQUIRED to send the
// ClientHello as its first TLS message."
if source.read().await? != TLS_HANDSHAKE_TYPE_CLIENT_HELLO {
return Err(TlsError::UnexpectedMessage);
}
let mut hello_length = source.read_length(3).await?;
// skip legacy_version (2) and random (32)
source.seek(34, &mut hello_length)?;
// skip legacy_session_id
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// skip cipher_suites
check_length(2, &mut hello_length)?;
let length = source.read_length(2).await?;
source.seek(length, &mut hello_length)?;
// skip legacy_compression_methods
check_length(1, &mut hello_length)?;
let length = source.read_length(1).await?;
source.seek(length, &mut hello_length)?;
// section 4.1.2: "TLS 1.3 servers might receive ClientHello messages without an extensions
// field from prior versions of TLS. The presence of extensions can be detected by determining
// whether there are bytes following the compression_methods field at the end of the
// ClientHello. Note that this method of detecting optional data differs from the normal TLS
// method of having a variable-length field, but it is used for compatibility with TLS before
// extensions were defined. ... If negotiating a version of TLS prior to 1.3, a server MUST
// check that the message either contains no data after legacy_compression_methods or that it
// contains a valid extensions block with no data following. If not, then it MUST abort the
// handshake with a "decode_error" alert."
//
// If there is no extensions block, treat it like a server name extension was present but with
// an unrecognized name. I don't think the spec allows this, but it doesn't NOT allow it?
if hello_length == 0 {
return Err(TlsError::UnrecognizedName);
}
// ClientHello ends immediately after the extensions
check_length(2, &mut hello_length)?;
if hello_length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while hello_length > 0 {
check_length(4, &mut hello_length)?;
let extension = source.read_length(2).await?;
let mut length = source.read_length(2).await?;
if extension != TLS_EXTENSION_SNI {
source.seek(length, &mut hello_length)?;
continue;
}
check_length(length, &mut hello_length)?;
// This extension ends immediately after server_name_list
check_length(2, &mut length)?;
if length != source.read_length(2).await? {
return Err(TlsError::DecodeError);
}
while length > 0 {
check_length(3, &mut length)?;
let name_type = source.read().await?;
let name_length = source.read_length(2).await?;
if name_type != TLS_SNI_HOST_NAME_TYPE {
source.seek(name_length, &mut length)?;
continue;
}
check_length(name_length, &mut length)?;
// RFC 6066 section 3: "The ServerNameList MUST NOT contain more than one name of the
// same name_type." So we can just extract the first one we find.
// Hostnames are limited to 255 octets with a trailing dot, but RFC 6066 prohibits the
// trailing dot, so the limit here is 254 octets. Enforcing this limit ensures an
// attacker can't make us heap-allocate 64kB for a hostname we'll never match.
if name_length > 254 {
return Err(TlsError::UnrecognizedName);
}
// The following validation rules ensure that we won't return a hostname which could
// lead to pathname traversal (e.g. "..", "", or "a/b") and that semantically
// equivalent hostnames are only returned in a canonical form. This does not validate
// anything else about the hostname, such as length limits on individual labels.
let mut name = Vec::with_capacity(name_length);
let mut start_of_label = true;
for _ in 0..name_length {
let b = source.read().await?.to_ascii_lowercase();
if start_of_label && (b == b'-' || b == b'.') {
// a hostname label can't start with dot or dash
return Err(TlsError::UnrecognizedName);
}
// the next byte is the start of a label iff this one was a dot
start_of_label = b'.' == b;
match b {
b'a'..=b'z' | b'0'..=b'9' | b'-' | b'.' => name.push(b),
_ => return Err(TlsError::UnrecognizedName),
}
}
// If we're expecting a new label after reading the whole hostname, then either the
// name was empty or it ended with a dot; neither is allowed.
if start_of_label {
return Err(TlsError::UnrecognizedName);
}
// safety: every byte was already checked for being a valid subset of UTF-8
let name = unsafe { String::from_utf8_unchecked(name) };
return Ok(name);
}
// None of the names were of the right type, and section 4.2 says "There MUST NOT be more
// than one extension of the same type in a given extension block", so there definitely
// isn't a server name in this ClientHello.
break;
}
// Like when the extensions block is absent, pretend as if a server name was present but not
// recognized.
Err(TlsError::UnrecognizedName)
}
fn hash_hostname(hostname: String) -> PathBuf {
#[cfg(feature = "hashed")]
let hostname = {
use blake2::{Blake2s, Digest};
let hash = Blake2s::digest(hostname.as_bytes());
base64::encode_config(&hash, base64::URL_SAFE_NO_PAD)
};
| check_length | identifier_name | |
app.rs | }
diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
impl Resource for Application {
fn owner(&self) -> Option<&str> {
self.owner.as_deref()
}
fn members(&self) -> &IndexMap<String, MemberEntry> {
&self.members
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
/// Allow everything, including changing members
Admin,
/// Allow reading and writing, but not changing members.
Manager,
/// Allow reading only.
Reader,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
let sect = app
.data
.get_mut(key)
.map(|v| v.take())
.and_then(|v| match v {
Value::Object(v) => Some(v),
_ => None,
});
(app, sect)
}
impl From<Application> for registry::v1::Application {
fn from(app: Application) -> Self {
let (app, spec) = extract_sect(app, "spec");
let (app, status) = extract_sect(app, "status");
registry::v1::Application {
metadata: meta::v1::NonScopedMetadata {
uid: app.uid.to_string(),
name: app.name,
labels: app.labels,
annotations: app.annotations,
creation_timestamp: app.creation_timestamp,
generation: app.generation,
resource_version: app.resource_version.to_string(),
deletion_timestamp: app.deletion_timestamp,
finalizers: app.finalizers,
},
spec: spec.unwrap_or_default(),
status: status.unwrap_or_default(),
}
}
}
#[async_trait]
pub trait ApplicationAccessor {
/// Lookup an application
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
/// Delete an application
async fn delete(&self, app: &str) -> Result<(), ServiceError>;
/// Get an application
async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
Ok(self
.list(
Some(app),
LabelSelector::default(),
Some(1),
None,
None,
lock,
&[],
)
.await?
.try_next()
.await?)
}
/// Get a list of applications
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
/// Create a new application
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError>;
/// Update an existing application's data
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError>;
/// Update an existing application's owner information
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError>;
/// Set the member list
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError>;
}
pub struct PostgresApplicationAccessor<'c, C: Client> {
client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
pub fn new(client: &'c C) -> Self {
Self { client }
}
pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
log::debug!("Row: {:?}", row);
Ok(Application {
uid: row.try_get("UID")?,
name: row.try_get("NAME")?,
creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
generation: row.try_get::<_, i64>("GENERATION")? as u64,
resource_version: row.try_get("RESOURCE_VERSION")?,
labels: super::row_to_map(&row, "LABELS")?,
annotations: super::row_to_map(&row, "ANNOTATIONS")?,
deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
finalizers: super::row_to_vec(&row, "FINALIZERS")?,
owner: row.try_get("OWNER")?,
transfer_owner: row.try_get("TRANSFER_OWNER")?,
members: row
.try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
.map(|json| json.0)
.or_else(fix_null_default)?,
data: row.try_get::<_, Json<_>>("DATA")?.0,
})
}
async fn insert_aliases(
&self,
id: &str,
aliases: &HashSet<TypedAlias>,
) -> Result<(), tokio_postgres::Error> {
if aliases.is_empty() {
return Ok(());
}
let stmt = self
.client
.prepare_typed(
"INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
&[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
)
.await?;
for alias in aliases {
self.client
.execute(&stmt, &[&id, &alias.0, &alias.1])
.await?;
}
Ok(())
}
}
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let row = self.client.query_opt(&stmt, &[&alias]).await?;
Ok(row.map(Self::from_row).transpose()?)
}
async fn delete(&self, id: &str) -> Result<(), ServiceError> {
let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let count = self.client.execute(&stmt, &[&id]).await?;
if count > 0 {
Ok(())
} else {
Err(ServiceError::NotFound)
}
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>
| let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
}
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
let name = application.name;
let data = application.data;
let labels = application.labels;
let annotations = application.annotations;
self.client
.execute(
r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP | {
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
| identifier_body |
app.rs | }
diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
impl Resource for Application {
fn owner(&self) -> Option<&str> {
self.owner.as_deref()
}
fn members(&self) -> &IndexMap<String, MemberEntry> {
&self.members
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
/// Allow everything, including changing members
Admin,
/// Allow reading and writing, but not changing members.
Manager,
/// Allow reading only.
Reader,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
let sect = app
.data
.get_mut(key)
.map(|v| v.take())
.and_then(|v| match v {
Value::Object(v) => Some(v),
_ => None,
});
(app, sect)
}
impl From<Application> for registry::v1::Application {
fn from(app: Application) -> Self {
let (app, spec) = extract_sect(app, "spec");
let (app, status) = extract_sect(app, "status");
registry::v1::Application {
metadata: meta::v1::NonScopedMetadata {
uid: app.uid.to_string(),
name: app.name,
labels: app.labels,
annotations: app.annotations,
creation_timestamp: app.creation_timestamp,
generation: app.generation,
resource_version: app.resource_version.to_string(),
deletion_timestamp: app.deletion_timestamp,
finalizers: app.finalizers,
},
spec: spec.unwrap_or_default(),
status: status.unwrap_or_default(),
}
}
}
#[async_trait]
pub trait ApplicationAccessor {
/// Lookup an application
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
/// Delete an application
async fn delete(&self, app: &str) -> Result<(), ServiceError>;
/// Get an application
async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
Ok(self
.list(
Some(app),
LabelSelector::default(),
Some(1),
None,
None,
lock,
&[],
)
.await?
.try_next()
.await?)
}
/// Get a list of applications
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
/// Create a new application
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError>;
/// Update an existing application's data
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError>;
/// Update an existing application's owner information
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError>;
/// Set the member list
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError>;
}
pub struct PostgresApplicationAccessor<'c, C: Client> {
client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
pub fn new(client: &'c C) -> Self {
Self { client }
}
pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
log::debug!("Row: {:?}", row);
Ok(Application {
uid: row.try_get("UID")?,
name: row.try_get("NAME")?,
creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
generation: row.try_get::<_, i64>("GENERATION")? as u64,
resource_version: row.try_get("RESOURCE_VERSION")?,
labels: super::row_to_map(&row, "LABELS")?,
annotations: super::row_to_map(&row, "ANNOTATIONS")?,
deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
finalizers: super::row_to_vec(&row, "FINALIZERS")?,
owner: row.try_get("OWNER")?,
transfer_owner: row.try_get("TRANSFER_OWNER")?,
members: row
.try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
.map(|json| json.0)
.or_else(fix_null_default)?,
data: row.try_get::<_, Json<_>>("DATA")?.0,
})
}
async fn insert_aliases(
&self,
id: &str,
aliases: &HashSet<TypedAlias>,
) -> Result<(), tokio_postgres::Error> {
if aliases.is_empty() {
return Ok(());
}
let stmt = self
.client
.prepare_typed(
"INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
&[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
)
.await?;
for alias in aliases {
self.client
.execute(&stmt, &[&id, &alias.0, &alias.1])
.await?;
}
Ok(())
}
}
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let row = self.client.query_opt(&stmt, &[&alias]).await?;
Ok(row.map(Self::from_row).transpose()?)
}
async fn delete(&self, id: &str) -> Result<(), ServiceError> {
let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let count = self.client.execute(&stmt, &[&id]).await?;
if count > 0 {
Ok(())
} else {
Err(ServiceError::NotFound)
}
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>, | {
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
}
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
let name = application.name;
let data = application.data;
let labels = application.labels;
let annotations = application.annotations;
self.client
.execute(
r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP | offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError> | random_line_split |
app.rs | diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
impl Resource for Application {
fn owner(&self) -> Option<&str> {
self.owner.as_deref()
}
fn members(&self) -> &IndexMap<String, MemberEntry> {
&self.members
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
/// Allow everything, including changing members
Admin,
/// Allow reading and writing, but not changing members.
Manager,
/// Allow reading only.
Reader,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
let sect = app
.data
.get_mut(key)
.map(|v| v.take())
.and_then(|v| match v {
Value::Object(v) => Some(v),
_ => None,
});
(app, sect)
}
impl From<Application> for registry::v1::Application {
fn from(app: Application) -> Self {
let (app, spec) = extract_sect(app, "spec");
let (app, status) = extract_sect(app, "status");
registry::v1::Application {
metadata: meta::v1::NonScopedMetadata {
uid: app.uid.to_string(),
name: app.name,
labels: app.labels,
annotations: app.annotations,
creation_timestamp: app.creation_timestamp,
generation: app.generation,
resource_version: app.resource_version.to_string(),
deletion_timestamp: app.deletion_timestamp,
finalizers: app.finalizers,
},
spec: spec.unwrap_or_default(),
status: status.unwrap_or_default(),
}
}
}
#[async_trait]
pub trait ApplicationAccessor {
/// Lookup an application
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
/// Delete an application
async fn delete(&self, app: &str) -> Result<(), ServiceError>;
/// Get an application
async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
Ok(self
.list(
Some(app),
LabelSelector::default(),
Some(1),
None,
None,
lock,
&[],
)
.await?
.try_next()
.await?)
}
/// Get a list of applications
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
/// Create a new application
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError>;
/// Update an existing application's data
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError>;
/// Update an existing application's owner information
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError>;
/// Set the member list
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError>;
}
pub struct PostgresApplicationAccessor<'c, C: Client> {
client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
pub fn new(client: &'c C) -> Self {
Self { client }
}
pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
log::debug!("Row: {:?}", row);
Ok(Application {
uid: row.try_get("UID")?,
name: row.try_get("NAME")?,
creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
generation: row.try_get::<_, i64>("GENERATION")? as u64,
resource_version: row.try_get("RESOURCE_VERSION")?,
labels: super::row_to_map(&row, "LABELS")?,
annotations: super::row_to_map(&row, "ANNOTATIONS")?,
deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
finalizers: super::row_to_vec(&row, "FINALIZERS")?,
owner: row.try_get("OWNER")?,
transfer_owner: row.try_get("TRANSFER_OWNER")?,
members: row
.try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
.map(|json| json.0)
.or_else(fix_null_default)?,
data: row.try_get::<_, Json<_>>("DATA")?.0,
})
}
async fn insert_aliases(
&self,
id: &str,
aliases: &HashSet<TypedAlias>,
) -> Result<(), tokio_postgres::Error> {
if aliases.is_empty() {
return Ok(());
}
let stmt = self
.client
.prepare_typed(
"INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
&[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
)
.await?;
for alias in aliases {
self.client
.execute(&stmt, &[&id, &alias.0, &alias.1])
.await?;
}
Ok(())
}
}
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let row = self.client.query_opt(&stmt, &[&alias]).await?;
Ok(row.map(Self::from_row).transpose()?)
}
async fn | (&self, id: &str) -> Result<(), ServiceError> {
let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let count = self.client.execute(&stmt, &[&id]).await?;
if count > 0 {
Ok(())
} else {
Err(ServiceError::NotFound)
}
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>
{
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
}
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
let name = application.name;
let data = application.data;
let labels = application.labels;
let annotations = application.annotations;
self.client
.execute(
r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP | delete | identifier_name |
app.rs | diffable!(Application);
generation!(Application => generation);
default_resource!(Application);
impl Resource for Application {
fn owner(&self) -> Option<&str> {
self.owner.as_deref()
}
fn members(&self) -> &IndexMap<String, MemberEntry> {
&self.members
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Role {
/// Allow everything, including changing members
Admin,
/// Allow reading and writing, but not changing members.
Manager,
/// Allow reading only.
Reader,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemberEntry {
pub role: Role,
}
/// Extract a section from the application data. Prevents cloning the whole struct.
fn extract_sect(mut app: Application, key: &str) -> (Application, Option<Map<String, Value>>) {
let sect = app
.data
.get_mut(key)
.map(|v| v.take())
.and_then(|v| match v {
Value::Object(v) => Some(v),
_ => None,
});
(app, sect)
}
impl From<Application> for registry::v1::Application {
fn from(app: Application) -> Self {
let (app, spec) = extract_sect(app, "spec");
let (app, status) = extract_sect(app, "status");
registry::v1::Application {
metadata: meta::v1::NonScopedMetadata {
uid: app.uid.to_string(),
name: app.name,
labels: app.labels,
annotations: app.annotations,
creation_timestamp: app.creation_timestamp,
generation: app.generation,
resource_version: app.resource_version.to_string(),
deletion_timestamp: app.deletion_timestamp,
finalizers: app.finalizers,
},
spec: spec.unwrap_or_default(),
status: status.unwrap_or_default(),
}
}
}
#[async_trait]
pub trait ApplicationAccessor {
/// Lookup an application
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError>;
/// Delete an application
async fn delete(&self, app: &str) -> Result<(), ServiceError>;
/// Get an application
async fn get(&self, app: &str, lock: Lock) -> Result<Option<Application>, ServiceError> {
Ok(self
.list(
Some(app),
LabelSelector::default(),
Some(1),
None,
None,
lock,
&[],
)
.await?
.try_next()
.await?)
}
/// Get a list of applications
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>;
/// Create a new application
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError>;
/// Update an existing application's data
async fn update_data(
&self,
application: Application,
aliases: Option<HashSet<TypedAlias>>,
) -> Result<u64, ServiceError>;
/// Update an existing application's owner information
async fn update_transfer(
&self,
app: String,
owner: Option<String>,
transfer_owner: Option<String>,
) -> Result<u64, ServiceError>;
/// Set the member list
async fn set_members(
&self,
app: &str,
members: IndexMap<String, MemberEntry>,
) -> Result<u64, ServiceError>;
}
pub struct PostgresApplicationAccessor<'c, C: Client> {
client: &'c C,
}
impl<'c, C: Client> PostgresApplicationAccessor<'c, C> {
pub fn new(client: &'c C) -> Self {
Self { client }
}
pub fn from_row(row: Row) -> Result<Application, tokio_postgres::Error> {
log::debug!("Row: {:?}", row);
Ok(Application {
uid: row.try_get("UID")?,
name: row.try_get("NAME")?,
creation_timestamp: row.try_get("CREATION_TIMESTAMP")?,
generation: row.try_get::<_, i64>("GENERATION")? as u64,
resource_version: row.try_get("RESOURCE_VERSION")?,
labels: super::row_to_map(&row, "LABELS")?,
annotations: super::row_to_map(&row, "ANNOTATIONS")?,
deletion_timestamp: row.try_get("DELETION_TIMESTAMP")?,
finalizers: super::row_to_vec(&row, "FINALIZERS")?,
owner: row.try_get("OWNER")?,
transfer_owner: row.try_get("TRANSFER_OWNER")?,
members: row
.try_get::<_, Json<IndexMap<String, MemberEntry>>>("MEMBERS")
.map(|json| json.0)
.or_else(fix_null_default)?,
data: row.try_get::<_, Json<_>>("DATA")?.0,
})
}
async fn insert_aliases(
&self,
id: &str,
aliases: &HashSet<TypedAlias>,
) -> Result<(), tokio_postgres::Error> {
if aliases.is_empty() |
let stmt = self
.client
.prepare_typed(
"INSERT INTO APPLICATION_ALIASES (APP, TYPE, ALIAS) VALUES ($1, $2, $3)",
&[Type::VARCHAR, Type::VARCHAR, Type::VARCHAR],
)
.await?;
for alias in aliases {
self.client
.execute(&stmt, &[&id, &alias.0, &alias.1])
.await?;
}
Ok(())
}
}
trait Param: ToSql + Sync {}
#[async_trait]
impl<'c, C: Client> ApplicationAccessor for PostgresApplicationAccessor<'c, C> {
async fn lookup(&self, alias: &str) -> Result<Option<Application>, ServiceError> {
let sql = r#"
SELECT
A2.NAME,
A2.UID,
A2.LABELS,
A2.CREATION_TIMESTAMP,
A2.GENERATION,
A2.RESOURCE_VERSION,
A2.ANNOTATIONS,
A2.DELETION_TIMESTAMP,
A2.FINALIZERS,
A2.OWNER,
A2.TRANSFER_OWNER,
A2.MEMBERS,
A2.DATA
FROM
APPLICATION_ALIASES A1 INNER JOIN APPLICATIONS A2
ON
A1.APP=A2.NAME WHERE A1.ALIAS = $1
"#;
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let row = self.client.query_opt(&stmt, &[&alias]).await?;
Ok(row.map(Self::from_row).transpose()?)
}
async fn delete(&self, id: &str) -> Result<(), ServiceError> {
let sql = "DELETE FROM APPLICATIONS WHERE NAME = $1";
let stmt = self.client.prepare_typed(sql, &[Type::VARCHAR]).await?;
let count = self.client.execute(&stmt, &[&id]).await?;
if count > 0 {
Ok(())
} else {
Err(ServiceError::NotFound)
}
}
async fn list(
&self,
name: Option<&str>,
labels: LabelSelector,
limit: Option<usize>,
offset: Option<usize>,
id: Option<&UserInformation>,
lock: Lock,
sort: &[&str],
) -> Result<Pin<Box<dyn Stream<Item = Result<Application, ServiceError>> + Send>>, ServiceError>
{
let select = r#"
SELECT
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION_TIMESTAMP,
GENERATION,
RESOURCE_VERSION,
DELETION_TIMESTAMP,
FINALIZERS,
OWNER,
TRANSFER_OWNER,
MEMBERS,
DATA
FROM APPLICATIONS
"#
.to_string();
let builder = SelectBuilder::new(select, Vec::new(), Vec::new())
.name(&name)
.labels(&labels.0)
.auth_read(&id)
.lock(lock)
.sort(sort)
.limit(limit)
.offset(offset);
let (select, params, types) = builder.build();
let stmt = self.client.prepare_typed(&select, &types).await?;
let stream = self
.client
.query_raw(&stmt, slice_iter(¶ms[..]))
.await
.map_err(|err| {
log::debug!("Failed to get: {}", err);
err
})?
.and_then(|row| future::ready(Self::from_row(row)))
.map_err(ServiceError::Database);
Ok(Box::pin(stream))
}
async fn create(
&self,
application: Application,
aliases: HashSet<TypedAlias>,
) -> Result<(), ServiceError> {
let name = application.name;
let data = application.data;
let labels = application.labels;
let annotations = application.annotations;
self.client
.execute(
r#"
INSERT INTO APPLICATIONS (
NAME,
UID,
LABELS,
ANNOTATIONS,
CREATION | {
return Ok(());
} | conditional_block |
main.rs | isa_attribute,
core_intrinsics,
maybe_uninit_ref,
bindings_after_at,
stmt_expr_attributes,
default_alloc_error_handler,
const_fn_floating_point_arithmetic,
)]
extern crate alloc;
mod gfx;
mod heap;
mod mem;
use core::fmt::Write;
use alloc::{vec::Vec, vec};
use vek::*;
use num_traits::float::Float;
use gba::{
io::{
irq::{set_irq_handler, IrqFlags, IrqEnableSetting, IE, IME, BIOS_IF},
display::{
DisplayControlSetting, DisplayStatusSetting, DisplayMode,
DISPCNT, DISPSTAT, VCOUNT, VBLANK_SCANLINE,
},
background::{BackgroundControlSetting, BG2HOFS},
timers::{TimerControlSetting, TimerTickRate, TM2CNT_H, TM2CNT_L},
keypad::read_key_input,
},
bios,
vram::bitmap::{Mode3, Mode5},
Color,
};
pub use mem::*;
pub type F32 = fixed::types::I16F16;
pub const fn num(x: f32) -> F32 {
use fixed::traits::Fixed;
F32::from_bits((x * (1 << F32::FRAC_NBITS) as f32) as <F32 as Fixed>::Bits)
}
fn normalize_quat_fast(q: Quaternion<F32>) -> Quaternion<F32> {
fn finvsqrt(x: f32) -> f32 {
let y = f32::from_bits(0x5f375a86 - (x.to_bits() >> 1));
y * (1.5 - ( x * 0.5 * y * y ))
}
fn fsqrt(x: f32) -> f32 {
f32::from_bits((x.to_bits() + (127 << 23)) >> 1)
}
let v = q.into_vec4();
(v * F32::from_num(finvsqrt(v.magnitude_squared().to_num::<f32>()))).into()
}
fn cos_fast(mut x: F32) -> F32 {
use core::f32;
x *= num(f32::consts::FRAC_1_PI / 2.0);
x -= num(0.25) + (x + num(0.25)).floor();
x *= num(16.0) * (x.abs() - num(0.5));
x += num(0.225) * x * (x.abs() - num(1.0));
x
}
fn sin_fast(x: F32) -> F32 {
use core::f32;
cos_fast(x - num(f32::consts::PI / 2.0))
}
fn tan_fast(x: F32) -> F32 {
sin_fast(x) / cos_fast(x)
}
fn rotation_3d(angle_radians: F32, axis: Vec3<F32>) -> Quaternion<F32> {
// let axis = axis.normalized();
let Vec3 { x, y, z } = axis * sin_fast(angle_radians * num(0.5));
let w = cos_fast(angle_radians * num(0.5));
Quaternion { x, y, z, w }
}
#[repr(transparent)]
#[derive(Copy, Clone)]
struct NumWrap(F32);
impl core::ops::Mul<NumWrap> for NumWrap {
type Output = NumWrap;
fn mul(self, rhs: Self) -> Self { NumWrap(self.0 * rhs.0) }
}
impl vek::ops::MulAdd<NumWrap, NumWrap> for NumWrap {
type Output = NumWrap;
fn mul_add(self, mul: NumWrap, add: NumWrap) -> NumWrap {
NumWrap(self.0 * mul.0 + add.0)
}
}
fn apply(m: Mat3<F32>, n: Mat3<F32>) -> Mat3<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn apply4(m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01 | random_line_split | ||
main.rs | 32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn apply4(m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01! {{
fb.clear(Color::from_rgb(1, 3, 4).0);
}};
fn perspective_fov_rh_zo(fov_y_radians: F32, width: F32, height: F32, near: F32, far: F32) -> Mat4<F32> {
let rad = fov_y_radians;
let h = cos_fast(rad * num(0.5)) / sin_fast(rad * num(0.5));
let w = h * height / width;
let m00 = w;
let m11 = h;
let m22 = -(far + near) / (far - near);
let m23 = -(num(2.0) * far * near) / (far - near);
let m32 = -num(1.0);
let mut m = Mat4::new(
m00, num(0.0), num(0.0), num(0.0),
num(0.0), m11, num(0.0), num(0.0),
num(0.0), num(0.0), m22, m23,
num(0.0), num(0.0), m32, num(0.0)
);
m
}
let proj = perspective_fov_rh_zo(num(1.0), num(fb.screen_size().x as f32), num(fb.screen_size().y as f32), num(0.5), num(256.0));
let mut frame = scene.begin_frame(gfx::scene::SceneState {
proj,
view: Mat4::identity(),
light_dir: Vec3::new(0.0, -1.0, 0.0).normalized().map(num),
ambiance: num(0.2),
light_col: Rgb::new(1.0, 0.0, 0.5).map(num),
});
let mut ship_model;
time_model = gba::time_this01! {{
ship_model = frame.add_model(apply4(Mat4::translation_3d(pos), Mat4::from(apply(Mat3::from(ori), Mat3::scaling_3d(num(0.2))))));
}};
time_vertices = gba::time_this01! {{
for &v in &ship_verts {
frame.add_vert(ship_model, v);
}
}};
time_faces = gba::time_this01! {{
for &(norm, indices) in &ship_tris {
let color = Rgb::new(1.0, 1.0, 1.0).map(num);
let verts = [
indices[0],
indices[1],
indices[2],
];
frame.add_convex(ship_model, (verts, color), norm);
}
}};
// frame.add_flat_quad(
// 0,
// ([
// Vec3::new(-0.3, -0.5, 0.0).map(num),
// Vec3::new(0.0, 1.0, 0.0).map(num),
// Vec3::new(0.8, 0.8, 0.0).map(num),
// Vec3::new(1.0, 0.0, 0.0).map(num),
// ], Rgb::broadcast(num(1.0))),
// -Vec3::unit_z(),
// );
time_render = gba::time_this01! {{
frame.render(fb);
}};
tick += 1;
}
}
extern "C" fn irq_handler(flags: IrqFlags) {
if flags.vblank() {
vblank_handler();
}
if flags.hblank() {
hblank_handler();
}
if flags.vcounter() {
vcounter_handler();
}
if flags.timer0() | {
timer0_handler();
} | conditional_block | |
main.rs | (x: f32) -> f32 {
let y = f32::from_bits(0x5f375a86 - (x.to_bits() >> 1));
y * (1.5 - ( x * 0.5 * y * y ))
}
fn fsqrt(x: f32) -> f32 {
f32::from_bits((x.to_bits() + (127 << 23)) >> 1)
}
let v = q.into_vec4();
(v * F32::from_num(finvsqrt(v.magnitude_squared().to_num::<f32>()))).into()
}
fn cos_fast(mut x: F32) -> F32 {
use core::f32;
x *= num(f32::consts::FRAC_1_PI / 2.0);
x -= num(0.25) + (x + num(0.25)).floor();
x *= num(16.0) * (x.abs() - num(0.5));
x += num(0.225) * x * (x.abs() - num(1.0));
x
}
fn sin_fast(x: F32) -> F32 {
use core::f32;
cos_fast(x - num(f32::consts::PI / 2.0))
}
fn tan_fast(x: F32) -> F32 {
sin_fast(x) / cos_fast(x)
}
fn rotation_3d(angle_radians: F32, axis: Vec3<F32>) -> Quaternion<F32> {
// let axis = axis.normalized();
let Vec3 { x, y, z } = axis * sin_fast(angle_radians * num(0.5));
let w = cos_fast(angle_radians * num(0.5));
Quaternion { x, y, z, w }
}
#[repr(transparent)]
#[derive(Copy, Clone)]
struct NumWrap(F32);
impl core::ops::Mul<NumWrap> for NumWrap {
type Output = NumWrap;
fn mul(self, rhs: Self) -> Self { NumWrap(self.0 * rhs.0) }
}
impl vek::ops::MulAdd<NumWrap, NumWrap> for NumWrap {
type Output = NumWrap;
fn mul_add(self, mul: NumWrap, add: NumWrap) -> NumWrap {
NumWrap(self.0 * mul.0 + add.0)
}
}
fn apply(m: Mat3<F32>, n: Mat3<F32>) -> Mat3<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn | (m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01! {{
fb.clear(Color::from_rgb(1, 3, 4).0);
}};
fn perspective_fov_rh_zo(fov_y_radians: F32, width: F32, height: F32, near: F32, far: F32) -> Mat4<F32> {
let rad = fov_y_radians;
let h = cos_fast(rad * num(0.5)) / sin_fast(rad * num(0.5));
let w = h * height / width;
let m00 = w;
let m11 = h;
let m22 = -(far + near) / (far - near);
let m23 = -(num(2.0) * far * near) / (far - near);
let m32 = -num(1.0);
let mut m = Mat4::new(
m00, num(0.0), num(0.0), num(0.0),
num(0.0), m11, num(0.0), num(0.0),
num(0.0), num(0.0), m22, m23,
num(0.0), num(0.0), m32, num(0.0)
);
m
}
let proj = perspective_fov_rh_zo(num(1.0), num | apply4 | identifier_name |
main.rs | (x: f32) -> f32 {
let y = f32::from_bits(0x5f375a86 - (x.to_bits() >> 1));
y * (1.5 - ( x * 0.5 * y * y ))
}
fn fsqrt(x: f32) -> f32 {
f32::from_bits((x.to_bits() + (127 << 23)) >> 1)
}
let v = q.into_vec4();
(v * F32::from_num(finvsqrt(v.magnitude_squared().to_num::<f32>()))).into()
}
fn cos_fast(mut x: F32) -> F32 {
use core::f32;
x *= num(f32::consts::FRAC_1_PI / 2.0);
x -= num(0.25) + (x + num(0.25)).floor();
x *= num(16.0) * (x.abs() - num(0.5));
x += num(0.225) * x * (x.abs() - num(1.0));
x
}
fn sin_fast(x: F32) -> F32 {
use core::f32;
cos_fast(x - num(f32::consts::PI / 2.0))
}
fn tan_fast(x: F32) -> F32 {
sin_fast(x) / cos_fast(x)
}
fn rotation_3d(angle_radians: F32, axis: Vec3<F32>) -> Quaternion<F32> {
// let axis = axis.normalized();
let Vec3 { x, y, z } = axis * sin_fast(angle_radians * num(0.5));
let w = cos_fast(angle_radians * num(0.5));
Quaternion { x, y, z, w }
}
#[repr(transparent)]
#[derive(Copy, Clone)]
struct NumWrap(F32);
impl core::ops::Mul<NumWrap> for NumWrap {
type Output = NumWrap;
fn mul(self, rhs: Self) -> Self |
}
impl vek::ops::MulAdd<NumWrap, NumWrap> for NumWrap {
type Output = NumWrap;
fn mul_add(self, mul: NumWrap, add: NumWrap) -> NumWrap {
NumWrap(self.0 * mul.0 + add.0)
}
}
fn apply(m: Mat3<F32>, n: Mat3<F32>) -> Mat3<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
fn apply4(m: Mat4<F32>, n: Mat4<F32>) -> Mat4<F32> {
(m.map(NumWrap) * n.map(NumWrap)).map(|e| e.0)
}
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
gba::error!("Panic: {:?}", info);
Mode3::clear_to(Color::from_rgb(0xFF, 0, 0));
loop {}
}
#[start]
fn main(_argc: isize, _argv: *const *const u8) -> isize {
heap::init();
gba::info!("Starting...");
set_irq_handler(irq_handler);
IME.write(IrqEnableSetting::IRQ_YES);
DISPSTAT.write(DisplayStatusSetting::new()
.with_hblank_irq_enable(true)
.with_vblank_irq_enable(true));
TM2CNT_H.write(TimerControlSetting::new()
.with_tick_rate(TimerTickRate::CPU1024)
.with_enabled(true));
let model = wavefront::Obj::from_lines(include_str!("../data/ship-small.obj").lines()).unwrap();
let mut ship_verts = Vec::new();
let mut ship_tris = Vec::new();
for &p in model.positions() {
ship_verts.push(Vec3::<f32>::from(p).map(num));
}
model
.triangles()
.for_each(|vs| {
let pos = vs.map(|v| Vec3::<f32>::from(v.position()));
let cross = (pos[1] - pos[0]).cross(pos[2] - pos[0]);
ship_tris.push((
(cross / micromath::F32Ext::sqrt(cross.magnitude_squared())).map(num),
vs.map(|v| v.position_index() as u16),
));
});
gba::info!("Model has {} vertices and {} triangles", ship_verts.len(), ship_tris.len());
let mut pos = Vec3::new(0.0, 0.0, 3.0).map(num);
let mut ori = normalize_quat_fast(Quaternion::<F32>::identity());
let mut tick = 0;
let mut last_time = 0;
let mut sum_fps = 0.0;
let mut screen = unsafe { gfx::mode5::init() };
let mut scene = unsafe { gfx::scene::init() };
let mut time_mvp = 0;
let mut time_clear = 0;
let mut time_model = 0;
let mut time_vertices = 0;
let mut time_faces = 0;
let mut time_render = 0;
loop {
let new_time = TM2CNT_L.read();
if tick % 32 == 0 {
if new_time > last_time {
gba::info!("FPS: {}", sum_fps / 32.0);
gba::info!(
"Timings: {{ mvp = {}, clear = {}, model = {}, vertices = {}, faces = {}, render = {} }}",
time_mvp,
time_clear,
time_model,
time_vertices,
time_faces,
time_render,
);
}
sum_fps = 0.0;
}
let fps = (16_780_000.0 / (new_time - last_time) as f32) / 1024.0;
sum_fps += fps;
last_time = new_time;
let dt = num(fps).recip();
// Wait for vblank
IE.write(IrqFlags::new().with_vblank(true));
// bios::vblank_interrupt_wait();
screen.flip();
let keys = read_key_input();
time_mvp = gba::time_this01! {{
ori = normalize_quat_fast(ori
* rotation_3d(
if keys.down() { num(4.0) * dt } else { num(0.0) }
- if keys.up() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_x(),
)
* rotation_3d(
if keys.right() { num(4.0) * dt } else { num(0.0) }
- if keys.left() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_y(),
)
* rotation_3d(
if keys.r() { num(4.0) * dt } else { num(0.0) }
- if keys.l() { num(4.0) * dt } else { num(0.0) },
Vec3::unit_z(),
));
pos += gfx::scene::transform_pos(Mat4::from(ori).transposed(), Vec3::unit_z() * (
if keys.a() { num(0.05) } else { num(0.0) }
- if keys.b() { num(0.05) } else { num(0.0) }
)).xyz();
}};
let mut fb = screen.back();
time_clear = gba::time_this01! {{
fb.clear(Color::from_rgb(1, 3, 4).0);
}};
fn perspective_fov_rh_zo(fov_y_radians: F32, width: F32, height: F32, near: F32, far: F32) -> Mat4<F32> {
let rad = fov_y_radians;
let h = cos_fast(rad * num(0.5)) / sin_fast(rad * num(0.5));
let w = h * height / width;
let m00 = w;
let m11 = h;
let m22 = -(far + near) / (far - near);
let m23 = -(num(2.0) * far * near) / (far - near);
let m32 = -num(1.0);
let mut m = Mat4::new(
m00, num(0.0), num(0.0), num(0.0),
num(0.0), m11, num(0.0), num(0.0),
num(0.0), num(0.0), m22, m23,
num(0.0), num(0.0), m32, num(0.0)
);
m
}
let proj = perspective_fov_rh_zo(num(1.0), | { NumWrap(self.0 * rhs.0) } | identifier_body |
Assign_LLID_Toxics_DO_unverified_LASAR_Stations.py | /WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQ_Streams_25APR2013"
station_river_name_field = "LOCATION_D"
streams_river_name_field = "NAME"
rid = "LLID"
search_radius = 12000
output_table = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/assign_llid_temp.gdb/out1"
output_success = "out_success"
output_fail = "out_fail"
qc_lyr = "qc_lyr"
qc_success = "qc_success"
qc_review = "qc_needs_review"
outside_threshold = "outside_threshold"
properties = "RID POINT MEAS"
# # Subset the 57 new lasar stations from master lasar station shapefile.
# # Four of these were not in the shapefile, and had to be converted manually.
# ls_df = pd.read_csv(r'E:\GitHub\ToxicsRedo\StationsToLocate\Post_ToxicsRedo_Stations\toxics_do_unverified.csv', header=0)
# ls_keys = ls_df['STATION'].values
#
# in_feature = "//Deqlead03/gis_wa/Project_Working_Folders/LASAR_Stations/LASAR_Stations/LASAR_Stations_26sept13.shp"
# out_feature = "E:/GitHub/ToxicsRedo/StationsToLocate/Post_ToxicsRedo_Stations/toxics_do_unverified.shp"
# lstations = "lstations"
# query = """ "STATION_KE" in """ + "(" + ', '.join([str(i) for i in ls_keys]) +")"
#
# arcpy.MakeFeatureLayer_management(in_feature, lstations)
# arcpy.SelectLayerByAttribute_management(lstations, "NEW_SELECTION", query)
# arcpy.GetCount_management(lstations).getOutput(0)
# arcpy.CopyFeatures_management(lstations, out_feature)
# Check to see if a temp geodatabase exists. If not, create it.
if os.path.exists(temp_location + temp_gdb):
print "It exist!"
else:
arcpy.CreateFileGDB_management(temp_location, temp_gdb)
if os.path.exists((temp_location + final_gdb)):
print "It exist!"
else:
arcpy.CreateFileGDB_management(temp_location, final_gdb)
arcpy.env.workspace = workspace
arcpy.CopyFeatures_management(original_sampling_stations, sampling_stations)
arcpy.AddField_management(sampling_stations, "Unique_ID", "DOUBLE")
arcpy.CalculateField_management(sampling_stations, "Unique_ID", "!OBJECTID!", "PYTHON")
nrow = arcpy.GetCount_management(sampling_stations)
# Execute LocateFeaturesAlongRoutes
arcpy.LocateFeaturesAlongRoutes_lr(sampling_stations, stream_network, rid, search_radius, output_table,
properties)
successful_features = arcpy.da.TableToNumPyArray(output_table, 'Unique_ID')['Unique_ID']
#Add QC fields to table
arcpy.AddField_management(output_table, "QAQC1", "STRING")
arcpy.AddField_management(output_table, "QAQC2", "STRING")
#Now, begin primary qc by using character matching to verify that successful rows have matching stream names.
stream_names_from_deq_streams = arcpy.da.TableToNumPyArray(stream_network, ['LLID', streams_river_name_field])[['LLID', streams_river_name_field]]
with arcpy.da.UpdateCursor(output_table, [station_river_name_field,'RID','QAQC1', 'QAQC2']) as cursor:
for row in cursor:
deq_streams = stream_names_from_deq_streams[streams_river_name_field][numpy.nonzero(stream_names_from_deq_streams['LLID'] == row[1])][0]
if row[0].replace(" ", "").lower() == deq_streams.replace(" ", "").lower():
row[2] = 'Reviewed'
row[3] = 'Not Required'
else:
row[2] = 'Needs Secondary Review'
cursor.updateRow(row)
#Create a 'success' fc and a 'fail' fc
#First, copy the original station fc to new fcs. One for success, one for failure.
arcpy.CopyFeatures_management(sampling_stations, output_success)
arcpy.CopyFeatures_management(sampling_stations, output_fail)
#Then, use cursors to remove failed rows from the success fc
with arcpy.da.UpdateCursor(output_success, "Unique_ID") as cursor:
for row in cursor:
if row not in successful_features:
cursor.deleteRow()
#And remove successful rows from the fail fc
with arcpy.da.UpdateCursor(output_fail, "Unique_ID") as cursor:
for row in cursor:
if row in successful_features:
cursor.deleteRow()
#Note: With a large enough search radius the fail fc will be empty.
#Remove all fields from the success fc except the Unique_ID so it can be merged with the output table
#Note: I'm not sure what would happen here if the success fc is empty. I suspect it would throw an exception.
# If this happens, increase the search radius.
fieldList = arcpy.ListFields(output_success)
fields_to_drop = []
for field in fieldList:
|
arcpy.DeleteField_management(output_success, fields_to_drop)
#Merge with output table
arcpy.JoinField_management(output_success, 'Unique_ID', output_table, 'Unique_ID')
#Now split success fc into one fc with successful qc and one with stations needing review
arcpy.MakeFeatureLayer_management(output_success, qc_lyr)
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Reviewed' """)
if int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == len(successful_features):
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == 0:
arcpy.CopyFeatures_management(output_success, (temp_location + final_gdb + "/" + qc_review))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) < len(successful_features) and int(arcpy.GetCount_management(qc_lyr).getOutput(0)) > 0:
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Needs Secondary Review' """)
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_review))
arcpy.CopyFeatures_management(output_fail, (temp_location + final_gdb + '/' + outside_threshold))
arcpy.SelectLayerByAttribute_management(qc_lyr, "CLEAR_SELECTION")
#Once this process is complete, add attribute information.
fc_original = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review'
fc_copy = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
arcpy.CopyFeatures_management(fc_original, fc_copy)
arcpy.AddField_management(fc_copy, 'RIVER_MILE', 'DOUBLE')
arcpy.CalculateField_management(fc_copy, 'RIVER_MILE', '!MEAS!/5280', "PYTHON_9.3")
#Spatially join HUC 3 and 4 field
huc3 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU6'
huc4 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU8'
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
arcpy.SpatialJoin_analysis(in_file, huc3, out_file)
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc4'
arcpy.SpatialJoin_analysis(in_file, huc4, out_file)
#Copy fc and remove Unwanted fields so fc is ready to merge with 2010 stations
stations2010_formatting = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations | if field.name not in ['Unique_ID', 'Shape','OBJECTID']:
fields_to_drop.append(field.name) | conditional_block |
Assign_LLID_Toxics_DO_unverified_LASAR_Stations.py | /WQ_2010_IntegratedReport_V3/WQ_2010_IntegratedReport_V3/Assessment.gdb/DEQ_Streams_25APR2013"
station_river_name_field = "LOCATION_D"
streams_river_name_field = "NAME"
rid = "LLID"
search_radius = 12000
output_table = "E:/GitHub/ToxicsRedo/StationsToLocate/FinalList/assign_llid_temp.gdb/out1"
output_success = "out_success"
output_fail = "out_fail"
qc_lyr = "qc_lyr"
qc_success = "qc_success"
qc_review = "qc_needs_review"
outside_threshold = "outside_threshold"
properties = "RID POINT MEAS"
# # Subset the 57 new lasar stations from master lasar station shapefile.
# # Four of these were not in the shapefile, and had to be converted manually.
# ls_df = pd.read_csv(r'E:\GitHub\ToxicsRedo\StationsToLocate\Post_ToxicsRedo_Stations\toxics_do_unverified.csv', header=0)
# ls_keys = ls_df['STATION'].values
#
# in_feature = "//Deqlead03/gis_wa/Project_Working_Folders/LASAR_Stations/LASAR_Stations/LASAR_Stations_26sept13.shp"
# out_feature = "E:/GitHub/ToxicsRedo/StationsToLocate/Post_ToxicsRedo_Stations/toxics_do_unverified.shp"
# lstations = "lstations"
# query = """ "STATION_KE" in """ + "(" + ', '.join([str(i) for i in ls_keys]) +")"
#
# arcpy.MakeFeatureLayer_management(in_feature, lstations)
# arcpy.SelectLayerByAttribute_management(lstations, "NEW_SELECTION", query)
# arcpy.GetCount_management(lstations).getOutput(0)
# arcpy.CopyFeatures_management(lstations, out_feature)
# Check to see if a temp geodatabase exists. If not, create it.
if os.path.exists(temp_location + temp_gdb):
print "It exist!"
else:
arcpy.CreateFileGDB_management(temp_location, temp_gdb)
if os.path.exists((temp_location + final_gdb)): | else:
arcpy.CreateFileGDB_management(temp_location, final_gdb)
arcpy.env.workspace = workspace
arcpy.CopyFeatures_management(original_sampling_stations, sampling_stations)
arcpy.AddField_management(sampling_stations, "Unique_ID", "DOUBLE")
arcpy.CalculateField_management(sampling_stations, "Unique_ID", "!OBJECTID!", "PYTHON")
nrow = arcpy.GetCount_management(sampling_stations)
# Execute LocateFeaturesAlongRoutes
arcpy.LocateFeaturesAlongRoutes_lr(sampling_stations, stream_network, rid, search_radius, output_table,
properties)
successful_features = arcpy.da.TableToNumPyArray(output_table, 'Unique_ID')['Unique_ID']
#Add QC fields to table
arcpy.AddField_management(output_table, "QAQC1", "STRING")
arcpy.AddField_management(output_table, "QAQC2", "STRING")
#Now, begin primary qc by using character matching to verify that successful rows have matching stream names.
stream_names_from_deq_streams = arcpy.da.TableToNumPyArray(stream_network, ['LLID', streams_river_name_field])[['LLID', streams_river_name_field]]
with arcpy.da.UpdateCursor(output_table, [station_river_name_field,'RID','QAQC1', 'QAQC2']) as cursor:
for row in cursor:
deq_streams = stream_names_from_deq_streams[streams_river_name_field][numpy.nonzero(stream_names_from_deq_streams['LLID'] == row[1])][0]
if row[0].replace(" ", "").lower() == deq_streams.replace(" ", "").lower():
row[2] = 'Reviewed'
row[3] = 'Not Required'
else:
row[2] = 'Needs Secondary Review'
cursor.updateRow(row)
#Create a 'success' fc and a 'fail' fc
#First, copy the original station fc to new fcs. One for success, one for failure.
arcpy.CopyFeatures_management(sampling_stations, output_success)
arcpy.CopyFeatures_management(sampling_stations, output_fail)
#Then, use cursors to remove failed rows from the success fc
with arcpy.da.UpdateCursor(output_success, "Unique_ID") as cursor:
for row in cursor:
if row not in successful_features:
cursor.deleteRow()
#And remove successful rows from the fail fc
with arcpy.da.UpdateCursor(output_fail, "Unique_ID") as cursor:
for row in cursor:
if row in successful_features:
cursor.deleteRow()
#Note: With a large enough search radius the fail fc will be empty.
#Remove all fields from the success fc except the Unique_ID so it can be merged with the output table
#Note: I'm not sure what would happen here if the success fc is empty. I suspect it would throw an exception.
# If this happens, increase the search radius.
fieldList = arcpy.ListFields(output_success)
fields_to_drop = []
for field in fieldList:
if field.name not in ['Unique_ID', 'Shape','OBJECTID']:
fields_to_drop.append(field.name)
arcpy.DeleteField_management(output_success, fields_to_drop)
#Merge with output table
arcpy.JoinField_management(output_success, 'Unique_ID', output_table, 'Unique_ID')
#Now split success fc into one fc with successful qc and one with stations needing review
arcpy.MakeFeatureLayer_management(output_success, qc_lyr)
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Reviewed' """)
if int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == len(successful_features):
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) == 0:
arcpy.CopyFeatures_management(output_success, (temp_location + final_gdb + "/" + qc_review))
elif int(arcpy.GetCount_management(qc_lyr).getOutput(0)) < len(successful_features) and int(arcpy.GetCount_management(qc_lyr).getOutput(0)) > 0:
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_success))
arcpy.SelectLayerByAttribute_management(qc_lyr, "NEW_SELECTION", """ "QAQC1" = 'Needs Secondary Review' """)
arcpy.CopyFeatures_management(qc_lyr, (temp_location + final_gdb + "/" + qc_review))
arcpy.CopyFeatures_management(output_fail, (temp_location + final_gdb + '/' + outside_threshold))
arcpy.SelectLayerByAttribute_management(qc_lyr, "CLEAR_SELECTION")
#Once this process is complete, add attribute information.
fc_original = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review'
fc_copy = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
arcpy.CopyFeatures_management(fc_original, fc_copy)
arcpy.AddField_management(fc_copy, 'RIVER_MILE', 'DOUBLE')
arcpy.CalculateField_management(fc_copy, 'RIVER_MILE', '!MEAS!/5280', "PYTHON_9.3")
#Spatially join HUC 3 and 4 field
huc3 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU6'
huc4 = 'F:/Base_Data/Hydrography/NHD/NHDH_OR_931v210/NHDH_OR.gdb/WBD/WBD_HU8'
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\qc_needs_review_copy'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
arcpy.SpatialJoin_analysis(in_file, huc3, out_file)
in_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc3'
out_file = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_huc4'
arcpy.SpatialJoin_analysis(in_file, huc4, out_file)
#Copy fc and remove Unwanted fields so fc is ready to merge with 2010 stations
stations2010_formatting = r'E:\GitHub\ToxicsRedo\StationsToLocate\FinalList\Toxics_do_post_toxicsRedo_Stations_Edits.gdb\post_redo_stations_final | print "It exist!" | random_line_split |
main.rs | bool,
#[structopt(long = "manifest-path", value_name = "PATH", parse(from_os_str))]
/// Path to Cargo.toml
manifest_path: Option<PathBuf>,
#[structopt(long = "invert", short = "i")]
/// Invert the tree direction
invert: bool,
#[structopt(long = "no-indent")]
/// Display the dependencies as a list (rather than a tree)
no_indent: bool,
#[structopt(long = "prefix-depth")]
/// Display the dependencies as a list (rather than a tree), but prefixed with the depth
prefix_depth: bool,
#[structopt(long = "all", short = "a")]
/// Don't truncate dependencies that have already been displayed
all: bool,
#[structopt(long = "duplicate", short = "d")]
/// Show only dependencies which come in multiple versions (implies -i)
duplicates: bool,
#[structopt(long = "charset", value_name = "CHARSET", default_value = "utf8")]
/// Character set to use in output: utf8, ascii
charset: Charset,
#[structopt(
long = "format",
short = "f",
value_name = "FORMAT",
default_value = "{p}"
)]
/// Format string used for printing dependencies
format: String,
#[structopt(long = "verbose", short = "v", parse(from_occurrences))]
/// Use verbose output (-vv very verbose/build.rs output)
verbose: u32,
#[structopt(long = "quiet", short = "q")]
/// No output printed to stdout other than the tree
quiet: Option<bool>,
#[structopt(long = "color", value_name = "WHEN")]
/// Coloring: auto, always, never
color: Option<String>,
#[structopt(long = "frozen")]
/// Require Cargo.lock and cache are up to date
frozen: bool,
#[structopt(long = "locked")]
/// Require Cargo.lock is up to date
locked: bool,
#[structopt(short = "Z", value_name = "FLAG")]
/// Unstable (nightly-only) flags to Cargo
unstable_flags: Vec<String>,
}
enum Charset {
Utf8,
Ascii,
}
#[derive(Clone, Copy)]
enum Prefix {
None,
Indent,
Depth,
}
impl FromStr for Charset {
type Err = &'static str;
fn from_str(s: &str) -> Result<Charset, &'static str> {
match s {
"utf8" => Ok(Charset::Utf8),
"ascii" => Ok(Charset::Ascii),
_ => Err("invalid charset"),
}
}
}
struct Symbols {
down: &'static str,
tee: &'static str,
ell: &'static str,
right: &'static str,
}
static UTF8_SYMBOLS: Symbols = Symbols {
down: "│",
tee: "├",
ell: "└",
right: "─",
};
static ASCII_SYMBOLS: Symbols = Symbols {
down: "|",
tee: "|",
ell: "`",
right: "-",
};
fn main() {
env_logger::init();
let mut config = match Config::default() {
Ok(cfg) => cfg,
Err(e) => {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
};
let Opts::Tree(args) = Opts::from_args();
if let Err(e) = real_main(args, &mut config) {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
}
fn real_main(args: Args, config: &mut Config) -> CliResult {
config.configure(
args.verbose,
args.quiet,
&args.color,
args.frozen,
args.locked,
&args.target_dir,
&args.unstable_flags,
)?;
let workspace = workspace(config, args.manifest_path)?;
let package = workspace.current()?;
let mut registry = registry(config, &package)?;
let (packages, resolve) = resolve(
&mut registry,
&workspace,
args.features,
args.all_features,
args.no_default_features,
args.no_dev_dependencies,
)?;
let ids = packages.package_ids().collect::<Vec<_>>();
let packages = registry.get(&ids)?;
let root = match args.package {
Some(ref pkg) => resolve.query(pkg)?,
None => package.package_id(),
};
let rustc = config.rustc(Some(&workspace))?;
let target = if args.all_targets {
None
} else {
Some(args.target.as_ref().unwrap_or(&rustc.host).as_str())
};
let format = Pattern::new(&args.format).map_err(|e| failure::err_msg(e.to_string()))?;
let cfgs = get_cfgs(&rustc, &args.target)?;
let graph = build_graph(
&resolve,
&packages,
package.package_id(),
target,
cfgs.as_ref().map(|r| &**r),
)?;
let direction = if args.invert || args.duplicates {
EdgeDirection::Incoming
} else {
EdgeDirection::Outgoing
};
let symbols = match args.charset {
Charset::Ascii => &ASCII_SYMBOLS,
Charset::Utf8 => &UTF8_SYMBOLS,
};
let prefix = if args.prefix_depth {
Prefix::Depth
} else if args.no_indent {
Prefix::None
} else {
Prefix::Indent
};
if args.duplicates {
let dups = find_duplicates(&graph);
for dup in &dups {
print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
println!();
}
} else {
print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
}
Ok(())
}
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
let mut counts = HashMap::new();
// Count by name only. Source and version are irrelevant here.
for package in graph.nodes.keys() {
*counts.entry(package.name()).or_insert(0) += 1;
}
// Theoretically inefficient, but in practice we're only listing duplicates and
// there won't be enough dependencies for it to matter.
let mut dup_ids = Vec::new();
for name in counts.drain().filter(|&(_, v)| v > 1).map(|(k, _)| k) {
dup_ids.extend(graph.nodes.keys().filter(|p| p.name() == name));
}
dup_ids.sort();
dup_ids
}
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
let mut process = util::process(&rustc.path);
process.arg("--print=cfg").env_remove("RUST_LOG");
if let Some(ref s) = *target {
process.arg("--target").arg(s);
}
let output = match process.exec_with_output() {
Ok(output) => output,
Err(e) => return Err(e),
};
let output = str::from_utf8(&output.stdout).unwrap();
let lines = output.lines();
Ok(Some(
lines.map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?,
))
}
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
let root = match manifest_path {
Some(path) => path,
None => important_paths::find_root_manifest_for_wd(config.cwd())?,
};
Workspace::new(&root, config)
}
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
let mut registry = PackageRegistry::new(config)?;
registry.add_sources(Some(package.package_id().source_id().clone()))?;
Ok(registry)
}
fn resolve<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
workspace: &'a Workspace<'cfg>,
features: Option<String>,
all_features: bool,
no_default_features: bool,
no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
let (packages, resolve) = ops::resolve_ws(workspace)?;
let method = Method::Required {
dev_deps: !no_dev_dependencies,
features: &features,
all_features,
uses_default_features: !no_default_features,
};
let resolve = ops::resolve_with_previous(
registry,
workspace,
method,
Some(&resolve),
None,
&[],
true,
true,
)?;
Ok((packages, resolve))
}
struct Node<'a> {
id: PackageId,
metadata: &'a ManifestMetadata,
}
struct Graph<'a | graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
fn build_graph<'a>(
resolve: &'a Resolve,
packages: &'a PackageSet<'_>,
root: PackageId,
target: Option<&str>,
cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
let mut graph = Graph {
graph: petgraph:: | > {
| identifier_name |
main.rs | ,
#[structopt(long = "manifest-path", value_name = "PATH", parse(from_os_str))]
/// Path to Cargo.toml
manifest_path: Option<PathBuf>,
#[structopt(long = "invert", short = "i")]
/// Invert the tree direction
invert: bool,
#[structopt(long = "no-indent")]
/// Display the dependencies as a list (rather than a tree)
no_indent: bool,
#[structopt(long = "prefix-depth")]
/// Display the dependencies as a list (rather than a tree), but prefixed with the depth
prefix_depth: bool,
#[structopt(long = "all", short = "a")]
/// Don't truncate dependencies that have already been displayed
all: bool,
#[structopt(long = "duplicate", short = "d")]
/// Show only dependencies which come in multiple versions (implies -i)
duplicates: bool,
#[structopt(long = "charset", value_name = "CHARSET", default_value = "utf8")]
/// Character set to use in output: utf8, ascii
charset: Charset,
#[structopt(
long = "format",
short = "f",
value_name = "FORMAT",
default_value = "{p}"
)]
/// Format string used for printing dependencies
format: String,
#[structopt(long = "verbose", short = "v", parse(from_occurrences))]
/// Use verbose output (-vv very verbose/build.rs output)
verbose: u32,
#[structopt(long = "quiet", short = "q")]
/// No output printed to stdout other than the tree
quiet: Option<bool>,
#[structopt(long = "color", value_name = "WHEN")]
/// Coloring: auto, always, never
color: Option<String>,
#[structopt(long = "frozen")]
/// Require Cargo.lock and cache are up to date
frozen: bool,
#[structopt(long = "locked")]
/// Require Cargo.lock is up to date
locked: bool,
#[structopt(short = "Z", value_name = "FLAG")]
/// Unstable (nightly-only) flags to Cargo
unstable_flags: Vec<String>,
}
enum Charset {
Utf8,
Ascii,
}
#[derive(Clone, Copy)]
enum Prefix {
None,
Indent,
Depth,
}
impl FromStr for Charset {
type Err = &'static str;
fn from_str(s: &str) -> Result<Charset, &'static str> {
match s {
"utf8" => Ok(Charset::Utf8),
"ascii" => Ok(Charset::Ascii),
_ => Err("invalid charset"),
}
}
}
struct Symbols {
down: &'static str,
tee: &'static str,
ell: &'static str,
right: &'static str,
}
static UTF8_SYMBOLS: Symbols = Symbols {
down: "│",
tee: "├",
ell: "└",
right: "─",
};
static ASCII_SYMBOLS: Symbols = Symbols {
down: "|",
tee: "|",
ell: "`",
right: "-",
};
fn main() {
en | l_main(args: Args, config: &mut Config) -> CliResult {
config.configure(
args.verbose,
args.quiet,
&args.color,
args.frozen,
args.locked,
&args.target_dir,
&args.unstable_flags,
)?;
let workspace = workspace(config, args.manifest_path)?;
let package = workspace.current()?;
let mut registry = registry(config, &package)?;
let (packages, resolve) = resolve(
&mut registry,
&workspace,
args.features,
args.all_features,
args.no_default_features,
args.no_dev_dependencies,
)?;
let ids = packages.package_ids().collect::<Vec<_>>();
let packages = registry.get(&ids)?;
let root = match args.package {
Some(ref pkg) => resolve.query(pkg)?,
None => package.package_id(),
};
let rustc = config.rustc(Some(&workspace))?;
let target = if args.all_targets {
None
} else {
Some(args.target.as_ref().unwrap_or(&rustc.host).as_str())
};
let format = Pattern::new(&args.format).map_err(|e| failure::err_msg(e.to_string()))?;
let cfgs = get_cfgs(&rustc, &args.target)?;
let graph = build_graph(
&resolve,
&packages,
package.package_id(),
target,
cfgs.as_ref().map(|r| &**r),
)?;
let direction = if args.invert || args.duplicates {
EdgeDirection::Incoming
} else {
EdgeDirection::Outgoing
};
let symbols = match args.charset {
Charset::Ascii => &ASCII_SYMBOLS,
Charset::Utf8 => &UTF8_SYMBOLS,
};
let prefix = if args.prefix_depth {
Prefix::Depth
} else if args.no_indent {
Prefix::None
} else {
Prefix::Indent
};
if args.duplicates {
let dups = find_duplicates(&graph);
for dup in &dups {
print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
println!();
}
} else {
print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
}
Ok(())
}
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
let mut counts = HashMap::new();
// Count by name only. Source and version are irrelevant here.
for package in graph.nodes.keys() {
*counts.entry(package.name()).or_insert(0) += 1;
}
// Theoretically inefficient, but in practice we're only listing duplicates and
// there won't be enough dependencies for it to matter.
let mut dup_ids = Vec::new();
for name in counts.drain().filter(|&(_, v)| v > 1).map(|(k, _)| k) {
dup_ids.extend(graph.nodes.keys().filter(|p| p.name() == name));
}
dup_ids.sort();
dup_ids
}
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
let mut process = util::process(&rustc.path);
process.arg("--print=cfg").env_remove("RUST_LOG");
if let Some(ref s) = *target {
process.arg("--target").arg(s);
}
let output = match process.exec_with_output() {
Ok(output) => output,
Err(e) => return Err(e),
};
let output = str::from_utf8(&output.stdout).unwrap();
let lines = output.lines();
Ok(Some(
lines.map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?,
))
}
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
let root = match manifest_path {
Some(path) => path,
None => important_paths::find_root_manifest_for_wd(config.cwd())?,
};
Workspace::new(&root, config)
}
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
let mut registry = PackageRegistry::new(config)?;
registry.add_sources(Some(package.package_id().source_id().clone()))?;
Ok(registry)
}
fn resolve<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
workspace: &'a Workspace<'cfg>,
features: Option<String>,
all_features: bool,
no_default_features: bool,
no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
let (packages, resolve) = ops::resolve_ws(workspace)?;
let method = Method::Required {
dev_deps: !no_dev_dependencies,
features: &features,
all_features,
uses_default_features: !no_default_features,
};
let resolve = ops::resolve_with_previous(
registry,
workspace,
method,
Some(&resolve),
None,
&[],
true,
true,
)?;
Ok((packages, resolve))
}
struct Node<'a> {
id: PackageId,
metadata: &'a ManifestMetadata,
}
struct Graph<'a> {
graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
fn build_graph<'a>(
resolve: &'a Resolve,
packages: &'a PackageSet<'_>,
root: PackageId,
target: Option<&str>,
cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
let mut graph = Graph {
graph: petgraph | v_logger::init();
let mut config = match Config::default() {
Ok(cfg) => cfg,
Err(e) => {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
};
let Opts::Tree(args) = Opts::from_args();
if let Err(e) = real_main(args, &mut config) {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
}
fn rea | identifier_body |
main.rs | value_name = "CHARSET", default_value = "utf8")]
/// Character set to use in output: utf8, ascii
charset: Charset,
#[structopt(
long = "format",
short = "f",
value_name = "FORMAT",
default_value = "{p}"
)]
/// Format string used for printing dependencies
format: String,
#[structopt(long = "verbose", short = "v", parse(from_occurrences))]
/// Use verbose output (-vv very verbose/build.rs output)
verbose: u32,
#[structopt(long = "quiet", short = "q")]
/// No output printed to stdout other than the tree
quiet: Option<bool>,
#[structopt(long = "color", value_name = "WHEN")]
/// Coloring: auto, always, never
color: Option<String>,
#[structopt(long = "frozen")]
/// Require Cargo.lock and cache are up to date
frozen: bool,
#[structopt(long = "locked")]
/// Require Cargo.lock is up to date
locked: bool,
#[structopt(short = "Z", value_name = "FLAG")]
/// Unstable (nightly-only) flags to Cargo
unstable_flags: Vec<String>,
}
enum Charset {
Utf8,
Ascii,
}
#[derive(Clone, Copy)]
enum Prefix {
None,
Indent,
Depth,
}
impl FromStr for Charset {
type Err = &'static str;
fn from_str(s: &str) -> Result<Charset, &'static str> {
match s {
"utf8" => Ok(Charset::Utf8),
"ascii" => Ok(Charset::Ascii),
_ => Err("invalid charset"),
}
}
}
struct Symbols {
down: &'static str,
tee: &'static str,
ell: &'static str,
right: &'static str,
}
static UTF8_SYMBOLS: Symbols = Symbols {
down: "│",
tee: "├",
ell: "└",
right: "─",
};
static ASCII_SYMBOLS: Symbols = Symbols {
down: "|",
tee: "|",
ell: "`",
right: "-",
};
fn main() {
env_logger::init();
let mut config = match Config::default() {
Ok(cfg) => cfg,
Err(e) => {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
};
let Opts::Tree(args) = Opts::from_args();
if let Err(e) = real_main(args, &mut config) {
let mut shell = Shell::new();
cargo::exit_with_error(e.into(), &mut shell)
}
}
fn real_main(args: Args, config: &mut Config) -> CliResult {
config.configure(
args.verbose,
args.quiet,
&args.color,
args.frozen,
args.locked,
&args.target_dir,
&args.unstable_flags,
)?;
let workspace = workspace(config, args.manifest_path)?;
let package = workspace.current()?;
let mut registry = registry(config, &package)?;
let (packages, resolve) = resolve(
&mut registry,
&workspace,
args.features,
args.all_features,
args.no_default_features,
args.no_dev_dependencies,
)?;
let ids = packages.package_ids().collect::<Vec<_>>();
let packages = registry.get(&ids)?;
let root = match args.package {
Some(ref pkg) => resolve.query(pkg)?,
None => package.package_id(),
};
let rustc = config.rustc(Some(&workspace))?;
let target = if args.all_targets {
None
} else {
Some(args.target.as_ref().unwrap_or(&rustc.host).as_str())
};
let format = Pattern::new(&args.format).map_err(|e| failure::err_msg(e.to_string()))?;
let cfgs = get_cfgs(&rustc, &args.target)?;
let graph = build_graph(
&resolve,
&packages,
package.package_id(),
target,
cfgs.as_ref().map(|r| &**r),
)?;
let direction = if args.invert || args.duplicates {
EdgeDirection::Incoming
} else {
EdgeDirection::Outgoing
};
let symbols = match args.charset {
Charset::Ascii => &ASCII_SYMBOLS,
Charset::Utf8 => &UTF8_SYMBOLS,
};
let prefix = if args.prefix_depth {
Prefix::Depth
} else if args.no_indent {
Prefix::None
} else {
Prefix::Indent
};
if args.duplicates {
let dups = find_duplicates(&graph);
for dup in &dups {
print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
println!();
}
} else {
print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
}
Ok(())
}
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
let mut counts = HashMap::new();
// Count by name only. Source and version are irrelevant here.
for package in graph.nodes.keys() {
*counts.entry(package.name()).or_insert(0) += 1;
}
// Theoretically inefficient, but in practice we're only listing duplicates and
// there won't be enough dependencies for it to matter.
let mut dup_ids = Vec::new();
for name in counts.drain().filter(|&(_, v)| v > 1).map(|(k, _)| k) {
dup_ids.extend(graph.nodes.keys().filter(|p| p.name() == name));
}
dup_ids.sort();
dup_ids
}
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
let mut process = util::process(&rustc.path);
process.arg("--print=cfg").env_remove("RUST_LOG");
if let Some(ref s) = *target {
process.arg("--target").arg(s);
}
let output = match process.exec_with_output() {
Ok(output) => output,
Err(e) => return Err(e),
};
let output = str::from_utf8(&output.stdout).unwrap();
let lines = output.lines();
Ok(Some(
lines.map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?,
))
}
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
let root = match manifest_path {
Some(path) => path,
None => important_paths::find_root_manifest_for_wd(config.cwd())?,
};
Workspace::new(&root, config)
}
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
let mut registry = PackageRegistry::new(config)?;
registry.add_sources(Some(package.package_id().source_id().clone()))?;
Ok(registry)
}
fn resolve<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
workspace: &'a Workspace<'cfg>,
features: Option<String>,
all_features: bool,
no_default_features: bool,
no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
let (packages, resolve) = ops::resolve_ws(workspace)?;
let method = Method::Required {
dev_deps: !no_dev_dependencies,
features: &features,
all_features,
uses_default_features: !no_default_features,
};
let resolve = ops::resolve_with_previous(
registry,
workspace,
method,
Some(&resolve),
None,
&[],
true,
true,
)?;
Ok((packages, resolve))
}
struct Node<'a> {
id: PackageId,
metadata: &'a ManifestMetadata,
}
struct Graph<'a> {
graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
fn build_graph<'a>(
resolve: &'a Resolve,
packages: &'a PackageSet<'_>,
root: PackageId,
target: Option<&str>,
cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
let mut graph = Graph {
graph: petgraph::Graph::new(),
nodes: HashMap::new(),
};
let node = Node {
id: root.clone(),
metadata: packages.get_one(root)?.manifest().metadata(),
};
graph.nodes.insert(root.clone(), graph.graph.add_node(node));
let mut pending = vec![root];
while let Some(pkg_id) = pending.pop() {
let idx = graph.nodes[&pkg_id];
let pkg = packages.get_one(pkg_id)?;
for raw_dep_id in resolve.deps_not_replaced(pkg_id) {
let it = pkg
.dependencies()
.iter()
.filter(|d| d.matches_ignoring_source(raw_dep_id))
.filter(|d| {
d.platform()
.and_then(|p| target.map(|t| p.matches(t, cfgs)))
.unwrap_or(true)
});
let dep_id = match resolve.replacement(raw_dep_id) {
Some(id) => id,
None => raw_dep_id,
}; | random_line_split | ||
main.rs | let graph = build_graph(
&resolve,
&packages,
package.package_id(),
target,
cfgs.as_ref().map(|r| &**r),
)?;
let direction = if args.invert || args.duplicates {
EdgeDirection::Incoming
} else {
EdgeDirection::Outgoing
};
let symbols = match args.charset {
Charset::Ascii => &ASCII_SYMBOLS,
Charset::Utf8 => &UTF8_SYMBOLS,
};
let prefix = if args.prefix_depth {
Prefix::Depth
} else if args.no_indent {
Prefix::None
} else {
Prefix::Indent
};
if args.duplicates {
let dups = find_duplicates(&graph);
for dup in &dups {
print_tree(dup, &graph, &format, direction, symbols, prefix, args.all)?;
println!();
}
} else {
print_tree(&root, &graph, &format, direction, symbols, prefix, args.all)?;
}
Ok(())
}
fn find_duplicates<'a>(graph: &Graph<'a>) -> Vec<PackageId> {
let mut counts = HashMap::new();
// Count by name only. Source and version are irrelevant here.
for package in graph.nodes.keys() {
*counts.entry(package.name()).or_insert(0) += 1;
}
// Theoretically inefficient, but in practice we're only listing duplicates and
// there won't be enough dependencies for it to matter.
let mut dup_ids = Vec::new();
for name in counts.drain().filter(|&(_, v)| v > 1).map(|(k, _)| k) {
dup_ids.extend(graph.nodes.keys().filter(|p| p.name() == name));
}
dup_ids.sort();
dup_ids
}
fn get_cfgs(rustc: &Rustc, target: &Option<String>) -> CargoResult<Option<Vec<Cfg>>> {
let mut process = util::process(&rustc.path);
process.arg("--print=cfg").env_remove("RUST_LOG");
if let Some(ref s) = *target {
process.arg("--target").arg(s);
}
let output = match process.exec_with_output() {
Ok(output) => output,
Err(e) => return Err(e),
};
let output = str::from_utf8(&output.stdout).unwrap();
let lines = output.lines();
Ok(Some(
lines.map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?,
))
}
fn workspace(config: &Config, manifest_path: Option<PathBuf>) -> CargoResult<Workspace<'_>> {
let root = match manifest_path {
Some(path) => path,
None => important_paths::find_root_manifest_for_wd(config.cwd())?,
};
Workspace::new(&root, config)
}
fn registry<'a>(config: &'a Config, package: &Package) -> CargoResult<PackageRegistry<'a>> {
let mut registry = PackageRegistry::new(config)?;
registry.add_sources(Some(package.package_id().source_id().clone()))?;
Ok(registry)
}
fn resolve<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
workspace: &'a Workspace<'cfg>,
features: Option<String>,
all_features: bool,
no_default_features: bool,
no_dev_dependencies: bool,
) -> CargoResult<(PackageSet<'a>, Resolve)> {
let features = Method::split_features(&features.into_iter().collect::<Vec<_>>());
let (packages, resolve) = ops::resolve_ws(workspace)?;
let method = Method::Required {
dev_deps: !no_dev_dependencies,
features: &features,
all_features,
uses_default_features: !no_default_features,
};
let resolve = ops::resolve_with_previous(
registry,
workspace,
method,
Some(&resolve),
None,
&[],
true,
true,
)?;
Ok((packages, resolve))
}
struct Node<'a> {
id: PackageId,
metadata: &'a ManifestMetadata,
}
struct Graph<'a> {
graph: petgraph::Graph<Node<'a>, Kind>,
nodes: HashMap<PackageId, NodeIndex>,
}
fn build_graph<'a>(
resolve: &'a Resolve,
packages: &'a PackageSet<'_>,
root: PackageId,
target: Option<&str>,
cfgs: Option<&[Cfg]>,
) -> CargoResult<Graph<'a>> {
let mut graph = Graph {
graph: petgraph::Graph::new(),
nodes: HashMap::new(),
};
let node = Node {
id: root.clone(),
metadata: packages.get_one(root)?.manifest().metadata(),
};
graph.nodes.insert(root.clone(), graph.graph.add_node(node));
let mut pending = vec![root];
while let Some(pkg_id) = pending.pop() {
let idx = graph.nodes[&pkg_id];
let pkg = packages.get_one(pkg_id)?;
for raw_dep_id in resolve.deps_not_replaced(pkg_id) {
let it = pkg
.dependencies()
.iter()
.filter(|d| d.matches_ignoring_source(raw_dep_id))
.filter(|d| {
d.platform()
.and_then(|p| target.map(|t| p.matches(t, cfgs)))
.unwrap_or(true)
});
let dep_id = match resolve.replacement(raw_dep_id) {
Some(id) => id,
None => raw_dep_id,
};
for dep in it {
let dep_idx = match graph.nodes.entry(dep_id) {
Entry::Occupied(e) => *e.get(),
Entry::Vacant(e) => {
pending.push(dep_id);
let node = Node {
id: dep_id,
metadata: packages.get_one(dep_id)?.manifest().metadata(),
};
*e.insert(graph.graph.add_node(node))
}
};
graph.graph.add_edge(idx, dep_idx, dep.kind());
}
}
}
Ok(graph)
}
fn print_tree<'a>(
package: &'a PackageId,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
prefix: Prefix,
all: bool,
) -> CargoResult<()> {
let mut visited_deps = HashSet::new();
let mut levels_continue = vec![];
let package = match graph.nodes.get(package) {
Some(package) => package,
None => bail!("package {} not found", package),
};
let node = &graph.graph[*package];
print_dependency(
node,
&graph,
format,
direction,
symbols,
&mut visited_deps,
&mut levels_continue,
prefix,
all,
);
Ok(())
}
fn print_dependency<'a>(
package: &Node<'a>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
let new = all || visited_deps.insert(package.id);
let star = if new { "" } else { " (*)" };
match prefix {
Prefix::Depth => print!("{} ", levels_continue.len()),
Prefix::Indent => {
if let Some((&last_continues, rest)) = levels_continue.split_last() {
for &continues in rest {
let c = if continues { symbols.down } else { " " };
print!("{} ", c);
}
let c = if last_continues {
symbols.tee
} else {
symbols.ell
};
print!("{0}{1}{1} ", c, symbols.right);
}
}
Prefix::None => (),
}
println!("{}{}", format.display(&package.id, package.metadata), star);
if !new {
return;
}
let mut normal = vec![];
let mut build = vec![];
let mut development = vec![];
for edge in graph
.graph
.edges_directed(graph.nodes[&package.id], direction)
{
let dep = match direction {
EdgeDirection::Incoming => &graph.graph[edge.source()],
EdgeDirection::Outgoing => &graph.graph[edge.target()],
};
match *edge.weight() {
Kind::Normal => normal.push(dep),
Kind::Build => build.push(dep),
Kind::Development => development.push(dep),
}
}
print_dependency_kind(
Kind::Normal,
normal,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Build,
build,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
print_dependency_kind(
Kind::Development,
development,
graph,
format,
direction,
symbols,
visited_deps,
levels_continue,
prefix,
all,
);
}
fn print_dependency_kind<'a>(
kind: Kind,
mut deps: Vec<&Node<'a>>,
graph: &Graph<'a>,
format: &Pattern,
direction: EdgeDirection,
symbols: &Symbols,
visited_deps: &mut HashSet<PackageId>,
levels_continue: &mut Vec<bool>,
prefix: Prefix,
all: bool,
) {
if deps.is_empty() {
| return;
}
// | conditional_block | |
Liquid.go | pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
}
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi
n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
}
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) | () {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
| _step3 | identifier_name |
Liquid.go | {
pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
}
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi | n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
}
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) _step3() {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
| random_line_split | |
Liquid.go | pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
}
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi
n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag |
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) _step3() {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j | {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
} | conditional_block |
Liquid.go | pressed bool
x, y float32
}
type Liquid struct {
width float32
height float32
pressed bool
pressedprev bool
mouse MouseState
grid *Nodemap
particles []*Particle
}
type Nodemap struct {
width, height int
nodes []*Node
}
func NewNodemap(width int, height int) *Nodemap {
nodes := make([]*Node, (width+1)*(height+1))
for i := range nodes {
nodes[i] = new(Node)
}
return &Nodemap{
width: width,
height: height,
nodes: nodes,
}
}
func (nm *Nodemap) Get(x, y int) *Node {
return nm.nodes[nm.height*y+x]
}
type NodeFunctor func(*Node)
func (nm *Nodemap) Each(functor NodeFunctor) {
for i := range nm.nodes {
if nm.nodes[i].active {
functor(nm.nodes[i])
}
}
}
func (nm *Nodemap) Reset() {
emptyNode := &Node{}
for i := range nm.nodes {
if nm.nodes[i].active {
*(nm.nodes[i]) = *emptyNode
}
}
}
func MakeLiquid(width, height, rows, columns int) *Liquid {
water := &Material{1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
particles := make([]*Particle, rows*columns)
for r := 0; r < rows; r++ {
for c := 0; c < columns; c++ {
particles[r*columns+c] = MakeParticle(water, float32(r), float32(c), 0.0, 0.0)
}
}
return &Liquid{
float32(width),
float32(height),
false,
false,
MouseState{false, 0.0, 0.0},
NewNodemap(width, height),
particles,
}
}
func _equation1(pressure, gravity *[3]float32, x float32) |
func (l *Liquid) _step1() {
for _, particle := range l.particles {
particle.cx = float32(int(particle.x - 0.5))
particle.cy = float32(int(particle.y - 0.5))
_equation1(&particle.px, &particle.gx, particle.cx-particle.x)
_equation1(&particle.py, &particle.gy, particle.cy-particle.y)
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(particle.cx)+i, int(particle.cy)+j)
if n.active != true {
n.active = true
}
phi := particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi
n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
}
}
}
}
func (l *Liquid) _density_summary(drag bool, mdx, mdy float32) {
var n01, n02, n11, n12 *Node
var cx, cy, cxi, cyi int
var pdx, pdy, C20, C02, C30, C03, csum1, csum2, C21, C31,
C12, C13, C11, density, pressure, fx, fy, u, u2, u3, v, v2, v3 float32
for _, p := range l.particles {
cx = int(p.x)
cy = int(p.y)
cxi = cx + 1
cyi = cy + 1
n01 = l.grid.Get(cx, cy)
n02 = l.grid.Get(cx, cyi)
n11 = l.grid.Get(cxi, cy)
n12 = l.grid.Get(cxi, cyi)
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0*pdx - n11.gx - 2.0*n01.gx
C02 = 3.0*pdy - n02.gy - 2.0*n01.gy
C30 = -2.0*pdx + n11.gx + n01.gx
C03 = -2.0*pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0*n12.d - 2.0*n02.gx - n12.gx - 3.0*csum1 - C20
C31 = -2.0*n12.d + n02.gx + n12.gx + 2.0*csum1 - C30
C12 = 3.0*n12.d - 2.0*n11.gy - n12.gy - 3.0*csum2 - C02
C13 = -2.0*n12.d + n11.gy + n12.gy + 2.0*csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - float32(cx)
u2 = u * u
u3 = u * u2
v = p.y - float32(cy)
v2 = v * v
v3 = v * v2
density = n01.d + n01.gx*u + n01.gy*v + C20*u2 + C02*v2 +
C30*u3 + C03*v3 + C21*u2*v + C31*u3*v + C12*u*
v2 + C13*u*v3 + C11*u*v
pressure = density - 1.0
if pressure > 2.0 {
pressure = 2.0
}
fx = 0.0
fy = 0.0
if p.x < 4.0 {
fx += p.material.m * (4.0 - p.x)
} else if p.x > l.width-5 {
fx += p.material.m * (l.width - 5 - p.x)
}
if p.y < 4.0 {
fy += p.material.m * (4.0 - p.y)
} else if p.y > l.height-5 {
fy += p.material.m * (l.height - 5 - p.y)
}
if drag {
vx := Abs(p.x - l.mouse.x)
vy := Abs(p.y - l.mouse.y)
if vx < 10.0 && 10.0 > vy {
weight := p.material.m * (1.0 - vx*0.10) *
(1.0 - vy*0.10)
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
}
}
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j)
phi := p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx*phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy*phi
}
}
}
}
func (l *Liquid) _step3() {
var mu, mv float32
for _, p := range l.particles {
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
n := l.grid.Get(int(p.cx)+i, int(p.cy)+j | {
pressure[0] = 0.5*x*x + 1.5*x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x*x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5*x*x - 1.5*x + 1.125
gravity[2] = x - 1.5
} | identifier_body |
arena.rs | pub(super) mem: Vec<u8>,
}
impl AggressiveArena {
/// Create an AggressiveArena with given cap.
/// This function will allocate a cap size memory block directly for further usage
pub fn new(cap: usize) -> AggressiveArena {
AggressiveArena {
offset: AtomicUsize::new(0),
mem: Vec::<u8>::with_capacity(cap),
}
}
/// For test
pub(super) fn display_all(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.mem.capacity());
unsafe {
let ptr = self.mem.as_ptr();
for i in 0..self.offset.load(Ordering::Acquire) {
let p = ptr.add(i) as *mut u8;
result.push(*p)
}
}
result
}
}
impl Arena for AggressiveArena {
fn alloc_node(&self, height: usize) -> *mut Node {
let ptr_size = mem::size_of::<*mut u8>();
// truncate node size to reduce waste
let used_node_size = MAX_NODE_SIZE - (MAX_HEIGHT - height) * ptr_size;
let n = self.offset.fetch_add(used_node_size, Ordering::SeqCst);
unsafe {
let node_ptr = self.mem.as_ptr().add(n) as *mut u8;
// get the actually to-be-used memory of node and spilt it into 2 parts:
// node part: the Node struct
// nexts part: the pre allocated memory used by elements of next_nodes
let (node_part, nexts_part) = slice::from_raw_parts_mut(node_ptr, used_node_size)
.split_at_mut(used_node_size - height * ptr_size);
#[allow(clippy::cast_ptr_alignment)]
let node = node_part.as_mut_ptr() as *mut Node;
// FIXME: Box::from_raw can be unsafe when releasing memory
#[allow(clippy::cast_ptr_alignment)]
let next_nodes = Box::from_raw(slice::from_raw_parts_mut(
nexts_part.as_mut_ptr() as *mut AtomicPtr<Node>,
height,
));
(*node).height = height;
(*node).next_nodes = next_nodes;
node
}
}
fn alloc_bytes(&self, data: &Slice) -> u32 {
let start = self.offset.fetch_add(data.size(), Ordering::SeqCst);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *mut u8;
for (i, b) in data.to_slice().iter().enumerate() {
let p = ptr.add(i) as *mut u8;
(*p) = *b;
}
}
start as u32
}
fn get(&self, start: usize, count: usize) -> Slice {
let o = self.offset.load(Ordering::Acquire);
invarint!(
start + count <= o,
"[arena] try to get data from [{}] to [{}] but max count is [{}]",
start,
start + count,
o,
);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *const u8;
Slice::new(ptr, count)
}
}
#[inline]
fn has_room_for(&self, size: usize) -> bool {
self.size() - self.memory_used() >= size
}
#[inline]
fn size(&self) -> usize {
self.mem.capacity()
}
#[inline]
fn memory_used(&self) -> usize {
self.offset.load(Ordering::Acquire)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use std::thread;
fn new_default_arena() -> AggressiveArena {
AggressiveArena::new(64 << 20)
}
#[test]
fn test_new_arena() {
let cap = 200;
let arena = AggressiveArena::new(cap);
assert_eq!(arena.memory_used(), 0);
assert_eq!(arena.size(), cap);
}
#[test]
fn test_alloc_single_node() {
let arena = new_default_arena();
let node = arena.alloc_node(MAX_HEIGHT);
unsafe {
assert_eq!((*node).height, MAX_HEIGHT);
assert_eq!((*node).next_nodes.len(), MAX_HEIGHT);
assert_eq!((*node).key_size, 0);
assert_eq!((*node).key_offset, 0);
assert_eq!((*node).value_size, 0);
assert_eq!((*node).value_offset, 0);
// dereference and assigning should work
let u8_ptr = node as *mut u8;
(*node).key_offset = 1;
let key_offset_ptr = u8_ptr.add(0);
assert_eq!(*key_offset_ptr, 1);
(*node).key_size = 2;
let key_size_ptr = u8_ptr.add(8);
assert_eq!(*key_size_ptr, 2);
(*node).value_offset = 3;
let value_offset_ptr = u8_ptr.add(16);
assert_eq!(*value_offset_ptr, 3);
(*node).value_size = 4;
let value_size_ptr = u8_ptr.add(24);
assert_eq!(*value_size_ptr, 4);
// the value of data ptr in 'next_nodes' slice must be the beginning pointer of first element
let next_nodes_ptr = u8_ptr
.add(mem::size_of::<Node>() - mem::size_of::<Box<[AtomicPtr<Node>]>>())
as *mut u64;
let first_element_ptr = u8_ptr.add(mem::size_of::<Node>());
assert_eq!(
"0x".to_owned() + &format!("{:x}", *next_nodes_ptr),
format!("{:?}", first_element_ptr)
);
}
}
#[test]
fn test_alloc_nodes() {
let arena = new_default_arena();
let node1 = arena.alloc_node(4);
let node2 = arena.alloc_node(MAX_HEIGHT);
unsafe {
// node1 and node2 should be neighbor in memory
let struct_tail = node1.add(1) as *mut *mut Node;
let nexts_tail = struct_tail.add(4);
assert_eq!(nexts_tail as *mut Node, node2);
};
}
#[test]
fn test_simple_alloc_bytes() {
let mut arena = AggressiveArena::new(100);
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let offset = arena.alloc_bytes(&Slice::from(&input));
unsafe {
let ptr = arena.mem.as_mut_ptr().add(offset as usize) as *mut u8;
for (i, b) in input.clone().iter().enumerate() {
let p = ptr.add(i);
assert_eq!(*p, *b);
}
}
}
#[test]
fn test_alloc_bytes_concurrency() {
let arena = Arc::new(AggressiveArena::new(500));
let results = Arc::new(Mutex::new(vec![]));
let mut tests = vec![vec![1u8, 2, 3, 4, 5], vec![6u8, 7, 8, 9], vec![10u8, 11]];
for t in tests
.drain(..)
.map(|test| {
let cloned_arena = arena.clone();
let cloned_results = results.clone();
thread::spawn(move || {
let offset = cloned_arena.alloc_bytes(&Slice::from(test.as_slice())) as usize;
// start position in arena, origin test data
cloned_results.lock().unwrap().push((offset, test));
})
})
.collect::<Vec<_>>()
{
t.join().unwrap();
}
let mem_ptr = arena.mem.as_ptr();
for (offset, expect) in results.lock().unwrap().drain(..) {
// compare result and expect byte by byte
unsafe {
let ptr = mem_ptr.add(offset) as *mut u8;
for (i, b) in expect.iter().enumerate() {
let inmem_b = ptr.add(i);
assert_eq!(*inmem_b, *b);
}
}
}
}
#[test]
fn test_get() {
let arena = new_default_arena();
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let start = arena.alloc_bytes(&Slice::from(input.as_slice()));
let result = arena.get(start as usize, 5);
for (b1, b2) in input.iter().zip(result.to_slice()) {
assert_eq!(*b1, *b2);
}
}
#[test]
fn test_memory_used() {
let arena = new_default_arena(); | arena.alloc_node(MAX_HEIGHT); // 152
arena.alloc_node(1); // 64
arena.alloc_bytes(&Slice::from(vec![1u8, 2u8, 3u8, 4u8].as_slice())); // 4
assert_eq!(152 + 64 + 4, arena.memory_used())
} | random_line_split | |
arena.rs | skiplist::{Node, MAX_HEIGHT, MAX_NODE_SIZE};
pub trait Arena {
/// Allocate memory for a node by given height.
/// This method allocates a Node size + height * ptr ( u64 ) memory area.
// TODO: define the potential errors and return Result<Error, *mut Node> instead of raw pointer
fn alloc_node(&self, height: usize) -> *mut Node;
/// Copy bytes data of the Slice into arena directly and return the starting offset
fn alloc_bytes(&self, data: &Slice) -> u32;
/// Get in memory arena bytes as Slice from start point to start + offset
fn get(&self, offset: usize, count: usize) -> Slice;
/// Return bool to indicate whether there is enough room for given size
/// If false, use a new arena for allocating and flush the old.
fn has_room_for(&self, size: usize) -> bool;
/// Return the size of memory that allocated
fn size(&self) -> usize;
/// Return the size of memory that has been allocated.
fn memory_used(&self) -> usize;
}
// TODO: implement CommonArena: https://github.com/google/leveldb/blob/master/util/arena.cc
/// AggressiveArena is a memory pool for allocating and handling Node memory dynamically.
/// Unlike CommonArena, this simplify the memory handling by aggressively pre-allocating
/// the total fixed memory so it's caller's responsibility to ensure the room before allocating.
pub struct AggressiveArena {
// indicates that how many memories has been allocated actually
pub(super) offset: AtomicUsize,
pub(super) mem: Vec<u8>,
}
impl AggressiveArena {
/// Create an AggressiveArena with given cap.
/// This function will allocate a cap size memory block directly for further usage
pub fn new(cap: usize) -> AggressiveArena {
AggressiveArena {
offset: AtomicUsize::new(0),
mem: Vec::<u8>::with_capacity(cap),
}
}
/// For test
pub(super) fn display_all(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.mem.capacity());
unsafe {
let ptr = self.mem.as_ptr();
for i in 0..self.offset.load(Ordering::Acquire) {
let p = ptr.add(i) as *mut u8;
result.push(*p)
}
}
result
}
}
impl Arena for AggressiveArena {
fn alloc_node(&self, height: usize) -> *mut Node {
let ptr_size = mem::size_of::<*mut u8>();
// truncate node size to reduce waste
let used_node_size = MAX_NODE_SIZE - (MAX_HEIGHT - height) * ptr_size;
let n = self.offset.fetch_add(used_node_size, Ordering::SeqCst);
unsafe {
let node_ptr = self.mem.as_ptr().add(n) as *mut u8;
// get the actually to-be-used memory of node and spilt it into 2 parts:
// node part: the Node struct
// nexts part: the pre allocated memory used by elements of next_nodes
let (node_part, nexts_part) = slice::from_raw_parts_mut(node_ptr, used_node_size)
.split_at_mut(used_node_size - height * ptr_size);
#[allow(clippy::cast_ptr_alignment)]
let node = node_part.as_mut_ptr() as *mut Node;
// FIXME: Box::from_raw can be unsafe when releasing memory
#[allow(clippy::cast_ptr_alignment)]
let next_nodes = Box::from_raw(slice::from_raw_parts_mut(
nexts_part.as_mut_ptr() as *mut AtomicPtr<Node>,
height,
));
(*node).height = height;
(*node).next_nodes = next_nodes;
node
}
}
fn alloc_bytes(&self, data: &Slice) -> u32 {
let start = self.offset.fetch_add(data.size(), Ordering::SeqCst);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *mut u8;
for (i, b) in data.to_slice().iter().enumerate() {
let p = ptr.add(i) as *mut u8;
(*p) = *b;
}
}
start as u32
}
fn get(&self, start: usize, count: usize) -> Slice {
let o = self.offset.load(Ordering::Acquire);
invarint!(
start + count <= o,
"[arena] try to get data from [{}] to [{}] but max count is [{}]",
start,
start + count,
o,
);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *const u8;
Slice::new(ptr, count)
}
}
#[inline]
fn has_room_for(&self, size: usize) -> bool {
self.size() - self.memory_used() >= size
}
#[inline]
fn size(&self) -> usize {
self.mem.capacity()
}
#[inline]
fn memory_used(&self) -> usize {
self.offset.load(Ordering::Acquire)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use std::thread;
fn new_default_arena() -> AggressiveArena {
AggressiveArena::new(64 << 20)
}
#[test]
fn test_new_arena() {
let cap = 200;
let arena = AggressiveArena::new(cap);
assert_eq!(arena.memory_used(), 0);
assert_eq!(arena.size(), cap);
}
#[test]
fn test_alloc_single_node() {
let arena = new_default_arena();
let node = arena.alloc_node(MAX_HEIGHT);
unsafe {
assert_eq!((*node).height, MAX_HEIGHT);
assert_eq!((*node).next_nodes.len(), MAX_HEIGHT);
assert_eq!((*node).key_size, 0);
assert_eq!((*node).key_offset, 0);
assert_eq!((*node).value_size, 0);
assert_eq!((*node).value_offset, 0);
// dereference and assigning should work
let u8_ptr = node as *mut u8;
(*node).key_offset = 1;
let key_offset_ptr = u8_ptr.add(0);
assert_eq!(*key_offset_ptr, 1);
(*node).key_size = 2;
let key_size_ptr = u8_ptr.add(8);
assert_eq!(*key_size_ptr, 2);
(*node).value_offset = 3;
let value_offset_ptr = u8_ptr.add(16);
assert_eq!(*value_offset_ptr, 3);
(*node).value_size = 4;
let value_size_ptr = u8_ptr.add(24);
assert_eq!(*value_size_ptr, 4);
// the value of data ptr in 'next_nodes' slice must be the beginning pointer of first element
let next_nodes_ptr = u8_ptr
.add(mem::size_of::<Node>() - mem::size_of::<Box<[AtomicPtr<Node>]>>())
as *mut u64;
let first_element_ptr = u8_ptr.add(mem::size_of::<Node>());
assert_eq!(
"0x".to_owned() + &format!("{:x}", *next_nodes_ptr),
format!("{:?}", first_element_ptr)
);
}
}
#[test]
fn test_alloc_nodes() {
let arena = new_default_arena();
let node1 = arena.alloc_node(4);
let node2 = arena.alloc_node(MAX_HEIGHT);
unsafe {
// node1 and node2 should be neighbor in memory
let struct_tail = node1.add(1) as *mut *mut Node;
let nexts_tail = struct_tail.add(4);
assert_eq!(nexts_tail as *mut Node, node2);
};
}
#[test]
fn test_simple_alloc_bytes() {
let mut arena = AggressiveArena::new(100);
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let offset = arena.alloc_bytes(&Slice::from(&input));
unsafe {
let ptr = arena.mem.as_mut_ptr().add(offset as usize) as *mut u8;
for (i, b) in input.clone().iter().enumerate() {
let p = ptr.add(i);
assert_eq!(*p, *b);
}
}
}
#[test]
fn | () {
let arena = Arc::new(AggressiveArena::new(500));
let results = Arc::new(Mutex::new(vec![]));
let mut tests = vec![vec![1u8, 2, 3, 4, 5], vec![6u8, 7, 8, 9], vec![10u8, 11]];
for t in tests
.drain(..)
.map(|test| {
let cloned_arena = arena.clone();
let cloned_results = results.clone();
thread::spawn(move || {
let offset = cloned_arena.alloc_bytes(&Slice::from(test.as_slice())) as usize;
// start position in arena, origin test data
cloned | test_alloc_bytes_concurrency | identifier_name |
arena.rs | skiplist::{Node, MAX_HEIGHT, MAX_NODE_SIZE};
pub trait Arena {
/// Allocate memory for a node by given height.
/// This method allocates a Node size + height * ptr ( u64 ) memory area.
// TODO: define the potential errors and return Result<Error, *mut Node> instead of raw pointer
fn alloc_node(&self, height: usize) -> *mut Node;
/// Copy bytes data of the Slice into arena directly and return the starting offset
fn alloc_bytes(&self, data: &Slice) -> u32;
/// Get in memory arena bytes as Slice from start point to start + offset
fn get(&self, offset: usize, count: usize) -> Slice;
/// Return bool to indicate whether there is enough room for given size
/// If false, use a new arena for allocating and flush the old.
fn has_room_for(&self, size: usize) -> bool;
/// Return the size of memory that allocated
fn size(&self) -> usize;
/// Return the size of memory that has been allocated.
fn memory_used(&self) -> usize;
}
// TODO: implement CommonArena: https://github.com/google/leveldb/blob/master/util/arena.cc
/// AggressiveArena is a memory pool for allocating and handling Node memory dynamically.
/// Unlike CommonArena, this simplify the memory handling by aggressively pre-allocating
/// the total fixed memory so it's caller's responsibility to ensure the room before allocating.
pub struct AggressiveArena {
// indicates that how many memories has been allocated actually
pub(super) offset: AtomicUsize,
pub(super) mem: Vec<u8>,
}
impl AggressiveArena {
/// Create an AggressiveArena with given cap.
/// This function will allocate a cap size memory block directly for further usage
pub fn new(cap: usize) -> AggressiveArena {
AggressiveArena {
offset: AtomicUsize::new(0),
mem: Vec::<u8>::with_capacity(cap),
}
}
/// For test
pub(super) fn display_all(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.mem.capacity());
unsafe {
let ptr = self.mem.as_ptr();
for i in 0..self.offset.load(Ordering::Acquire) {
let p = ptr.add(i) as *mut u8;
result.push(*p)
}
}
result
}
}
impl Arena for AggressiveArena {
fn alloc_node(&self, height: usize) -> *mut Node | (*node).height = height;
(*node).next_nodes = next_nodes;
node
}
}
fn alloc_bytes(&self, data: &Slice) -> u32 {
let start = self.offset.fetch_add(data.size(), Ordering::SeqCst);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *mut u8;
for (i, b) in data.to_slice().iter().enumerate() {
let p = ptr.add(i) as *mut u8;
(*p) = *b;
}
}
start as u32
}
fn get(&self, start: usize, count: usize) -> Slice {
let o = self.offset.load(Ordering::Acquire);
invarint!(
start + count <= o,
"[arena] try to get data from [{}] to [{}] but max count is [{}]",
start,
start + count,
o,
);
unsafe {
let ptr = self.mem.as_ptr().add(start) as *const u8;
Slice::new(ptr, count)
}
}
#[inline]
fn has_room_for(&self, size: usize) -> bool {
self.size() - self.memory_used() >= size
}
#[inline]
fn size(&self) -> usize {
self.mem.capacity()
}
#[inline]
fn memory_used(&self) -> usize {
self.offset.load(Ordering::Acquire)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use std::thread;
fn new_default_arena() -> AggressiveArena {
AggressiveArena::new(64 << 20)
}
#[test]
fn test_new_arena() {
let cap = 200;
let arena = AggressiveArena::new(cap);
assert_eq!(arena.memory_used(), 0);
assert_eq!(arena.size(), cap);
}
#[test]
fn test_alloc_single_node() {
let arena = new_default_arena();
let node = arena.alloc_node(MAX_HEIGHT);
unsafe {
assert_eq!((*node).height, MAX_HEIGHT);
assert_eq!((*node).next_nodes.len(), MAX_HEIGHT);
assert_eq!((*node).key_size, 0);
assert_eq!((*node).key_offset, 0);
assert_eq!((*node).value_size, 0);
assert_eq!((*node).value_offset, 0);
// dereference and assigning should work
let u8_ptr = node as *mut u8;
(*node).key_offset = 1;
let key_offset_ptr = u8_ptr.add(0);
assert_eq!(*key_offset_ptr, 1);
(*node).key_size = 2;
let key_size_ptr = u8_ptr.add(8);
assert_eq!(*key_size_ptr, 2);
(*node).value_offset = 3;
let value_offset_ptr = u8_ptr.add(16);
assert_eq!(*value_offset_ptr, 3);
(*node).value_size = 4;
let value_size_ptr = u8_ptr.add(24);
assert_eq!(*value_size_ptr, 4);
// the value of data ptr in 'next_nodes' slice must be the beginning pointer of first element
let next_nodes_ptr = u8_ptr
.add(mem::size_of::<Node>() - mem::size_of::<Box<[AtomicPtr<Node>]>>())
as *mut u64;
let first_element_ptr = u8_ptr.add(mem::size_of::<Node>());
assert_eq!(
"0x".to_owned() + &format!("{:x}", *next_nodes_ptr),
format!("{:?}", first_element_ptr)
);
}
}
#[test]
fn test_alloc_nodes() {
let arena = new_default_arena();
let node1 = arena.alloc_node(4);
let node2 = arena.alloc_node(MAX_HEIGHT);
unsafe {
// node1 and node2 should be neighbor in memory
let struct_tail = node1.add(1) as *mut *mut Node;
let nexts_tail = struct_tail.add(4);
assert_eq!(nexts_tail as *mut Node, node2);
};
}
#[test]
fn test_simple_alloc_bytes() {
let mut arena = AggressiveArena::new(100);
let input = vec![1u8, 2u8, 3u8, 4u8, 5u8];
let offset = arena.alloc_bytes(&Slice::from(&input));
unsafe {
let ptr = arena.mem.as_mut_ptr().add(offset as usize) as *mut u8;
for (i, b) in input.clone().iter().enumerate() {
let p = ptr.add(i);
assert_eq!(*p, *b);
}
}
}
#[test]
fn test_alloc_bytes_concurrency() {
let arena = Arc::new(AggressiveArena::new(500));
let results = Arc::new(Mutex::new(vec![]));
let mut tests = vec![vec![1u8, 2, 3, 4, 5], vec![6u8, 7, 8, 9], vec![10u8, 11]];
for t in tests
.drain(..)
.map(|test| {
let cloned_arena = arena.clone();
let cloned_results = results.clone();
thread::spawn(move || {
let offset = cloned_arena.alloc_bytes(&Slice::from(test.as_slice())) as usize;
// start position in arena, origin test data
cloned | {
let ptr_size = mem::size_of::<*mut u8>();
// truncate node size to reduce waste
let used_node_size = MAX_NODE_SIZE - (MAX_HEIGHT - height) * ptr_size;
let n = self.offset.fetch_add(used_node_size, Ordering::SeqCst);
unsafe {
let node_ptr = self.mem.as_ptr().add(n) as *mut u8;
// get the actually to-be-used memory of node and spilt it into 2 parts:
// node part: the Node struct
// nexts part: the pre allocated memory used by elements of next_nodes
let (node_part, nexts_part) = slice::from_raw_parts_mut(node_ptr, used_node_size)
.split_at_mut(used_node_size - height * ptr_size);
#[allow(clippy::cast_ptr_alignment)]
let node = node_part.as_mut_ptr() as *mut Node;
// FIXME: Box::from_raw can be unsafe when releasing memory
#[allow(clippy::cast_ptr_alignment)]
let next_nodes = Box::from_raw(slice::from_raw_parts_mut(
nexts_part.as_mut_ptr() as *mut AtomicPtr<Node>,
height,
)); | identifier_body |
goes.py | 18"),
9: TimeRange("1997-01-01", "1998-09-08"),
10: TimeRange("1998-07-10", "2009-12-01"),
11: TimeRange("2006-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
|
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def _get_time_for_url(self, urls):
these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split('Z')[0])
end_time = parse_time(os.path.basename(this_url).split('_e')[1].split('Z')[0])
these_timeranges.append(TimeRange(start_time, end_time))
if this_url.count('/l1b/') > 0: # this is a level 1b data file
start_time = datetime.strptime(os.path.basename(this_url).split('_s')[
| raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
) | conditional_block |
goes.py | 8: TimeRange("1996-03-21", "2003-06-18"),
9: TimeRange("1997-01-01", "1998-09-08"),
10: TimeRange("1998-07-10", "2009-12-01"),
11: TimeRange("2006-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
)
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def _get_time_for_url(self, urls):
these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split | 6: TimeRange("1983-06-01", "1994-08-18"),
7: TimeRange("1994-01-01", "1996-08-13"), | random_line_split | |
goes.py | -18"),
9: TimeRange("1997-01-01", "1998-09-08"),
10: TimeRange("1998-07-10", "2009-12-01"),
11: TimeRange("2006-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
)
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def | (self, urls):
these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split('Z')[0])
end_time = parse_time(os.path.basename(this_url).split('_e')[1].split('Z')[0])
these_timeranges.append(TimeRange(start_time, end_time))
if this_url.count('/l1b/') > 0: # this is a level 1b data file
start_time = datetime.strptime(os.path.basename(this_url).split('_s')[
| _get_time_for_url | identifier_name |
goes.py | 6-06-20", "2008-02-15"),
12: TimeRange("2002-12-13", "2007-05-08"),
13: TimeRange("2006-08-01", "2006-08-01"),
14: TimeRange("2009-12-02", "2010-10-04"),
15: TimeRange("2010-09-01", parse_time("now")),
}
results = []
for sat_num in goes_operational:
if date in goes_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(
"No operational GOES satellites on {}".format(
date.strftime(TIME_FORMAT)
)
)
def _get_time_for_url(self, urls):
times = []
for uri in urls:
uripath = urlsplit(uri).path
# Extract the yymmdd or yyyymmdd timestamp
datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
# 1999-01-15 as an integer.
if int(datestamp) <= 990115:
start = Time.strptime(datestamp, "%y%m%d")
else:
start = Time.strptime(datestamp, "%Y%m%d")
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
times.append(TimeRange(start, start + almost_day))
return times
def _get_url_for_timerange(self, timerange, **kwargs):
"""
Returns a URL to the GOES data for the specified date.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)
if timerange.end < parse_time("1999/01/15"):
goes_file = "%Y/go{satellitenumber:02d}%y%m%d.fits"
elif timerange.start < parse_time("1999/01/15") and timerange.end >= parse_time("1999/01/15"):
return self._get_overlap_urls(timerange)
else:
goes_file = "%Y/go{satellitenumber}%Y%m%d.fits"
goes_pattern = f"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}"
satellitenumber = kwargs.get("satellitenumber", self._get_goes_sat_num(timerange.start))
goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)
return goes_files.filelist(timerange)
def _get_overlap_urls(self, timerange):
"""
Return a list of URLs over timerange when the URL path changed format `%Y` to `%y`
on the date 1999/01/15
Parameters
----------
timerange : `~sunpy.time.TimeRange`
The time range you want the files for.
Returns
-------
`list`
The URL(s) for the corresponding timerange.
"""
tr_before = TimeRange(timerange.start, parse_time("1999/01/14"))
tr_after = TimeRange(parse_time("1999/01/15"), timerange.end)
urls_before = self._get_url_for_timerange(tr_before)
urls_after = self._get_url_for_timerange(tr_after)
return urls_before + urls_after
def _makeimap(self):
"""
Helper function used to hold information about source.
"""
self.map_["source"] = "nasa"
self.map_["instrument"] = "goes"
self.map_["physobs"] = "irradiance"
self.map_["provider"] = "sdac"
@classmethod
def _can_handle_query(cls, *query):
"""
Answers whether client can service the query.
Parameters
----------
query : list of query objects
Returns
-------
boolean
answer as to whether client can service the query
"""
chkattr = ["Time", "Instrument", "SatelliteNumber"]
chklist = [x.__class__.__name__ in chkattr for x in query]
for x in query:
if x.__class__.__name__ == "Instrument" and x.value.lower() in (
"xrs",
"goes",
):
return all(chklist)
return False
@classmethod
def _attrs_module(cls):
return 'goes', 'sunpy.net.dataretriever.attrs.goes'
@classmethod
def register_values(cls):
from sunpy.net import attrs
goes_number = [2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
adict = {attrs.Instrument: [
("GOES", "The Geostationary Operational Environmental Satellite Program."),
("XRS", "GOES X-ray Flux")],
attrs.goes.SatelliteNumber: [(str(x), f"GOES Satellite Number {x}") for x in goes_number]}
return adict
class SUVIClient(GenericClient):
"""
Provides access to data from the GOES Solar Ultraviolet Imager (SUVI).
SUVI data are provided by NOAA at the following url
https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/
The SUVI instrument was first included on GOES-16. It produces level-1b as
well as level-2 data products. Level-2 data products are a weighted average
of level-1b product files and therefore provide higher imaging dynamic
range than individual images. The exposure time of level 1b images range
from 1 s to 0.005 s. SUVI supports the following wavelengths;
94, 131, 171, 195, 284, 304 angstrom. If no wavelength is specified, images
from all wavelengths are returned.
Note
----
GOES-16 began providing regular level-1b data on 2018-06-01. At the time
of writing, SUVI on GOES-17 is operational but currently does not provide
Level-2 data.
"""
@add_common_docstring(**_variables_for_parse_time_docstring())
def _get_goes_sat_num(self, date):
"""
Determines the best satellite number for a given date.
Parameters
----------
date : {parse_time_types}
The date to determine which satellite is active.
Note
----
At the time this function was written.
GOES-17 is operational but currently does not provide Level 2 data therefore it is never returned.
The GOES-16 start date is based on the availability of regular level 1b data.
"""
# GOES-17 is operational but currently does not provide Level 2 data
# GOES-16 start date is based on the availability of regular level 1b data
suvi_operational = {
16: TimeRange("2018-06-01", parse_time("now")),
}
results = []
for sat_num in suvi_operational:
if date in suvi_operational[sat_num]:
# if true then the satellite with sat_num is available
results.append(sat_num)
if results:
# Return the newest satellite
return max(results)
else:
# if no satellites were found then raise an exception
raise ValueError(f"No operational SUVI instrument on {date.strftime(TIME_FORMAT)}")
def _get_time_for_url(self, urls):
| these_timeranges = []
for this_url in urls:
if this_url.count('/l2/') > 0: # this is a level 2 data file
start_time = parse_time(os.path.basename(this_url).split('_s')[2].split('Z')[0])
end_time = parse_time(os.path.basename(this_url).split('_e')[1].split('Z')[0])
these_timeranges.append(TimeRange(start_time, end_time))
if this_url.count('/l1b/') > 0: # this is a level 1b data file
start_time = datetime.strptime(os.path.basename(this_url).split('_s')[
1].split('_e')[0][:-1], '%Y%j%H%M%S')
end_time = datetime.strptime(os.path.basename(this_url).split('_e')[
1].split('_c')[0][:-1], '%Y%j%H%M%S')
these_timeranges.append(TimeRange(start_time, end_time))
return these_timeranges | identifier_body | |
writeToStore.ts | , incoming) as T;
},
variables,
varString: JSON.stringify(variables),
fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
},
});
if (!isReference(ref)) {
throw new InvariantError(`Could not identify object ${JSON.stringify(result)}`);
}
// Any IDs written explicitly to the cache will be retained as
// reachable root IDs for garbage collection purposes. Although this
// logic includes root IDs like ROOT_QUERY and ROOT_MUTATION, their
// retainment counts are effectively ignored because cache.gc() always
// includes them in its root ID set.
store.retain(ref.__ref);
return ref;
}
private processSelectionSet({
dataId,
result,
selectionSet,
context,
// This object allows processSelectionSet to report useful information
// to its callers without explicitly returning that information.
mergeTree,
}: ProcessSelectionSetOptions): StoreObject | Reference {
const { policies } = this.cache;
// Identify the result object, even if dataId was already provided,
// since we always need keyObject below.
const [id, keyObject] = policies.identify(
result, selectionSet, context.fragmentMap);
// If dataId was not provided, fall back to the id just generated by
// policies.identify.
dataId = dataId || id;
if ("string" === typeof dataId) {
// Avoid processing the same entity object using the same selection
// set more than once. We use an array instead of a Set since most
// entity IDs will be written using only one selection set, so the
// size of this array is likely to be very small, meaning indexOf is
// likely to be faster than Set.prototype.has.
const sets = context.written[dataId] || (context.written[dataId] = []);
const ref = makeReference(dataId);
if (sets.indexOf(selectionSet) >= 0) return ref;
sets.push(selectionSet);
// If we're about to write a result object into the store, but we
// happen to know that the exact same (===) result object would be
// returned if we were to reread the result with the same inputs,
// then we can skip the rest of the processSelectionSet work for
// this object, and immediately return a Reference to it.
if (this.reader && this.reader.isFresh(
result,
ref,
selectionSet,
context,
)) {
return ref;
}
}
// This variable will be repeatedly updated using context.merge to
// accumulate all fields that need to be written into the store.
let incomingFields: StoreObject = Object.create(null);
// Write any key fields that were used during identification, even if
// they were not mentioned in the original query.
if (keyObject) {
incomingFields = context.merge(incomingFields, keyObject);
}
// If typename was not passed in, infer it. Note that typename is
// always passed in for tricky-to-infer cases such as "Query" for
// ROOT_QUERY.
const typename: string | undefined =
(dataId && policies.rootTypenamesById[dataId]) ||
getTypenameFromResult(result, selectionSet, context.fragmentMap) ||
(dataId && context.store.get(dataId, "__typename") as string);
if ("string" === typeof typename) {
incomingFields.__typename = typename;
}
const workSet = new Set(selectionSet.selections);
workSet.forEach(selection => {
if (!shouldInclude(selection, context.variables)) return;
if (isField(selection)) {
const resultFieldKey = resultKeyNameFromField(selection);
const value = result[resultFieldKey];
if (typeof value !== 'undefined') {
const storeFieldName = policies.getStoreFieldName({
typename,
fieldName: selection.name.value,
field: selection,
variables: context.variables,
});
const childTree = getChildMergeTree(mergeTree, storeFieldName);
let incomingValue =
this.processFieldValue(value, selection, context, childTree);
const childTypename = selection.selectionSet
&& context.store.getFieldValue<string>(incomingValue as StoreObject, "__typename")
|| void 0;
const merge = policies.getMergeFunction(
typename,
selection.name.value,
childTypename,
);
if (merge) {
childTree.info = {
// TODO Check compatibility against any existing
// childTree.field?
field: selection,
typename,
merge,
};
} else {
maybeRecycleChildMergeTree(mergeTree, storeFieldName);
}
incomingFields = context.merge(incomingFields, {
[storeFieldName]: incomingValue,
});
} else if (
policies.usingPossibleTypes &&
!hasDirectives(["defer", "client"], selection)
) {
throw new InvariantError(
`Missing field '${resultFieldKey}' in ${JSON.stringify(
result,
null,
2,
).substring(0, 100)}`,
);
}
} else {
// This is not a field, so it must be a fragment, either inline or named
const fragment = getFragmentFromSelection(
selection,
context.fragmentMap,
);
if (fragment &&
// By passing result and context.variables, we enable
// policies.fragmentMatches to bend the rules when typename is
// not a known subtype of the fragment type condition, but the
// result object contains all the keys requested by the
// fragment, which strongly suggests the fragment probably
// matched. This fuzzy matching behavior must be enabled by
// including a regular expression string (such as ".*" or
// "Prefix.*" or ".*Suffix") in the possibleTypes array for
// specific supertypes; otherwise, all matching remains exact.
// Fuzzy matches are remembered by the Policies object and
// later used when reading from the cache. Since there is no
// incoming result object to check when reading, reading does
// not involve the same fuzzy inference, so the StoreReader
// class calls policies.fragmentMatches without passing result
// or context.variables. The flexibility of fuzzy matching
// allows existing clients to accommodate previously unknown
// __typename strings produced by server/schema changes, which
// would otherwise be breaking changes.
policies.fragmentMatches(fragment, typename, result, context.variables)) {
fragment.selectionSet.selections.forEach(workSet.add, workSet);
}
}
});
if ("string" === typeof dataId) {
const entityRef = makeReference(dataId);
if (mergeTree.map.size) {
incomingFields = this.applyMerges(mergeTree, entityRef, incomingFields, context);
}
if (process.env.NODE_ENV !== "production") {
const hasSelectionSet = (storeFieldName: string) => | fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
const fieldsWithSelectionSets = new Set<string>();
workSet.forEach(selection => {
if (isField(selection) && selection.selectionSet) {
fieldsWithSelectionSets.add(selection.name.value);
}
});
const hasMergeFunction = (storeFieldName: string) => {
const childTree = mergeTree.map.get(storeFieldName);
return Boolean(childTree && childTree.info && childTree.info.merge);
};
Object.keys(incomingFields).forEach(storeFieldName => {
// If a merge function was defined for this field, trust that it
// did the right thing about (not) clobbering data. If the field
// has no selection set, it's a scalar field, so it doesn't need
// a merge function (even if it's an object, like JSON data).
if (hasSelectionSet(storeFieldName) &&
!hasMergeFunction(storeFieldName)) {
warnAboutDataLoss(
entityRef,
incomingFields,
storeFieldName,
context.store,
);
}
});
}
context.store.merge(dataId, incomingFields);
return entityRef;
}
return incomingFields;
}
private processFieldValue(
value: any,
field: FieldNode,
context: WriteContext,
mergeTree: MergeTree,
): StoreValue {
if (!field.selectionSet || value === null) {
// In development, we need to clone scalar values so that they can be
// safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
// it's cheaper to store the scalar values directly in the cache.
return process.env.NODE_ENV === 'production' ? value : cloneDeep(value);
}
if (Array.isArray(value)) {
return value.map((item, i) => {
const value = this.processFieldValue(
item, field, context, getChildMergeTree(mergeTree, i));
maybeRecycleChildMergeTree(mergeTree, i);
return value;
});
}
return this.processSelectionSet({
result: value,
selectionSet: field.selectionSet,
context,
mergeTree,
});
}
private applyMerges<T extends StoreValue>(
mergeTree: MergeTree,
existing: StoreValue,
incoming: T,
context: ReadMergeModifyContext,
getStorageArgs?: Parameters<EntityStore["getStorage"]>,
): T {
if (mergeTree.map.size && !isReference(incoming)) {
const e: StoreObject | Reference | undefined = (
// Items in the same position in different arrays are | random_line_split | |
writeToStore.ts | storeFieldName]: incomingValue,
});
} else if (
policies.usingPossibleTypes &&
!hasDirectives(["defer", "client"], selection)
) {
throw new InvariantError(
`Missing field '${resultFieldKey}' in ${JSON.stringify(
result,
null,
2,
).substring(0, 100)}`,
);
}
} else {
// This is not a field, so it must be a fragment, either inline or named
const fragment = getFragmentFromSelection(
selection,
context.fragmentMap,
);
if (fragment &&
// By passing result and context.variables, we enable
// policies.fragmentMatches to bend the rules when typename is
// not a known subtype of the fragment type condition, but the
// result object contains all the keys requested by the
// fragment, which strongly suggests the fragment probably
// matched. This fuzzy matching behavior must be enabled by
// including a regular expression string (such as ".*" or
// "Prefix.*" or ".*Suffix") in the possibleTypes array for
// specific supertypes; otherwise, all matching remains exact.
// Fuzzy matches are remembered by the Policies object and
// later used when reading from the cache. Since there is no
// incoming result object to check when reading, reading does
// not involve the same fuzzy inference, so the StoreReader
// class calls policies.fragmentMatches without passing result
// or context.variables. The flexibility of fuzzy matching
// allows existing clients to accommodate previously unknown
// __typename strings produced by server/schema changes, which
// would otherwise be breaking changes.
policies.fragmentMatches(fragment, typename, result, context.variables)) {
fragment.selectionSet.selections.forEach(workSet.add, workSet);
}
}
});
if ("string" === typeof dataId) {
const entityRef = makeReference(dataId);
if (mergeTree.map.size) {
incomingFields = this.applyMerges(mergeTree, entityRef, incomingFields, context);
}
if (process.env.NODE_ENV !== "production") {
const hasSelectionSet = (storeFieldName: string) =>
fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
const fieldsWithSelectionSets = new Set<string>();
workSet.forEach(selection => {
if (isField(selection) && selection.selectionSet) {
fieldsWithSelectionSets.add(selection.name.value);
}
});
const hasMergeFunction = (storeFieldName: string) => {
const childTree = mergeTree.map.get(storeFieldName);
return Boolean(childTree && childTree.info && childTree.info.merge);
};
Object.keys(incomingFields).forEach(storeFieldName => {
// If a merge function was defined for this field, trust that it
// did the right thing about (not) clobbering data. If the field
// has no selection set, it's a scalar field, so it doesn't need
// a merge function (even if it's an object, like JSON data).
if (hasSelectionSet(storeFieldName) &&
!hasMergeFunction(storeFieldName)) {
warnAboutDataLoss(
entityRef,
incomingFields,
storeFieldName,
context.store,
);
}
});
}
context.store.merge(dataId, incomingFields);
return entityRef;
}
return incomingFields;
}
/**
 * Converts one raw result field value into a StoreValue.
 *
 * Scalars (no selection set, or null) are stored as-is in production and
 * deep-cloned in development so they can be safely frozen later. Arrays are
 * processed element-by-element with per-index child merge trees, and object
 * values are recursively normalized via processSelectionSet.
 *
 * Fix: the array branch declared an inner `const value` that shadowed the
 * `value` parameter; renamed to `itemValue` for clarity (behavior unchanged).
 */
private processFieldValue(
  value: any,
  field: FieldNode,
  context: WriteContext,
  mergeTree: MergeTree,
): StoreValue {
  if (!field.selectionSet || value === null) {
    // In development, we need to clone scalar values so that they can be
    // safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
    // it's cheaper to store the scalar values directly in the cache.
    return process.env.NODE_ENV === 'production' ? value : cloneDeep(value);
  }
  if (Array.isArray(value)) {
    return value.map((item, i) => {
      const itemValue = this.processFieldValue(
        item, field, context, getChildMergeTree(mergeTree, i));
      // Return the child tree to the pool if this element left it empty.
      maybeRecycleChildMergeTree(mergeTree, i);
      return itemValue;
    });
  }
  return this.processSelectionSet({
    result: value,
    selectionSet: field.selectionSet,
    context,
    mergeTree,
  });
}
/**
 * Recursively applies any field merge functions recorded in mergeTree to
 * the incoming data. Returns the original `incoming` value when no merge
 * changed anything, or a shallow clone with the changed fields otherwise.
 * The getStorageArgs array is treated as a mutable path stack (push before
 * recursing, pop after), so statement order here is significant.
 */
private applyMerges<T extends StoreValue>(
  mergeTree: MergeTree,
  existing: StoreValue,
  incoming: T,
  context: ReadMergeModifyContext,
  getStorageArgs?: Parameters<EntityStore["getStorage"]>,
): T {
  if (mergeTree.map.size && !isReference(incoming)) {
    const e: StoreObject | Reference | undefined = (
      // Items in the same position in different arrays are not
      // necessarily related to each other, so when incoming is an array
      // we process its elements as if there was no existing data.
      !Array.isArray(incoming) &&
      // Likewise, existing must be either a Reference or a StoreObject
      // in order for its fields to be safe to merge with the fields of
      // the incoming object.
      (isReference(existing) || storeValueIsStoreObject(existing))
    ) ? existing : void 0;
    // This narrowing is implied by mergeTree.map.size > 0 and
    // !isReference(incoming), though TypeScript understandably cannot
    // hope to infer this type.
    const i = incoming as StoreObject | StoreValue[];
    // The options.storage objects provided to read and merge functions
    // are derived from the identity of the parent object plus a
    // sequence of storeFieldName strings/numbers identifying the nested
    // field name path of each field value to be merged.
    if (e && !getStorageArgs) {
      getStorageArgs = [isReference(e) ? e.__ref : e];
    }
    // It's possible that applying merge functions to this subtree will
    // not change the incoming data, so this variable tracks the fields
    // that did change, so we can create a new incoming object when (and
    // only when) at least one incoming field has changed. We use a Map
    // to preserve the type of numeric keys.
    let changedFields: Map<string | number, StoreValue> | undefined;
    const getValue = (
      from: typeof e | typeof i,
      name: string | number,
    ): StoreValue => {
      // Arrays are indexed only by number; any other key yields undefined.
      return Array.isArray(from)
        ? (typeof name === "number" ? from[name] : void 0)
        : context.store.getFieldValue(from, String(name))
    };
    mergeTree.map.forEach((childTree, storeFieldName) => {
      // Extend the storage path for this child before recursing.
      if (getStorageArgs) {
        getStorageArgs.push(storeFieldName);
      }
      const eVal = getValue(e, storeFieldName);
      const iVal = getValue(i, storeFieldName);
      const aVal = this.applyMerges(
        childTree,
        eVal,
        iVal,
        context,
        getStorageArgs,
      );
      if (aVal !== iVal) {
        changedFields = changedFields || new Map;
        changedFields.set(storeFieldName, aVal);
      }
      // Restore the storage path to its pre-iteration state.
      if (getStorageArgs) {
        invariant(getStorageArgs.pop() === storeFieldName);
      }
    });
    if (changedFields) {
      // Shallow clone i so we can add changed fields to it.
      incoming = (Array.isArray(i) ? i.slice(0) : { ...i }) as T;
      changedFields.forEach((value, name) => {
        (incoming as any)[name] = value;
      });
    }
  }
  // A merge function registered at this exact tree position decides the
  // final value for this subtree.
  if (mergeTree.info) {
    return this.cache.policies.runMergeFunction(
      existing,
      incoming,
      mergeTree.info,
      context,
      getStorageArgs && context.store.getStorage(...getStorageArgs),
    );
  }
  return incoming;
}
}
const emptyMergeTreePool: MergeTree[] = [];
/**
 * Returns the child MergeTree stored under `name`, creating one on demand.
 * New children are taken from emptyMergeTreePool when available so trees
 * recycled by maybeRecycleChildMergeTree get reused.
 */
function getChildMergeTree(
  { map }: MergeTree,
  name: string | number,
): MergeTree {
  let child = map.get(name);
  if (!child) {
    child = emptyMergeTreePool.pop() || { map: new Map };
    map.set(name, child);
  }
  return child;
}
/**
 * If the child MergeTree stored under `name` carries no merge info and has
 * no children of its own, detach it from the parent map and return it to
 * emptyMergeTreePool for reuse by getChildMergeTree.
 */
function maybeRecycleChildMergeTree(
  { map }: MergeTree,
  name: string | number,
) {
  const child = map.get(name);
  if (child && !child.info && !child.map.size) {
    emptyMergeTreePool.push(child);
    map.delete(name);
  }
}
const warnings = new Set<string>();
// Note that this function is unused in production, and thus should be
// pruned by any well-configured minifier.
function warnAboutDataLoss(
existingRef: Reference,
incomingObj: StoreObject,
storeFieldName: string,
store: NormalizedCache,
) | {
const getChild = (objOrRef: StoreObject | Reference): StoreObject | false => {
const child = store.getFieldValue<StoreObject>(objOrRef, storeFieldName);
return typeof child === "object" && child;
};
const existing = getChild(existingRef);
if (!existing) return;
const incoming = getChild(incomingObj);
if (!incoming) return;
// It's always safe to replace a reference, since it refers to data
// safely stored elsewhere.
if (isReference(existing)) return;
// If the values are structurally equivalent, we do not need to worry
// about incoming replacing existing.
if (equal(existing, incoming)) return;
| identifier_body | |
writeToStore.ts | ,
field: selection,
variables: context.variables,
});
const childTree = getChildMergeTree(mergeTree, storeFieldName);
let incomingValue =
this.processFieldValue(value, selection, context, childTree);
const childTypename = selection.selectionSet
&& context.store.getFieldValue<string>(incomingValue as StoreObject, "__typename")
|| void 0;
const merge = policies.getMergeFunction(
typename,
selection.name.value,
childTypename,
);
if (merge) {
childTree.info = {
// TODO Check compatibility against any existing
// childTree.field?
field: selection,
typename,
merge,
};
} else {
maybeRecycleChildMergeTree(mergeTree, storeFieldName);
}
incomingFields = context.merge(incomingFields, {
[storeFieldName]: incomingValue,
});
} else if (
policies.usingPossibleTypes &&
!hasDirectives(["defer", "client"], selection)
) {
throw new InvariantError(
`Missing field '${resultFieldKey}' in ${JSON.stringify(
result,
null,
2,
).substring(0, 100)}`,
);
}
} else {
// This is not a field, so it must be a fragment, either inline or named
const fragment = getFragmentFromSelection(
selection,
context.fragmentMap,
);
if (fragment &&
// By passing result and context.variables, we enable
// policies.fragmentMatches to bend the rules when typename is
// not a known subtype of the fragment type condition, but the
// result object contains all the keys requested by the
// fragment, which strongly suggests the fragment probably
// matched. This fuzzy matching behavior must be enabled by
// including a regular expression string (such as ".*" or
// "Prefix.*" or ".*Suffix") in the possibleTypes array for
// specific supertypes; otherwise, all matching remains exact.
// Fuzzy matches are remembered by the Policies object and
// later used when reading from the cache. Since there is no
// incoming result object to check when reading, reading does
// not involve the same fuzzy inference, so the StoreReader
// class calls policies.fragmentMatches without passing result
// or context.variables. The flexibility of fuzzy matching
// allows existing clients to accommodate previously unknown
// __typename strings produced by server/schema changes, which
// would otherwise be breaking changes.
policies.fragmentMatches(fragment, typename, result, context.variables)) {
fragment.selectionSet.selections.forEach(workSet.add, workSet);
}
}
});
if ("string" === typeof dataId) {
const entityRef = makeReference(dataId);
if (mergeTree.map.size) {
incomingFields = this.applyMerges(mergeTree, entityRef, incomingFields, context);
}
if (process.env.NODE_ENV !== "production") {
const hasSelectionSet = (storeFieldName: string) =>
fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
const fieldsWithSelectionSets = new Set<string>();
workSet.forEach(selection => {
if (isField(selection) && selection.selectionSet) {
fieldsWithSelectionSets.add(selection.name.value);
}
});
const hasMergeFunction = (storeFieldName: string) => {
const childTree = mergeTree.map.get(storeFieldName);
return Boolean(childTree && childTree.info && childTree.info.merge);
};
Object.keys(incomingFields).forEach(storeFieldName => {
// If a merge function was defined for this field, trust that it
// did the right thing about (not) clobbering data. If the field
// has no selection set, it's a scalar field, so it doesn't need
// a merge function (even if it's an object, like JSON data).
if (hasSelectionSet(storeFieldName) &&
!hasMergeFunction(storeFieldName)) {
warnAboutDataLoss(
entityRef,
incomingFields,
storeFieldName,
context.store,
);
}
});
}
context.store.merge(dataId, incomingFields);
return entityRef;
}
return incomingFields;
}
/**
 * Converts one raw result field value into a StoreValue.
 *
 * Scalars (no selection set, or null) are stored as-is in production and
 * deep-cloned in development so they can be safely frozen later. Arrays are
 * processed element-by-element with per-index child merge trees, and object
 * values are recursively normalized via processSelectionSet.
 *
 * Fix: the array branch declared an inner `const value` that shadowed the
 * `value` parameter; renamed to `itemValue` for clarity (behavior unchanged).
 */
private processFieldValue(
  value: any,
  field: FieldNode,
  context: WriteContext,
  mergeTree: MergeTree,
): StoreValue {
  if (!field.selectionSet || value === null) {
    // In development, we need to clone scalar values so that they can be
    // safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
    // it's cheaper to store the scalar values directly in the cache.
    return process.env.NODE_ENV === 'production' ? value : cloneDeep(value);
  }
  if (Array.isArray(value)) {
    return value.map((item, i) => {
      const itemValue = this.processFieldValue(
        item, field, context, getChildMergeTree(mergeTree, i));
      // Return the child tree to the pool if this element left it empty.
      maybeRecycleChildMergeTree(mergeTree, i);
      return itemValue;
    });
  }
  return this.processSelectionSet({
    result: value,
    selectionSet: field.selectionSet,
    context,
    mergeTree,
  });
}
/**
 * Recursively applies any field merge functions recorded in mergeTree to
 * the incoming data. Returns the original `incoming` value when no merge
 * changed anything, or a shallow clone with the changed fields otherwise.
 * The getStorageArgs array is treated as a mutable path stack (push before
 * recursing, pop after), so statement order here is significant.
 */
private applyMerges<T extends StoreValue>(
  mergeTree: MergeTree,
  existing: StoreValue,
  incoming: T,
  context: ReadMergeModifyContext,
  getStorageArgs?: Parameters<EntityStore["getStorage"]>,
): T {
  if (mergeTree.map.size && !isReference(incoming)) {
    const e: StoreObject | Reference | undefined = (
      // Items in the same position in different arrays are not
      // necessarily related to each other, so when incoming is an array
      // we process its elements as if there was no existing data.
      !Array.isArray(incoming) &&
      // Likewise, existing must be either a Reference or a StoreObject
      // in order for its fields to be safe to merge with the fields of
      // the incoming object.
      (isReference(existing) || storeValueIsStoreObject(existing))
    ) ? existing : void 0;
    // This narrowing is implied by mergeTree.map.size > 0 and
    // !isReference(incoming), though TypeScript understandably cannot
    // hope to infer this type.
    const i = incoming as StoreObject | StoreValue[];
    // The options.storage objects provided to read and merge functions
    // are derived from the identity of the parent object plus a
    // sequence of storeFieldName strings/numbers identifying the nested
    // field name path of each field value to be merged.
    if (e && !getStorageArgs) {
      getStorageArgs = [isReference(e) ? e.__ref : e];
    }
    // It's possible that applying merge functions to this subtree will
    // not change the incoming data, so this variable tracks the fields
    // that did change, so we can create a new incoming object when (and
    // only when) at least one incoming field has changed. We use a Map
    // to preserve the type of numeric keys.
    let changedFields: Map<string | number, StoreValue> | undefined;
    const getValue = (
      from: typeof e | typeof i,
      name: string | number,
    ): StoreValue => {
      // Arrays are indexed only by number; any other key yields undefined.
      return Array.isArray(from)
        ? (typeof name === "number" ? from[name] : void 0)
        : context.store.getFieldValue(from, String(name))
    };
    mergeTree.map.forEach((childTree, storeFieldName) => {
      // Extend the storage path for this child before recursing.
      if (getStorageArgs) {
        getStorageArgs.push(storeFieldName);
      }
      const eVal = getValue(e, storeFieldName);
      const iVal = getValue(i, storeFieldName);
      const aVal = this.applyMerges(
        childTree,
        eVal,
        iVal,
        context,
        getStorageArgs,
      );
      if (aVal !== iVal) {
        changedFields = changedFields || new Map;
        changedFields.set(storeFieldName, aVal);
      }
      // Restore the storage path to its pre-iteration state.
      if (getStorageArgs) {
        invariant(getStorageArgs.pop() === storeFieldName);
      }
    });
    if (changedFields) {
      // Shallow clone i so we can add changed fields to it.
      incoming = (Array.isArray(i) ? i.slice(0) : { ...i }) as T;
      changedFields.forEach((value, name) => {
        (incoming as any)[name] = value;
      });
    }
  }
  // A merge function registered at this exact tree position decides the
  // final value for this subtree.
  if (mergeTree.info) {
    return this.cache.policies.runMergeFunction(
      existing,
      incoming,
      mergeTree.info,
      context,
      getStorageArgs && context.store.getStorage(...getStorageArgs),
    );
  }
  return incoming;
}
}
const emptyMergeTreePool: MergeTree[] = [];
/**
 * Returns the child MergeTree stored under `name`, creating one on demand.
 * New children are taken from emptyMergeTreePool when available so trees
 * recycled by maybeRecycleChildMergeTree get reused.
 */
function getChildMergeTree(
  { map }: MergeTree,
  name: string | number,
): MergeTree {
  let child = map.get(name);
  if (!child) {
    child = emptyMergeTreePool.pop() || { map: new Map };
    map.set(name, child);
  }
  return child;
}
/**
 * If the child MergeTree stored under `name` carries no merge info and has
 * no children of its own, detach it from the parent map and return it to
 * emptyMergeTreePool for reuse by getChildMergeTree.
 */
function maybeRecycleChildMergeTree(
  { map }: MergeTree,
  name: string | number,
) {
  const child = map.get(name);
  if (child && !child.info && !child.map.size) {
    emptyMergeTreePool.push(child);
    map.delete(name);
  }
}
const warnings = new Set<string>();
// Note that this function is unused in production, and thus should be
// pruned by any well-configured minifier.
function | warnAboutDataLoss | identifier_name | |
service.go | -height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Forgot your password? No problem! <br/> <br/> To reset your password, click the following link: <br/> <a href="https://www.example.com/auth/password-reset/%recipient.token%">Reset Password</a> <br/> <br/> If you did not request to have your password reset you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
return s
}
// NewUserLocal creates a new local (email/password) user, persists it, and
// sends a welcome email. It returns ErrAlreadyExists when a user with the
// given email is already registered. Email delivery problems are logged but
// do not fail user creation, since the account has already been saved.
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
	// Reject duplicate registrations up front.
	eUser := User{}
	err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
	if err == nil {
		return User{}, ErrAlreadyExists
	} else if err != sql.ErrNoRows {
		return User{}, err
	}
	// get current time
	t := time.Now()
	// Hash the password. Fix: the error was previously discarded, which
	// could have persisted an empty/garbage hash if bcrypt failed.
	hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
	if err != nil {
		return User{}, err
	}
	hashedB64 := base64.StdEncoding.EncodeToString(hashed)
	// TODO:
	// Have users activate their account via an email
	u := User{
		Email:       email,
		Password:    hashedB64,
		FirstName:   firstName,
		LastName:    lastName,
		IsSuperuser: isSuperuser,
		IsActive:    true,
		IsDeleted:   false,
		CreatedAt:   t,
		UpdatedAt:   t,
		DeletedAt:   time.Time{},
		AvatarURL:   "",
		newPassword: true,
		rawPassword: password,
	}
	// Save user to DB
	if err = s.saveUser(&u); err != nil {
		return User{}, err
	}
	// Build the welcome email; failures from here on are non-fatal.
	msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
	b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
	if err != nil {
		glog.Errorf("Error creating HTML Email. Got error: %v", err)
		return u, nil
	}
	msg.SetHtml(string(b))
	// Add custom information via AddRecipientAndVariables
	err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
		"firstname": u.FirstName,
		"lastname":  u.LastName,
	})
	if err != nil {
		glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
		return u, nil
	}
	// Send Message
	if _, _, err = s.mg.Send(msg); err != nil {
		glog.Errorf("Error sending email. Got error: %v", err)
		return u, nil
	}
	return u, nil
}
// NewUserProvider creates a user account from an external OAuth provider
// profile. NOTE(review): not yet implemented — always returns ErrTodo.
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
	// TODO:
	// Implement Feature
	return User{}, ErrTodo
}
// UserAddProvider links an external OAuth provider profile to an existing
// user account. NOTE(review): not yet implemented — always returns ErrTodo.
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
	// TODO:
	// Implement Feature
	return User{}, ErrTodo
}
// GetUser looks up a user account by its UUID. It returns ErrInvalidID for
// the nil UUID and ErrUserNotFound when no matching row exists.
func (s *authService) GetUser(id uuid.UUID) (User, error) {
	if id == uuid.Nil {
		return User{}, ErrInvalidID
	}
	u := User{}
	switch err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id); err {
	case nil:
		return u, nil
	case sql.ErrNoRows:
		return User{}, ErrUserNotFound
	default:
		return User{}, err
	}
}
// UpdateUser persists changes to an existing user. The stored record is
// located by email; ErrUserNotFound is returned when no such user exists,
// and ErrInconsistentIDs when the stored record's ID differs from u.ID.
func (s *authService) UpdateUser(u User) (User, error) {
	stored := User{}
	err := s.db.Get(&stored, "SELECT * FROM user WHERE email=$1", u.Email)
	switch {
	case err == sql.ErrNoRows:
		return User{}, ErrUserNotFound
	case err != nil:
		return User{}, err
	}
	if !uuid.Equal(stored.ID, u.ID) {
		return User{}, ErrInconsistentIDs
	}
	if err := s.saveUser(&u); err != nil {
		return User{}, err
	}
	return u, nil
}
// DeleteUser soft-deletes a user: the record is flagged as deleted and
// timestamped rather than removed from the database.
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
	u, err := s.GetUser(id)
	if err != nil {
		return User{}, err
	}
	u.IsDeleted = true
	u.DeletedAt = time.Now()
	if err := s.saveUser(&u); err != nil {
		return User{}, err
	}
	return u, nil
}
// AuthenticateUser verifies an email/password pair against the stored
// bcrypt hash and returns the matching user. ErrInvalidPassword is returned
// for a blank password and ErrIncorrectAuth when the password does not match.
func (s *authService) AuthenticateUser(email, password string) (User, error) {
	addr, err := mail.ParseAddress(email)
	if err != nil {
		return User{}, err
	}
	if strings.TrimSpace(password) == "" {
		return User{}, ErrInvalidPassword
	}
	u, err := s.getUserByEmail(addr.Address)
	if err != nil {
		return User{}, err
	}
	// The stored password is a base64-encoded bcrypt hash.
	hashed, err := base64.StdEncoding.DecodeString(u.Password)
	if err != nil {
		return User{}, err
	}
	if err := helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password)); err != nil {
		return User{}, ErrIncorrectAuth
	}
	return u, nil
}
// BeginPasswordReset starts the password-reset flow: it creates a reset
// nonce valid for three hours and emails the reset link (containing the
// nonce token) to the user.
func (s *authService) BeginPasswordReset(email string) error {
	addr, err := mail.ParseAddress(email)
	if err != nil {
		return err
	}
	u, err := s.getUserByEmail(addr.Address)
	if err != nil {
		return err
	}
	// The nonce token is embedded in the emailed reset link.
	n, err := s.nonce.New("auth.PasswordReset", u.ID, 3*time.Hour)
	if err != nil {
		return err
	}
	msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
	b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
	if err != nil {
		return err
	}
	msg.SetHtml(string(b))
	if err := msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
		"token":     n.Token,
		"firstname": u.FirstName,
		"lastname":  u.LastName,
	}); err != nil {
		return err
	}
	if _, _, err := s.mg.Send(msg); err != nil {
		return err
	}
	return nil
}
// CompletePasswordReset finishes the password-reset flow: it validates the
// email, consumes the one-time reset token, stores the newly hashed
// password, and sends a confirmation email. Email delivery problems are
// logged but do not fail the reset, since the password is already saved.
func (s *authService) CompletePasswordReset(token, email, password string) (User, error) {
	// Check email
	e, err := mail.ParseAddress(email)
	if err != nil {
		return User{}, err
	}
	// Get User
	u, err := s.getUserByEmail(e.Address)
	if err != nil {
		return User{}, err
	}
	// Check and Use Token
	_, err = s.nonce.CheckThenConsume(token, "auth.PasswordReset", u.ID)
	if err != nil {
		return User{}, err
	}
	// Hash the new password. Fix: the error was previously discarded,
	// which could have persisted an empty/garbage hash if bcrypt failed.
	hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
	if err != nil {
		return User{}, err
	}
	u.Password = base64.StdEncoding.EncodeToString(hashed)
	u.newPassword = true
	u.rawPassword = password
	if err = s.saveUser(&u); err != nil {
		return User{}, err
	}
	// Build the confirmation email; failures from here on are non-fatal.
	msg := s.mg.NewMessage(PasswordResetConfirmEmail.From, PasswordResetConfirmEmail.Subject, PasswordResetConfirmEmail.PlainText, u.Email)
	b, err := s.tpl.ExecuteTemplate(PasswordResetConfirmEmail.TplName, u)
	if err != nil {
		glog.Errorf("Error creating HTML Email. Got error: %v", err)
		return u, nil
	}
	msg.SetHtml(string(b))
	// Add custom information via AddRecipientAndVariables
	err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
		"firstname": u.FirstName,
		"lastname":  u.LastName,
	})
	if err != nil {
		glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
		return u, nil
	}
	// Send Message
	if _, _, err = s.mg.Send(msg); err != nil {
		glog.Errorf("Error sending email. Got error: %v", err)
		return u, nil
	}
	return u, nil
}
// getUserByEmail gets a user from the database by email address
func (s *authService) | getUserByEmail | identifier_name | |
service.go | AddProvider(id uuid.UUID, user goth.User) (User, error)
// GetUser gets a user account by their ID
GetUser(id uuid.UUID) (User, error)
// UpdateUser update the user's details
UpdateUser(u User) (User, error)
// DeleteUser flag a user as deleted
DeleteUser(id uuid.UUID) (User, error)
// AuthenticateUser logs in a Local User with an email and password
AuthenticateUser(email, password string) (User, error)
// Start the Password Reset process
BeginPasswordReset(email string) error
// Complete the Password Reset process
CompletePasswordReset(token, email, password string) (User, error)
}
// authService satisfies the auth.Service interface
type authService struct {
	db    *sqlx.DB        // application database handle used for user rows
	mg    mailgun.Mailgun // outbound transactional email client
	nonce nonce.Service   // one-time tokens (used for password resets)
	tpl   *tmpl.TplSys    // registry of HTML email templates
}
// NewService creates an Auth Service that connects to provided DB information.
// It also registers the base HTML email layout and the three transactional
// email bodies (welcome, password-reset link, password-reset confirmation).
// template.Must panics if a template fails to parse, so malformed templates
// are caught at construction time rather than at send time.
func NewService(db *sqlx.DB, mg mailgun.Mailgun, nonce nonce.Service, tpl *tmpl.TplSys) Service {
	s := &authService{
		db:    db,
		mg:    mg,
		nonce: nonce,
		tpl:   tpl,
	}
	// TODO
	// Move hardcoded Template Strings to templates.go
	template.Must(s.tpl.AddTemplate("auth.baseHTMLEmailTemplate", "", baseHTMLEmailTemplate))
	template.Must(s.tpl.AddTemplate("auth.NewUserEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Welcome New User{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Welcome to our service. Thank you for signing up.<br/> <br/> </p>{{end}}`))
	template.Must(s.tpl.AddTemplate("auth.PasswordResetEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Forgot your password? No problem! <br/> <br/> To reset your password, click the following link: <br/> <a href="https://www.example.com/auth/password-reset/%recipient.token%">Reset Password</a> <br/> <br/> If you did not request to have your password reset you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
	template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
	return s
}
// NewUserLocal creates a new local (email/password) user, persists it, and
// sends a welcome email. It returns ErrAlreadyExists when a user with the
// given email is already registered. Email delivery problems are logged but
// do not fail user creation, since the account has already been saved.
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
	// Reject duplicate registrations up front.
	eUser := User{}
	err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
	if err == nil {
		return User{}, ErrAlreadyExists
	} else if err != sql.ErrNoRows {
		return User{}, err
	}
	// get current time
	t := time.Now()
	// Hash the password. Fix: the error was previously discarded, which
	// could have persisted an empty/garbage hash if bcrypt failed.
	hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
	if err != nil {
		return User{}, err
	}
	hashedB64 := base64.StdEncoding.EncodeToString(hashed)
	// TODO:
	// Have users activate their account via an email
	u := User{
		Email:       email,
		Password:    hashedB64,
		FirstName:   firstName,
		LastName:    lastName,
		IsSuperuser: isSuperuser,
		IsActive:    true,
		IsDeleted:   false,
		CreatedAt:   t,
		UpdatedAt:   t,
		DeletedAt:   time.Time{},
		AvatarURL:   "",
		newPassword: true,
		rawPassword: password,
	}
	// Save user to DB
	if err = s.saveUser(&u); err != nil {
		return User{}, err
	}
	// Build the welcome email; failures from here on are non-fatal.
	msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
	b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
	if err != nil {
		glog.Errorf("Error creating HTML Email. Got error: %v", err)
		return u, nil
	}
	msg.SetHtml(string(b))
	// Add custom information via AddRecipientAndVariables
	err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
		"firstname": u.FirstName,
		"lastname":  u.LastName,
	})
	if err != nil {
		glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
		return u, nil
	}
	// Send Message
	if _, _, err = s.mg.Send(msg); err != nil {
		glog.Errorf("Error sending email. Got error: %v", err)
		return u, nil
	}
	return u, nil
}
// NewUserProvider creates a user account from an external OAuth provider
// profile. NOTE(review): not yet implemented — always returns ErrTodo.
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
	// TODO:
	// Implement Feature
	return User{}, ErrTodo
}
// UserAddProvider links an external OAuth provider profile to an existing
// user account. NOTE(review): not yet implemented — always returns ErrTodo.
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
	// TODO:
	// Implement Feature
	return User{}, ErrTodo
}
// GetUser looks up a user account by its UUID. It returns ErrInvalidID for
// the nil UUID and ErrUserNotFound when no matching row exists.
func (s *authService) GetUser(id uuid.UUID) (User, error) {
	if id == uuid.Nil {
		return User{}, ErrInvalidID
	}
	u := User{}
	switch err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id); err {
	case nil:
		return u, nil
	case sql.ErrNoRows:
		return User{}, ErrUserNotFound
	default:
		return User{}, err
	}
}
// UpdateUser persists changes to an existing user. The stored record is
// located by email; ErrUserNotFound is returned when no such user exists,
// and ErrInconsistentIDs when the stored record's ID differs from u.ID.
func (s *authService) UpdateUser(u User) (User, error) {
	stored := User{}
	err := s.db.Get(&stored, "SELECT * FROM user WHERE email=$1", u.Email)
	switch {
	case err == sql.ErrNoRows:
		return User{}, ErrUserNotFound
	case err != nil:
		return User{}, err
	}
	if !uuid.Equal(stored.ID, u.ID) {
		return User{}, ErrInconsistentIDs
	}
	if err := s.saveUser(&u); err != nil {
		return User{}, err
	}
	return u, nil
}
// DeleteUser soft-deletes a user: the record is flagged as deleted and
// timestamped rather than removed from the database.
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
	u, err := s.GetUser(id)
	if err != nil {
		return User{}, err
	}
	u.IsDeleted = true
	u.DeletedAt = time.Now()
	if err := s.saveUser(&u); err != nil {
		return User{}, err
	}
	return u, nil
}
func (s *authService) AuthenticateUser(email, password string) (User, error) {
// Check Email
e, err := mail.ParseAddress(email)
if err != nil |
// Check Password
p := strings.TrimSpace(password)
if len(p) == 0 {
return User{}, ErrInvalidPassword
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// check password
hashed, err := base64.StdEncoding.DecodeString(u.Password)
if err != nil {
return User{}, err
}
err = helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password))
if err != nil {
return User{}, ErrIncorrectAuth
}
return u, nil
}
func (s *authService) BeginPasswordReset(email string) error {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return err
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return err
}
// create nonce for reset token
n, err := s.nonce.New("auth.PasswordReset", u.ID, time.Hour*3)
if err != nil {
return err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
if err != nil {
return err
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"token": n.Token,
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != | {
return User{}, err
} | conditional_block |
service.go | you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
return s
}
// NewUserLocal creates a new local (email/password) user, persists it, and
// sends a welcome email. It returns ErrAlreadyExists when a user with the
// given email is already registered. Email delivery problems are logged but
// do not fail user creation, since the account has already been saved.
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
	// Reject duplicate registrations up front.
	eUser := User{}
	err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
	if err == nil {
		return User{}, ErrAlreadyExists
	} else if err != sql.ErrNoRows {
		return User{}, err
	}
	// get current time
	t := time.Now()
	// Hash the password. Fix: the error was previously discarded, which
	// could have persisted an empty/garbage hash if bcrypt failed.
	hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
	if err != nil {
		return User{}, err
	}
	hashedB64 := base64.StdEncoding.EncodeToString(hashed)
	// TODO:
	// Have users activate their account via an email
	u := User{
		Email:       email,
		Password:    hashedB64,
		FirstName:   firstName,
		LastName:    lastName,
		IsSuperuser: isSuperuser,
		IsActive:    true,
		IsDeleted:   false,
		CreatedAt:   t,
		UpdatedAt:   t,
		DeletedAt:   time.Time{},
		AvatarURL:   "",
		newPassword: true,
		rawPassword: password,
	}
	// Save user to DB
	if err = s.saveUser(&u); err != nil {
		return User{}, err
	}
	// Build the welcome email; failures from here on are non-fatal.
	msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
	b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
	if err != nil {
		glog.Errorf("Error creating HTML Email. Got error: %v", err)
		return u, nil
	}
	msg.SetHtml(string(b))
	// Add custom information via AddRecipientAndVariables
	err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
		"firstname": u.FirstName,
		"lastname":  u.LastName,
	})
	if err != nil {
		glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
		return u, nil
	}
	// Send Message
	if _, _, err = s.mg.Send(msg); err != nil {
		glog.Errorf("Error sending email. Got error: %v", err)
		return u, nil
	}
	return u, nil
}
// NewUserProvider creates a user account from an external OAuth provider
// profile. NOTE(review): not yet implemented — always returns ErrTodo.
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
	// TODO:
	// Implement Feature
	return User{}, ErrTodo
}
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) GetUser(id uuid.UUID) (User, error) {
if id == uuid.Nil {
return User{}, ErrInvalidID
}
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
}
return u, nil
}
func (s *authService) UpdateUser(u User) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", u.Email)
if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
} else if err != nil {
return User{}, err
}
if !uuid.Equal(eUser.ID, u.ID) {
return User{}, ErrInconsistentIDs
}
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
u, err := s.GetUser(id)
if err != nil {
return User{}, err
}
u.IsDeleted = true
u.DeletedAt = time.Now()
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) AuthenticateUser(email, password string) (User, error) {
// Check Email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Check Password
p := strings.TrimSpace(password)
if len(p) == 0 {
return User{}, ErrInvalidPassword
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// check password
hashed, err := base64.StdEncoding.DecodeString(u.Password)
if err != nil {
return User{}, err
}
err = helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password))
if err != nil {
return User{}, ErrIncorrectAuth
}
return u, nil
}
func (s *authService) BeginPasswordReset(email string) error {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return err
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return err
}
// create nonce for reset token
n, err := s.nonce.New("auth.PasswordReset", u.ID, time.Hour*3)
if err != nil {
return err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
if err != nil {
return err
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"token": n.Token,
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
return err
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
return err
}
return nil
}
func (s *authService) CompletePasswordReset(token, email, password string) (User, error) {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Get User
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// Check and Use Token
_, err = s.nonce.CheckThenConsume(token, "auth.PasswordReset", u.ID)
if err != nil {
return User{}, err
}
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
u.Password = hashedB64
u.newPassword = true
u.rawPassword = password
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetConfirmEmail.From, PasswordResetConfirmEmail.Subject, PasswordResetConfirmEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetConfirmEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
// getUserByEmail gets a user from the database by email address
func (s *authService) getUserByEmail(email string) (User, error) {
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE email=$1", email)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrIncorrectAuth
}
return u, nil
}
// saveUser saves a new user to the database or updates an existing user
func (s *authService) saveUser(u *User) error { | random_line_split | ||
service.go | AddProvider(id uuid.UUID, user goth.User) (User, error)
// GetUser gets a user account by their ID
GetUser(id uuid.UUID) (User, error)
// UpdateUser update the user's details
UpdateUser(u User) (User, error)
// DeleteUser flag a user as deleted
DeleteUser(id uuid.UUID) (User, error)
// AuthenticateUser logs in a Local User with an email and password
AuthenticateUser(email, password string) (User, error)
// Start the Password Reset process
BeginPasswordReset(email string) error
// Complete the Password Reset process
CompletePasswordReset(token, email, password string) (User, error)
}
// authService satisfies the auth.Service interface
type authService struct {
db *sqlx.DB
mg mailgun.Mailgun
nonce nonce.Service
tpl *tmpl.TplSys
}
// NewService creates an Auth Service that connects to provided DB information
func NewService(db *sqlx.DB, mg mailgun.Mailgun, nonce nonce.Service, tpl *tmpl.TplSys) Service {
s := &authService{
db: db,
mg: mg,
nonce: nonce,
tpl: tpl,
}
// TODO
// Move hardcoded Template Strings to templates.go
template.Must(s.tpl.AddTemplate("auth.baseHTMLEmailTemplate", "", baseHTMLEmailTemplate))
template.Must(s.tpl.AddTemplate("auth.NewUserEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Welcome New User{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Welcome to our service. Thank you for signing up.<br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Forgot your password? No problem! <br/> <br/> To reset your password, click the following link: <br/> <a href="https://www.example.com/auth/password-reset/%recipient.token%">Reset Password</a> <br/> <br/> If you did not request to have your password reset you can safely ignore this email. Rest assured your customer account is safe. <br/> <br/> </p>{{end}}`))
template.Must(s.tpl.AddTemplate("auth.PasswordResetConfirmEmail", "auth.baseHTMLEmailTemplate", `{{define "title"}}Password Reset Complete{{end}}{{define "content"}}<p style="margin:0;padding:1em 0 0 0;line-height:1.5em;font-family:Helvetica Neue, Helvetica, Arial, sans-serif;font-size:14px;color:#000;"> Hello %recipient.firstname% %recipient.lastname%, <br/> <br/> Your account's password was recently changed. <br/> <br/> </p>{{end}}`))
return s
}
func (s *authService) NewUserLocal(email, password, firstName, lastName string, isSuperuser bool) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", email)
if err == nil {
return User{}, ErrAlreadyExists
} else if err != sql.ErrNoRows {
return User{}, err
}
// get current time
t := time.Now()
// hash password
hashed, err := helpers.Crypto.BCryptPasswordHasher([]byte(password))
hashedB64 := base64.StdEncoding.EncodeToString(hashed)
// TODO:
// Have users activate their account via an email
u := User{
Email: email,
Password: hashedB64,
FirstName: firstName,
LastName: lastName,
IsSuperuser: isSuperuser,
IsActive: true,
IsDeleted: false,
CreatedAt: t,
UpdatedAt: t,
DeletedAt: time.Time{},
AvatarURL: "",
newPassword: true,
rawPassword: password,
}
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
// Create Email Message
msg := s.mg.NewMessage(NewUserEmail.From, NewUserEmail.Subject, NewUserEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(NewUserEmail.TplName, u)
if err != nil {
glog.Errorf("Error creating HTML Email. Got error: %v", err)
return u, nil
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != nil {
glog.Errorf("Error with AddRecipientAndVariables. Got error: %v", err)
return u, nil
}
// Send Message
_, _, err = s.mg.Send(msg)
if err != nil {
glog.Errorf("Error sending email. Got error: %v", err)
return u, nil
}
return u, nil
}
func (s *authService) NewUserProvider(u goth.User, isSuperuser bool) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) UserAddProvider(id uuid.UUID, u goth.User) (User, error) {
// TODO:
// Implement Feature
return User{}, ErrTodo
}
func (s *authService) GetUser(id uuid.UUID) (User, error) |
func (s *authService) UpdateUser(u User) (User, error) {
eUser := User{}
err := s.db.Get(&eUser, "SELECT * FROM user WHERE email=$1", u.Email)
if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
} else if err != nil {
return User{}, err
}
if !uuid.Equal(eUser.ID, u.ID) {
return User{}, ErrInconsistentIDs
}
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) DeleteUser(id uuid.UUID) (User, error) {
u, err := s.GetUser(id)
if err != nil {
return User{}, err
}
u.IsDeleted = true
u.DeletedAt = time.Now()
// Save user to DB
err = s.saveUser(&u)
if err != nil {
return User{}, err
}
return u, nil
}
func (s *authService) AuthenticateUser(email, password string) (User, error) {
// Check Email
e, err := mail.ParseAddress(email)
if err != nil {
return User{}, err
}
// Check Password
p := strings.TrimSpace(password)
if len(p) == 0 {
return User{}, ErrInvalidPassword
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return User{}, err
}
// check password
hashed, err := base64.StdEncoding.DecodeString(u.Password)
if err != nil {
return User{}, err
}
err = helpers.Crypto.BCryptCompareHashPassword(hashed, []byte(password))
if err != nil {
return User{}, ErrIncorrectAuth
}
return u, nil
}
func (s *authService) BeginPasswordReset(email string) error {
// Check email
e, err := mail.ParseAddress(email)
if err != nil {
return err
}
// Get user from database
u, err := s.getUserByEmail(e.Address)
if err != nil {
return err
}
// create nonce for reset token
n, err := s.nonce.New("auth.PasswordReset", u.ID, time.Hour*3)
if err != nil {
return err
}
// Create Email Message
msg := s.mg.NewMessage(PasswordResetEmail.From, PasswordResetEmail.Subject, PasswordResetEmail.PlainText, u.Email)
b, err := s.tpl.ExecuteTemplate(PasswordResetEmail.TplName, u)
if err != nil {
return err
}
msg.SetHtml(string(b))
// Add custom information via AddRecipientAndVariables
err = msg.AddRecipientAndVariables(u.Email, map[string]interface{}{
"token": n.Token,
"firstname": u.FirstName,
"lastname": u.LastName,
})
if err != | {
if id == uuid.Nil {
return User{}, ErrInvalidID
}
u := User{}
err := s.db.Get(&u, "SELECT * FROM user WHERE id=$1", id)
if err != nil && err != sql.ErrNoRows {
return User{}, err
} else if err == sql.ErrNoRows {
return User{}, ErrUserNotFound
}
return u, nil
} | identifier_body |
pkg_util.go | 无法更新")
return e;
}
for i := range tempArr {
var tomcatInfo = tempArr[i];
if !tomcatInfo.Update {
continue;
}
tomcatInfo.ConfigFileBackupDir = tomcatInfo.ProcessHome+ "pkg_cfg\\"
_, stat_err := os.Stat(tomcatInfo.ProcessHome+ "pkg_cfg\\");
if stat_err != nil && os.IsNotExist(stat_err) {
var mkdir_err = os.MkdirAll(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if mkdir_err != nil {
return stat_err;
}
} else if stat_err != nil {
return stat_err;
}
tomcatWebappDirFile, err := os.Open(tomcatInfo.PackageDir);
var tomcatWebappPath = []*os.File{tomcatWebappDirFile };
if err != nil {
fmt.Println(tomcatInfo.ProcessName + "备份失败")
fmt.Println(err)
os.Exit(1);
}
var tomcatBckupPath = tomcatInfo.PackageBackPath;
_, stateErr := os.Stat(tomcatBckupPath)
if stateErr != nil {
direrr := os.Mkdir("" + tomcatBckupPath, 0777);
//direrr := os.MkdirAll("D:\\Program Files\\Apache Software Foundation\\apache-tomcat-8.0.39\\backup", 0777)
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录失败");
//os.Exit(1);
return direrr;
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录成功");
}
}
// 备份tomcat目录下当前项目
// time.Now().Format("200601021504")
var now = time.Now();
var backupFileName = now.Format("200601021504") + "更新前备份";
ziperr := zip_util.Zip(tomcatWebappPath, tomcatBckupPath + backupFileName + ".zip");
if ziperr == nil {
tomcatInfo.PackageBackFileName = backupFileName + ".zip";
fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件成功:" + tomcatInfo.PackageBackFileName);
}
// 备份tomcat目录下项目配置文件,如config.properties、log4j.xml、openoffice.properties
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
// 备份目录不存在则创建备份目录
_, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\");
if stateErr != nil {
direrr := os.Mkdir(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败");
os.Exit(1);
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功");
}
}
written, copyErr := copyFile(tomcatInfo.ProcessHome+ "pkg_cfg\\" + cfgFileName, cfgFilePath);
if copyErr != nil {
fmt.Println(copyErr)
fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败");
os.Exit(1);
}
fmt.Println("复制" + tomcatInfo.ProcessName+ "配置文件成功,文件:" + cfgFileName + ",大小:", written, "byte");
}
}
//fmt.Println(tomcatArr);
return nil;
}
/**
获取tomcat信息
*/
func GetTomcatArray(
tomcatPrefix string,
tomcatSuffix string) [] TomcatInfo {
//out, err := exec.Command("cmd", "/C", "tasklist ").Output()
out, err := exec.Command("cmd", "/C", "tasklist").Output()
if err != nil {
log.Fatal(err)
}
//fmt.Printf(string(out))
var processStrList[] string = strings.Split(string(out), "\r\n");
var tomcatArr []TomcatInfo;
for i := range processStrList {
if(strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix)) {
//fmt.Println(i)
//fmt.Println(processStrList[i])
var processName = strings.Split(processStrList[i], " ")[0];
if ! strings.HasSuffix(processName, tomcatSuffix) {
out2, err2 := exec.Command("cmd", "/C", "wmic process where name='" + processName + "' get ExecutablePath").Output()
if err2 == nil {
// TODO
var fileDirectoryArr[] string = strings.Split(strings.Split(string(out2), "\r\n", )[1], "\\");
if(len(fileDirectoryArr) < 2) {
continue;
}
var parentDirectoryArr = fileDirectoryArr[0: len(fileDirectoryArr) - 2];
var tomcatInfo TomcatInfo;
tomcatInfo.ProcessName = processName;
tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\";
tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName;
tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\";
tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\";
tomcatArr = append(tomcatArr, tomcatInfo);
} else {
fmt.Println(err2)
}
//fmt.Println("------------------------------------------------------")
}
}
}
return tomcatArr;
//fmt.Println(TOMCAT_PROCESS_MAP)
}
/**
确认tomcat
*/
func ConfirmTomcat(tomcatArr []* TomcatInfo) {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
for i := range tempArr {
var tomcatInfo = tempArr[i];
if tomcatInfo == nil {
continue;
}
// 当前tomcat是否需要更新
for true {
var update string
fmt.Print("是否需要更新 " + tomcatInfo.ProcessName + "(0:否;1:是): ");
fmt.Scanln(&update);
if(update == "1" || update == "0") {
tomcatInfo.Update = (update == "1");
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
// 当前tomcat需要的更新包
for tomcatInfo.Update {
var pkg string
fmt.Print(tomcatInfo.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ");
fmt.Scanln(&pkg);
if(pkg == "0") {
tomcatInfo.PackageFileName = "tyb";
break;
} else if(pkg == "1") {
tomcatInfo.PackageFileName = "agent_1114";
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
}
//fmt.Println("confirm complete.")
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
func copyDir(src string, dest string) error {
src_original := src;
err := filepath.Walk(src, func(src string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if f.IsDir() {
//fmt.Println(f.Name())
//copyDir(f.Name(), dest+"/"+f.Name())
if(src != src_original) {
var temp_str = strings.Replace(src, src_original, dest, 1);
os.MkdirAll(temp_str, 0777);
}
} else {
//fmt.Println(src);
//fmt.Println(src_original);
//fmt.Println(dest);
//fmt.Println("--------------------------------------------------------------------------------")
dest_new := strings.Replace(src, src_original, dest, -1);
//fmt.Println(dest_new);
//fmt.Println("拷贝文件:" + src + " > " + dest_new);
os.Create(dest_new);
copyFile(dest_new, src);
}
//println(path)
return nil
}) |
if err != nil {
fmt.Printf("filepath.Walk() returned %v\n", err);
return err; | random_line_split | |
pkg_util.go | 6\webapps\agent
PackageFileName string // 更新包名称 agent_1114
PackageBackFileName string // 备份包名称 201712081536更新前备份.zip
ConfigFileBackupDir string // 配置文件临时目录d:\xx\tomcat6\temp_config
NewPackageDir string // 新包地址
Update bool // 是否需要更新
Complete bool // 更新完成
}
/**
备份tomcat目录下项目
*/
func BackupCurrentPackage(tomcatArr []*TomcatInfo) error {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
if(len(tempArr) == 0) {
fmt.Println("当前系统未运行tomcat实例,无法更新");
//os.Exit(1);
var e = errors.New("当前系统未运行tomcat实例,无法更新")
return e;
}
for i := range tempArr {
var tomcatInfo = tempArr[i];
if !tomcatInfo.Update {
continue;
}
tomcatInfo.ConfigFileBackupDir = tomcatInfo.ProcessHome+ "pkg_cfg\\"
_, stat_err := os.Stat(tomcatInfo.ProcessHome+ "pkg_cfg\\");
if stat_err != nil && os.IsNotExist(stat_err) {
var mkdir_err = os.MkdirAll(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if mkdir_err != nil {
return stat_err;
}
} else if stat_err != nil {
return stat_err;
}
tomcatWebappDirFile, err := os.Open(tomcatInfo.PackageDir);
var tomcatWebappPath = []*os.File{tomcatWebappDirFile };
if err != nil {
fmt.Println(tomcatInfo.ProcessName + "备份失败")
fmt.Println(err)
os.Exit(1);
}
var tomcatBckupPath = tomcatInfo.PackageBackPath;
_, stateErr := os.Stat(tomcatBckupPath)
if stateErr != nil {
direrr := os.Mkdir("" + tomcatBckupPath, 0777);
//direrr := os.MkdirAll("D:\\Program Files\\Apache Software Foundation\\apache-tomcat-8.0.39\\backup", 0777)
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录失败");
//os.Exit(1);
return direrr;
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录成功");
}
}
// 备份tomcat目录下当前项目
// time.Now().Format("200601021504")
var now = time.Now();
var backupFileName = now.Format("200601021504") + "更新前备份";
ziperr := zip_util.Zip(tomcatWebappPath, tomcatBckupPath + backupFileName + ".zip");
if ziperr == nil {
tomcatInfo.PackageBackFileName = backupFileName + ".zip";
fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件成功:" + tomcatInfo.PackageBackFileName);
}
// 备份tomcat目录下项目配置文件,如config.properties、log4j.xml、openoffice.properties
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
// 备份目录不存在则创建备份目录
_, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\");
if stateErr != nil {
direrr := os.Mkdir(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败");
os.Exit(1);
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功");
}
}
written, copyErr := copyFile(tomcatInfo.ProcessHome+ "pkg_cfg\\" + cfgFileName, cfgFilePath);
if copyErr != nil {
fmt.Println(copyErr)
fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败");
os.Exit(1);
}
fmt.Println("复制" + tomcatInfo.ProcessName+ "配置文件成功,文件:" + cfgFileName + ",大小:", written, "byte");
}
}
//fmt.Println(tomcatArr);
return nil;
}
/**
获取tomcat信息
*/
func GetTomcatArray(
tomcatPrefix string,
tomcatSuffix string) [] TomcatInfo {
//out, err := exec.Command("cmd", "/C", "tasklist ").Output()
out, err := exec.Command("cmd", "/C", "tasklist").Output()
if err != nil {
log.Fatal(err)
}
//fmt.Printf(string(out))
var processStrList[] string = strings.Split(string(out), "\r\n");
var tomcatArr []TomcatInfo;
for i := range processStrList {
if(strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix)) {
//fmt.Println(i)
//fmt.Println(processStrList[i])
var processName = strings.Split(processStrList[i], " ")[0];
if ! strings.HasSuffi |
} else {
fmt.Println(err2)
}
//fmt.Println("------------------------------------------------------")
}
}
}
return tomcatArr;
//fmt.Println(TOMCAT_PROCESS_MAP)
}
/**
确认tomcat
*/
func ConfirmTomcat(tomcatArr []* TomcatInfo) {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
for i := range tempArr {
var tomcatInfo = tempArr[i];
if tomcatInfo == nil {
continue;
}
// 当前tomcat是否需要更新
for true {
var update string
fmt.Print("是否需要更新 " + tomcatInfo.ProcessName + "(0:否;1:是): ");
fmt.Scanln(&update);
if(update == "1" || update == "0") {
tomcatInfo.Update = (update == "1");
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
// 当前tomcat需要的更新包
for tomcatInfo.Updat
e {
var pkg string
fmt.Print(tomcatInfo.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ");
fmt.Scanln(&pkg);
if(pkg == "0") {
tomcatInfo.PackageFileName = "tyb";
break;
} else if(pkg == "1") {
tomcatInfo.PackageFileName = "agent_1114";
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
}
//fmt.Println("confirm complete.")
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
func copyDir(src string, dest string) error {
src_original := src;
err := filepath.Walk(src, func(src string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if f | x(processName, tomcatSuffix) {
out2, err2 := exec.Command("cmd", "/C", "wmic process where name='" + processName + "' get ExecutablePath").Output()
if err2 == nil {
// TODO
var fileDirectoryArr[] string = strings.Split(strings.Split(string(out2), "\r\n", )[1], "\\");
if(len(fileDirectoryArr) < 2) {
continue;
}
var parentDirectoryArr = fileDirectoryArr[0: len(fileDirectoryArr) - 2];
var tomcatInfo TomcatInfo;
tomcatInfo.ProcessName = processName;
tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\";
tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName;
tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\";
tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\";
tomcatArr = append(tomcatArr, tomcatInfo); | identifier_body |
pkg_util.go | cat6\webapps\agent
PackageFileName string // 更新包名称 agent_1114
PackageBackFileName string // 备份包名称 201712081536更新前备份.zip
ConfigFileBackupDir string // 配置文件临时目录d:\xx\tomcat6\temp_config
NewPackageDir string // 新包地址
Update bool // 是否需要更新
Complete bool // 更新完成
}
/**
备份tomcat目录下项目
*/
func BackupCurrentPackage(tomcatArr []*TomcatInfo) error {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
if(len(tempArr) == 0) {
fmt.Println("当前系统未运行tomcat实例,无法更新");
//os.Exit(1);
var e = errors.New("当前系统未运行tomcat实例,无法更新")
return e;
}
for i := range tempArr {
var tomcatInfo = tempArr[i];
if !tomcatInfo.Update {
continue;
}
tomcatInfo.ConfigFileBackupDir = tomcatInfo.ProcessHome+ "pkg_cfg\\"
_, stat_err := os.Stat(tomcatInfo.ProcessHome+ "pkg_cfg\\");
if stat_err != nil && os.IsNotExist(stat_err) {
var mkdir_err = os.MkdirAll(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if mkdir_err != nil {
return stat_err;
}
} else if stat_err != nil {
return stat_err;
}
tomcatWebappDirFile, err := os.Open(tomcatInfo.PackageDir);
var tomcatWebappPath = []*os.File{tomcatWebappDirFile };
if err != nil {
fmt.Println(tomcatInfo.ProcessName + "备份失败")
fmt.Println(err)
os.Exit(1);
}
var tomcatBckupPath = tomcatInfo.PackageBackPath;
_, stateErr := os.Stat(tomcatBckupPath)
if stateErr != nil {
direrr := os.Mkdir("" + tomcatBckupPath, 0777);
//direrr := os.MkdirAll("D:\\Program Files\\Apache Software Foundation\\apache-tomcat-8.0.39\\backup", 0777)
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录失败");
//os.Exit(1);
return direrr;
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "备份目录成功");
}
}
// 备份tomcat目录下当前项目
// time.Now().Format("200601021504")
var now = time.Now();
var backupFileName = now.Format("200601021504") + "更新前备份";
ziperr := zip_util.Zip(tomcatWebappPath, tomcatBckupPath + backupFileName + ".zip");
if ziperr == nil {
tomcatInfo.PackageBackFileName = backupFileName + ".zip";
fmt.Println("创建" + tomcatInfo.ProcessName + "备份文件成功:" + tomcatInfo.PackageBackFileName);
}
// 备份tomcat目录下项目配置文件,如config.properties、log4j.xml、openoffice.properties
for i := range PKG_CFGFILE_PATH_ARR {
var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
// 备份目录不存在则创建备份目录
_, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\");
if stateErr != nil {
direrr := os.Mkdir(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败");
os.Exit(1);
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功");
}
}
written, copyErr := copyFile(tomcatInfo.ProcessHome+ "pkg_cfg\\" + cfgFileName, cfgFilePath);
if copyErr != nil {
fmt.Println(copyErr)
fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败");
os.Exit(1);
}
| il;
}
/**
获取tomcat信息
*/
func GetTomcatArray(
tomcatPrefix string,
tomcatSuffix string) [] TomcatInfo {
//out, err := exec.Command("cmd", "/C", "tasklist ").Output()
out, err := exec.Command("cmd", "/C", "tasklist").Output()
if err != nil {
log.Fatal(err)
}
//fmt.Printf(string(out))
var processStrList[] string = strings.Split(string(out), "\r\n");
var tomcatArr []TomcatInfo;
for i := range processStrList {
if(strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix)) {
//fmt.Println(i)
//fmt.Println(processStrList[i])
var processName = strings.Split(processStrList[i], " ")[0];
if ! strings.HasSuffix(processName, tomcatSuffix) {
out2, err2 := exec.Command("cmd", "/C", "wmic process where name='" + processName + "' get ExecutablePath").Output()
if err2 == nil {
// TODO
var fileDirectoryArr[] string = strings.Split(strings.Split(string(out2), "\r\n", )[1], "\\");
if(len(fileDirectoryArr) < 2) {
continue;
}
var parentDirectoryArr = fileDirectoryArr[0: len(fileDirectoryArr) - 2];
var tomcatInfo TomcatInfo;
tomcatInfo.ProcessName = processName;
tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\";
tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName;
tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\";
tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\";
tomcatArr = append(tomcatArr, tomcatInfo);
} else {
fmt.Println(err2)
}
//fmt.Println("------------------------------------------------------")
}
}
}
return tomcatArr;
//fmt.Println(TOMCAT_PROCESS_MAP)
}
/**
确认tomcat
*/
func ConfirmTomcat(tomcatArr []* TomcatInfo) {
var tempArr []*TomcatInfo;
tempArr = tomcatArr;
for i := range tempArr {
var tomcatInfo = tempArr[i];
if tomcatInfo == nil {
continue;
}
// 当前tomcat是否需要更新
for true {
var update string
fmt.Print("是否需要更新 " + tomcatInfo.ProcessName + "(0:否;1:是): ");
fmt.Scanln(&update);
if(update == "1" || update == "0") {
tomcatInfo.Update = (update == "1");
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
// 当前tomcat需要的更新包
for tomcatInfo.Update {
var pkg string
fmt.Print(tomcatInfo.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ");
fmt.Scanln(&pkg);
if(pkg == "0") {
tomcatInfo.PackageFileName = "tyb";
break;
} else if(pkg == "1") {
tomcatInfo.PackageFileName = "agent_1114";
break;
}
fmt.Print("输入有误,请重新输入0或者1! ");
}
}
//fmt.Println("confirm complete.")
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
func copyDir(src string, dest string) error {
src_original := src;
err := filepath.Walk(src, func(src string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if f.IsDir() {
| fmt.Println("复制" + tomcatInfo.ProcessName+ "配置文件成功,文件:" + cfgFileName + ",大小:", written, "byte");
}
}
//fmt.Println(tomcatArr);
return n | conditional_block |
pkg_util.go | var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1]; //配置文件名
// 备份目录不存在则创建备份目录
_, stateErr := os.Stat(tomcatInfo.ProcessHome + "pkg_cfg\\");
if stateErr != nil {
direrr := os.Mkdir(tomcatInfo.ProcessHome+ "pkg_cfg\\", 0777);
if direrr != nil {
fmt.Println(direrr)
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录失败");
os.Exit(1);
} else {
fmt.Println("创建" + tomcatInfo.ProcessName + "项目配置文件备份目录成功");
}
}
written, copyErr := copyFile(tomcatInfo.ProcessHome+ "pkg_cfg\\" + cfgFileName, cfgFilePath);
if copyErr != nil {
fmt.Println(copyErr)
fmt.Println("备份" + tomcatInfo.ProcessName + "配置文件失败");
os.Exit(1);
}
fmt.Println("复制" + tomcatInfo.ProcessName+ "配置文件成功,文件:" + cfgFileName + ",大小:", written, "byte");
}
}
//fmt.Println(tomcatArr);
return nil;
}
// GetTomcatArray scans the Windows task list for processes whose
// (lower-cased) name starts with tomcatPrefix and does not end with
// tomcatSuffix, resolves each process's executable path via wmic, and
// derives the Tomcat home / bin / backup / webapps paths from it.
// (获取tomcat信息)
//
// NOTE(review): Windows-only — relies on cmd, tasklist and wmic being
// available on PATH.
func GetTomcatArray(
	tomcatPrefix string,
	tomcatSuffix string) []TomcatInfo {
	out, err := exec.Command("cmd", "/C", "tasklist").Output()
	if err != nil {
		log.Fatal(err)
	}
	var processStrList []string = strings.Split(string(out), "\r\n")
	var tomcatArr []TomcatInfo
	for i := range processStrList {
		if strings.HasPrefix(strings.ToLower(processStrList[i]), tomcatPrefix) {
			// The process name is the first whitespace-delimited column of
			// the tasklist row.
			var processName = strings.Split(processStrList[i], " ")[0]
			if !strings.HasSuffix(processName, tomcatSuffix) {
				out2, err2 := exec.Command("cmd", "/C", "wmic process where name='"+processName+"' get ExecutablePath").Output()
				if err2 == nil {
					// wmic normally prints a header line followed by the
					// path; guard against short/unexpected output before
					// indexing [1] to avoid an index-out-of-range panic.
					wmicLines := strings.Split(string(out2), "\r\n")
					if len(wmicLines) < 2 {
						continue
					}
					var fileDirectoryArr []string = strings.Split(wmicLines[1], "\\")
					if len(fileDirectoryArr) < 2 {
						continue
					}
					// Drop the trailing "bin\<exe>" components to obtain the
					// Tomcat home directory.
					var parentDirectoryArr = fileDirectoryArr[0 : len(fileDirectoryArr)-2]
					var tomcatInfo TomcatInfo
					tomcatInfo.ProcessName = processName
					tomcatInfo.ProcessHome = strings.Join(parentDirectoryArr, "\\") + "\\"
					tomcatInfo.ProcessPath = tomcatInfo.ProcessHome + "bin\\" + processName
					tomcatInfo.PackageBackPath = tomcatInfo.ProcessHome + "backup\\"
					tomcatInfo.PackageDir = tomcatInfo.ProcessHome + "webapps\\agent\\"
					tomcatArr = append(tomcatArr, tomcatInfo)
				} else {
					fmt.Println(err2)
				}
			}
		}
	}
	return tomcatArr
}
// ConfirmTomcat interactively asks, for each discovered Tomcat instance,
// whether it should be updated and — if so — which package to use,
// recording the answers on the TomcatInfo entries in place. (确认tomcat)
func ConfirmTomcat(tomcatArr []*TomcatInfo) {
	for _, info := range tomcatArr {
		if info == nil {
			continue
		}
		// Ask whether this instance needs updating; loop until "0" or "1".
		for {
			var answer string
			fmt.Print("是否需要更新 " + info.ProcessName + "(0:否;1:是): ")
			fmt.Scanln(&answer)
			if answer == "0" || answer == "1" {
				info.Update = answer == "1"
				break
			}
			fmt.Print("输入有误,请重新输入0或者1! ")
		}
		// Only when an update was requested: ask which package to apply;
		// loop until a valid choice is entered.
		for info.Update {
			var choice string
			fmt.Print(info.ProcessName + "需要哪个包进行更新(0:通用版最新包;1:安装包1114): ")
			fmt.Scanln(&choice)
			if choice == "0" {
				info.PackageFileName = "tyb"
				break
			}
			if choice == "1" {
				info.PackageFileName = "agent_1114"
				break
			}
			fmt.Print("输入有误,请重新输入0或者1! ")
		}
	}
	//fmt.Println("confirm complete.")
}
/**
拷贝文件
*/
func copyFile(dstName, srcName string) (written int64, err error) {
src, err := os.Open(srcName)
if err != nil {
return
}
defer src.Close()
dst, err := os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
defer dst.Close()
fmt.Println("拷贝文件:" + src.Name() + " > " + dst.Name());
return io.Copy(dst, src)
}
// copyDir recursively copies the directory tree rooted at src into dest,
// recreating subdirectories and copying every regular file via copyFile.
// Returns the first error encountered while walking, creating directories,
// or copying files.
func copyDir(src string, dest string) error {
    src_original := src;
    err := filepath.Walk(src, func(src string, f os.FileInfo, err error) error {
        if f == nil {
            return err
        }
        if f.IsDir() {
            // Skip the root itself; mirror every subdirectory under dest.
            if src != src_original {
                target := strings.Replace(src, src_original, dest, 1)
                // Bug fix: MkdirAll's error was silently discarded.
                if mkErr := os.MkdirAll(target, 0777); mkErr != nil {
                    return mkErr
                }
            }
            return nil
        }
        dest_new := strings.Replace(src, src_original, dest, -1)
        // Bug fix: the former os.Create(dest_new) leaked an open file handle
        // and was redundant — copyFile creates/truncates the destination.
        // copyFile's error was also ignored; propagate it.
        if _, cpErr := copyFile(dest_new, src); cpErr != nil {
            return cpErr
        }
        return nil
    })
    if err != nil {
        fmt.Printf("filepath.Walk() returned %v\n", err);
        return err;
    }
    return nil;
}
// ReplacePkg deploys a new package into one Tomcat instance:
//  1. stop the Tomcat service (best-effort: failure only logged),
//  2. delete webapps\agent,
//  3. copy the new package directory into webapps,
//  4. restore every backed-up config file listed in PKG_CFGFILE_PATH_ARR,
//  5. restart the service (again best-effort).
//
// NOTE(review): stop/start errors are deliberately not returned — the function
// proceeds and returns nil after logging them; confirm this best-effort
// behaviour is intended.
func ReplacePkg(tomcatInfo TomcatInfo) error {
    var stopErr = stopTomcat(tomcatInfo);
    if stopErr != nil {
        fmt.Println("停止" + tomcatInfo.ProcessName + "出错!");
    } else {
        fmt.Println("停止" + tomcatInfo.ProcessName + "成功。");
    }
    //var destDir = tomcatInfo.ProcessHome + "webapps\\agent";
    var destDir = tomcatInfo.ProcessHome + "webapps";
    // Remove the old webapps\agent deployment before copying the new one in.
    rem_err := os.RemoveAll(destDir + "\\agent");
    if rem_err != nil {
        fmt.Println("移除目录出错:" + destDir + "\\agent");
        fmt.Println(rem_err)
        return rem_err;
    }
    copy_err := copyDir(tomcatInfo.NewPackageDir, destDir);
    if copy_err != nil {
        fmt.Println("拷贝目录出错:" + destDir);
        return copy_err;
    }
    // Restore each backed-up config file over the freshly copied package.
    for i := range PKG_CFGFILE_PATH_ARR {
        var cfgFilePath = tomcatInfo.ProcessHome + PKG_CFGFILE_PATH_ARR[i];
        // The last "\"-separated component of the config path is the bare file name.
        var cfgFileName = strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")[len(strings.Split(PKG_CFGFILE_PATH_ARR[i], "\\")) - 1];
        write_len, copy_err2 := copyFile(cfgFilePath, tomcatInfo.ConfigFileBackupDir + cfgFileName);
        // A zero-byte restore is treated as failure too.
        if copy_err2 != nil || write_len == 0 {
            fmt.Println("还原配置文件出错:" + tomcatInfo.ConfigFileBackupDir + cfgFileName);
            return copy_err2;
        }
    }
    var startErr = startTomcat(tomcatInfo);
    if startErr != nil {
        fmt.Println("启动" + tomcatInfo.ProcessName + "出错!");
    } else {
        fmt.Println("启动" + tomcatInfo.ProcessName + "成功。");
    }
    return nil;
}
// stopTomcat stops the Tomcat Windows service (service name = process name
// with its extension stripped) and force-kills any remaining process.
// Returns the error from the combined shell command, if any.
func stopTomcat(tomcatInfo TomcatInfo) error {
    var processName = strings.Split(tomcatInfo.ProcessName, ".")[0]
    // Bug fix: `taskkill /f /im` expects the process image name
    // (e.g. tomcat8.exe), but the original passed tomcatInfo.ProcessHome —
    // a directory path — so the force-kill never matched any process.
    _, err := exec.Command("cmd", "/C", "net stop " + processName + " && taskkill /f /im " + tomcatInfo.ProcessName).Output();
    if err != nil {
        return err;
    }
    return nil;
}
// startTomcat starts the Tomcat Windows service derived from the process name
// (file extension stripped) via `net start`. Returns the command's error, if any.
func startTomcat(tomcatInfo TomcatInfo) error {
    serviceName := strings.Split(tomcatInfo.ProcessName, ".")[0]
    _, err := exec.Command("cmd", "/C", "net start " + serviceName).Output()
    return err
}
| identifier_name | ||
aweMBPicker.py | ButtonState.kFBButtonState0, FBColor(0.4,0.2,0.5))
removeBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.15,0.45))
removeBtn.OnClick.Add(_removeObjects)
removeBtn.picker = parentBox.picker
optionLayout.AddRelative(removeBtn,0.25,height=25, space=2)
renameBtn = FBButton()
renameBtn.Caption = "ab*"
renameBtn.Look = FBButtonLook.kFBLookColorChange
renameBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.3,0.4,0.5))
renameBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.25,0.35,0.45))
renameBtn.OnClick.Add(_renamePicker)
renameBtn.picker = parentBox.picker
renameBtn.pickerButton = parentBox.pickerBtn
optionLayout.AddRelative(renameBtn,0.25,height=25, space=2)
deleteBtn = FBButton()
deleteBtn.Caption = "x"
deleteBtn.Look = FBButtonLook.kFBLookColorChange
deleteBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.7,0.2,0.3))
deleteBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.65,0.15,0.25))
deleteBtn.OnClick.Add(_deletePicker)
deleteBtn.picker = parentBox.picker
deleteBtn.box = parentBox
optionLayout.AddRelative(deleteBtn,0.25,height=25, space=2)
return optionLayout
def _addObjects(control,event):
    '''Callback: add the currently selected scene models to the caller's Picker.'''
    selected = FBModelList()
    FBGetSelectedModels(selected)
    control.picker.add([model for model in selected])
def _removeObjects(control,event):
    '''Callback: remove the currently selected scene models from the caller's Picker.'''
    selected = FBModelList()
    FBGetSelectedModels(selected)
    remaining = control.picker.objects
    for model in selected:
        if model in remaining:
            remaining.remove(model)
    control.picker.objects = remaining
def _renamePicker(control,event):
    '''Callback: prompt for a new name and apply it to the caller's Picker.'''
    # Guard clause: without a backing scene object there is nothing to rename.
    if not control.picker.pickerObject:
        FBMessageBox('Picker Error', "Could not locate Picker Object","OK")
        return
    response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
    if response == 1 and value:
        control.picker.rename(value)
        control.pickerButton.Caption = value
def _deletePicker(control,event):
    '''Callback: delete the caller's Picker (after confirmation) and its UI row.'''
    if control.picker.pickerObject:
        # Only ask for confirmation when a live scene object would be deleted.
        confirmed = FBMessageBox("Delete Picker", "Are you sure you want to delete %s" % control.picker.name,"Yes","Cancel") == 1
    else:
        confirmed = True
    if confirmed:
        control.picker.delete()
        awePickerTool.pickerLayout.Remove(control.box)
        _toolResize()
def _toggleOptionMenu2(control,event):
    '''Callback: swap a Picker row between its main button and its option toolbar.'''
    region = "pickerBoxRegion"
    box = control.box
    showing = box.optionBtn.optionBoxVisible
    box.ClearControl(region)
    if showing:
        log("hiding optionbox")
        box.SetControl(region, box.pickerBtn)
    else:
        log("showing optionbox")
        box.SetControl(region, box.optionBox)
    box.optionBtn.optionBoxVisible = not showing
    box.Refresh(True)
def _toggleOptionMenu(control,event):
#if hasattr(awePickerTool,"mouse") and awePickerTool.mouse:
mouse = QtGui.QCursor.pos()
#x = int(desktop.width() / 100 * awePickerTool.mouse.PropertyList.Find("X").Data)
#y = int(desktop.height() / 100 * (100-awePickerTool.mouse.PropertyList.Find("Y").Data))
x = mouse.x()
y = mouse.y()
menu = FBGenericMenu()
menu.InsertLast("Add Selection",1)
menu.InsertLast("Remove Selection",2)
menu.InsertLast("Rename Picker",3)
menu.InsertLast("Delete Picker",4)
item = menu.Execute(x,y)
print item
if item:
if item.Id == 1:
_addObjects(control,None)
if item.Id == 2:
_removeObjects(control,None)
if item.Id == 3:
_renamePicker(control,None)
if item.Id == 4:
_deletePicker(control,None)
menu.FBDelete()
def _pickerSelect(control,event):
    '''Callback: select the Picker's objects in the scene.

    If the Picker's backing scene object has been deleted, warn the user and
    remove this Picker's UI row instead.
    '''
    if control.picker:
        success = control.picker.select()
        if not success:
            FBMessageBox("Picker Error", "An error occured: couldn't find Picker object.\nDeleting this Picker","OK")
            awePickerTool.pickerLayout.Remove(control.box)
            # NOTE(review): HardSelect() right after Remove() looks like a forced
            # relayout/refresh — confirm against the FBLayout documentation.
            awePickerTool.pickerLayout.HardSelect()
def initPickers(tool):
    '''Rebuild the tool's Picker buttons from the persisted scene sets.

    Scene layout: a master FBSet named "awe:Pickers" contains one set per
    tab; each tab set contains one set per Picker, which carries a
    "PickerName" string property and an "Objects" object-list property.
    '''
    log("initializing pickers")
    log("tool", tool)
    tool.pickerLayout.RemoveAll()
    sets = FBSystem().Scene.Sets
    masterSet = None
    for s in sets:
        if s.LongName == "awe:Pickers":
            masterSet = s
    if masterSet:
        # Keep the bookkeeping sets out of the UI (non-browsable/renamable).
        hideComponent(masterSet,masterSet.Items)
        for t in masterSet.Items:
            for p in t.Items:
                name = p.PropertyList.Find("PickerName").Data
                objects = [o for o in p.PropertyList.Find("Objects")]
                picker = Picker(name,objects,p)
                createPickerButton(name,picker)
    #_toolResize()
    # create the mouse device
    # if hasattr(tool,"mouse") and tool.mouse:
    #     try:
    #         tool.mouse.FBDelete()
    #     except:
    #         pass
    # tool.mouse = FBCreateObject("Browsing/Templates/Devices","Mouse","pickerMouse")
    # FBSystem().Scene.Devices.append(tool.mouse)
    # tool.mouse.Live = tool.mouse.Online = True
def hideComponent(component=None,componentList=None):
    '''Strip the browsable/renamable flags from a component and/or each item in a list.'''
    flags = (FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable)
    if component:
        for flag in flags:
            component.DisableObjectFlags(flag)
    if componentList:
        for child in componentList:
            hideComponent(component=child)
def _pickerObjectDestroyed(object,event):
    # Callback (FBSet.OnUnbind): the backing scene set is gone — detach it
    # so the Picker knows it no longer has a persisted object.
    object.picker.pickerObject = None
def _toolResize(*args):
    '''Resize the scroll-box content: one 27px row per Picker box plus 10px padding.'''
    if not awePickerTool:
        return
    log("resizing")
    sb = awePickerTool.scrollBox
    log(sb)
    layout = awePickerTool.pickerLayout
    width = sb.RegionPosMaxX - sb.RegionPosMinX - 15
    log("checking children of pickerLayout")
    count = 0
    while layout.GetChild(count):
        log("found picker box %s" % str(count))
        count += 1
    log("found %d picker boxes" % count)
    height = 27 * count + 10
    log("computed size Y: ", height)
    sb.SetContentSize(width, height)
def getUIChildren(control, pList=None, tabs=0, firstRun=True):
    '''Recursively collect every descendant UI component of `control`.

    Returns the flat list of components found (only on the outermost call;
    recursive calls return None and append into the shared pList).
    '''
    pList = [] if firstRun else pList
    # FBScrollBox hosts its children on .Content, not on the box itself.
    getChild = control.Content.GetChild if control.ClassName() == "FBScrollBox" else control.GetChild
    log("----"*tabs, control.ClassName(), control.RegionName if control.ClassName() == "FBLayout" else "")
    i = 0
    child = getChild(i)
    while child:
        pList.append(child)
        getUIChildren(child, pList, tabs + 1, False)
        i += 1
        # Bug fix: the original advanced with control.GetChild(i) even for
        # scroll boxes, so only the first scroll-box child was ever visited
        # (compare restructureAll, which handles .Content in both places).
        child = getChild(i)
    if firstRun:
        return pList
def restructureAll(control,pList=None,firstRun=True):
    '''Recursively loops through all child layouts of control
    and calls Restructure() and Refresh() on them
    '''
    pList = [] if firstRun else pList
    i = 0
    # FBScrollBox hosts its children on .Content rather than on itself.
    child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
    if hasattr(control, "Restructure"):
        pList.append(control)
    while child:
        restructureAll(child, pList, False)
        i += 1
        child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
    if firstRun:
        # Only the outermost call performs the batched restructure/refresh
        # over everything collected by the recursion.
        for c in pList:
            c.Restructure(False)
            c.Refresh(True)
            #log(c)
        pList = []
def _fileChange(control,event):
    # Callback (file open/new events): rebuild the Picker UI for the new scene.
    initPickers(awePickerTool)
def _ | removeSceneCB( | identifier_name | |
aweMBPicker.py | m.Selected = False
for o in self.objects:
o.Selected = True
FBEndChangeAllModels()
return True
else:
return False
def delete(self):
'''Deletes this Picker's associated pickerObject'''
if self.pickerObject:
self.pickerObject.FBDelete()
def add(self,objectList):
'''Adds a list of objects to this Picker'''
objects = self.objects
objects.extend(objectList)
# remove duplicates
tempSet = set(objects)
self.objects = [o for o in tempSet]
def aweCreateSet(name):
    # Create a bare FBSet and assign its long (namespace-qualified) name.
    # NOTE(review): the flag-stripping below is commented out, so the set stays
    # browsable/renamable here; hideComponent() is applied later by
    # initPickers — confirm that is the intended place for it.
    Set = FBSet("")
    Set.LongName = name
    disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
    #for flag in disallowedFlags:
    #Set.DisableObjectFlags(flag)
    return Set
def _createPicker(control,event):
    '''Callback: prompt for a name and build a Picker from the current selection.'''
    models = FBModelList()
    FBGetSelectedModels(models)
    selection = [m for m in models]
    if not selection:
        FBMessageBox("Picker Error", "Error: No Objects selected","OK")
        return
    result = FBMessageBoxGetUserValue("Create New Picker", "Name: ", "Picker", FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
    if result[0] == 1:
        name = result[1]
        createPickerButton(name, Picker(name, selection))
        _toolResize()
def createPickerButton(name,picker):
    '''Creates Picker button UI and associates it with given Picker object'''
    box = FBLayout()
    box.picker = picker
    # Left region: the small fixed-width toggle button for the option toolbar.
    x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft,"")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
    w = FBAddRegionParam(20, FBAttachType.kFBAttachNone,"")
    h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
    box.AddRegion("optionBtnRegion", "optionBtnRegion", x,y,w,h)
    box.optionBtn = FBButton()
    box.optionBtn.Caption = "»"
    #box.optionBtn.Look = FBButtonLook.kFBLookColorChange
    #box.optionBtn.Style = FBButtonStyle.kFB2States
    box.optionBtn.optionBoxVisible = False
    box.optionBtn.picker = picker
    box.optionBtn.OnClick.Add(_toggleOptionMenu)
    box.SetControl("optionBtnRegion", box.optionBtn)
    # Right region: shows either the picker button or the option toolbar.
    x = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"optionBtnRegion")
    y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
    w = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"")
    h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
    box.AddRegion("pickerBoxRegion", "pickerBoxRegion", x,y,w,h)
    box.pickerBtn = FBButton()
    box.pickerBtn.Caption = name
    box.pickerBtn.picker = picker
    box.pickerBtn.OnClick.Add(_pickerSelect)
    box.SetControl("pickerBoxRegion", box.pickerBtn)
    # Cross-links used by the callbacks above (they reach siblings via .box).
    box.optionBtn.optionBox = box.optionBox = createOptionBox(box)
    box.pickerBtn.box = box.optionBtn.box = box
    awePickerTool.pickerLayout.Add(box, 25, space=2)
def createOptionBox(parentBox):
' | optionLayout.AddRelative(removeBtn,0.25,height=25, space=2)
renameBtn = FBButton()
renameBtn.Caption = "ab*"
renameBtn.Look = FBButtonLook.kFBLookColorChange
renameBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.3,0.4,0.5))
renameBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.25,0.35,0.45))
renameBtn.OnClick.Add(_renamePicker)
renameBtn.picker = parentBox.picker
renameBtn.pickerButton = parentBox.pickerBtn
optionLayout.AddRelative(renameBtn,0.25,height=25, space=2)
deleteBtn = FBButton()
deleteBtn.Caption = "x"
deleteBtn.Look = FBButtonLook.kFBLookColorChange
deleteBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.7,0.2,0.3))
deleteBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.65,0.15,0.25))
deleteBtn.OnClick.Add(_deletePicker)
deleteBtn.picker = parentBox.picker
deleteBtn.box = parentBox
optionLayout.AddRelative(deleteBtn,0.25,height=25, space=2)
return optionLayout
def _addObjects(control,event):
'''Callback:
Adds selected objects to the Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objectList = [o for o in ml]
control.picker.add(objectList)
def _removeObjects(control,event):
'''Callback:
Removes selected objects from Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objects = control.picker.objects
for m in ml:
if m in objects:
objects.remove(m)
control.picker.objects = objects
def _renamePicker(control,event):
'''Callback:
Prompts to rename a Picker associated with the caller
'''
if control.picker.pickerObject:
response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if response == 1:
if value:
control.picker.rename(value)
control.pickerButton.Caption = value
else:
FBMessageBox('Picker Error', "Could not locate Picker Object","OK")
def _deletePicker(control,event):
'''Callback:
Deletes a Picker and UI associated with caller (and the caller itself)
'''
deleteUI = False
if control.picker.pickerObject:
result = FBMessageBox("Delete Picker", "Are you sure you want to delete %s" % control.picker.name,"Yes","Cancel")
if result == 1:
deleteUI = True
else:
deleteUI = True
if deleteUI:
control.picker.delete()
awePickerTool.pickerLayout.Remove(control.box)
_toolResize()
def _toggleOptionMenu2(control,event):
'''Callback:
Shows a Picker's option UI or hides it, depending on current state
'''
region = "pickerBoxRegion"
# hide options
if control.box.optionBtn.optionBoxVisible:
log("hiding optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.pickerBtn)
control.box.optionBtn.optionBoxVisible = False
control.box.Refresh(True)
# show options
else:
log("showing optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.optionBox)
control.box.optionBtn.optionBoxVisible = True
control.box.Refresh(True)
def _toggleOptionMenu(control,event):
#if hasattr(awePickerTool,"mouse") and awePickerTool.mouse:
mouse = QtGui.QCursor.pos()
#x = int(desktop.width() / 100 * awePickerTool.mouse.PropertyList.Find("X").Data)
#y = int(desktop.height() / 100 * (100-awePickerTool.mouse.PropertyList.Find("Y").Data))
x = mouse.x()
y = mouse.y()
menu = FBGenericMenu()
menu.InsertLast("Add Selection",1)
menu.InsertLast("Remove Selection",2)
menu.InsertLast("Rename Picker",3)
menu.InsertLast("Delete Picker",4)
item = menu.Execute(x,y)
print item
if item:
if item.Id == 1:
_addObjects(control,None)
if item.Id | ''Creates a layout that holds a Picker's option UI'''
optionLayout = pyui.FBHBoxLayout()
addBtn = FBButton()
addBtn.Caption = "+"
addBtn.OnClick.Add(_addObjects)
addBtn.picker = parentBox.picker
addBtn.Look = FBButtonLook.kFBLookColorChange
addBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.5,0.3))
addBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.45,0.25))
optionLayout.AddRelative(addBtn,0.25,height=25, space=4)
removeBtn = FBButton()
removeBtn.Caption = "-"
removeBtn.Look = FBButtonLook.kFBLookColorChange
removeBtn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(0.4,0.2,0.5))
removeBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.35,0.15,0.45))
removeBtn.OnClick.Add(_removeObjects)
removeBtn.picker = parentBox.picker | identifier_body |
aweMBPicker.py | .2,0.3))
deleteBtn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(0.65,0.15,0.25))
deleteBtn.OnClick.Add(_deletePicker)
deleteBtn.picker = parentBox.picker
deleteBtn.box = parentBox
optionLayout.AddRelative(deleteBtn,0.25,height=25, space=2)
return optionLayout
def _addObjects(control,event):
'''Callback:
Adds selected objects to the Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objectList = [o for o in ml]
control.picker.add(objectList)
def _removeObjects(control,event):
'''Callback:
Removes selected objects from Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objects = control.picker.objects
for m in ml:
if m in objects:
objects.remove(m)
control.picker.objects = objects
def _renamePicker(control,event):
'''Callback:
Prompts to rename a Picker associated with the caller
'''
if control.picker.pickerObject:
response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if response == 1:
if value:
control.picker.rename(value)
control.pickerButton.Caption = value
else:
FBMessageBox('Picker Error', "Could not locate Picker Object","OK")
def _deletePicker(control,event):
'''Callback:
Deletes a Picker and UI associated with caller (and the caller itself)
'''
deleteUI = False
if control.picker.pickerObject:
result = FBMessageBox("Delete Picker", "Are you sure you want to delete %s" % control.picker.name,"Yes","Cancel")
if result == 1:
deleteUI = True
else:
deleteUI = True
if deleteUI:
control.picker.delete()
awePickerTool.pickerLayout.Remove(control.box)
_toolResize()
def _toggleOptionMenu2(control,event):
'''Callback:
Shows a Picker's option UI or hides it, depending on current state
'''
region = "pickerBoxRegion"
# hide options
if control.box.optionBtn.optionBoxVisible:
log("hiding optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.pickerBtn)
control.box.optionBtn.optionBoxVisible = False
control.box.Refresh(True)
# show options
else:
log("showing optionbox")
control.box.ClearControl(region)
control.box.SetControl(region, control.box.optionBox)
control.box.optionBtn.optionBoxVisible = True
control.box.Refresh(True)
def _toggleOptionMenu(control,event):
#if hasattr(awePickerTool,"mouse") and awePickerTool.mouse:
mouse = QtGui.QCursor.pos()
#x = int(desktop.width() / 100 * awePickerTool.mouse.PropertyList.Find("X").Data)
#y = int(desktop.height() / 100 * (100-awePickerTool.mouse.PropertyList.Find("Y").Data))
x = mouse.x()
y = mouse.y()
menu = FBGenericMenu()
menu.InsertLast("Add Selection",1)
menu.InsertLast("Remove Selection",2)
menu.InsertLast("Rename Picker",3)
menu.InsertLast("Delete Picker",4)
item = menu.Execute(x,y)
print item
if item:
if item.Id == 1:
_addObjects(control,None)
if item.Id == 2:
_removeObjects(control,None)
if item.Id == 3:
_renamePicker(control,None)
if item.Id == 4:
_deletePicker(control,None)
menu.FBDelete()
def _pickerSelect(control,event):
if control.picker:
success = control.picker.select()
if not success:
FBMessageBox("Picker Error", "An error occured: couldn't find Picker object.\nDeleting this Picker","OK")
awePickerTool.pickerLayout.Remove(control.box)
awePickerTool.pickerLayout.HardSelect()
def initPickers(tool):
log("initializing pickers")
log("tool", tool)
tool.pickerLayout.RemoveAll()
sets = FBSystem().Scene.Sets
masterSet = None
for s in sets:
if s.LongName == "awe:Pickers":
masterSet = s
if masterSet:
hideComponent(masterSet,masterSet.Items)
for t in masterSet.Items:
for p in t.Items:
name = p.PropertyList.Find("PickerName").Data
objects = [o for o in p.PropertyList.Find("Objects")]
picker = Picker(name,objects,p)
createPickerButton(name,picker)
#_toolResize()
# create the mouse device
# if hasattr(tool,"mouse") and tool.mouse:
# try:
# tool.mouse.FBDelete()
# except:
# pass
# tool.mouse = FBCreateObject("Browsing/Templates/Devices","Mouse","pickerMouse")
# FBSystem().Scene.Devices.append(tool.mouse)
# tool.mouse.Live = tool.mouse.Online = True
def hideComponent(component=None,componentList=None):
disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
if component:
for flag in disallowedFlags:
component.DisableObjectFlags(flag)
if componentList:
for c in componentList:
hideComponent(component=c)
def _pickerObjectDestroyed(object,event):
object.picker.pickerObject = None
def _toolResize(*args):
if not awePickerTool:
return
log("resizing")
sb = awePickerTool.scrollBox
log(sb)
pl = awePickerTool.pickerLayout
sX = sb.RegionPosMaxX - sb.RegionPosMinX - 15
i = childCount = 0
log("checking children of pickerLayout")
box = pl.GetChild(i)
while box:
log("found picker box %s" % str(i))
i += 1
childCount += 1
box = pl.GetChild(i)
log("found %d picker boxes" % childCount)
sY = 27 * childCount + 10
log("computed size Y: ", sY)
sb.SetContentSize(sX, sY)
def getUIChildren(control, pList=None, tabs=0, firstRun=True):
'''Recursively loops through all child UI components of control
Returns list of items found
'''
pList = [] if firstRun else pList
i = 0
child = control.GetChild(i)
if control.ClassName() == "FBScrollBox":
child = control.Content.GetChild(i)
log("----"*tabs, control.ClassName(), control.RegionName if control.ClassName() == "FBLayout" else "")
while child:
pList.append(child)
getUIChildren(child, pList,tabs + 1,False)
i += 1
child = control.GetChild(i)
if firstRun:
return pList
def restructureAll(control,pList=None,firstRun=True):
'''Recursively loops through all child layouts of control
and calls Restructure() and Refresh() on them
'''
pList = [] if firstRun else pList
i = 0
child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
if hasattr(control, "Restructure"):
pList.append(control)
while child:
restructureAll(child, pList, False)
i += 1
child = control.Content.GetChild(i) if control.ClassName() == "FBScrollBox" else control.GetChild(i)
if firstRun:
for c in pList:
c.Restructure(False)
c.Refresh(True)
#log(c)
pList = []
def _fileChange(control,event):
initPickers(awePickerTool)
def _removeSceneCB(control,event):
    # Callback: detach every Scene.OnChange handler (used when the tool closes).
    FBSystem().Scene.OnChange.RemoveAll()
def _monitorSet(control,event):
    '''Callback:
    Check for manual deletion of a picker object (FBSet).
    If it's the master set, prompt for undo. If it's a picker
    set, notify the associated Picker object
    '''
    if event.Type == FBSceneChangeType.kFBSceneChangeDetach:
        c = event.ChildComponent
        # c.Is(44): 44 is presumably the FBSet runtime type id — TODO confirm
        # against the MotionBuilder SDK; only SDK-created sets are handled.
        if c.Is(44) and c.IsSDKComponent():
            if c.LongName == "awe:Pickers":
                FBMessageBox("Picker Error", "Hey! You just deleted the Picker set! Undo that please or I will crash", "OK")
                return
            for p in c.Parents:
                if p.LongName == "awe:Pickers":
                    if c.picker:
                        c.picker.pickerObject = None
def aweCreateBaseUI(tool):
# ------------------------------
# Tool Layout Scheme:
#
# -- MainLayout
# -- |-- Edit Layout
# -- |-- |-- Add Button
# -- |-- ScrollBox
# -- |-- |-- Picker Layout
# -- |-- |-- |-- Picker Box
# -- |-- |-- |-- ...
# ------------------------------
startX = 175
startY = 240
| tool.StartSizeX = startX
tool.StartSizeY = startY
tool.OnResize.Add(_toolResize)
| random_line_split | |
aweMBPicker.py |
def createPickerObject(self, name, tab, pickerObject, objectList=[]):
    '''Creates the Set object used to store the Picker in the Scene
    When used during initPickers(), it doesn't create a new set and
    returns the existing set instead.
    '''
    # NOTE(review): mutable default `objectList=[]` is shared across calls;
    # safe only because it is never mutated here — confirm before changing.
    po = pickerObject
    if not po:
        po = aweCreateSet(name)
    # search for master set. If none found, create it.
    masterSet = None
    for s in FBSystem().Scene.Sets:
        if s.LongName == "awe:Pickers":
            masterSet = s
    if not masterSet:
        masterSet = aweCreateSet("awe:Pickers")
    # search for the tab set. If none found, create it.
    tabSet = None
    for s in masterSet.Items:
        if s.ClassName() == 'FBSet' and s.LongName == tab:
            tabSet = s
    if not tabSet:
        tabSet = aweCreateSet(tab)
        masterSet.ConnectSrc(tabSet)
    tabSet.ConnectSrc(po)
    # Persist the Picker's name and its member objects as custom properties.
    po.PropertyCreate('PickerName', FBPropertyType.kFBPT_charptr, 'String', False, False, None)
    po.PropertyCreate('Objects', FBPropertyType.kFBPT_object, 'Object', False, False, None)
    po.PropertyList.Find("PickerName").Data = name
    po.Pickable = po.Transformable = False
    for o in objectList:
        po.PropertyList.Find('Objects').append(o)
    # Back-reference plus destruction hook so the UI learns about deletion.
    po.picker = self
    po.OnUnbind.Add(_pickerObjectDestroyed)
    return po
def rename(self, newName):
    '''Rename this Picker and return the new name.

    Bug fix: also persist the new name to the backing set's "PickerName"
    property — initPickers() reads that property on reload, so the original
    (in-memory-only) rename was lost when the scene was reopened.
    '''
    self.name = newName
    if self.pickerObject:
        prop = self.pickerObject.PropertyList.Find("PickerName")
        if prop:
            prop.Data = newName
    return self.name
def select(self):
    '''Selects all objects associated with this Picker
    '''
    if self.pickerObject:
        # Batch the selection change so the scene refreshes once.
        FBBeginChangeAllModels()
        ml = FBModelList()
        FBGetSelectedModels(ml)
        # Clear the current selection first so the Picker's objects become
        # the only selected models.
        for m in ml:
            m.Selected = False
        for o in self.objects:
            o.Selected = True
        FBEndChangeAllModels()
        return True
    else:
        # Backing scene set is gone; caller is expected to tear down the UI.
        return False
def delete(self):
    '''Deletes this Picker's associated pickerObject'''
    # The OnUnbind callback registered in createPickerObject will clear
    # self.pickerObject once the set is destroyed.
    if self.pickerObject:
        self.pickerObject.FBDelete()
def add(self, objectList):
    '''Adds a list of objects to this Picker, skipping duplicates.

    Bug fix: the original deduplicated via set(), which scrambled the stored
    object order nondeterministically; this keeps first-seen order.
    '''
    merged = []
    seen = set()
    for o in list(self.objects) + list(objectList):
        if o not in seen:
            seen.add(o)
            merged.append(o)
    self.objects = merged
def aweCreateSet(name):
Set = FBSet("")
Set.LongName = name
disallowedFlags = [FBObjectFlag.kFBFlagBrowsable, FBObjectFlag.kFBFlagRenamable]
#for flag in disallowedFlags:
#Set.DisableObjectFlags(flag)
return Set
def _createPicker(control,event):
'''Callback:
Creates Picker and its UI after prompting for a name
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objSet = []
for m in ml:
objSet.append(m)
if not objSet:
FBMessageBox("Picker Error", "Error: No Objects selected","OK")
else:
userInput = FBMessageBoxGetUserValue("Create New Picker", "Name: ", "Picker", FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if userInput[0] == 1:
name = userInput[1]
picker = Picker(name,objSet)
createPickerButton(name,picker)
_toolResize()
def createPickerButton(name,picker):
'''Creates Picker button UI and associates it with given Picker object'''
box = FBLayout()
box.picker = picker
# optionBtn region
x = FBAddRegionParam(0, FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(20, FBAttachType.kFBAttachNone,"")
h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
box.AddRegion("optionBtnRegion", "optionBtnRegion", x,y,w,h)
box.optionBtn = FBButton()
box.optionBtn.Caption = "»"
#box.optionBtn.Look = FBButtonLook.kFBLookColorChange
#box.optionBtn.Style = FBButtonStyle.kFB2States
box.optionBtn.optionBoxVisible = False
box.optionBtn.picker = picker
box.optionBtn.OnClick.Add(_toggleOptionMenu)
box.SetControl("optionBtnRegion", box.optionBtn)
# picker / optionBox region
x = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"optionBtnRegion")
y = FBAddRegionParam(0, FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(0, FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(25, FBAttachType.kFBAttachNone,"")
box.AddRegion("pickerBoxRegion", "pickerBoxRegion", x,y,w,h)
box.pickerBtn = FBButton()
box.pickerBtn.Caption = name
box.pickerBtn.picker = picker
box.pickerBtn.OnClick.Add(_pickerSelect)
box.SetControl("pickerBoxRegion", box.pickerBtn)
box.optionBtn.optionBox = box.optionBox = createOptionBox(box)
box.pickerBtn.box = box.optionBtn.box = box
awePickerTool.pickerLayout.Add(box, 25, space=2)
def createOptionBox(parentBox):
    '''Creates a layout that holds a Picker's option UI'''
    layout = pyui.FBHBoxLayout()
    # (caption, click handler, up color, pressed color, leading gap)
    specs = [
        ("+",   _addObjects,    (0.4, 0.5, 0.3), (0.35, 0.45, 0.25), 4),
        ("-",   _removeObjects, (0.4, 0.2, 0.5), (0.35, 0.15, 0.45), 2),
        ("ab*", _renamePicker,  (0.3, 0.4, 0.5), (0.25, 0.35, 0.45), 2),
        ("x",   _deletePicker,  (0.7, 0.2, 0.3), (0.65, 0.15, 0.25), 2),
    ]
    for caption, callback, upColor, downColor, gap in specs:
        btn = FBButton()
        btn.Caption = caption
        btn.Look = FBButtonLook.kFBLookColorChange
        btn.SetStateColor(FBButtonState.kFBButtonState0, FBColor(*upColor))
        btn.SetStateColor(FBButtonState.kFBButtonState1, FBColor(*downColor))
        btn.OnClick.Add(callback)
        btn.picker = parentBox.picker
        # Extra back-references required by specific callbacks.
        if callback is _renamePicker:
            btn.pickerButton = parentBox.pickerBtn
        if callback is _deletePicker:
            btn.box = parentBox
        layout.AddRelative(btn, 0.25, height=25, space=gap)
    return layout
def _addObjects(control,event):
'''Callback:
Adds selected objects to the Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objectList = [o for o in ml]
control.picker.add(objectList)
def _removeObjects(control,event):
'''Callback:
Removes selected objects from Picker associated with the caller
'''
ml = FBModelList()
FBGetSelectedModels(ml)
objects = control.picker.objects
for m in ml:
if m in objects:
objects.remove(m)
control.picker.objects = objects
def _renamePicker(control,event):
'''Callback:
Prompts to rename a Picker associated with the caller
'''
if control.picker.pickerObject:
response, value = FBMessageBoxGetUserValue("Rename Picker %s" % control.picker.name, "Name: ", control.picker.name, FBPopupInputType.kFBPopupString, "OK", "Cancel",None,1,True)
if response == 1:
if value:
control.picker.rename(value)
control.pickerButton.Caption = value
else:
FBMessageBox('Picker Error', "Could not locate Picker Object","OK")
def _deletePicker(control | self.pickerObject.PropertyList.Find('Objects').append(o) | conditional_block | |
types.go | be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of a set of UnlockConditions that must be fulfilled
// in order to spend the output.
//
// When the SiafundOutput is spent, a SiacoinOutput is created, where:
//
// SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000
// SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash
//
// When a SiafundOutput is put into a transaction, the ClaimStart must always
// equal zero. While the transaction is being processed, the ClaimStart is set
// to the value of the SiafundPool.
type SiafundOutput struct {
Value Currency
UnlockHash UnlockHash
ClaimUnlockHash UnlockHash
ClaimStart Currency
}
// UnlockConditions are a set of conditions which must be met to execute
// certain actions, such as spending a SiacoinOutput or terminating a
// FileContract.
//
// The simplest requirement is that the block containing the UnlockConditions
// must have a height >= 'Timelock'.
//
// 'PublicKeys' specifies the set of keys that can be used to satisfy the
// UnlockConditions; of these, at least 'NumSignatures' unique keys must sign
// the transaction. The keys that do not need to use the same cryptographic
// algorithm.
//
// If 'NumSignatures' == 0, the UnlockConditions are effectively "anyone can
// unlock." If 'NumSignatures' > len('PublicKeys'), then the UnlockConditions
// cannot be fulfilled under any circumstances.
type UnlockConditions struct {
Timelock BlockHeight
PublicKeys []SiaPublicKey
NumSignatures uint64
}
// A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
// indicates the algorithm used for signing and verification. Unrecognized
// algorithms will always verify, which allows new algorithms to be added to
// the protocol via a soft-fork.
type SiaPublicKey struct {
Algorithm Specifier
Key string
}
// A TransactionSignature is a signature that is included in the transaction.
// The signature should correspond to a public key in one of the
// UnlockConditions of the transaction. This key is specified first by
// 'ParentID', which specifies the UnlockConditions, and then
// 'PublicKeyIndex', which indicates the key in the UnlockConditions. There
// are three types that use UnlockConditions: SiacoinInputs, SiafundInputs,
// and FileContractTerminations. Each of these types also references a
// ParentID, and this is the hash that 'ParentID' must match. The 'Timelock'
// prevents the signature from being used until a certain height.
// 'CoveredFields' indicates which parts of the transaction are being signed;
// see CoveredFields.
type TransactionSignature struct {
ParentID crypto.Hash
PublicKeyIndex uint64
Timelock BlockHeight
CoveredFields CoveredFields
Signature Signature
}
// CoveredFields indicates which fields in a transaction have been covered by
// the signature. (Note that the signature does not sign the fields
// themselves, but rather their combined hash; see SigHash.) Each slice
// corresponds to a slice in the Transaction type, indicating which indices of
// the slice have been signed. The indices must be valid, i.e. within the
// bounds of the slice. In addition, they must be sorted and unique.
//
// As a convenience, a signature of the entire transaction can be indicated by
// the 'WholeTransaction' field. If 'WholeTransaction' == true, all other
// fields must be empty (except for the Signatures field, since a signature
// cannot sign itself).
type CoveredFields struct {
WholeTransaction bool
SiacoinInputs []uint64
SiacoinOutputs []uint64
FileContracts []uint64
FileContractTerminations []uint64
StorageProofs []uint64
SiafundInputs []uint64
SiafundOutputs []uint64
MinerFees []uint64
ArbitraryData []uint64
Signatures []uint64
}
// CurrentTimestamp returns the current time as a Timestamp.
func CurrentTimestamp() Timestamp {
return Timestamp(time.Now().Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
func CalculateCoinbase(height BlockHeight) (c Currency) {
base := InitialCoinbase - uint64(height)
if base < MinimumCoinbase {
base = MinimumCoinbase
}
return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int.
func (t Target) Int() *big.Int {
return new(big.Int).SetBytes(t[:])
}
// Rat converts a Target to a big.Rat.
func (t Target) Rat() *big.Rat {
return new(big.Rat).SetInt(t.Int())
}
// Inverse returns the inverse of a Target as a big.Rat
func (t Target) Inverse() *big.Rat {
return new(big.Rat).Inv(t.Rat())
}
// IntToTarget converts a big.Int to a Target.
func IntToTarget(i *big.Int) (t Target) {
// i may overflow the maximum target.
// In the event of overflow, return the maximum.
if i.BitLen() > 256 {
return RootDepth
}
b := i.Bytes()
// need to preserve big-endianness
offset := len(t[:]) - len(b)
copy(t[offset:], b)
return
}
// RatToTarget converts a big.Rat to a Target.
func RatToTarget(r *big.Rat) Target {
// conversion to big.Int truncates decimal
i := new(big.Int).Div(r.Num(), r.Denom())
return IntToTarget(i)
}
// Tax returns the amount of Currency that will be taxed from fc.
func (fc FileContract) Tax() Currency {
return fc.Payout.MulFloat(SiafundPortion).RoundDown(SiafundCount)
}
// UnlockHash calculates the root hash of a Merkle tree of the
// UnlockConditions object. The leaves of this tree are formed by taking the
// hash of the timelock, the hash of the public keys (one leaf each), and the
// hash of the number of signatures. The keys are put in the middle because
// Timelock and NumSignatures are both low entropy fields; they can be
// protected by having random public keys next to them.
func (uc UnlockConditions) UnlockHash() UnlockHash {
tree := crypto.NewTree()
tree.PushObject(uc.Timelock)
for i := range uc.PublicKeys {
tree.PushObject(uc.PublicKeys[i])
}
tree.PushObject(uc.NumSignatures)
return UnlockHash(tree.Root())
}
// ID returns the ID of a Block, which is calculated by hashing the
// concatenation of the block's parent ID, nonce, and Merkle root.
func (b Block) ID() BlockID {
return BlockID(crypto.HashAll(
b.ParentID,
b.Nonce,
b.MerkleRoot(),
))
}
// CheckTarget returns true if the block's ID meets the given target.
func (b Block) CheckTarget(target Target) bool {
blockHash := b.ID()
return bytes.Compare(target[:], blockHash[:]) >= 0
}
// MerkleRoot calculates the Merkle root of a Block. The leaves of the Merkle
// tree are composed of the Timestamp, the miner outputs (one leaf per
// payout), and the transactions (one leaf per transaction).
func (b Block) MerkleRoot() crypto.Hash {
tree := crypto.NewTree()
tree.PushObject(b.Timestamp)
for _, payout := range b.MinerPayouts {
tree.PushObject(payout)
}
for _, txn := range b.Transactions {
tree.PushObject(txn)
}
return tree.Root()
}
// MinerPayoutID returns the ID of the miner payout at the given index, which
// is calculated by hashing the concatenation of the BlockID and the payout
// index.
func (b Block) MinerPayoutID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
b.ID(),
i,
))
}
// SiacoinOutputID returns the ID of a siacoin output at the given index,
// which is calculated by hashing the concatenation of the SiacoinOutput
// Specifier, all of the fields in the transaction (except the signatures),
// and output index.
func (t Transaction) SiacoinOutputID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierSiacoinOutput,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
| // FileContractID returns the ID of a file contract at the given index, which
// is calculated by hashing the concatenation of the FileContract Specifier,
// all of the fields in the transaction (except the signatures), and the | random_line_split | |
types.go | for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiacoinInput struct {
ParentID SiacoinOutputID
UnlockConditions UnlockConditions
}
// A SiacoinOutput holds a volume of siacoins. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of the UnlockConditions that must be fulfilled
// in order to spend the output.
type SiacoinOutput struct {
Value Currency
UnlockHash UnlockHash
}
// A FileContract is a public record of a storage agreement between a "host"
// and a "renter." It mandates that a host must submit a storage proof to the
// network, proving that they still possess the file they have agreed to
// store.
//
// The party must submit the storage proof in a block that is between 'Start'
// and 'Expiration'. Upon submitting the proof, the outputs for
// 'ValidProofOutputs' are created. If the party does not submit a storage
// proof by 'Expiration', then the outputs for 'MissedProofOutputs' are
// created instead. The sum of 'MissedProofOutputs' must equal 'Payout', and
// the sum of 'ValidProofOutputs' must equal 'Payout' plus the siafund fee.
// This fee is sent to the siafund pool, which is a set of siacoins only
// spendable by siafund owners.
//
// Under normal circumstances, the payout will be funded by both the host and
// the renter, which gives the host incentive not to lose the file. The
// 'ValidProofUnlockHash' will typically be spendable by host, and the
// 'MissedProofUnlockHash' will either by spendable by the renter or by
// nobody (the ZeroUnlockHash).
//
// A contract can be terminated early by submitting a FileContractTermination
// whose UnlockConditions hash to 'TerminationHash'.
type FileContract struct {
FileSize uint64
FileMerkleRoot crypto.Hash
Start BlockHeight
Expiration BlockHeight
Payout Currency
ValidProofOutputs []SiacoinOutput
MissedProofOutputs []SiacoinOutput
TerminationHash UnlockHash
}
// A FileContractTermination terminates a file contract. The ParentID
// specifies the contract being terminated, and the TerminationConditions are
// the conditions under which termination will be treated as valid. The hash
// of the TerminationConditions must match the TerminationHash in the
// contract. 'Payouts' is a set of SiacoinOutputs describing how the payout of
// the contract is redistributed. It follows that the sum of these outputs
// must equal the original payout. The outputs can have any Value and
// UnlockHash, and do not need to match the ValidProofUnlockHash or
// MissedProofUnlockHash of the original FileContract.
type FileContractTermination struct {
ParentID FileContractID
TerminationConditions UnlockConditions
Payouts []SiacoinOutput
}
// A StorageProof fulfills a FileContract. The proof contains a specific
// segment of the file, along with a set of hashes from the file's Merkle
// tree. In combination, these can be used to prove that the segment came from
// the file. To prevent abuse, the segment must be chosen randomly, so the ID
// of block 'Start' - 1 is used as a seed value; see StorageProofSegment for
// the exact implementation.
//
// A transaction with a StorageProof cannot have any SiacoinOutputs,
// SiafundOutputs, or FileContracts. This is because a mundane reorg can
// invalidate the proof, and with it the rest of the transaction.
type StorageProof struct {
ParentID FileContractID
Segment [crypto.SegmentSize]byte
HashSet []crypto.Hash
}
// A SiafundInput consumes a SiafundOutput and adds the siafunds to the set of
// siafunds that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiafundInput struct {
ParentID SiafundOutputID
UnlockConditions UnlockConditions
}
// A SiafundOutput holds a volume of siafunds. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of a set of UnlockConditions that must be fulfilled
// in order to spend the output.
//
// When the SiafundOutput is spent, a SiacoinOutput is created, where:
//
// SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000
// SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash
//
// When a SiafundOutput is put into a transaction, the ClaimStart must always
// equal zero. While the transaction is being processed, the ClaimStart is set
// to the value of the SiafundPool.
type SiafundOutput struct {
Value Currency
UnlockHash UnlockHash
ClaimUnlockHash UnlockHash
ClaimStart Currency
}
// UnlockConditions are a set of conditions which must be met to execute
// certain actions, such as spending a SiacoinOutput or terminating a
// FileContract.
//
// The simplest requirement is that the block containing the UnlockConditions
// must have a height >= 'Timelock'.
//
// 'PublicKeys' specifies the set of keys that can be used to satisfy the
// UnlockConditions; of these, at least 'NumSignatures' unique keys must sign
// the transaction. The keys that do not need to use the same cryptographic
// algorithm.
//
// If 'NumSignatures' == 0, the UnlockConditions are effectively "anyone can
// unlock." If 'NumSignatures' > len('PublicKeys'), then the UnlockConditions
// cannot be fulfilled under any circumstances.
type UnlockConditions struct {
Timelock BlockHeight
PublicKeys []SiaPublicKey
NumSignatures uint64
}
// A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
// indicates the algorithm used for signing and verification. Unrecognized
// algorithms will always verify, which allows new algorithms to be added to
// the protocol via a soft-fork.
type SiaPublicKey struct {
Algorithm Specifier
Key string
}
// A TransactionSignature is a signature that is included in the transaction.
// The signature should correspond to a public key in one of the
// UnlockConditions of the transaction. This key is specified first by
// 'ParentID', which specifies the UnlockConditions, and then
// 'PublicKeyIndex', which indicates the key in the UnlockConditions. There
// are three types that use UnlockConditions: SiacoinInputs, SiafundInputs,
// and FileContractTerminations. Each of these types also references a
// ParentID, and this is the hash that 'ParentID' must match. The 'Timelock'
// prevents the signature from being used until a certain height.
// 'CoveredFields' indicates which parts of the transaction are being signed;
// see CoveredFields.
type TransactionSignature struct {
ParentID crypto.Hash
PublicKeyIndex uint64
Timelock BlockHeight
CoveredFields CoveredFields
Signature Signature
}
// CoveredFields indicates which fields in a transaction have been covered by
// the signature. (Note that the signature does not sign the fields
// themselves, but rather their combined hash; see SigHash.) Each slice
// corresponds to a slice in the Transaction type, indicating which indices of
// the slice have been signed. The indices must be valid, i.e. within the
// bounds of the slice. In addition, they must be sorted and unique.
//
// As a convenience, a signature of the entire transaction can be indicated by
// the 'WholeTransaction' field. If 'WholeTransaction' == true, all other
// fields must be empty (except for the Signatures field, since a signature
// cannot sign itself).
type CoveredFields struct {
WholeTransaction bool
SiacoinInputs []uint64
SiacoinOutputs []uint64
FileContracts []uint64
FileContractTerminations []uint64
StorageProofs []uint64
SiafundInputs []uint64
SiafundOutputs []uint64
MinerFees []uint64
ArbitraryData []uint64
Signatures []uint64
}
// CurrentTimestamp returns the current time as a Timestamp.
func CurrentTimestamp() Timestamp {
return Timestamp(time.Now().Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
func CalculateCoinbase(height BlockHeight) (c Currency) {
base := InitialCoinbase - uint64(height)
if base < MinimumCoinbase {
base = MinimumCoinbase
}
return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int.
func (t Target) Int() *big.Int | {
return new(big.Int).SetBytes(t[:])
} | identifier_body | |
types.go |
SiafundOutputs []uint64
MinerFees []uint64
ArbitraryData []uint64
Signatures []uint64
}
// CurrentTimestamp returns the current time as a Timestamp.
func CurrentTimestamp() Timestamp {
return Timestamp(time.Now().Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
func CalculateCoinbase(height BlockHeight) (c Currency) {
base := InitialCoinbase - uint64(height)
if base < MinimumCoinbase {
base = MinimumCoinbase
}
return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int.
func (t Target) Int() *big.Int {
return new(big.Int).SetBytes(t[:])
}
// Rat converts a Target to a big.Rat.
func (t Target) Rat() *big.Rat {
return new(big.Rat).SetInt(t.Int())
}
// Inverse returns the inverse of a Target as a big.Rat
func (t Target) Inverse() *big.Rat {
return new(big.Rat).Inv(t.Rat())
}
// IntToTarget converts a big.Int to a Target.
func IntToTarget(i *big.Int) (t Target) {
// i may overflow the maximum target.
// In the event of overflow, return the maximum.
if i.BitLen() > 256 {
return RootDepth
}
b := i.Bytes()
// need to preserve big-endianness
offset := len(t[:]) - len(b)
copy(t[offset:], b)
return
}
// RatToTarget converts a big.Rat to a Target.
func RatToTarget(r *big.Rat) Target {
// conversion to big.Int truncates decimal
i := new(big.Int).Div(r.Num(), r.Denom())
return IntToTarget(i)
}
// Tax returns the amount of Currency that will be taxed from fc.
func (fc FileContract) Tax() Currency {
return fc.Payout.MulFloat(SiafundPortion).RoundDown(SiafundCount)
}
// UnlockHash calculates the root hash of a Merkle tree of the
// UnlockConditions object. The leaves of this tree are formed by taking the
// hash of the timelock, the hash of the public keys (one leaf each), and the
// hash of the number of signatures. The keys are put in the middle because
// Timelock and NumSignatures are both low entropy fields; they can be
// protected by having random public keys next to them.
func (uc UnlockConditions) UnlockHash() UnlockHash {
tree := crypto.NewTree()
tree.PushObject(uc.Timelock)
for i := range uc.PublicKeys {
tree.PushObject(uc.PublicKeys[i])
}
tree.PushObject(uc.NumSignatures)
return UnlockHash(tree.Root())
}
// ID returns the ID of a Block, which is calculated by hashing the
// concatenation of the block's parent ID, nonce, and Merkle root.
func (b Block) ID() BlockID {
return BlockID(crypto.HashAll(
b.ParentID,
b.Nonce,
b.MerkleRoot(),
))
}
// CheckTarget returns true if the block's ID meets the given target.
func (b Block) CheckTarget(target Target) bool {
blockHash := b.ID()
return bytes.Compare(target[:], blockHash[:]) >= 0
}
// MerkleRoot calculates the Merkle root of a Block. The leaves of the Merkle
// tree are composed of the Timestamp, the miner outputs (one leaf per
// payout), and the transactions (one leaf per transaction).
func (b Block) MerkleRoot() crypto.Hash {
tree := crypto.NewTree()
tree.PushObject(b.Timestamp)
for _, payout := range b.MinerPayouts {
tree.PushObject(payout)
}
for _, txn := range b.Transactions {
tree.PushObject(txn)
}
return tree.Root()
}
// MinerPayoutID returns the ID of the miner payout at the given index, which
// is calculated by hashing the concatenation of the BlockID and the payout
// index.
func (b Block) MinerPayoutID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
b.ID(),
i,
))
}
// SiacoinOutputID returns the ID of a siacoin output at the given index,
// which is calculated by hashing the concatenation of the SiacoinOutput
// Specifier, all of the fields in the transaction (except the signatures),
// and output index.
func (t Transaction) SiacoinOutputID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierSiacoinOutput,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// FileContractID returns the ID of a file contract at the given index, which
// is calculated by hashing the concatenation of the FileContract Specifier,
// all of the fields in the transaction (except the signatures), and the
// contract index.
func (t Transaction) FileContractID(i int) FileContractID {
return FileContractID(crypto.HashAll(
SpecifierFileContract,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// FileContractTerminationPayoutID returns the ID of a file contract
// termination payout, given the index of the payout in the termination. The
// ID is calculated by hashing the concatenation of the
// FileContractTerminationPayout Specifier, the ID of the file contract being
// terminated, and the payout index.
func (fcid FileContractID) FileContractTerminationPayoutID(i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierFileContractTerminationPayout,
fcid,
i,
))
}
// StorageProofOutputID returns the ID of an output created by a file
// contract, given the status of the storage proof. The ID is calculating by
// hashing the concatenation of the StorageProofOutput Specifier, the ID of
// the file contract that the proof is for, a boolean indicating whether the
// proof was valid (true) or missed (false), and the index of the output
// within the file contract.
func (fcid FileContractID) StorageProofOutputID(proofValid bool, i int) SiacoinOutputID {
return SiacoinOutputID(crypto.HashAll(
SpecifierStorageProofOutput,
fcid,
proofValid,
i,
))
}
// SiafundOutputID returns the ID of a SiafundOutput at the given index, which
// is calculated by hashing the concatenation of the SiafundOutput Specifier,
// all of the fields in the transaction (except the signatures), and output
// index.
func (t Transaction) SiafundOutputID(i int) SiafundOutputID {
return SiafundOutputID(crypto.HashAll(
SpecifierSiafundOutput,
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
i,
))
}
// SiaClaimOutputID returns the ID of the SiacoinOutput that is created when
// the siafund output is spent. The ID is the hash the SiafundOutputID.
func (id SiafundOutputID) SiaClaimOutputID() SiacoinOutputID {
return SiacoinOutputID(crypto.HashObject(id))
}
// SigHash returns the hash of the fields in a transaction covered by a given
// signature. See CoveredFields for more details.
func (t Transaction) SigHash(i int) crypto.Hash {
cf := t.Signatures[i].CoveredFields
var signedData []byte
if cf.WholeTransaction {
signedData = encoding.MarshalAll(
t.SiacoinInputs,
t.SiacoinOutputs,
t.FileContracts,
t.FileContractTerminations,
t.StorageProofs,
t.SiafundInputs,
t.SiafundOutputs,
t.MinerFees,
t.ArbitraryData,
t.Signatures[i].ParentID,
t.Signatures[i].PublicKeyIndex,
t.Signatures[i].Timelock,
)
} else {
for _, input := range cf.SiacoinInputs {
signedData = append(signedData, encoding.Marshal(t.SiacoinInputs[input])...)
}
for _, output := range cf.SiacoinOutputs {
signedData = append(signedData, encoding.Marshal(t.SiacoinOutputs[output])...)
}
for _, contract := range cf.FileContracts | {
signedData = append(signedData, encoding.Marshal(t.FileContracts[contract])...)
} | conditional_block | |
types.go | SiacoinOutput
TerminationHash UnlockHash
}
// A FileContractTermination terminates a file contract. The ParentID
// specifies the contract being terminated, and the TerminationConditions are
// the conditions under which termination will be treated as valid. The hash
// of the TerminationConditions must match the TerminationHash in the
// contract. 'Payouts' is a set of SiacoinOutputs describing how the payout of
// the contract is redistributed. It follows that the sum of these outputs
// must equal the original payout. The outputs can have any Value and
// UnlockHash, and do not need to match the ValidProofUnlockHash or
// MissedProofUnlockHash of the original FileContract.
type FileContractTermination struct {
ParentID FileContractID
TerminationConditions UnlockConditions
Payouts []SiacoinOutput
}
// A StorageProof fulfills a FileContract. The proof contains a specific
// segment of the file, along with a set of hashes from the file's Merkle
// tree. In combination, these can be used to prove that the segment came from
// the file. To prevent abuse, the segment must be chosen randomly, so the ID
// of block 'Start' - 1 is used as a seed value; see StorageProofSegment for
// the exact implementation.
//
// A transaction with a StorageProof cannot have any SiacoinOutputs,
// SiafundOutputs, or FileContracts. This is because a mundane reorg can
// invalidate the proof, and with it the rest of the transaction.
type StorageProof struct {
ParentID FileContractID
Segment [crypto.SegmentSize]byte
HashSet []crypto.Hash
}
// A SiafundInput consumes a SiafundOutput and adds the siafunds to the set of
// siafunds that can be spent in the transaction. The ParentID points to the
// output that is getting consumed, and the UnlockConditions contain the rules
// for spending the output. The UnlockConditions must match the UnlockHash of
// the output.
type SiafundInput struct {
ParentID SiafundOutputID
UnlockConditions UnlockConditions
}
// A SiafundOutput holds a volume of siafunds. Outputs must be spent
// atomically; that is, they must all be spent in the same transaction. The
// UnlockHash is the hash of a set of UnlockConditions that must be fulfilled
// in order to spend the output.
//
// When the SiafundOutput is spent, a SiacoinOutput is created, where:
//
// SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000
// SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash
//
// When a SiafundOutput is put into a transaction, the ClaimStart must always
// equal zero. While the transaction is being processed, the ClaimStart is set
// to the value of the SiafundPool.
type SiafundOutput struct {
Value Currency
UnlockHash UnlockHash
ClaimUnlockHash UnlockHash
ClaimStart Currency
}
// UnlockConditions are a set of conditions which must be met to execute
// certain actions, such as spending a SiacoinOutput or terminating a
// FileContract.
//
// The simplest requirement is that the block containing the UnlockConditions
// must have a height >= 'Timelock'.
//
// 'PublicKeys' specifies the set of keys that can be used to satisfy the
// UnlockConditions; of these, at least 'NumSignatures' unique keys must sign
// the transaction. The keys that do not need to use the same cryptographic
// algorithm.
//
// If 'NumSignatures' == 0, the UnlockConditions are effectively "anyone can
// unlock." If 'NumSignatures' > len('PublicKeys'), then the UnlockConditions
// cannot be fulfilled under any circumstances.
type UnlockConditions struct {
Timelock BlockHeight
PublicKeys []SiaPublicKey
NumSignatures uint64
}
// A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
// indicates the algorithm used for signing and verification. Unrecognized
// algorithms will always verify, which allows new algorithms to be added to
// the protocol via a soft-fork.
type SiaPublicKey struct {
Algorithm Specifier
Key string
}
// A TransactionSignature is a signature that is included in the transaction.
// The signature should correspond to a public key in one of the
// UnlockConditions of the transaction. This key is specified first by
// 'ParentID', which specifies the UnlockConditions, and then
// 'PublicKeyIndex', which indicates the key in the UnlockConditions. There
// are three types that use UnlockConditions: SiacoinInputs, SiafundInputs,
// and FileContractTerminations. Each of these types also references a
// ParentID, and this is the hash that 'ParentID' must match. The 'Timelock'
// prevents the signature from being used until a certain height.
// 'CoveredFields' indicates which parts of the transaction are being signed;
// see CoveredFields.
type TransactionSignature struct {
ParentID crypto.Hash
PublicKeyIndex uint64
Timelock BlockHeight
CoveredFields CoveredFields
Signature Signature
}
// CoveredFields indicates which fields in a transaction have been covered by
// the signature. (Note that the signature does not sign the fields
// themselves, but rather their combined hash; see SigHash.) Each slice
// corresponds to a slice in the Transaction type, indicating which indices of
// the slice have been signed. The indices must be valid, i.e. within the
// bounds of the slice. In addition, they must be sorted and unique.
//
// As a convenience, a signature of the entire transaction can be indicated by
// the 'WholeTransaction' field. If 'WholeTransaction' == true, all other
// fields must be empty (except for the Signatures field, since a signature
// cannot sign itself).
type CoveredFields struct {
WholeTransaction bool
SiacoinInputs []uint64
SiacoinOutputs []uint64
FileContracts []uint64
FileContractTerminations []uint64
StorageProofs []uint64
SiafundInputs []uint64
SiafundOutputs []uint64
MinerFees []uint64
ArbitraryData []uint64
Signatures []uint64
}
// CurrentTimestamp returns the current time as a Timestamp.
func CurrentTimestamp() Timestamp {
return Timestamp(time.Now().Unix())
}
// CalculateCoinbase calculates the coinbase for a given height. The coinbase
// equation is:
//
// coinbase := max(InitialCoinbase - height, MinimumCoinbase) * CoinbaseAugment
func CalculateCoinbase(height BlockHeight) (c Currency) {
base := InitialCoinbase - uint64(height)
if base < MinimumCoinbase {
base = MinimumCoinbase
}
return NewCurrency64(base).Mul(NewCurrency(CoinbaseAugment))
}
// Int converts a Target to a big.Int.
func (t Target) Int() *big.Int {
return new(big.Int).SetBytes(t[:])
}
// Rat converts a Target to a big.Rat.
func (t Target) Rat() *big.Rat {
return new(big.Rat).SetInt(t.Int())
}
// Inverse returns the inverse of a Target as a big.Rat
func (t Target) Inverse() *big.Rat {
return new(big.Rat).Inv(t.Rat())
}
// IntToTarget converts a big.Int to a Target.
func IntToTarget(i *big.Int) (t Target) {
// i may overflow the maximum target.
// In the event of overflow, return the maximum.
if i.BitLen() > 256 {
return RootDepth
}
b := i.Bytes()
// need to preserve big-endianness
offset := len(t[:]) - len(b)
copy(t[offset:], b)
return
}
// RatToTarget converts a big.Rat to a Target.
func RatToTarget(r *big.Rat) Target {
// conversion to big.Int truncates decimal
i := new(big.Int).Div(r.Num(), r.Denom())
return IntToTarget(i)
}
// Tax returns the amount of Currency that will be taxed from fc.
func (fc FileContract) Tax() Currency {
return fc.Payout.MulFloat(SiafundPortion).RoundDown(SiafundCount)
}
// UnlockHash calculates the root hash of a Merkle tree of the
// UnlockConditions object. The leaves of this tree are formed by taking the
// hash of the timelock, the hash of the public keys (one leaf each), and the
// hash of the number of signatures. The keys are put in the middle because
// Timelock and NumSignatures are both low entropy fields; they can be
// protected by having random public keys next to them.
func (uc UnlockConditions) UnlockHash() UnlockHash {
tree := crypto.NewTree()
tree.PushObject(uc.Timelock)
for i := range uc.PublicKeys {
tree.PushObject(uc.PublicKeys[i])
}
tree.PushObject(uc.NumSignatures)
return UnlockHash(tree.Root())
}
// ID returns the ID of a Block, which is calculated by hashing the
// concatenation of the block's parent ID, nonce, and Merkle root.
func (b Block) | ID | identifier_name | |
SurveySimulator.py |
self._a = value
#----------- e
@property
def e(self):
"""I'm the e property."""
return self._e
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def inc(self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-* | raise ValueError('Bad a value. Ensure 0.0 < a < 10E6') | conditional_block | |
SurveySimulator.py | raise ValueError('Bad a value. Ensure 0.0 < a < 10E6')
self._a = value
#----------- e
@property
def e(self):
"""I'm the e property."""
return self._e
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def inc(self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable | if not 0.0 <= value <= 10E6: | random_line_split | |
SurveySimulator.py | a = value
#----------- e
@property
def e(self):
"""I'm the e property."""
return self._e
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def | (self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* | inc | identifier_name |
SurveySimulator.py | a = value
#----------- e
@property
def e(self):
|
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def inc(self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* | """I'm the e property."""
return self._e | identifier_body |
simanalysis.py | /np.sum(p)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
"""
Calculate the relative proportion of contacts of the tracers with binding
sites compared with non-binding sites. As usual user should supply
equilibration time, sampling time, and contact threshold value.
"""
# select polymer, tracers, and binding sites
polymer = sim.u.select_atoms (polymer_text)
tracers = sim.u.select_atoms (tracers_text)
bss = sim.u.select_atoms (bindingsites_text)
# select binding site indices
bs_n = bss.n_atoms
bs_idx = bss.indices
# select non-binding site indices
polymer_idx = polymer.indices
nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
nbs_n = nbs_idx.size
# evaluate contacts with binding sites and non-binding sites for each
# independent simulation snapshot
c = []
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
contacts = d<threshold
cB = np.sum (contacts[bs_idx]).astype('float')
cA = np.sum (contacts[nbs_idx]).astype('float')
if cA != 0 :
c.append ((cB/cA) / (float(bs_n)/nbs_n))
return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
"""
Perform a simple fit of the supplied time-dependent MSD, using a linear
regression of the logarithms of the values. User must supply the conversion
factor from time to real time and from length to real length. Also, user
must supply the cutoff value: from there on the values will be considered.
This is because the long-time behaviour is generally what matters really.
"""
# prepare the values to fit: exclude the first value because it is zero
t = np.arange(msd.size)*delta_t
x = np.log(t[cutoff:])
y = np.log(msd[cutoff:]*scale_l**2)
# perform fit to y = ax + b with their errors
b,a,db,da = mbt.linear_regression (x,y,0.99)
# now convert the value of b into a diffusion coefficient
D = np.exp(b)/6.0
dD = np.exp(db)/6.0
return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
"""
Calculate the mean square displacement of the particles defined by
'particles_text' in simulation sim, using sampling tsample and equilibration
time teq. Returns the matrix corresponding to the mean square displacement
of each particle, along with a matrix corresponding to the variance in the
estimate of this quantity.
"""
u = sim.u
particles = u.select_atoms (particles_text)
nparticles = particles.n_atoms
nslice = traj_nslice (u,teq,tsample)
# initialize the matrix containing all the positions
# of the particles at all the sampling frames
particles_pos = np.zeros ((nslice,nparticles,3))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
particles_pos[i,:,:] = particles.positions
# now initialize the Delta matrix, which contains the
# squared differences between the particles' positions
# at different time delays
Nt = int(nslice/2)
Delta = np.zeros((nparticles,Nt,Nt))
for delay in xrange(1,Nt+1) :
for t0 in xrange (Nt) :
t1 = t0 + delay
pos1 = particles_pos[t1,:,:]
pos0 = particles_pos[t0,:,:]
Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)
# return the matrices of MSD and its variance
return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
"""
Calculate the minimum distance between the atoms defined in sel1 and the
atoms defined in sel2, as a function of time. Returns a matrix that contains
the minimum distance for each atom defined in sel1. As usual user should
supply equilibration time, sampling time, and contact threshold value.
"""
# define atom selections
sel1 = sim.u.select_atoms (sel1_text)
sel2 = sim.u.select_atoms (sel2_text)
# get number of atoms in selection 1
natoms = sel1.n_atoms
nslice = traj_nslice (sim.u,teq,tsample)
dmin = np.zeros((natoms,nslice))
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (sel1.positions,sel2.positions,
box=ts.dimensions)
dmin[:,i] = d.min(axis=1)
return dmin
def particle_images (sim,frame_id) :
"""
Get the image index of all particles in simulation, at the frame 'frame_id'
"""
# get positions of all particles: define first the atom selection, then jump to
# the user-requested trajectory frame, get the box dimensions (currently works
# only for orthorhombic boxes, then calculate the image indices
atoms = sim.u.select_atoms ('all')
ts = sim.u.trajectory[frame_id]
L = ts.dimensions[:3]
pos = atoms.positions + L/2.
return pos//L
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
Calculate the matrix that represents the number of times that the tracers
(defined by 'tracer_text') jump from one site to another site of the polymer
(defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
excluding the first 'teq' time frames. Contact between a tracer and the
polymer is defined by the distance being smaller than 'threshold'.
"""
# define polymer and tracers
u = sim.u
polymer = u.select_atoms(polymer_text)
tracers = u.select_atoms(tracer_text)
n_polymer = polymer.n_atoms
n_tracers = tracers.n_atoms
# initialize jumping matrix and first distance matrix d_prev
J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
ts = u.trajectory [teq]
d_prev = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_prev = d_prev<threshold
for ts in u.trajectory [teq::tsample] :
# get distance matrix at current time step
d_next = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_next = d_next<threshold
# get jumps of all tracers and add it to the jumping matrix
for i in xrange (n_tracers) :
t_prev = D_prev [:,i]
t_next = D_next [:,i].reshape ((n_polymer,1))
t = t_prev * t_next
J += t
D_prev = D_next.copy()
return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
For the simulation 'sim', calculate the matrix of binding events of the
polymer and the tracers. Returns a contacts matrix of the shape
(ntracers,nslice,npolymer).
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
tracers = u.select_atoms (tracer_text)
ntracers = tracers.n_atoms
npolymer = polymer.n_atoms
nslice = mbt.traj_nslice(u,teq,tsample)
C = np.zeros((ntracers,nslice,npolymer),dtype=bool)
for i,ts in enumerate(u.trajectory [teq::tsample]) :
d = distance_array (tracers.positions,polymer.positions,
box=ts.dimensions)
c = d<threshold
C[:,i,:] = c
return C
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :
"""
Calculate the matrix of average intra-polymer distances. User must supply
the parameters teq, tsample and threshold.
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
N = polymer.n_atoms
nslice = mbt.traj_nslice (u,teq,tsample)
d = np.zeros((N,N))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
this_d = distance_array(polymer.positions,
polymer.positions,
box=ts.dimensions)
d = mbt.new_average(i,d,this_d)
return d
def | (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
# define DKL(t) vector
| DKL_t | identifier_name |
simanalysis.py | /np.sum(p)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
"""
Calculate the relative proportion of contacts of the tracers with binding
sites compared with non-binding sites. As usual user should supply
equilibration time, sampling time, and contact threshold value.
"""
# select polymer, tracers, and binding sites
polymer = sim.u.select_atoms (polymer_text)
tracers = sim.u.select_atoms (tracers_text)
bss = sim.u.select_atoms (bindingsites_text)
# select binding site indices
bs_n = bss.n_atoms
bs_idx = bss.indices
# select non-binding site indices
polymer_idx = polymer.indices
nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
nbs_n = nbs_idx.size
# evaluate contacts with binding sites and non-binding sites for each
# independent simulation snapshot
c = []
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
contacts = d<threshold
cB = np.sum (contacts[bs_idx]).astype('float')
cA = np.sum (contacts[nbs_idx]).astype('float')
if cA != 0 :
|
return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
"""
Perform a simple fit of the supplied time-dependent MSD, using a linear
regression of the logarithms of the values. User must supply the conversion
factor from time to real time and from length to real length. Also, user
must supply the cutoff value: from there on the values will be considered.
This is because the long-time behaviour is generally what matters really.
"""
# prepare the values to fit: exclude the first value because it is zero
t = np.arange(msd.size)*delta_t
x = np.log(t[cutoff:])
y = np.log(msd[cutoff:]*scale_l**2)
# perform fit to y = ax + b with their errors
b,a,db,da = mbt.linear_regression (x,y,0.99)
# now convert the value of b into a diffusion coefficient
D = np.exp(b)/6.0
dD = np.exp(db)/6.0
return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
"""
Calculate the mean square displacement of the particles defined by
'particles_text' in simulation sim, using sampling tsample and equilibration
time teq. Returns the matrix corresponding to the mean square displacement
of each particle, along with a matrix corresponding to the variance in the
estimate of this quantity.
"""
u = sim.u
particles = u.select_atoms (particles_text)
nparticles = particles.n_atoms
nslice = traj_nslice (u,teq,tsample)
# initialize the matrix containing all the positions
# of the particles at all the sampling frames
particles_pos = np.zeros ((nslice,nparticles,3))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
particles_pos[i,:,:] = particles.positions
# now initialize the Delta matrix, which contains the
# squared differences between the particles' positions
# at different time delays
Nt = int(nslice/2)
Delta = np.zeros((nparticles,Nt,Nt))
for delay in xrange(1,Nt+1) :
for t0 in xrange (Nt) :
t1 = t0 + delay
pos1 = particles_pos[t1,:,:]
pos0 = particles_pos[t0,:,:]
Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)
# return the matrices of MSD and its variance
return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
"""
Calculate the minimum distance between the atoms defined in sel1 and the
atoms defined in sel2, as a function of time. Returns a matrix that contains
the minimum distance for each atom defined in sel1. As usual user should
supply equilibration time, sampling time, and contact threshold value.
"""
# define atom selections
sel1 = sim.u.select_atoms (sel1_text)
sel2 = sim.u.select_atoms (sel2_text)
# get number of atoms in selection 1
natoms = sel1.n_atoms
nslice = traj_nslice (sim.u,teq,tsample)
dmin = np.zeros((natoms,nslice))
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (sel1.positions,sel2.positions,
box=ts.dimensions)
dmin[:,i] = d.min(axis=1)
return dmin
def particle_images (sim,frame_id) :
"""
Get the image index of all particles in simulation, at the frame 'frame_id'
"""
# get positions of all particles: define first the atom selection, then jump to
# the user-requested trajectory frame, get the box dimensions (currently works
# only for orthorhombic boxes, then calculate the image indices
atoms = sim.u.select_atoms ('all')
ts = sim.u.trajectory[frame_id]
L = ts.dimensions[:3]
pos = atoms.positions + L/2.
return pos//L
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
Calculate the matrix that represents the number of times that the tracers
(defined by 'tracer_text') jump from one site to another site of the polymer
(defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
excluding the first 'teq' time frames. Contact between a tracer and the
polymer is defined by the distance being smaller than 'threshold'.
"""
# define polymer and tracers
u = sim.u
polymer = u.select_atoms(polymer_text)
tracers = u.select_atoms(tracer_text)
n_polymer = polymer.n_atoms
n_tracers = tracers.n_atoms
# initialize jumping matrix and first distance matrix d_prev
J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
ts = u.trajectory [teq]
d_prev = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_prev = d_prev<threshold
for ts in u.trajectory [teq::tsample] :
# get distance matrix at current time step
d_next = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_next = d_next<threshold
# get jumps of all tracers and add it to the jumping matrix
for i in xrange (n_tracers) :
t_prev = D_prev [:,i]
t_next = D_next [:,i].reshape ((n_polymer,1))
t = t_prev * t_next
J += t
D_prev = D_next.copy()
return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
    """
    Build the boolean history of polymer-tracer binding events for the
    simulation 'sim'. A contact is recorded whenever a tracer-monomer
    distance falls below 'threshold'. The trajectory is sampled every
    'tsample' frames after discarding the first 'teq' frames.

    Returns a boolean array of shape (ntracers,nslice,npolymer).
    """
    universe = sim.u
    poly_sel = universe.select_atoms (polymer_text)
    tracer_sel = universe.select_atoms (tracer_text)
    n_frames = mbt.traj_nslice (universe,teq,tsample)
    # one boolean slab per sampled frame: rows are tracers, columns monomers
    bound = np.zeros ((tracer_sel.n_atoms,n_frames,poly_sel.n_atoms),dtype=bool)
    for frame_idx,ts in enumerate (universe.trajectory[teq::tsample]) :
        dists = distance_array (tracer_sel.positions,poly_sel.positions,
                                box=ts.dimensions)
        bound[:,frame_idx,:] = dists < threshold
    return bound
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :
    """
    Calculate the matrix of average intra-polymer distances.

    Parameters
    ----------
    sim : simulation wrapper exposing an MDAnalysis universe as 'sim.u'
    polymer_text : str
        Atom selection string defining the polymer.
    teq : int
        Number of initial frames discarded as equilibration.
    tsample : int
        Sampling interval, in frames.
    threshold : float, optional
        Unused by this calculation; kept for backward compatibility with
        existing callers.

    Returns
    -------
    numpy.ndarray of shape (N,N): the time-averaged pairwise distance
    matrix between the N polymer atoms.
    """
    u = sim.u
    polymer = u.select_atoms (polymer_text)
    N = polymer.n_atoms
    # running average of the pairwise distance matrix; the previous
    # implementation also counted the sampled frames up front, which cost a
    # full extra pass over the trajectory without the count ever being used
    d = np.zeros((N,N))
    for i,ts in enumerate(u.trajectory[teq::tsample]) :
        this_d = distance_array(polymer.positions,
                                polymer.positions,
                                box=ts.dimensions)
        # incremental mean: avoids storing every frame's distance matrix
        d = mbt.new_average(i,d,this_d)
    return d
def DKL_t (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :
# define DKL(t) vector
| c.append ((cB/cA) / (float(bs_n)/nbs_n)) | conditional_block |
simanalysis.py |
def ps (H) :
"""
Calculate the normalized probability of contact between a monomer and all
others as a function of the linear distance s.
"""
p = np.array ([np.mean (np.diagonal (H, offset=k))
for k in range (H.shape[0])])
return p/np.sum(p)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
"""
Calculate the relative proportion of contacts of the tracers with binding
sites compared with non-binding sites. As usual user should supply
equilibration time, sampling time, and contact threshold value.
"""
# select polymer, tracers, and binding sites
polymer = sim.u.select_atoms (polymer_text)
tracers = sim.u.select_atoms (tracers_text)
bss = sim.u.select_atoms (bindingsites_text)
# select binding site indices
bs_n = bss.n_atoms
bs_idx = bss.indices
# select non-binding site indices
polymer_idx = polymer.indices
nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
nbs_n = nbs_idx.size
# evaluate contacts with binding sites and non-binding sites for each
# independent simulation snapshot
c = []
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
contacts = d<threshold
cB = np.sum (contacts[bs_idx]).astype('float')
cA = np.sum (contacts[nbs_idx]).astype('float')
if cA != 0 :
c.append ((cB/cA) / (float(bs_n)/nbs_n))
return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
"""
Perform a simple fit of the supplied time-dependent MSD, using a linear
regression of the logarithms of the values. User must supply the conversion
factor from time to real time and from length to real length. Also, user
must supply the cutoff value: from there on the values will be considered.
This is because the long-time behaviour is generally what matters really.
"""
# prepare the values to fit: exclude the first value because it is zero
t = np.arange(msd.size)*delta_t
x = np.log(t[cutoff:])
y = np.log(msd[cutoff:]*scale_l**2)
# perform fit to y = ax + b with their errors
b,a,db,da = mbt.linear_regression (x,y,0.99)
# now convert the value of b into a diffusion coefficient
D = np.exp(b)/6.0
dD = np.exp(db)/6.0
return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
"""
Calculate the mean square displacement of the particles defined by
'particles_text' in simulation sim, using sampling tsample and equilibration
time teq. Returns the matrix corresponding to the mean square displacement
of each particle, along with a matrix corresponding to the variance in the
estimate of this quantity.
"""
u = sim.u
particles = u.select_atoms (particles_text)
nparticles = particles.n_atoms
nslice = traj_nslice (u,teq,tsample)
# initialize the matrix containing all the positions
# of the particles at all the sampling frames
particles_pos = np.zeros ((nslice,nparticles,3))
for i,ts in enumerate(u.trajectory[teq::tsample]) :
particles_pos[i,:,:] = particles.positions
# now initialize the Delta matrix, which contains the
# squared differences between the particles' positions
# at different time delays
Nt = int(nslice/2)
Delta = np.zeros((nparticles,Nt,Nt))
for delay in xrange(1,Nt+1) :
for t0 in xrange (Nt) :
t1 = t0 + delay
pos1 = particles_pos[t1,:,:]
pos0 = particles_pos[t0,:,:]
Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)
# return the matrices of MSD and its variance
return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
"""
Calculate the minimum distance between the atoms defined in sel1 and the
atoms defined in sel2, as a function of time. Returns a matrix that contains
the minimum distance for each atom defined in sel1. As usual user should
supply equilibration time, sampling time, and contact threshold value.
"""
# define atom selections
sel1 = sim.u.select_atoms (sel1_text)
sel2 = sim.u.select_atoms (sel2_text)
# get number of atoms in selection 1
natoms = sel1.n_atoms
nslice = traj_nslice (sim.u,teq,tsample)
dmin = np.zeros((natoms,nslice))
for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
d = distance_array (sel1.positions,sel2.positions,
box=ts.dimensions)
dmin[:,i] = d.min(axis=1)
return dmin
def particle_images (sim,frame_id) :
"""
Get the image index of all particles in simulation, at the frame 'frame_id'
"""
# get positions of all particles: define first the atom selection, then jump to
# the user-requested trajectory frame, get the box dimensions (currently works
# only for orthorhombic boxes, then calculate the image indices
atoms = sim.u.select_atoms ('all')
ts = sim.u.trajectory[frame_id]
L = ts.dimensions[:3]
pos = atoms.positions + L/2.
return pos//L
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
Calculate the matrix that represents the number of times that the tracers
(defined by 'tracer_text') jump from one site to another site of the polymer
(defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
excluding the first 'teq' time frames. Contact between a tracer and the
polymer is defined by the distance being smaller than 'threshold'.
"""
# define polymer and tracers
u = sim.u
polymer = u.select_atoms(polymer_text)
tracers = u.select_atoms(tracer_text)
n_polymer = polymer.n_atoms
n_tracers = tracers.n_atoms
# initialize jumping matrix and first distance matrix d_prev
J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
ts = u.trajectory [teq]
d_prev = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_prev = d_prev<threshold
for ts in u.trajectory [teq::tsample] :
# get distance matrix at current time step
d_next = distance_array (polymer.positions,tracers.positions,
box=ts.dimensions)
D_next = d_next<threshold
# get jumps of all tracers and add it to the jumping matrix
for i in xrange (n_tracers) :
t_prev = D_prev [:,i]
t_next = D_next [:,i].reshape ((n_polymer,1))
t = t_prev * t_next
J += t
D_prev = D_next.copy()
return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
For the simulation 'sim', calculate the matrix of binding events of the
polymer and the tracers. Returns a contacts matrix of the shape
(ntracers,nslice,npolymer).
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
tracers = u.select_atoms (tracer_text)
ntracers = tracers.n_atoms
npolymer = polymer.n_atoms
nslice = mbt.traj_nslice(u,teq,tsample)
C = np.zeros((ntracers,nslice,npolymer),dtype=bool)
for i,ts in enumerate(u.trajectory [teq::tsample]) :
d = distance_array (tracers.positions,polymer.positions,
box=ts.dimensions)
c = d<threshold
C[:,i,:] = c
return C
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :
"""
Calculate the matrix of average intra-polymer distances. User must supply
the parameters teq, tsample and threshold.
"""
u = sim | """
Calculate the Pearson correlation coefficient between the row sum of the
given Hi-C matrix and the given ChIP-seq profile.
"""
hic_rowsum = np.sum(hic,axis=1)/float(np.sum(hic))
return np.corrcoef(hic_rowsum,chipseq)[0,1]**2 | identifier_body | |
simanalysis.py | import mybiotools as mbt
def traj_nslice (u,teq,tsample) :
    """
    Count the frames that survive the sampling scheme: the trajectory of
    universe 'u' is read from frame 'teq' onwards, keeping one frame every
    'tsample' frames.
    """
    # count by iteration (http://stackoverflow.com/a/7223557): trajectory
    # slices are lazy readers, so len() is not generally available
    frames = 0
    for _ in u.trajectory[teq::tsample] :
        frames += 1
    return frames
def hic_chipseq_r2 (hic, chipseq) :
    """
    Compute the squared Pearson correlation between the normalized row sums
    of the Hi-C contact matrix 'hic' and the ChIP-seq profile 'chipseq'.
    """
    # per-monomer contact fraction: row totals normalized by the grand
    # total (normalization does not change the correlation coefficient)
    contact_fraction = np.sum(hic,axis=1)/float(np.sum(hic))
    r = np.corrcoef(contact_fraction,chipseq)[0,1]
    return r**2
def ps (H) :
    """
    Contact probability as a function of the linear (genomic) separation s:
    for each offset s, average the s-th diagonal of the contact matrix H,
    then normalize so that the probabilities sum to one.
    """
    n = H.shape[0]
    raw = np.empty (n)
    for s in range (n) :
        raw[s] = np.mean (np.diagonal (H, offset=s))
    return raw/np.sum(raw)
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :
    """
    Calculate the relative proportion of contacts of the tracers with binding
    sites compared with non-binding sites. As usual user should supply
    equilibration time, sampling time, and contact threshold value.

    Returns the mean over the sampled frames of the enrichment ratio
    (binding-site contacts / non-binding-site contacts), rescaled by the
    ratio of binding to non-binding site counts, so that a value of 1.0
    means no preference for binding sites.
    """
    # select polymer, tracers, and binding sites
    polymer = sim.u.select_atoms (polymer_text)
    tracers = sim.u.select_atoms (tracers_text)
    bss = sim.u.select_atoms (bindingsites_text)
    # select binding site indices
    bs_n = bss.n_atoms
    bs_idx = bss.indices
    # select non-binding site indices
    polymer_idx = polymer.indices
    nbs_idx = np.setdiff1d (polymer_idx,bs_idx)
    nbs_n = nbs_idx.size
    # evaluate contacts with binding sites and non-binding sites for each
    # independent simulation snapshot
    c = []
    for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :
        # 'contacts' has shape (n_polymer, n_tracers): rows are polymer atoms
        d = distance_array (polymer.positions,tracers.positions,
                            box=ts.dimensions)
        contacts = d<threshold
        # NOTE(review): bs_idx/nbs_idx are universe atom indices used to
        # index the rows of 'contacts'; this assumes the polymer atoms are
        # the first atoms of the universe (0-based, contiguous) - TODO confirm
        cB = np.sum (contacts[bs_idx]).astype('float')
        cA = np.sum (contacts[nbs_idx]).astype('float')
        # frames with zero non-binding contacts are skipped to avoid a
        # division by zero
        if cA != 0 :
            c.append ((cB/cA) / (float(bs_n)/nbs_n))
    # NOTE(review): if no frame contributed, np.mean receives an empty
    # array and returns nan (with a RuntimeWarning)
    return np.mean(np.array(c))
def fit_msd (msd,cutoff,delta_t,scale_l) :
    """
    Perform a simple fit of the supplied time-dependent MSD, using a linear
    regression of the logarithms of the values. User must supply the conversion
    factor from time to real time and from length to real length. Also, user
    must supply the cutoff value: from there on the values will be considered.
    This is because the long-time behaviour is generally what matters really.

    Returns (a, da, D, dD): the fitted log-log slope with its error, and a
    diffusion coefficient derived from the intercept with its error.
    NOTE(review): cutoff must be >= 1, otherwise t[0] == 0 makes np.log
    diverge (the first MSD value is zero by construction).
    """
    # prepare the values to fit: exclude the first value because it is zero
    t = np.arange(msd.size)*delta_t
    x = np.log(t[cutoff:])
    y = np.log(msd[cutoff:]*scale_l**2)
    # perform fit to y = ax + b with their errors
    # (0.99 is presumably a confidence level passed to the regression -
    # TODO confirm against mbt.linear_regression)
    b,a,db,da = mbt.linear_regression (x,y,0.99)
    # now convert the value of b into a diffusion coefficient
    # (MSD = 6 D t for free 3D diffusion, hence the factor 6)
    D = np.exp(b)/6.0
    # NOTE(review): if db is the absolute error on b, standard error
    # propagation would give dD = D*db rather than exp(db)/6 - verify
    dD = np.exp(db)/6.0
    return a,da,D,dD
def msd_t (sim,particles_text,teq,tsample) :
    """
    Calculate the mean square displacement of the particles defined by
    'particles_text' in simulation sim, using sampling tsample and
    equilibration time teq.

    Returns
    -------
    (msd, msd_var) : two arrays of shape (nparticles, nslice//2) holding,
    for each particle and each time delay, the mean over time origins of
    the squared displacement and the variance of that estimate.
    """
    u = sim.u
    particles = u.select_atoms (particles_text)
    nparticles = particles.n_atoms
    nslice = traj_nslice (u,teq,tsample)
    # gather the particle positions at every sampled frame
    particles_pos = np.zeros ((nslice,nparticles,3))
    for i,ts in enumerate(u.trajectory[teq::tsample]) :
        particles_pos[i,:,:] = particles.positions
    # Delta[p,delay-1,t0] is the squared displacement of particle p between
    # frames t0 and t0+delay; only the first half of the frames serve as
    # time origins, so every delay has the same number of samples
    # (range instead of the Python-2-only xrange: equivalent here and keeps
    # the module importable under Python 3)
    Nt = int(nslice/2)
    Delta = np.zeros((nparticles,Nt,Nt))
    for delay in range(1,Nt+1) :
        # vectorized over time origins, replacing the former per-origin
        # Python loop: displacement between each frame t0 in [0,Nt) and the
        # frame delayed by 'delay' (delay+Nt <= nslice, so the slice is safe)
        disp = particles_pos[delay:delay+Nt,:,:] - particles_pos[:Nt,:,:]
        Delta[:,delay-1,:] = np.sum(disp**2,axis=2).T
    # average over time origins; the variance quantifies the statistical
    # uncertainty of the MSD estimate
    return np.mean(Delta,axis=2),np.var(Delta,axis=2)
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :
    """
    Time evolution of the minimum distance between two atom selections. For
    every sampled frame, each atom of the first selection is assigned its
    distance to the closest atom of the second selection.

    Returns an array of shape (natoms_sel1, nslice).
    """
    # define atom selections
    group_a = sim.u.select_atoms (sel1_text)
    group_b = sim.u.select_atoms (sel2_text)
    n_frames = traj_nslice (sim.u,teq,tsample)
    closest = np.zeros((group_a.n_atoms,n_frames))
    for frame_idx,ts in enumerate(sim.u.trajectory[teq::tsample]) :
        pair_d = distance_array (group_a.positions,group_b.positions,
                                 box=ts.dimensions)
        # for each atom of the first selection, keep the nearest partner
        closest[:,frame_idx] = pair_d.min(axis=1)
    return closest
def particle_images (sim,frame_id) :
    """
    Return the periodic-image index of every particle at frame 'frame_id'.
    Positions are shifted by half a box so the primary cell maps to image
    zero, then floor-divided by the box edge lengths. Works only for
    orthorhombic boxes.
    """
    everything = sim.u.select_atoms ('all')
    frame = sim.u.trajectory[frame_id]
    box_edges = frame.dimensions[:3]
    shifted = everything.positions + box_edges/2.
    # integer image index along each of the three axes
    return shifted//box_edges
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :
    """
    Calculate the matrix that represents the number of times that the tracers
    (defined by 'tracer_text') jump from one site to another site of the polymer
    (defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample',
    excluding the first 'teq' time frames. Contact between a tracer and the
    polymer is defined by the distance being smaller than 'threshold'.

    Returns an (n_polymer, n_polymer) int32 matrix J where J[j,k] counts the
    sampled transitions of any tracer from contact with site k to contact
    with site j. NOTE: uses xrange, i.e. this module targets Python 2.
    """
    # define polymer and tracers
    u = sim.u
    polymer = u.select_atoms(polymer_text)
    tracers = u.select_atoms(tracer_text)
    n_polymer = polymer.n_atoms
    n_tracers = tracers.n_atoms
    # initialize jumping matrix and first distance matrix d_prev
    J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)
    ts = u.trajectory [teq]
    d_prev = distance_array (polymer.positions,tracers.positions,
                             box=ts.dimensions)
    D_prev = d_prev<threshold
    # NOTE(review): the loop below starts again at frame teq, so its first
    # iteration compares frame teq with itself and records the initial
    # contacts as zero-time "jumps" - confirm this is intended
    for ts in u.trajectory [teq::tsample] :
        # get distance matrix at current time step
        d_next = distance_array (polymer.positions,tracers.positions,
                                 box=ts.dimensions)
        D_next = d_next<threshold
        # get jumps of all tracers and add it to the jumping matrix
        for i in xrange (n_tracers) :
            # broadcast outer product: (n_polymer,) * (n_polymer,1) yields
            # an (n_polymer,n_polymer) matrix, True where tracer i touches
            # site j now and touched site k at the previous sampled frame
            t_prev = D_prev [:,i]
            t_next = D_next [:,i].reshape ((n_polymer,1))
            t = t_prev * t_next
            J += t
        D_prev = D_next.copy()
    return J
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :
"""
For the simulation 'sim', calculate the matrix of binding events of the
polymer and the tracers. Returns a contacts matrix of the shape
(ntracers,nslice,npolymer).
"""
u = sim.u
polymer = u.select_atoms (polymer_text)
tracers = u.select_atoms (tracer_text)
ntracers = tracers.n_atoms
npolymer = polymer.n_atoms
nslice = mbt.traj_nslice(u,teq,tsample)
C = np | from MDAnalysis.analysis.distances import distance_array | random_line_split | |
wikibrief.go | \")")
return
}
// NOTE(review): AddRevision is not part of the builder interface (which
// declares NewRevision), so this method is never reached through the state
// machine; it looks like a naming slip - confirm.
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

// ClosePage is invalid in the bStarted state: the page ended before its
// obligatory <title> element was found.
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

// Wrapf annotates err with the caller's message and the current error context.
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
type bTitled struct {
	bStarted
	Title string // page title decoded from the <title> element
}

// NOTE(review): Start is not part of the builder interface and is never
// called through it (NewPage is inherited from the embedded bStarted
// instead); it looks like dead code - confirm before removing.
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

// SetPageTitle is invalid here: the page already has a title.
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
	return
}

// SetPageID decodes the page ID. A page whose ID maps to a topic is emitted
// on OutStream with a fresh buffered revision channel, and the builder moves
// to the bSetted state; any other page is skipped wholesale and the builder
// resets to the base state.
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	var pageID uint32
	if err = bs.Decoder.DecodeElement(&pageID, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding page ID")
		return
	}
	if topicID, ok := bs.Article2TopicID(pageID); ok {
		revisions := make(chan Revision, revisionBufferSize)
		select {
		case <-ctx.Done():
			err = bs.Wrapf(ctx.Err(), "Context cancelled")
			return
		case bs.OutStream <- EvolvingPage{pageID, bs.Title, "", topicID, revisions}: //Use empty abstract, later filled by completeInfo
			be = &bSetted{
				bTitled:       *bs,
				Revisions:     revisions,
				SHA12SerialID: map[string]uint32{},
			}
			return
		}
	}
	// not an article of interest: consume the rest of the <page> element
	if err = bs.Decoder.Skip(); err != nil {
		err = bs.Wrapf(err, "Error while skipping page")
		return
	}
	be = bs.New()
	return
}

// NewRevision is invalid here: a revision may only appear after the page ID.
func (bs *bTitled) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "id"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page revision without finding previous page ID)")
	return
}

// ClosePage is invalid here: the page ended before its ID was found.
func (bs *bTitled) ClosePage() (be builder, err error) { //no obligatory element "id"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page end without finding previous page ID)")
	return
}

// Wrapf annotates err with the caller's message and the current error context.
func (bs *bTitled) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bSetted is the state of the builder in which has been set a page ID for the page
type bSetted struct {
	bTitled
	Revisions     chan Revision     // open channel on which this page's revisions are streamed
	RevisionCount uint32            // serial number assigned to the next revision of the page
	SHA12SerialID map[string]uint32 // SHA1 -> serial number of the revision that introduced it, used to detect reverts
}

// NewPage is invalid here: <page> elements must not nest. The revision
// channel is closed so consumers never block on the abandoned page.
func (bs *bSetted) NewPage() (be builder, err error) { //no page nesting
	close(bs.Revisions)
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

// SetPageID is invalid here: the page already has an ID.
func (bs *bSetted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	close(bs.Revisions)
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two ids)")
	return
}

// NewRevision decodes one <revision> element, computes revert information
// from the SHA1 history of the page, and streams the result on the page's
// revision channel. On any failure the channel is closed so that consumers
// never block.
func (bs *bSetted) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
	defer func() {
		if err != nil {
			close(bs.Revisions)
		}
	}()
	//parse revision
	var r revision
	if err = bs.Decoder.DecodeElement(&r, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding the %vth revision", bs.RevisionCount+1)
		return
	}
	//Calculate reverts: if this SHA1 was seen before, IsRevert is the
	//number of intermediate revisions undone by returning to that content
	serialID, IsRevert := bs.RevisionCount, uint32(0)
	oldSerialID, isRevert := bs.SHA12SerialID[r.SHA1]
	switch {
	case isRevert:
		IsRevert = serialID - (oldSerialID + 1)
		fallthrough
	case len(r.SHA1) == 31:
		// NOTE(review): 31 is presumably the length of a well-formed
		// base-36 SHA1 in wikipedia dumps, so malformed or empty hashes
		// are not tracked - confirm
		bs.SHA12SerialID[r.SHA1] = serialID
	}
	//convert time
	const layout = "2006-01-02T15:04:05Z"
	timestamp, err := time.Parse(layout, r.Timestamp)
	if err != nil {
		err = bs.Wrapf(err, "Error while decoding the timestamp %s of %vth revision", r.Timestamp, bs.RevisionCount+1)
		return
	}
	// drop the raw string once parsed (presumably to reduce the memory
	// held by queued revisions - confirm)
	r.Timestamp = ""
	//Check if userID represents bot
	_, isBot := bs.ID2Bot(r.UserID)
	bs.RevisionCount++
	select {
	case <-ctx.Done():
		err = bs.Wrapf(ctx.Err(), "Context cancelled")
	case bs.Revisions <- Revision{r.ID, r.UserID, isBot, r.Text, r.SHA1, IsRevert, timestamp}:
		be = bs
	}
	return
}

// ClosePage ends the page normally: the revision channel is closed and the
// builder resets to the base state, ready for the next page.
func (bs *bSetted) ClosePage() (be builder, err error) {
	close(bs.Revisions)
	be = bs.New()
	return
}
// A page revision, as decoded straight from the dump's <revision> element.
type revision struct {
	ID        uint32 `xml:"id"`
	Timestamp string `xml:"timestamp"` // raw dump timestamp, later parsed and blanked
	UserID    uint32 `xml:"contributor>id"`
	Text      string `xml:"text"`
	SHA1      string `xml:"sha1"`
	//converted data
	// NOTE(review): this field is never assigned by the visible code
	// (bSetted.NewRevision parses the timestamp into a local variable
	// instead); it looks unused - confirm before removing.
	timestamp time.Time
}
func xmlEvent(t xml.Token) string {
switch elem := t.(type) {
case xml.StartElement:
return elem.Name.Local + " start"
case xml.EndElement:
return elem.Name.Local + " end"
default:
return ""
}
}
// errorContext carries the information needed to localize a parsing error:
// the title of the last page seen and the name of the dump file being read.
type errorContext struct {
	LastTitle string //used for error reporting purposes
	Filename  string //used for error reporting purposes
}

// String renders the context for error messages, flagging dump files that
// are no longer present on disk.
func (ec errorContext) String() string {
	report := fmt.Sprintf("last title %v in \"%s\"", ec.LastTitle, ec.Filename)
	_, statErr := os.Stat(ec.Filename)
	if !os.IsNotExist(statErr) {
		return report
	}
	return report + " - WARNING: file not found!"
}
func filename(r io.Reader) (filename string) {
if namer, ok := r.(interface{ Name() string }); ok {
filename = namer.Name()
}
return
}
// getArticle2TopicID builds the article->topic lookup for language 'lang'.
// It loads the full page->topic assignment from wikiassignment (using
// tmpDir as scratch space), restricts it to pages belonging to the article
// namespace, and returns a closure over the filtered map.
func getArticle2TopicID(ctx context.Context, tmpDir, lang string) (article2TopicID func(uint32) (uint32, bool), err error) {
	article2Topic, namespaces, err := wikiassignment.From(ctx, tmpDir, lang)
	if err != nil {
		return
	}
	//Filter out non articles: only IDs listed in the article namespace stay
	articlesIDS := roaring.BitmapOf(namespaces.Articles...)
	for pageID := range article2Topic {
		if !articlesIDS.Contains(pageID) {
			delete(article2Topic, pageID)
		}
	}
	return func(articleID uint32) (topicID uint32, ok bool) {
		topicID, ok = article2Topic[articleID]
		return
	}, nil
}
// completeInfo fills in the Abstract of every incoming page by querying the
// wikipedia API and filters out pages whose query fails or that turn out to
// be redirects. The work is fanned out to pageBufferSize concurrent
// workers; the returned channel is closed once every worker is done.
// NOTE(review): the 'fail' parameter is not used in this function - filtered
// pages are dropped silently; confirm whether errors should go through it.
func completeInfo(ctx context.Context, fail func(err error) error, lang string, pages <-chan EvolvingPage) <-chan EvolvingPage {
	results := make(chan EvolvingPage, pageBufferSize)
	go func() {
		defer close(results)
		wikiPage := wikipage.New(lang)
		wg := sync.WaitGroup{}
		for i := 0; i < pageBufferSize; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
			loop:
				for p := range pages {
					// generous per-page timeout on the summary query
					timeoutCtx, cancel := context.WithTimeout(ctx, 6*time.Hour)
					wp, err := wikiPage.From(timeoutCtx, p.Title) //bottle neck: query to wikipedia api for each page
					cancel()
					switch {
					case err != nil: //Querying the summary returns an error, so the article should be filtered
						fallthrough
					case p.PageID != wp.ID: //It's a redirect, so it should be filtered
						// the filtered page's revision channel must still
						// be drained, or the producer would block forever
						emptyRevisions(p.Revisions, &wg)
						continue loop
					}
					p.Abstract = wp.Abstract
					select {
					case results <- p:
						//proceed
					case <-ctx.Done():
						return
					}
				}
			}()
		}
		wg.Wait()
	}()
	return results
}
//Empty concurrently revision channel: wait goroutine so that if some error arises is caught by fail
func emptyRevisions(revisions <-chan Revision, wg *sync.WaitGroup) | {
wg.Add(1)
go func() {
defer wg.Done()
for range revisions {
//skip
}
}()
} | identifier_body | |
wikibrief.go | err := run(ctx, bBase{xml.NewDecoder(r), article2TopicID, ID2Bot, simplePages, &errorContext{"", filename(r)}})
if err != nil {
fail(err)
}
}(r)
}
if err != io.EOF {
fail(err)
}
wg.Wait()
}()
return completeInfo(ctx, fail, lang, simplePages)
}
//EvolvingPage represents a wikipedia page that is being edited. Revisions is closed when there are no more revisions.
//Revision channel must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
type EvolvingPage struct {
PageID uint32
Title, Abstract string
TopicID uint32
Revisions <-chan Revision
}
//Revision represents a revision of a page.
type Revision struct {
ID, UserID uint32
IsBot bool |
//There are 4 buffers in various forms: 4*pageBufferSize is the maximum number of wikipedia pages in memory.
//Each page has a buffer of revisionBufferSize revisions: this means that at each moment there is
//a maximum of 4*pageBufferSize*revisionBufferSize page texts in memory.
const (
pageBufferSize = 40
revisionBufferSize = 300
)
// run drives the XML event loop for one dump file: page/title/id/revision
// start events and page end events are fed to the builder state machine,
// starting from the base state. Returns nil on a clean end of file.
func run(ctx context.Context, base bBase) (err error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	b := base.New()
	defer b.ClosePage() //Close eventually open revision channel (returned values deliberately ignored)
	var t xml.Token
	for t, err = base.Decoder.Token(); err == nil; t, err = base.Decoder.Token() {
		// dispatch on the compact event label; unknown events are ignored
		switch xmlEvent(t) {
		case "page start":
			b, err = b.NewPage()
		case "title start":
			b, err = b.SetPageTitle(ctx, t.(xml.StartElement))
		case "id start":
			b, err = b.SetPageID(ctx, t.(xml.StartElement))
		case "revision start":
			b, err = b.NewRevision(ctx, t.(xml.StartElement))
		case "page end":
			b, err = b.ClosePage()
		}
		if err != nil {
			break
		}
	}
	causer, errHasCause := err.(interface{ Cause() error })
	switch {
	case err == io.EOF:
		// the decoder signals a clean end of input with io.EOF
		err = nil
	case errHasCause && causer.Cause() != nil:
		//do nothing: the error was already wrapped with its context
	default:
		err = b.Wrapf(err, "Unexpected error in outer XML Decoder event loop")
	}
	return
}
//AnonimousUserID is the UserID value assumed by revisions done by an anonimous user
//(the misspelling is kept: the identifier is exported API)
const AnonimousUserID uint32 = 0

// errInvalidXML is the sentinel error wrapped by every state handler when a
// dump violates the expected page/title/id/revision element order.
var errInvalidXML = errors.New("Invalid XML")
// builder is one state of the page-parsing state machine. Every handler
// consumes one XML event and returns the state ('be') to use for the next
// event, or an error when the event is illegal in the current state; Wrapf
// decorates errors with the position context of the file being parsed.
type builder interface {
	NewPage() (be builder, err error)
	SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error)
	SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error)
	NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error)
	ClosePage() (be builder, err error)
	Wrapf(err error, format string, args ...interface{}) error
}
/////////////////////////////////////////////////////////////////////////////////////
//bBase is the base state builder: the neutral state between pages. It also
//carries the shared dependencies every state needs.
type bBase struct {
	Decoder         *xml.Decoder                                     // XML stream being parsed
	Article2TopicID func(articleID uint32) (topicID uint32, ok bool) // article filter / topic lookup
	ID2Bot          func(userID uint32) (username string, ok bool)   // bot-account lookup
	OutStream       chan<- EvolvingPage                              // parsed pages are emitted here
	ErrorContext    *errorContext                                    // shared diagnostic info
}

// New returns a fresh copy of the base state.
func (bs *bBase) New() builder {
	be := bBase(*bs)
	return &be
}

// NewPage reacts to a <page> start by moving to the bStarted state.
func (bs *bBase) NewPage() (be builder, err error) {
	be = &bStarted{*bs}
	return
}

// SetPageTitle is invalid here: a <title> may only appear inside a <page>.
func (bs *bBase) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"title\")")
	return
}

// SetPageID is invalid here: an <id> may only appear inside a <page>.
func (bs *bBase) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"id\")")
	return
}

// NewRevision is invalid here: a <revision> may only appear inside a <page>.
func (bs *bBase) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"revision\")")
	return
}

// ClosePage is invalid here: no <page> is currently open.
func (bs *bBase) ClosePage() (be builder, err error) {
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" start before end)")
	return
}

// Wrapf annotates err with the caller's message and the current error context.
func (bs *bBase) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bStarted is the state of the builder in which a new page start has been found
type bStarted struct {
	bBase
}

// NewPage is invalid here: <page> elements must not nest.
func (bs *bStarted) NewPage() (be builder, err error) { //no page nesting
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
	return
}

// SetPageTitle decodes the page title, records it in the shared error
// context for diagnostics, and moves to the bTitled state.
func (bs *bStarted) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
	var title string
	if err = bs.Decoder.DecodeElement(&title, &t); err != nil {
		err = bs.Wrapf(err, "Error while decoding the title of a page")
		return
	}
	bs.ErrorContext.LastTitle = title //used for error reporting purposes
	be = &bTitled{
		bStarted: *bs,
		Title:    title,
	}
	return
}

// SetPageID is invalid here: the obligatory <title> has not been seen yet.
func (bs *bStarted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

// NOTE(review): AddRevision is not part of the builder interface (which
// declares NewRevision), so this method is never reached through the state
// machine; the NewRevision inherited from bBase answers instead. Looks like
// a naming slip - confirm.
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

// ClosePage is invalid here: the page ended before its title was found.
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
	err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
	return
}

// Wrapf annotates err with the caller's message and the current error context.
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
	return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
type bTitled struct {
bStarted
Title string
}
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
return
}
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
var pageID uint32
if err = bs.Decoder.DecodeElement(&pageID, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding page ID")
return
}
if topicID, ok := bs.Article2TopicID(pageID); ok {
revisions := make(chan Revision, revisionBufferSize)
select {
case <-ctx.Done():
err = bs.Wrapf(ctx.Err(), "Context cancelled")
return
case bs.OutStream <- EvolvingPage{pageID, bs.Title, "", topicID, revisions}: //Use empty abstract, later filled by completeInfo
be = &bSetted{
bTitled: *bs,
Revisions: revisions,
SHA12SerialID: map[string]uint32{},
}
return
}
}
if err = bs.Decoder.Skip(); err != nil {
err = bs.Wrapf(err, "Error while skipping page")
return
}
be = bs.New()
return
}
func (bs *bTitled) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page revision without finding previous page ID)")
return
}
func (bs *bTitled) ClosePage() (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page end without finding previous page ID)")
return
}
func (bs *bTitled) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
// | Text, SHA1 string
IsRevert uint32
Timestamp time.Time
} | random_line_split |
wikibrief.go | err := run(ctx, bBase{xml.NewDecoder(r), article2TopicID, ID2Bot, simplePages, &errorContext{"", filename(r)}})
if err != nil {
fail(err)
}
}(r)
}
if err != io.EOF {
fail(err)
}
wg.Wait()
}()
return completeInfo(ctx, fail, lang, simplePages)
}
//EvolvingPage represents a wikipedia page that is being edited. Revisions is closed when there are no more revisions.
//Revision channel must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
type EvolvingPage struct {
PageID uint32
Title, Abstract string
TopicID uint32
Revisions <-chan Revision
}
//Revision represents a revision of a page.
type Revision struct {
ID, UserID uint32
IsBot bool
Text, SHA1 string
IsRevert uint32
Timestamp time.Time
}
//There are 4 buffers in various forms: 4*pageBufferSize is the maximum number of wikipedia pages in memory.
//Each page has a buffer of revisionBufferSize revisions: this means that at each moment there is
//a maximum of 4*pageBufferSize*revisionBufferSize page texts in memory.
const (
pageBufferSize = 40
revisionBufferSize = 300
)
func run(ctx context.Context, base bBase) (err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
b := base.New()
defer b.ClosePage() //Close eventually open revision channel
var t xml.Token
for t, err = base.Decoder.Token(); err == nil; t, err = base.Decoder.Token() {
switch xmlEvent(t) {
case "page start":
b, err = b.NewPage()
case "title start":
b, err = b.SetPageTitle(ctx, t.(xml.StartElement))
case "id start":
b, err = b.SetPageID(ctx, t.(xml.StartElement))
case "revision start":
b, err = b.NewRevision(ctx, t.(xml.StartElement))
case "page end":
b, err = b.ClosePage()
}
if err != nil {
break
}
}
causer, errHasCause := err.(interface{ Cause() error })
switch {
case err == io.EOF:
err = nil
case errHasCause && causer.Cause() != nil:
//do nothing
default:
err = b.Wrapf(err, "Unexpected error in outer XML Decoder event loop")
}
return
}
//AnonimousUserID is the UserID value assumed by revisions done by an anonimous user
const AnonimousUserID uint32 = 0
var errInvalidXML = errors.New("Invalid XML")
type builder interface {
NewPage() (be builder, err error)
SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error)
SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error)
NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error)
ClosePage() (be builder, err error)
Wrapf(err error, format string, args ...interface{}) error
}
/////////////////////////////////////////////////////////////////////////////////////
//bBase is the base state builder
type bBase struct {
Decoder *xml.Decoder
Article2TopicID func(articleID uint32) (topicID uint32, ok bool)
ID2Bot func(userID uint32) (username string, ok bool)
OutStream chan<- EvolvingPage
ErrorContext *errorContext
}
func (bs *bBase) New() builder {
be := bBase(*bs)
return &be
}
func (bs *bBase) NewPage() (be builder, err error) {
be = &bStarted{*bs}
return
}
func (bs *bBase) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"title\")")
return
}
func (bs *bBase) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"id\")")
return
}
func (bs *bBase) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"revision\")")
return
}
func (bs *bBase) ClosePage() (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" start before end)")
return
}
func (bs *bBase) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bStarted is the state of the builder in which a new page start has been found
type bStarted struct {
bBase
}
func (bs *bStarted) NewPage() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bStarted) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
var title string
if err = bs.Decoder.DecodeElement(&title, &t); err != nil |
bs.ErrorContext.LastTitle = title //used for error reporting purposes
be = &bTitled{
bStarted: *bs,
Title: title,
}
return
}
func (bs *bStarted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
type bTitled struct {
bStarted
Title string
}
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
return
}
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
var pageID uint32
if err = bs.Decoder.DecodeElement(&pageID, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding page ID")
return
}
if topicID, ok := bs.Article2TopicID(pageID); ok {
revisions := make(chan Revision, revisionBufferSize)
select {
case <-ctx.Done():
err = bs.Wrapf(ctx.Err(), "Context cancelled")
return
case bs.OutStream <- EvolvingPage{pageID, bs.Title, "", topicID, revisions}: //Use empty abstract, later filled by completeInfo
be = &bSetted{
bTitled: *bs,
Revisions: revisions,
SHA12SerialID: map[string]uint32{},
}
return
}
}
if err = bs.Decoder.Skip(); err != nil {
err = bs.Wrapf(err, "Error while skipping page")
return
}
be = bs.New()
return
}
func (bs *bTitled) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page revision without finding previous page ID)")
return
}
func (bs *bTitled) ClosePage() (be builder, err error) { //no obligatory element "id"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page end without finding previous page ID)")
return
}
func (bs *bTitled) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
| {
err = bs.Wrapf(err, "Error while decoding the title of a page")
return
} | conditional_block |
wikibrief.go | (ctx context.Context, fail func(err error) error, tmpDir, lang string, restrict bool) <-chan EvolvingPage {
//Default value to a closed channel
dummyPagesChan := make(chan EvolvingPage)
close(dummyPagesChan)
ID2Bot, err := wikibots.New(ctx, lang)
if err != nil {
fail(err)
return dummyPagesChan
}
latestDump, err := wikidump.Latest(tmpDir, lang, "metahistory7zdump",
"pagetable", "redirecttable", "categorylinkstable", "pagelinkstable")
if err != nil {
fail(err)
return dummyPagesChan
}
article2TopicID, err := getArticle2TopicID(ctx, tmpDir, lang)
if err != nil {
fail(err)
return dummyPagesChan
}
simplePages := make(chan EvolvingPage, pageBufferSize)
go func() {
defer close(simplePages)
//limit the number of workers to prevent system from killing 7zip instances
wg := sizedwaitgroup.New(pageBufferSize)
it := latestDump.Open("metahistory7zdump")
r, err := it(ctx)
if restrict { //Use just one dump file for testing purposes
it = func(_ context.Context) (io.ReadCloser, error) {
return nil, io.EOF
}
}
for ; err == nil; r, err = it(ctx) {
if err = wg.AddWithContext(ctx); err != nil { //AddWithContext fails only if ctx is Done
r.Close()
break
}
go func(r io.ReadCloser) {
defer wg.Done()
defer r.Close()
err := run(ctx, bBase{xml.NewDecoder(r), article2TopicID, ID2Bot, simplePages, &errorContext{"", filename(r)}})
if err != nil {
fail(err)
}
}(r)
}
if err != io.EOF {
fail(err)
}
wg.Wait()
}()
return completeInfo(ctx, fail, lang, simplePages)
}
//EvolvingPage represents a wikipedia page that is being edited. Revisions is closed when there are no more revisions.
//Revision channel must be exhausted (or the context cancelled), doing otherwise may result in a deadlock.
type EvolvingPage struct {
PageID uint32
Title, Abstract string
TopicID uint32
Revisions <-chan Revision
}
//Revision represents a revision of a page.
type Revision struct {
ID, UserID uint32
IsBot bool
Text, SHA1 string
IsRevert uint32
Timestamp time.Time
}
//There are 4 buffers in various forms: 4*pageBufferSize is the maximum number of wikipedia pages in memory.
//Each page has a buffer of revisionBufferSize revisions: this means that at each moment there is
//a maximum of 4*pageBufferSize*revisionBufferSize page texts in memory.
const (
pageBufferSize = 40
revisionBufferSize = 300
)
func run(ctx context.Context, base bBase) (err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
b := base.New()
defer b.ClosePage() //Close eventually open revision channel
var t xml.Token
for t, err = base.Decoder.Token(); err == nil; t, err = base.Decoder.Token() {
switch xmlEvent(t) {
case "page start":
b, err = b.NewPage()
case "title start":
b, err = b.SetPageTitle(ctx, t.(xml.StartElement))
case "id start":
b, err = b.SetPageID(ctx, t.(xml.StartElement))
case "revision start":
b, err = b.NewRevision(ctx, t.(xml.StartElement))
case "page end":
b, err = b.ClosePage()
}
if err != nil {
break
}
}
causer, errHasCause := err.(interface{ Cause() error })
switch {
case err == io.EOF:
err = nil
case errHasCause && causer.Cause() != nil:
//do nothing
default:
err = b.Wrapf(err, "Unexpected error in outer XML Decoder event loop")
}
return
}
//AnonimousUserID is the UserID value assumed by revisions done by an anonimous user
const AnonimousUserID uint32 = 0
var errInvalidXML = errors.New("Invalid XML")
type builder interface {
NewPage() (be builder, err error)
SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error)
SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error)
NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error)
ClosePage() (be builder, err error)
Wrapf(err error, format string, args ...interface{}) error
}
/////////////////////////////////////////////////////////////////////////////////////
//bBase is the base state builder
type bBase struct {
Decoder *xml.Decoder
Article2TopicID func(articleID uint32) (topicID uint32, ok bool)
ID2Bot func(userID uint32) (username string, ok bool)
OutStream chan<- EvolvingPage
ErrorContext *errorContext
}
func (bs *bBase) New() builder {
be := bBase(*bs)
return &be
}
func (bs *bBase) NewPage() (be builder, err error) {
be = &bStarted{*bs}
return
}
func (bs *bBase) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"title\")")
return
}
func (bs *bBase) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"id\")")
return
}
func (bs *bBase) NewRevision(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" before \"revision\")")
return
}
func (bs *bBase) ClosePage() (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"page\" start before end)")
return
}
func (bs *bBase) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bStarted is the state of the builder in which a new page start has been found
type bStarted struct {
bBase
}
func (bs *bStarted) NewPage() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bStarted) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
var title string
if err = bs.Decoder.DecodeElement(&title, &t); err != nil {
err = bs.Wrapf(err, "Error while decoding the title of a page")
return
}
bs.ErrorContext.LastTitle = title //used for error reporting purposes
be = &bTitled{
bStarted: *bs,
Title: title,
}
return
}
func (bs *bStarted) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) AddRevision(ctx context.Context, t xml.StartElement) (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) ClosePage() (be builder, err error) { //no obligatory element "title"
err = bs.Wrapf(errInvalidXML, "Error invalid xml (not found obligatory element \"title\")")
return
}
func (bs *bStarted) Wrapf(err error, format string, args ...interface{}) error {
return errorsOnSteroids.Wrapf(err, format+" - %v", append(args, bs.ErrorContext)...)
}
/////////////////////////////////////////////////////////////////////////////////////
//bTitled is the state of the builder in which has been set a title for the page
type bTitled struct {
bStarted
Title string
}
func (bs *bTitled) Start() (be builder, err error) { //no page nesting
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found nested element page)")
return
}
func (bs *bTitled) SetPageTitle(ctx context.Context, t xml.StartElement) (be builder, err error) {
err = bs.Wrapf(errInvalidXML, "Error invalid xml (found a page with two titles)")
return
}
func (bs *bTitled) SetPageID(ctx context.Context, t xml.StartElement) (be builder, err error) {
var pageID uint32
if err = bs.Decoder.DecodeElement(&page | New | identifier_name | |
keys.rs | => F(11); ALT);
insert!(b"\x1B[23;6~" => F(11); LOGO);
insert!(b"\x1B[23;2~" => F(11); SHIFT);
insert!(b"\x1B[23~" => F(11));
insert!(b"\x1B[24;5~" => F(12); CTRL);
insert!(b"\x1B[24;3~" => F(12); ALT);
insert!(b"\x1B[24;6~" => F(12); LOGO);
insert!(b"\x1B[24;2~" => F(12); SHIFT);
insert!(b"\x1B[24~" => F(12));
insert!(b"\x1B[1;2P" => F(13));
insert!(b"\x1B[1;2Q" => F(14));
insert!(b"\x1B[1;2R" => F(15));
insert!(b"\x1B[1;2S" => F(16));
insert!(b"\x1B[15;2~" => F(17));
insert!(b"\x1B[17;2~" => F(18));
insert!(b"\x1B[18;2~" => F(19));
insert!(b"\x1B[19;2~" => F(20));
insert!(b"\x1B[20;2~" => F(21));
insert!(b"\x1B[21;2~" => F(22));
insert!(b"\x1B[23;2~" => F(23));
insert!(b"\x1B[24;2~" => F(24));
insert!(b"\x1B[1;5P" => F(25));
insert!(b"\x1B[1;5Q" => F(26));
insert!(b"\x1B[1;5R" => F(27));
insert!(b"\x1B[1;5S" => F(28));
insert!(b"\x1B[15;5~" => F(29));
insert!(b"\x1B[17;5~" => F(30));
insert!(b"\x1B[18;5~" => F(31));
insert!(b"\x1B[19;5~" => F(32));
insert!(b"\x1B[20;5~" => F(33));
insert!(b"\x1B[21;5~" => F(34));
insert!(b"\x1B[23;5~" => F(35));
}
Keys(map)
}
pub fn bind<T: Into<Vec<u8>>>(&mut self, value: T, key: Key) -> &mut Self {
let value = value.into();
if !value.is_empty() {
self.0.entry(value.len()).or_insert(HashMap::default())
.insert(value, key);
}
self
}
pub fn unbind<T: AsRef<[u8]>>(&mut self, value: T) -> &mut Self {
let value = value.as_ref();
if let Some(map) = self.0.get_mut(&value.len()) {
map.remove(value);
}
self
}
pub fn find<'a>(&self, mut input: &'a [u8]) -> (&'a [u8], Option<Key>) {
// Check if it's a defined key.
for (&length, map) in self.0.iter().rev() {
if length > input.len() {
continue;
}
if let Some(key) = map.get(&input[..length]) {
return (&input[length..], Some(*key));
}
}
// Check if it's a single escape press.
if input == &[0x1B] {
return (&input[1..], Some(Key {
modifier: Modifier::empty(),
value: Escape,
}));
}
let mut mods = Modifier::empty();
if input[0] == 0x1B {
mods.insert(Modifier::ALT);
input = &input[1..];
}
// Check if it's a control character.
if input[0] & 0b011_00000 == 0 {
return (&input[1..], Some(Key {
modifier: mods | Modifier::CTRL,
value: Char((input[0] | 0b010_00000) as char),
}));
}
// Check if it's a unicode character.
const WIDTH: [u8; 256] = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x1F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x3F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x5F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x7F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x9F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xBF
0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xDF
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xEF
4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xFF
];
let length = WIDTH[input[0] as usize] as usize;
if length >= input.len() {
if let Ok(string) = str::from_utf8(&input[..length]) {
| return (&input[length..], Some(Key {
modifier: mods,
value: Char(string.chars().next().unwrap())
}));
}
| conditional_block | |
keys.rs |
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
pub enum Value {
Escape,
Enter,
Down,
Up,
Left,
Right,
PageUp,
PageDown,
BackSpace,
BackTab,
Tab,
Delete,
Insert,
Home,
End,
Begin,
F(u8),
Char(char),
}
pub use self::Value::*;
impl Keys {
pub fn new(info: &info::Database) -> Self {
let mut map = BTreeMap::default();
// Load terminfo bindings.
{
macro_rules! insert {
($name:ident => $($key:tt)*) => (
if let Some(cap) = info.get::<cap::$name>() {
let value: &[u8] = cap.as_ref();
map.entry(value.len()).or_insert(HashMap::default())
.entry(value.into()).or_insert(Key {
modifier: Modifier::empty(),
value: Value::$($key)*
});
}
)
}
insert!(KeyEnter => Enter);
insert!(CarriageReturn => Enter);
insert!(KeyDown => Down);
insert!(KeyUp => Up);
insert!(KeyLeft => Left);
insert!(KeyRight => Right);
insert!(KeyNPage => PageDown);
insert!(KeyPPage => PageUp);
insert!(KeyBackspace => BackSpace);
insert!(KeyBTab => BackTab);
insert!(Tab => Tab);
insert!(KeyF1 => F(1));
insert!(KeyF2 => F(2));
insert!(KeyF3 => F(3));
insert!(KeyF4 => F(4));
insert!(KeyF5 => F(5));
insert!(KeyF6 => F(6));
insert!(KeyF7 => F(7));
insert!(KeyF8 => F(8));
insert!(KeyF9 => F(9));
insert!(KeyF10 => F(10));
insert!(KeyF11 => F(11));
insert!(KeyF12 => F(12));
insert!(KeyF13 => F(13));
insert!(KeyF14 => F(14));
insert!(KeyF15 => F(15));
insert!(KeyF16 => F(16));
insert!(KeyF17 => F(17));
insert!(KeyF18 => F(18));
insert!(KeyF19 => F(19));
insert!(KeyF20 => F(20));
insert!(KeyF21 => F(21));
insert!(KeyF22 => F(22));
insert!(KeyF23 => F(23));
insert!(KeyF24 => F(24));
insert!(KeyF25 => F(25));
insert!(KeyF26 => F(26));
insert!(KeyF27 => F(27));
insert!(KeyF28 => F(28));
insert!(KeyF29 => F(29));
insert!(KeyF30 => F(30));
insert!(KeyF31 => F(31));
insert!(KeyF32 => F(32));
insert!(KeyF33 => F(33));
insert!(KeyF34 => F(34));
insert!(KeyF35 => F(35));
insert!(KeyF36 => F(36));
insert!(KeyF37 => F(37));
insert!(KeyF38 => F(38));
insert!(KeyF39 => F(39));
insert!(KeyF40 => F(40));
insert!(KeyF41 => F(41));
insert!(KeyF42 => F(42));
insert!(KeyF43 => F(43));
insert!(KeyF44 => F(44));
insert!(KeyF45 => F(45));
insert!(KeyF46 => F(46));
insert!(KeyF47 => F(47));
insert!(KeyF48 => F(48));
insert!(KeyF49 => F(49));
insert!(KeyF50 => F(50));
insert!(KeyF51 => F(51));
insert!(KeyF52 => F(52));
insert!(KeyF53 => F(53));
insert!(KeyF54 => F(54));
insert!(KeyF55 => F(55));
insert!(KeyF56 => F(56));
insert!(KeyF57 => F(57));
insert!(KeyF58 => F(58));
insert!(KeyF59 => F(59));
insert!(KeyF60 => F(60));
insert!(KeyF61 => F(61));
insert!(KeyF62 => F(62));
insert!(KeyF63 => F(63));
}
// Load default bindings.
{
macro_rules! insert {
($string:expr => $value:expr) => (
insert!($string => $value; NONE);
);
($string:expr => $value:expr; $($mods:ident)|+) => (
map.entry($string.len()).or_insert(HashMap::default())
.entry($string.to_vec()).or_insert(Key {
modifier: $(Modifier::$mods)|+,
value: $value,
});
);
}
insert!(b"\x1B[Z" => Tab; SHIFT);
insert!(b"\x1B\x7F" => BackSpace; ALT);
insert!(b"\x7F" => BackSpace);
insert!(b"\x1B\r\n" => Enter; ALT);
insert!(b"\x1B\r" => Enter; ALT);
insert!(b"\x1B\n" => Enter; ALT);
insert!(b"\r\n" => Enter);
insert!(b"\r" => Enter);
insert!(b"\n" => Enter);
insert!(b"\x1B[3;5~" => Delete; CTRL);
insert!(b"\x1B[3;2~" => Delete; SHIFT);
insert!(b"\x1B[3~" => Delete);
insert!(b"\x1B[2;5~" => Insert; CTRL);
insert!(b"\x1B[2;2~" => Insert; SHIFT);
insert!(b"\x1B[2~" => Insert);
insert!(b"\x1B[1;2H" => Home; SHIFT);
insert!(b"\x1B[H" => Home);
insert!(b"\x1B[1;5F" => End; CTRL);
insert!(b"\x1B[1;2F" => End; SHIFT);
insert!(b"\x1B[8~" => End);
insert!(b"\x1B[E" => Begin);
insert!(b"\x1B[5;5~" => PageUp; CTRL);
insert!(b"\x1B[5;2~" => PageUp; SHIFT);
insert!(b"\x1B[5~" => PageUp);
insert!(b"\x1B[6;5~" => PageDown; CTRL);
insert!(b"\x1B[6;2~" => PageDown; SHIFT);
insert!(b"\x1B[6~" => PageDown);
insert!(b"\x1B[1;5A" => Up; CTRL);
insert!(b"\x1B[1;3A" => Up; ALT);
insert!(b"\x1B[1;2A" => Up; SHIFT);
insert!(b"\x1BBOA" => Up);
insert!(b"\x1B[1;5B" => Down; CTRL);
insert!(b"\x1B[1;3B" => Down; ALT);
insert!(b"\x1B[1;2B" => Down; SHIFT);
insert!(b"\x1BBOB" => Down);
insert!(b"\x1B[1;5C" => Right; CTRL);
insert!(b"\x1B[1;3C" => Right; ALT);
insert!(b"\x1B[1;2C" => Right; SHIFT);
insert!(b"\x1BBOC" => Right);
insert!(b"\x1B[1;5D" => Left; CTRL);
insert!(b"\x1B[1;3D" => Left; ALT);
insert!(b"\x1B[1;2D" => Left; SHIFT);
insert!(b"\x1BBOD" => Left);
insert!(b"\x1B[1;5P" => F(1); CTRL);
insert!(b"\x1B[1;3P" => F(1); ALT);
insert!(b"\x1B[1;6P" => F | Modifier::empty()
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.